chauhoang committed · verified
Commit 261cabb · 1 Parent(s): 8d26d0d

Training in progress, step 1, checkpoint

last-checkpoint/adapter_config.json CHANGED
@@ -20,13 +20,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "o_proj",
     "up_proj",
-    "gate_proj",
     "k_proj",
+    "gate_proj",
     "down_proj",
-    "v_proj",
-    "q_proj"
+    "q_proj",
+    "o_proj",
+    "v_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:292eadaa61de5dd99e6bf9ec43a5dde3d7c676fce4bff127477199d42e858834
+oid sha256:6ef85cd79f7d6a1c7af7c3b5d2da59f4d707d0c6fd2f556b38c62de65f8ba9cf
 size 83945296
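
The checkpoint binaries in this commit are stored as Git LFS pointers (version, oid, size). A small sketch, assuming the LFS objects have been pulled into a local clone, of checking a file against its pointer metadata; the path and values below mirror the new adapter_model.safetensors pointer.

```python
import hashlib
from pathlib import Path

# Values copied from the updated LFS pointer above; path assumes a local clone.
path = Path("last-checkpoint/adapter_model.safetensors")
expected_oid = "6ef85cd79f7d6a1c7af7c3b5d2da59f4d707d0c6fd2f556b38c62de65f8ba9cf"
expected_size = 83945296

data = path.read_bytes()
assert len(data) == expected_size, "size does not match the LFS pointer"
assert hashlib.sha256(data).hexdigest() == expected_oid, "oid does not match the LFS pointer"
print("adapter_model.safetensors matches its LFS pointer")
```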
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:437e45006286a072af82af68bd745809fe0be8546e9680fc08dc720540a85606
+oid sha256:2712ec3feac87df3ec27ccf2e2d9420ece6f66f240fc1b384c2478b37ac4eb98
 size 43122580
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9c591c3b0b5aaa3bace339eb4070c47d0383ae432026f20b06621bbff93668b9
+oid sha256:ce18d2d551de9284109c0e2530e5f04b91a8696554c7740e97ea2b7bdd6d1090
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b1df0528620c07325b8faa7567e59b0c1e86a1f1ee6af1245a69c6c0463fe4e2
+oid sha256:ae751897b8e87ff08962a91d1d3485984775a96aa89e29a1caac3d6f449228f7
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.1949317738791423,
-  "eval_steps": 10,
-  "global_step": 50,
+  "epoch": 0.003898635477582846,
+  "eval_steps": 1,
+  "global_step": 1,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -11,127 +11,17 @@
     {
       "epoch": 0.003898635477582846,
       "eval_loss": 0.8528332114219666,
-      "eval_runtime": 12.8176,
-      "eval_samples_per_second": 8.426,
-      "eval_steps_per_second": 4.213,
+      "eval_runtime": 13.0599,
+      "eval_samples_per_second": 8.27,
+      "eval_steps_per_second": 4.135,
       "step": 1
-    },
-    {
-      "epoch": 0.01949317738791423,
-      "grad_norm": 4.119136333465576,
-      "learning_rate": 5e-05,
-      "loss": 3.3918,
-      "step": 5
-    },
-    {
-      "epoch": 0.03898635477582846,
-      "grad_norm": 3.6297383308410645,
-      "learning_rate": 0.0001,
-      "loss": 3.1245,
-      "step": 10
-    },
-    {
-      "epoch": 0.03898635477582846,
-      "eval_loss": 0.6827734708786011,
-      "eval_runtime": 13.0392,
-      "eval_samples_per_second": 8.283,
-      "eval_steps_per_second": 4.141,
-      "step": 10
-    },
-    {
-      "epoch": 0.05847953216374269,
-      "grad_norm": 4.023770809173584,
-      "learning_rate": 9.619397662556435e-05,
-      "loss": 2.5966,
-      "step": 15
-    },
-    {
-      "epoch": 0.07797270955165692,
-      "grad_norm": 3.661518096923828,
-      "learning_rate": 8.535533905932738e-05,
-      "loss": 2.4105,
-      "step": 20
-    },
-    {
-      "epoch": 0.07797270955165692,
-      "eval_loss": 0.6242299675941467,
-      "eval_runtime": 13.0453,
-      "eval_samples_per_second": 8.279,
-      "eval_steps_per_second": 4.139,
-      "step": 20
-    },
-    {
-      "epoch": 0.09746588693957114,
-      "grad_norm": 3.441826581954956,
-      "learning_rate": 6.91341716182545e-05,
-      "loss": 2.5813,
-      "step": 25
-    },
-    {
-      "epoch": 0.11695906432748537,
-      "grad_norm": 3.5918526649475098,
-      "learning_rate": 5e-05,
-      "loss": 2.5341,
-      "step": 30
-    },
-    {
-      "epoch": 0.11695906432748537,
-      "eval_loss": 0.6086655855178833,
-      "eval_runtime": 13.1265,
-      "eval_samples_per_second": 8.228,
-      "eval_steps_per_second": 4.114,
-      "step": 30
-    },
-    {
-      "epoch": 0.1364522417153996,
-      "grad_norm": 3.5770630836486816,
-      "learning_rate": 3.086582838174551e-05,
-      "loss": 2.5353,
-      "step": 35
-    },
-    {
-      "epoch": 0.15594541910331383,
-      "grad_norm": 3.4191396236419678,
-      "learning_rate": 1.4644660940672627e-05,
-      "loss": 2.3267,
-      "step": 40
-    },
-    {
-      "epoch": 0.15594541910331383,
-      "eval_loss": 0.6020417809486389,
-      "eval_runtime": 13.2295,
-      "eval_samples_per_second": 8.164,
-      "eval_steps_per_second": 4.082,
-      "step": 40
-    },
-    {
-      "epoch": 0.17543859649122806,
-      "grad_norm": 3.3189620971679688,
-      "learning_rate": 3.8060233744356633e-06,
-      "loss": 2.351,
-      "step": 45
-    },
-    {
-      "epoch": 0.1949317738791423,
-      "grad_norm": 3.7775397300720215,
-      "learning_rate": 0.0,
-      "loss": 2.543,
-      "step": 50
-    },
-    {
-      "epoch": 0.1949317738791423,
-      "eval_loss": 0.601226806640625,
-      "eval_runtime": 13.2095,
-      "eval_samples_per_second": 8.176,
-      "eval_steps_per_second": 4.088,
-      "step": 50
     }
   ],
   "logging_steps": 5,
-  "max_steps": 50,
+  "max_steps": 1,
   "num_input_tokens_seen": 0,
   "num_train_epochs": 1,
-  "save_steps": 13,
+  "save_steps": 1,
   "stateful_callbacks": {
     "TrainerControl": {
       "args": {
@@ -144,7 +34,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.7439084402180096e+16,
+  "total_flos": 350534359842816.0,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:68f2d8d172f297e9444c68fa0d9a882ea3108657ab700be8fcd9ce3587988d09
+oid sha256:ede6b6fdee903a3074da3a171c93ab5343e1258adcee4206296f452294c79e95
 size 6776
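
training_args.bin is a torch-pickled TrainingArguments object behind an LFS pointer. A short sketch of inspecting it after pulling the LFS object; torch.load needs weights_only=False to unpickle arbitrary objects, so only do this for checkpoints you trust.

```python
import torch

# Unpickles a TrainingArguments instance; the transformers package must be
# installed so the class can be resolved during unpickling.
args = torch.load("last-checkpoint/training_args.bin", weights_only=False)
print(args.logging_steps)                # 5, matching trainer_state.json above
print(args.per_device_train_batch_size)  # should line up with "train_batch_size": 2
```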