Sliden committed
Commit ebcf144 · 1 Parent(s): dbb8c70

Upload folder using huggingface_hub

Files changed (45)
  1. README.md +21 -0
  2. adapter_config.json +26 -0
  3. adapter_model.bin +3 -0
  4. checkpoint-117/README.md +21 -0
  5. checkpoint-117/adapter_config.json +26 -0
  6. checkpoint-117/adapter_model.bin +3 -0
  7. checkpoint-117/adapter_model/README.md +21 -0
  8. checkpoint-117/adapter_model/adapter_config.json +26 -0
  9. checkpoint-117/adapter_model/adapter_model.bin +3 -0
  10. checkpoint-117/optimizer.pt +3 -0
  11. checkpoint-117/rng_state.pth +3 -0
  12. checkpoint-117/scheduler.pt +3 -0
  13. checkpoint-117/trainer_state.json +737 -0
  14. checkpoint-117/training_args.bin +3 -0
  15. checkpoint-174/README.md +21 -0
  16. checkpoint-174/adapter_config.json +26 -0
  17. checkpoint-174/adapter_model.bin +3 -0
  18. checkpoint-174/adapter_model/README.md +21 -0
  19. checkpoint-174/adapter_model/adapter_config.json +26 -0
  20. checkpoint-174/adapter_model/adapter_model.bin +3 -0
  21. checkpoint-174/optimizer.pt +3 -0
  22. checkpoint-174/rng_state.pth +3 -0
  23. checkpoint-174/scheduler.pt +3 -0
  24. checkpoint-174/trainer_state.json +1087 -0
  25. checkpoint-174/training_args.bin +3 -0
  26. checkpoint-58/README.md +21 -0
  27. checkpoint-58/adapter_config.json +26 -0
  28. checkpoint-58/adapter_model.bin +3 -0
  29. checkpoint-58/adapter_model/README.md +21 -0
  30. checkpoint-58/adapter_model/adapter_config.json +26 -0
  31. checkpoint-58/adapter_model/adapter_model.bin +3 -0
  32. checkpoint-58/optimizer.pt +3 -0
  33. checkpoint-58/rng_state.pth +3 -0
  34. checkpoint-58/scheduler.pt +3 -0
  35. checkpoint-58/trainer_state.json +375 -0
  36. checkpoint-58/training_args.bin +3 -0
  37. merged/config.json +26 -0
  38. merged/generation_config.json +7 -0
  39. merged/pytorch_model.bin +3 -0
  40. merged/special_tokens_map.json +6 -0
  41. merged/tokenizer.model +3 -0
  42. merged/tokenizer_config.json +36 -0
  43. special_tokens_map.json +6 -0
  44. tokenizer.model +3 -0
  45. tokenizer_config.json +36 -0
README.md ADDED
@@ -0,0 +1,21 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: True
+ - load_in_4bit: False
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+ ### Framework versions
+
+
+ - PEFT 0.5.0.dev0
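
The README lists the quantization settings as plain key/value pairs. As a minimal sketch (not shipped with this repo), the same settings could be expressed with `transformers`' `BitsAndBytesConfig`; the surrounding load code and library versions are assumptions, only the values come from the README above:

```python
# Sketch: reconstruct the README's bitsandbytes settings (assumed usage, not from this repo).
import torch
from transformers import BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_8bit=True,                       # load_in_8bit: True
    load_in_4bit=False,                      # load_in_4bit: False
    llm_int8_threshold=6.0,                  # llm_int8_threshold: 6.0
    llm_int8_skip_modules=None,              # llm_int8_skip_modules: None
    llm_int8_enable_fp32_cpu_offload=False,  # llm_int8_enable_fp32_cpu_offload: False
    llm_int8_has_fp16_weight=False,          # llm_int8_has_fp16_weight: False
    bnb_4bit_quant_type="fp4",               # bnb_4bit_quant_type: fp4 (inactive in 8-bit mode)
    bnb_4bit_use_double_quant=False,         # bnb_4bit_use_double_quant: False
    bnb_4bit_compute_dtype=torch.float32,    # bnb_4bit_compute_dtype: float32
)
```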
adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+ "auto_mapping": null,
+ "base_model_name_or_path": "openlm-research/open_llama_3b",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16,
+ "lora_dropout": 0.0,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "gate_proj",
+ "down_proj",
+ "up_proj",
+ "q_proj",
+ "v_proj",
+ "k_proj",
+ "o_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
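
This `adapter_config.json` describes a rank-8 LoRA over all attention and MLP projections of `openlm-research/open_llama_3b`. A minimal loading sketch, assuming the root adapter files are downloaded to a local `./adapter` directory (the path is hypothetical, not part of this commit):

```python
# Sketch: attach this repo's LoRA adapter to the base model it names.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("openlm-research/open_llama_3b")
tokenizer = AutoTokenizer.from_pretrained("openlm-research/open_llama_3b")

# Reads adapter_config.json and adapter_model.bin from the directory.
model = PeftModel.from_pretrained(base, "./adapter")

# Optional: fold the LoRA weights into the base model, which appears to be
# what the repo's merged/ folder already contains.
model = model.merge_and_unload()
```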
adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dbe5aa95043aa532cd69c2de2275ff0da626799b146f8b1fc0d9a27104e97ae1
+ size 50982397
checkpoint-117/README.md ADDED
@@ -0,0 +1,21 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: True
+ - load_in_4bit: False
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+ ### Framework versions
+
+
+ - PEFT 0.5.0.dev0
checkpoint-117/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+ "auto_mapping": null,
+ "base_model_name_or_path": "openlm-research/open_llama_3b",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16,
+ "lora_dropout": 0.0,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "gate_proj",
+ "down_proj",
+ "up_proj",
+ "q_proj",
+ "v_proj",
+ "k_proj",
+ "o_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
checkpoint-117/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e498eeab1531f405933578e0969b54f08ea3f427d4f392974f0dfdb55d47fc52
+ size 50982397
checkpoint-117/adapter_model/README.md ADDED
@@ -0,0 +1,21 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: True
+ - load_in_4bit: False
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+ ### Framework versions
+
+
+ - PEFT 0.5.0.dev0
checkpoint-117/adapter_model/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+ "auto_mapping": null,
+ "base_model_name_or_path": "openlm-research/open_llama_3b",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16,
+ "lora_dropout": 0.0,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "gate_proj",
+ "down_proj",
+ "up_proj",
+ "q_proj",
+ "v_proj",
+ "k_proj",
+ "o_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
checkpoint-117/adapter_model/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e498eeab1531f405933578e0969b54f08ea3f427d4f392974f0dfdb55d47fc52
+ size 50982397
checkpoint-117/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e9e3c392d3c1811b6366e03455bbd7c4a67ebee3df2febdeab2254e191fee92f
+ size 25871055
checkpoint-117/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7508d4b8dd267de5cc58e972da25236687927651336a28f292c92f7f23951475
+ size 14575
checkpoint-117/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a7700418cf1a4cd51a29cd186ae503e313a55b0836cdcdd770aaf67afad70dc8
+ size 627
checkpoint-117/trainer_state.json ADDED
@@ -0,0 +1,737 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.9914893617021276,
+ "eval_steps": 50,
+ "global_step": 117,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.02,
+ "learning_rate": 2e-05,
+ "loss": 1.6896,
+ "step": 1
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 4e-05,
+ "loss": 1.632,
+ "step": 2
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 6e-05,
+ "loss": 1.7197,
+ "step": 3
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 8e-05,
+ "loss": 1.6988,
+ "step": 4
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 0.0001,
+ "loss": 1.4945,
+ "step": 5
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 0.00012,
+ "loss": 1.4763,
+ "step": 6
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 0.00014,
+ "loss": 1.4645,
+ "step": 7
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 0.00016,
+ "loss": 1.1569,
+ "step": 8
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 0.00018,
+ "loss": 1.4501,
+ "step": 9
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 0.0002,
+ "loss": 1.1972,
+ "step": 10
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 0.00019998209226697376,
+ "loss": 1.1479,
+ "step": 11
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 0.00019992837548163316,
+ "loss": 1.1791,
+ "step": 12
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 0.00019983886888289514,
+ "loss": 1.1471,
+ "step": 13
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 0.00019971360452796522,
+ "loss": 0.9662,
+ "step": 14
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 0.0001995526272808559,
+ "loss": 1.2166,
+ "step": 15
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 0.0001993559947963185,
+ "loss": 1.1023,
+ "step": 16
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 0.00019912377749919374,
+ "loss": 0.9719,
+ "step": 17
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 0.00019885605855918885,
+ "loss": 0.9122,
+ "step": 18
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 0.00019855293386108992,
+ "loss": 0.8808,
+ "step": 19
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 0.00019821451197042026,
+ "loss": 1.0081,
+ "step": 20
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 0.00019784091409455728,
+ "loss": 1.0147,
+ "step": 21
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 0.00019743227403932134,
+ "loss": 0.8792,
+ "step": 22
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 0.00019698873816105273,
+ "loss": 0.8762,
+ "step": 23
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 0.00019651046531419332,
+ "loss": 0.7916,
+ "step": 24
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 0.0001959976267943923,
+ "loss": 0.8967,
+ "step": 25
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 0.0001954504062771555,
+ "loss": 1.0575,
+ "step": 26
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 0.00019486899975206166,
+ "loss": 0.9374,
+ "step": 27
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 0.00019425361545256727,
+ "loss": 0.8697,
+ "step": 28
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 0.00019360447378142728,
+ "loss": 0.788,
+ "step": 29
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 0.00019292180723175654,
+ "loss": 0.8215,
+ "step": 30
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 0.00019220586030376134,
+ "loss": 0.8943,
+ "step": 31
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 0.00019145688941717075,
+ "loss": 0.7825,
+ "step": 32
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 0.00019067516281939825,
+ "loss": 0.9878,
+ "step": 33
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 0.00018986096048946824,
+ "loss": 0.8517,
+ "step": 34
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 0.00018901457403773967,
+ "loss": 0.8132,
+ "step": 35
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 0.00018813630660146488,
+ "loss": 0.9143,
+ "step": 36
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 0.0001872264727362194,
+ "loss": 0.8659,
+ "step": 37
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 0.00018628539830324229,
+ "loss": 0.7977,
+ "step": 38
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 0.00018531342035272766,
+ "loss": 0.8188,
+ "step": 39
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 0.00018431088700310844,
+ "loss": 0.871,
+ "step": 40
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 0.00018327815731637612,
+ "loss": 0.6939,
+ "step": 41
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 0.00018221560116948103,
+ "loss": 0.8147,
+ "step": 42
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 0.00018112359912185924,
+ "loss": 0.8898,
+ "step": 43
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 0.00018000254227913348,
+ "loss": 0.8074,
+ "step": 44
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 0.0001788528321530366,
+ "loss": 0.8351,
+ "step": 45
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 0.00017767488051760857,
+ "loss": 0.8811,
+ "step": 46
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 0.00017646910926171747,
+ "loss": 0.792,
+ "step": 47
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 0.00017523595023795813,
+ "loss": 0.8558,
+ "step": 48
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 0.0001739758451079821,
+ "loss": 0.8308,
+ "step": 49
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 0.00017268924518431438,
+ "loss": 0.8504,
+ "step": 50
+ },
+ {
+ "epoch": 0.85,
+ "eval_loss": 0.8740054368972778,
+ "eval_runtime": 0.8959,
+ "eval_samples_per_second": 22.324,
+ "eval_steps_per_second": 5.581,
+ "step": 50
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 0.0001713766112687139,
+ "loss": 0.83,
+ "step": 51
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 0.0001700384134871351,
+ "loss": 0.7499,
+ "step": 52
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 0.00016867513112135013,
+ "loss": 0.8685,
+ "step": 53
+ },
+ {
+ "epoch": 0.92,
+ "learning_rate": 0.0001672872524372919,
+ "loss": 0.807,
+ "step": 54
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 0.00016587527451017938,
+ "loss": 0.8627,
+ "step": 55
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 0.0001644397030464877,
+ "loss": 0.7606,
+ "step": 56
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 0.00016298105220282713,
+ "loss": 0.8284,
+ "step": 57
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 0.00016149984440179537,
+ "loss": 0.8585,
+ "step": 58
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 0.00015999661014486956,
+ "loss": 0.8819,
+ "step": 59
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 0.0001584718878224047,
+ "loss": 0.6723,
+ "step": 60
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 0.00015692622352080662,
+ "loss": 0.7435,
+ "step": 61
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 0.00015536017082694846,
+ "loss": 0.7002,
+ "step": 62
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 0.00015377429062990122,
+ "loss": 0.7433,
+ "step": 63
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 0.00015216915092004847,
+ "loss": 0.7356,
+ "step": 64
+ },
+ {
+ "epoch": 1.11,
+ "learning_rate": 0.0001505453265856581,
+ "loss": 0.7616,
+ "step": 65
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 0.00014890339920698334,
+ "loss": 0.7192,
+ "step": 66
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 0.0001472439568479671,
+ "loss": 0.7354,
+ "step": 67
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 0.00014556759384562416,
+ "loss": 0.7623,
+ "step": 68
+ },
+ {
+ "epoch": 1.17,
+ "learning_rate": 0.00014387491059717652,
+ "loss": 0.724,
+ "step": 69
+ },
+ {
+ "epoch": 1.19,
+ "learning_rate": 0.0001421665133450184,
+ "loss": 0.8136,
+ "step": 70
+ },
+ {
+ "epoch": 1.21,
+ "learning_rate": 0.0001404430139595877,
+ "loss": 0.8083,
+ "step": 71
+ },
+ {
+ "epoch": 1.23,
+ "learning_rate": 0.00013870502972022173,
+ "loss": 0.7054,
+ "step": 72
+ },
+ {
+ "epoch": 1.24,
+ "learning_rate": 0.0001369531830940757,
+ "loss": 0.7301,
+ "step": 73
+ },
+ {
+ "epoch": 1.26,
+ "learning_rate": 0.0001351881015131833,
+ "loss": 0.6925,
+ "step": 74
+ },
+ {
+ "epoch": 1.28,
+ "learning_rate": 0.000133410417149739,
+ "loss": 0.7191,
+ "step": 75
+ },
+ {
+ "epoch": 1.29,
+ "learning_rate": 0.0001316207666896824,
+ "loss": 0.6814,
+ "step": 76
+ },
+ {
+ "epoch": 1.31,
+ "learning_rate": 0.00012981979110466654,
+ "loss": 0.6916,
+ "step": 77
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 0.00012800813542249072,
+ "loss": 0.5781,
+ "step": 78
+ },
+ {
+ "epoch": 1.34,
+ "learning_rate": 0.0001261864484960807,
+ "loss": 0.8074,
+ "step": 79
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 0.0001243553827710992,
+ "loss": 0.6563,
+ "step": 80
+ },
+ {
+ "epoch": 1.38,
+ "learning_rate": 0.00012251559405226941,
+ "loss": 0.6436,
+ "step": 81
+ },
+ {
+ "epoch": 1.4,
+ "learning_rate": 0.00012066774126849529,
+ "loss": 0.7692,
+ "step": 82
+ },
+ {
+ "epoch": 1.41,
+ "learning_rate": 0.00011881248623686338,
+ "loss": 0.6265,
+ "step": 83
+ },
+ {
+ "epoch": 1.43,
+ "learning_rate": 0.00011695049342560968,
+ "loss": 0.5772,
+ "step": 84
+ },
+ {
+ "epoch": 1.45,
+ "learning_rate": 0.00011508242971613741,
+ "loss": 0.6841,
+ "step": 85
+ },
+ {
+ "epoch": 1.46,
+ "learning_rate": 0.00011320896416417026,
+ "loss": 0.7439,
+ "step": 86
+ },
+ {
+ "epoch": 1.48,
+ "learning_rate": 0.000111330767760127,
+ "loss": 0.7303,
+ "step": 87
+ },
+ {
+ "epoch": 1.5,
+ "learning_rate": 0.00010944851318880314,
+ "loss": 0.7767,
+ "step": 88
+ },
+ {
+ "epoch": 1.51,
+ "learning_rate": 0.00010756287458844569,
+ "loss": 0.6361,
+ "step": 89
+ },
+ {
+ "epoch": 1.53,
+ "learning_rate": 0.00010567452730930743,
+ "loss": 0.6535,
+ "step": 90
+ },
+ {
+ "epoch": 1.55,
+ "learning_rate": 0.00010378414767176705,
+ "loss": 0.7178,
+ "step": 91
+ },
+ {
+ "epoch": 1.57,
+ "learning_rate": 0.0001018924127241019,
+ "loss": 0.6977,
+ "step": 92
+ },
+ {
+ "epoch": 1.58,
+ "learning_rate": 0.0001,
+ "loss": 0.704,
+ "step": 93
+ },
+ {
+ "epoch": 1.6,
+ "learning_rate": 9.810758727589813e-05,
+ "loss": 0.8041,
+ "step": 94
+ },
+ {
+ "epoch": 1.62,
+ "learning_rate": 9.621585232823298e-05,
+ "loss": 0.7026,
+ "step": 95
+ },
+ {
+ "epoch": 1.63,
+ "learning_rate": 9.432547269069261e-05,
+ "loss": 0.6892,
+ "step": 96
+ },
+ {
+ "epoch": 1.65,
+ "learning_rate": 9.243712541155436e-05,
+ "loss": 0.7073,
+ "step": 97
+ },
+ {
+ "epoch": 1.67,
+ "learning_rate": 9.055148681119688e-05,
+ "loss": 0.7765,
+ "step": 98
+ },
+ {
+ "epoch": 1.69,
+ "learning_rate": 8.866923223987302e-05,
+ "loss": 0.6896,
+ "step": 99
+ },
+ {
+ "epoch": 1.7,
+ "learning_rate": 8.679103583582979e-05,
+ "loss": 0.6527,
+ "step": 100
+ },
+ {
+ "epoch": 1.7,
+ "eval_loss": 0.8596932291984558,
+ "eval_runtime": 0.897,
+ "eval_samples_per_second": 22.297,
+ "eval_steps_per_second": 5.574,
+ "step": 100
+ },
+ {
+ "epoch": 1.72,
+ "learning_rate": 8.491757028386263e-05,
+ "loss": 0.6335,
+ "step": 101
+ },
+ {
+ "epoch": 1.74,
+ "learning_rate": 8.304950657439033e-05,
+ "loss": 0.6348,
+ "step": 102
+ },
+ {
+ "epoch": 1.75,
+ "learning_rate": 8.118751376313664e-05,
+ "loss": 0.6983,
+ "step": 103
+ },
+ {
+ "epoch": 1.77,
+ "learning_rate": 7.93322587315047e-05,
+ "loss": 0.7995,
+ "step": 104
+ },
+ {
+ "epoch": 1.79,
+ "learning_rate": 7.74844059477306e-05,
+ "loss": 0.654,
+ "step": 105
+ },
+ {
+ "epoch": 1.8,
+ "learning_rate": 7.564461722890081e-05,
+ "loss": 0.6507,
+ "step": 106
+ },
+ {
+ "epoch": 1.82,
+ "learning_rate": 7.381355150391933e-05,
+ "loss": 0.6623,
+ "step": 107
+ },
+ {
+ "epoch": 1.84,
+ "learning_rate": 7.19918645775093e-05,
+ "loss": 0.7539,
+ "step": 108
+ },
+ {
+ "epoch": 1.86,
+ "learning_rate": 7.018020889533348e-05,
+ "loss": 0.755,
+ "step": 109
+ },
+ {
+ "epoch": 1.87,
+ "learning_rate": 6.83792333103176e-05,
+ "loss": 0.6394,
+ "step": 110
+ },
+ {
+ "epoch": 1.89,
+ "learning_rate": 6.658958285026102e-05,
+ "loss": 0.7545,
+ "step": 111
+ },
+ {
+ "epoch": 1.91,
+ "learning_rate": 6.48118984868167e-05,
+ "loss": 0.7215,
+ "step": 112
+ },
+ {
+ "epoch": 1.92,
+ "learning_rate": 6.304681690592431e-05,
+ "loss": 0.581,
+ "step": 113
+ },
+ {
+ "epoch": 1.94,
+ "learning_rate": 6.129497027977829e-05,
+ "loss": 0.673,
+ "step": 114
+ },
+ {
+ "epoch": 1.96,
+ "learning_rate": 5.955698604041231e-05,
+ "loss": 0.6878,
+ "step": 115
+ },
+ {
+ "epoch": 1.97,
+ "learning_rate": 5.7833486654981606e-05,
+ "loss": 0.738,
+ "step": 116
+ },
+ {
+ "epoch": 1.99,
+ "learning_rate": 5.6125089402823485e-05,
+ "loss": 0.812,
+ "step": 117
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 174,
+ "num_train_epochs": 3,
+ "save_steps": 500,
+ "total_flos": 5053549897973760.0,
+ "trial_name": null,
+ "trial_params": null
+ }
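
The `log_history` array above holds one entry per logged step plus periodic eval entries. A minimal sketch for inspecting the loss curve from a locally downloaded copy of this file (the local path is an assumption):

```python
# Sketch: extract the training-loss series from trainer_state.json.
import json

with open("checkpoint-117/trainer_state.json") as f:
    state = json.load(f)

# Eval entries carry "eval_loss" rather than "loss", so this keeps train steps only.
for entry in state["log_history"]:
    if "loss" in entry:
        print(entry["step"], entry["loss"])
```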
checkpoint-117/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:559e05578fbf1f6e78fc368cebf479663b2065af16b04f27bd99e7e2b2e8f04f
+ size 4155
checkpoint-174/README.md ADDED
@@ -0,0 +1,21 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: True
+ - load_in_4bit: False
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+ ### Framework versions
+
+
+ - PEFT 0.5.0.dev0
checkpoint-174/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+ "auto_mapping": null,
+ "base_model_name_or_path": "openlm-research/open_llama_3b",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16,
+ "lora_dropout": 0.0,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "gate_proj",
+ "down_proj",
+ "up_proj",
+ "q_proj",
+ "v_proj",
+ "k_proj",
+ "o_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
checkpoint-174/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dbe5aa95043aa532cd69c2de2275ff0da626799b146f8b1fc0d9a27104e97ae1
+ size 50982397
checkpoint-174/adapter_model/README.md ADDED
@@ -0,0 +1,21 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: True
+ - load_in_4bit: False
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+ ### Framework versions
+
+
+ - PEFT 0.5.0.dev0
checkpoint-174/adapter_model/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+ "auto_mapping": null,
+ "base_model_name_or_path": "openlm-research/open_llama_3b",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16,
+ "lora_dropout": 0.0,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "gate_proj",
+ "down_proj",
+ "up_proj",
+ "q_proj",
+ "v_proj",
+ "k_proj",
+ "o_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
checkpoint-174/adapter_model/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dbe5aa95043aa532cd69c2de2275ff0da626799b146f8b1fc0d9a27104e97ae1
+ size 50982397
checkpoint-174/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6f7882c325d071ed5373d6f2f92186cb51ad9a34bd205cbe3b4bc515a50b671f
+ size 25871055
checkpoint-174/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6dd3a816ab8628e6038ecf426e93a907752049203fbc39b63fcde557182a866f
+ size 14575
checkpoint-174/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dd8538969d1b6e3c157459906d597cb3ca4c31e75dc24b6f36711f2acf24b57c
+ size 627
checkpoint-174/trainer_state.json ADDED
@@ -0,0 +1,1087 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 2.9617021276595743,
+ "eval_steps": 50,
+ "global_step": 174,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.02,
+ "learning_rate": 2e-05,
+ "loss": 1.6896,
+ "step": 1
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 4e-05,
+ "loss": 1.632,
+ "step": 2
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 6e-05,
+ "loss": 1.7197,
+ "step": 3
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 8e-05,
+ "loss": 1.6988,
+ "step": 4
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 0.0001,
+ "loss": 1.4945,
+ "step": 5
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 0.00012,
+ "loss": 1.4763,
+ "step": 6
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 0.00014,
+ "loss": 1.4645,
+ "step": 7
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 0.00016,
+ "loss": 1.1569,
+ "step": 8
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 0.00018,
+ "loss": 1.4501,
+ "step": 9
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 0.0002,
+ "loss": 1.1972,
+ "step": 10
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 0.00019998209226697376,
+ "loss": 1.1479,
+ "step": 11
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 0.00019992837548163316,
+ "loss": 1.1791,
+ "step": 12
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 0.00019983886888289514,
+ "loss": 1.1471,
+ "step": 13
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 0.00019971360452796522,
+ "loss": 0.9662,
+ "step": 14
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 0.0001995526272808559,
+ "loss": 1.2166,
+ "step": 15
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 0.0001993559947963185,
+ "loss": 1.1023,
+ "step": 16
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 0.00019912377749919374,
+ "loss": 0.9719,
+ "step": 17
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 0.00019885605855918885,
+ "loss": 0.9122,
+ "step": 18
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 0.00019855293386108992,
+ "loss": 0.8808,
+ "step": 19
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 0.00019821451197042026,
+ "loss": 1.0081,
+ "step": 20
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 0.00019784091409455728,
+ "loss": 1.0147,
+ "step": 21
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 0.00019743227403932134,
+ "loss": 0.8792,
+ "step": 22
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 0.00019698873816105273,
+ "loss": 0.8762,
+ "step": 23
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 0.00019651046531419332,
+ "loss": 0.7916,
+ "step": 24
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 0.0001959976267943923,
+ "loss": 0.8967,
+ "step": 25
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 0.0001954504062771555,
+ "loss": 1.0575,
+ "step": 26
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 0.00019486899975206166,
+ "loss": 0.9374,
+ "step": 27
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 0.00019425361545256727,
+ "loss": 0.8697,
+ "step": 28
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 0.00019360447378142728,
+ "loss": 0.788,
+ "step": 29
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 0.00019292180723175654,
+ "loss": 0.8215,
+ "step": 30
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 0.00019220586030376134,
+ "loss": 0.8943,
+ "step": 31
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 0.00019145688941717075,
+ "loss": 0.7825,
+ "step": 32
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 0.00019067516281939825,
+ "loss": 0.9878,
+ "step": 33
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 0.00018986096048946824,
+ "loss": 0.8517,
+ "step": 34
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 0.00018901457403773967,
+ "loss": 0.8132,
+ "step": 35
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 0.00018813630660146488,
+ "loss": 0.9143,
+ "step": 36
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 0.0001872264727362194,
+ "loss": 0.8659,
+ "step": 37
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 0.00018628539830324229,
+ "loss": 0.7977,
+ "step": 38
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 0.00018531342035272766,
+ "loss": 0.8188,
+ "step": 39
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 0.00018431088700310844,
+ "loss": 0.871,
+ "step": 40
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 0.00018327815731637612,
+ "loss": 0.6939,
+ "step": 41
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 0.00018221560116948103,
+ "loss": 0.8147,
+ "step": 42
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 0.00018112359912185924,
+ "loss": 0.8898,
+ "step": 43
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 0.00018000254227913348,
+ "loss": 0.8074,
+ "step": 44
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 0.0001788528321530366,
+ "loss": 0.8351,
+ "step": 45
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 0.00017767488051760857,
+ "loss": 0.8811,
+ "step": 46
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 0.00017646910926171747,
+ "loss": 0.792,
+ "step": 47
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 0.00017523595023795813,
+ "loss": 0.8558,
+ "step": 48
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 0.0001739758451079821,
+ "loss": 0.8308,
+ "step": 49
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 0.00017268924518431438,
+ "loss": 0.8504,
+ "step": 50
+ },
+ {
+ "epoch": 0.85,
+ "eval_loss": 0.8740054368972778,
+ "eval_runtime": 0.8959,
+ "eval_samples_per_second": 22.324,
+ "eval_steps_per_second": 5.581,
+ "step": 50
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 0.0001713766112687139,
+ "loss": 0.83,
+ "step": 51
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 0.0001700384134871351,
+ "loss": 0.7499,
+ "step": 52
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 0.00016867513112135013,
+ "loss": 0.8685,
+ "step": 53
+ },
+ {
+ "epoch": 0.92,
+ "learning_rate": 0.0001672872524372919,
+ "loss": 0.807,
+ "step": 54
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 0.00016587527451017938,
+ "loss": 0.8627,
+ "step": 55
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 0.0001644397030464877,
+ "loss": 0.7606,
+ "step": 56
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 0.00016298105220282713,
+ "loss": 0.8284,
+ "step": 57
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 0.00016149984440179537,
+ "loss": 0.8585,
+ "step": 58
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 0.00015999661014486956,
+ "loss": 0.8819,
+ "step": 59
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 0.0001584718878224047,
+ "loss": 0.6723,
+ "step": 60
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 0.00015692622352080662,
+ "loss": 0.7435,
+ "step": 61
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 0.00015536017082694846,
+ "loss": 0.7002,
+ "step": 62
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 0.00015377429062990122,
+ "loss": 0.7433,
+ "step": 63
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 0.00015216915092004847,
+ "loss": 0.7356,
+ "step": 64
+ },
+ {
+ "epoch": 1.11,
+ "learning_rate": 0.0001505453265856581,
+ "loss": 0.7616,
+ "step": 65
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 0.00014890339920698334,
+ "loss": 0.7192,
+ "step": 66
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 0.0001472439568479671,
+ "loss": 0.7354,
+ "step": 67
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 0.00014556759384562416,
+ "loss": 0.7623,
+ "step": 68
+ },
+ {
+ "epoch": 1.17,
+ "learning_rate": 0.00014387491059717652,
+ "loss": 0.724,
+ "step": 69
+ },
+ {
+ "epoch": 1.19,
+ "learning_rate": 0.0001421665133450184,
+ "loss": 0.8136,
+ "step": 70
+ },
+ {
+ "epoch": 1.21,
+ "learning_rate": 0.0001404430139595877,
+ "loss": 0.8083,
+ "step": 71
+ },
+ {
+ "epoch": 1.23,
+ "learning_rate": 0.00013870502972022173,
+ "loss": 0.7054,
+ "step": 72
+ },
+ {
+ "epoch": 1.24,
+ "learning_rate": 0.0001369531830940757,
+ "loss": 0.7301,
+ "step": 73
+ },
+ {
+ "epoch": 1.26,
+ "learning_rate": 0.0001351881015131833,
+ "loss": 0.6925,
+ "step": 74
+ },
+ {
+ "epoch": 1.28,
+ "learning_rate": 0.000133410417149739,
+ "loss": 0.7191,
+ "step": 75
+ },
+ {
+ "epoch": 1.29,
+ "learning_rate": 0.0001316207666896824,
+ "loss": 0.6814,
+ "step": 76
+ },
+ {
+ "epoch": 1.31,
+ "learning_rate": 0.00012981979110466654,
+ "loss": 0.6916,
+ "step": 77
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 0.00012800813542249072,
+ "loss": 0.5781,
+ "step": 78
+ },
+ {
+ "epoch": 1.34,
+ "learning_rate": 0.0001261864484960807,
+ "loss": 0.8074,
+ "step": 79
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 0.0001243553827710992,
+ "loss": 0.6563,
+ "step": 80
+ },
+ {
+ "epoch": 1.38,
+ "learning_rate": 0.00012251559405226941,
+ "loss": 0.6436,
+ "step": 81
+ },
+ {
+ "epoch": 1.4,
+ "learning_rate": 0.00012066774126849529,
+ "loss": 0.7692,
+ "step": 82
+ },
+ {
+ "epoch": 1.41,
+ "learning_rate": 0.00011881248623686338,
+ "loss": 0.6265,
+ "step": 83
+ },
+ {
+ "epoch": 1.43,
+ "learning_rate": 0.00011695049342560968,
+ "loss": 0.5772,
+ "step": 84
+ },
+ {
+ "epoch": 1.45,
+ "learning_rate": 0.00011508242971613741,
+ "loss": 0.6841,
+ "step": 85
+ },
+ {
+ "epoch": 1.46,
+ "learning_rate": 0.00011320896416417026,
+ "loss": 0.7439,
+ "step": 86
+ },
+ {
+ "epoch": 1.48,
+ "learning_rate": 0.000111330767760127,
+ "loss": 0.7303,
+ "step": 87
+ },
+ {
+ "epoch": 1.5,
+ "learning_rate": 0.00010944851318880314,
+ "loss": 0.7767,
+ "step": 88
+ },
+ {
+ "epoch": 1.51,
+ "learning_rate": 0.00010756287458844569,
+ "loss": 0.6361,
+ "step": 89
+ },
+ {
+ "epoch": 1.53,
+ "learning_rate": 0.00010567452730930743,
+ "loss": 0.6535,
+ "step": 90
+ },
+ {
+ "epoch": 1.55,
+ "learning_rate": 0.00010378414767176705,
+ "loss": 0.7178,
+ "step": 91
+ },
+ {
+ "epoch": 1.57,
+ "learning_rate": 0.0001018924127241019,
+ "loss": 0.6977,
+ "step": 92
+ },
+ {
+ "epoch": 1.58,
+ "learning_rate": 0.0001,
+ "loss": 0.704,
+ "step": 93
+ },
+ {
+ "epoch": 1.6,
+ "learning_rate": 9.810758727589813e-05,
+ "loss": 0.8041,
+ "step": 94
+ },
+ {
+ "epoch": 1.62,
+ "learning_rate": 9.621585232823298e-05,
+ "loss": 0.7026,
+ "step": 95
+ },
+ {
+ "epoch": 1.63,
+ "learning_rate": 9.432547269069261e-05,
+ "loss": 0.6892,
+ "step": 96
+ },
+ {
+ "epoch": 1.65,
+ "learning_rate": 9.243712541155436e-05,
+ "loss": 0.7073,
+ "step": 97
+ },
+ {
+ "epoch": 1.67,
+ "learning_rate": 9.055148681119688e-05,
+ "loss": 0.7765,
+ "step": 98
+ },
+ {
+ "epoch": 1.69,
+ "learning_rate": 8.866923223987302e-05,
+ "loss": 0.6896,
+ "step": 99
+ },
+ {
+ "epoch": 1.7,
+ "learning_rate": 8.679103583582979e-05,
+ "loss": 0.6527,
+ "step": 100
+ },
+ {
+ "epoch": 1.7,
+ "eval_loss": 0.8596932291984558,
+ "eval_runtime": 0.897,
+ "eval_samples_per_second": 22.297,
+ "eval_steps_per_second": 5.574,
+ "step": 100
+ },
+ {
+ "epoch": 1.72,
+ "learning_rate": 8.491757028386263e-05,
+ "loss": 0.6335,
+ "step": 101
+ },
+ {
+ "epoch": 1.74,
+ "learning_rate": 8.304950657439033e-05,
+ "loss": 0.6348,
+ "step": 102
+ },
+ {
+ "epoch": 1.75,
+ "learning_rate": 8.118751376313664e-05,
+ "loss": 0.6983,
+ "step": 103
+ },
+ {
+ "epoch": 1.77,
+ "learning_rate": 7.93322587315047e-05,
+ "loss": 0.7995,
+ "step": 104
+ },
+ {
+ "epoch": 1.79,
+ "learning_rate": 7.74844059477306e-05,
+ "loss": 0.654,
+ "step": 105
+ },
+ {
+ "epoch": 1.8,
+ "learning_rate": 7.564461722890081e-05,
+ "loss": 0.6507,
+ "step": 106
+ },
+ {
+ "epoch": 1.82,
+ "learning_rate": 7.381355150391933e-05,
+ "loss": 0.6623,
+ "step": 107
+ },
+ {
+ "epoch": 1.84,
+ "learning_rate": 7.19918645775093e-05,
+ "loss": 0.7539,
+ "step": 108
+ },
+ {
+ "epoch": 1.86,
+ "learning_rate": 7.018020889533348e-05,
+ "loss": 0.755,
+ "step": 109
+ },
+ {
+ "epoch": 1.87,
+ "learning_rate": 6.83792333103176e-05,
+ "loss": 0.6394,
+ "step": 110
+ },
+ {
+ "epoch": 1.89,
+ "learning_rate": 6.658958285026102e-05,
+ "loss": 0.7545,
+ "step": 111
+ },
+ {
+ "epoch": 1.91,
+ "learning_rate": 6.48118984868167e-05,
+ "loss": 0.7215,
+ "step": 112
+ },
+ {
+ "epoch": 1.92,
+ "learning_rate": 6.304681690592431e-05,
+ "loss": 0.581,
+ "step": 113
+ },
+ {
+ "epoch": 1.94,
+ "learning_rate": 6.129497027977829e-05,
+ "loss": 0.673,
+ "step": 114
+ },
+ {
+ "epoch": 1.96,
+ "learning_rate": 5.955698604041231e-05,
+ "loss": 0.6878,
+ "step": 115
+ },
+ {
+ "epoch": 1.97,
+ "learning_rate": 5.7833486654981606e-05,
+ "loss": 0.738,
+ "step": 116
+ },
+ {
+ "epoch": 1.99,
+ "learning_rate": 5.6125089402823485e-05,
+ "loss": 0.812,
+ "step": 117
+ },
+ {
+ "epoch": 2.01,
+ "learning_rate": 5.443240615437586e-05,
+ "loss": 0.5997,
+ "step": 118
+ },
+ {
+ "epoch": 2.03,
+ "learning_rate": 5.275604315203293e-05,
+ "loss": 0.6187,
+ "step": 119
+ },
+ {
+ "epoch": 2.04,
+ "learning_rate": 5.109660079301668e-05,
+ "loss": 0.7232,
+ "step": 120
+ },
+ {
+ "epoch": 2.06,
+ "learning_rate": 4.945467341434195e-05,
+ "loss": 0.7089,
+ "step": 121
+ },
+ {
+ "epoch": 2.08,
+ "learning_rate": 4.783084907995156e-05,
+ "loss": 0.6914,
+ "step": 122
+ },
+ {
+ "epoch": 2.09,
+ "learning_rate": 4.622570937009879e-05,
+ "loss": 0.6114,
+ "step": 123
+ },
+ {
+ "epoch": 2.11,
+ "learning_rate": 4.4639829173051554e-05,
+ "loss": 0.5577,
+ "step": 124
+ },
+ {
+ "epoch": 2.13,
+ "learning_rate": 4.307377647919343e-05,
+ "loss": 0.6421,
+ "step": 125
+ },
+ {
+ "epoch": 2.14,
+ "learning_rate": 4.152811217759529e-05,
+ "loss": 0.6418,
+ "step": 126
+ },
+ {
+ "epoch": 2.16,
+ "learning_rate": 4.000338985513046e-05,
+ "loss": 0.6358,
+ "step": 127
+ },
+ {
+ "epoch": 2.18,
+ "learning_rate": 3.8500155598204644e-05,
+ "loss": 0.6426,
+ "step": 128
+ },
+ {
+ "epoch": 2.2,
+ "learning_rate": 3.701894779717286e-05,
+ "loss": 0.5322,
+ "step": 129
+ },
+ {
+ "epoch": 2.21,
+ "learning_rate": 3.5560296953512295e-05,
+ "loss": 0.635,
+ "step": 130
+ },
+ {
+ "epoch": 2.23,
+ "learning_rate": 3.4124725489820645e-05,
+ "loss": 0.5777,
+ "step": 131
+ },
+ {
+ "epoch": 2.25,
+ "learning_rate": 3.2712747562708115e-05,
+ "loss": 0.7093,
+ "step": 132
+ },
+ {
+ "epoch": 2.26,
+ "learning_rate": 3.132486887864992e-05,
+ "loss": 0.5785,
+ "step": 133
+ },
+ {
+ "epoch": 2.28,
+ "learning_rate": 2.9961586512864947e-05,
+ "loss": 0.5838,
+ "step": 134
+ },
+ {
+ "epoch": 2.3,
+ "learning_rate": 2.8623388731286093e-05,
+ "loss": 0.5222,
+ "step": 135
+ },
+ {
+ "epoch": 2.31,
+ "learning_rate": 2.7310754815685624e-05,
+ "loss": 0.6518,
+ "step": 136
+ },
+ {
+ "epoch": 2.33,
+ "learning_rate": 2.6024154892017937e-05,
+ "loss": 0.5586,
+ "step": 137
+ },
+ {
+ "epoch": 2.35,
+ "learning_rate": 2.4764049762041874e-05,
+ "loss": 0.5685,
+ "step": 138
+ },
+ {
+ "epoch": 2.37,
+ "learning_rate": 2.353089073828255e-05,
+ "loss": 0.5602,
+ "step": 139
+ },
+ {
+ "epoch": 2.38,
+ "learning_rate": 2.2325119482391467e-05,
+ "loss": 0.6436,
+ "step": 140
+ },
+ {
+ "epoch": 2.4,
+ "learning_rate": 2.1147167846963422e-05,
+ "loss": 0.6939,
+ "step": 141
+ },
+ {
+ "epoch": 2.42,
+ "learning_rate": 1.999745772086655e-05,
+ "loss": 0.7147,
+ "step": 142
+ },
+ {
+ "epoch": 2.43,
+ "learning_rate": 1.8876400878140775e-05,
+ "loss": 0.6701,
+ "step": 143
+ },
+ {
+ "epoch": 2.45,
+ "learning_rate": 1.7784398830519e-05,
+ "loss": 0.691,
+ "step": 144
+ },
+ {
+ "epoch": 2.47,
+ "learning_rate": 1.672184268362391e-05,
+ "loss": 0.6358,
+ "step": 145
+ },
+ {
+ "epoch": 2.49,
+ "learning_rate": 1.5689112996891576e-05,
+ "loss": 0.5967,
+ "step": 146
+ },
+ {
+ "epoch": 2.5,
+ "learning_rate": 1.4686579647272336e-05,
+ "loss": 0.6047,
+ "step": 147
+ },
+ {
+ "epoch": 2.52,
+ "learning_rate": 1.3714601696757712e-05,
+ "loss": 0.6241,
+ "step": 148
+ },
+ {
+ "epoch": 2.54,
+ "learning_rate": 1.2773527263780626e-05,
+ "loss": 0.6553,
+ "step": 149
+ },
+ {
+ "epoch": 2.55,
+ "learning_rate": 1.1863693398535114e-05,
+ "loss": 0.5639,
+ "step": 150
+ },
+ {
+ "epoch": 2.55,
+ "eval_loss": 0.8483209609985352,
+ "eval_runtime": 0.9949,
+ "eval_samples_per_second": 20.102,
+ "eval_steps_per_second": 5.025,
+ "step": 150
+ },
+ {
+ "epoch": 2.57,
+ "learning_rate": 1.0985425962260343e-05,
+ "loss": 0.57,
+ "step": 151
+ },
+ {
+ "epoch": 2.59,
+ "learning_rate": 1.01390395105318e-05,
+ "loss": 0.5924,
+ "step": 152
+ },
+ {
+ "epoch": 2.6,
+ "learning_rate": 9.324837180601741e-06,
+ "loss": 0.6156,
+ "step": 153
+ },
+ {
+ "epoch": 2.62,
+ "learning_rate": 8.543110582829272e-06,
+ "loss": 0.5449,
+ "step": 154
+ },
+ {
+ "epoch": 2.64,
+ "learning_rate": 7.794139696238645e-06,
+ "loss": 0.6187,
+ "step": 155
+ },
+ {
+ "epoch": 2.66,
+ "learning_rate": 7.078192768243486e-06,
+ "loss": 0.5636,
+ "step": 156
+ },
+ {
+ "epoch": 2.67,
+ "learning_rate": 6.395526218572723e-06,
+ "loss": 0.6078,
+ "step": 157
+ },
+ {
+ "epoch": 2.69,
+ "learning_rate": 5.746384547432737e-06,
+ "loss": 0.5925,
+ "step": 158
+ },
+ {
+ "epoch": 2.71,
+ "learning_rate": 5.131000247938367e-06,
+ "loss": 0.5647,
+ "step": 159
+ },
+ {
+ "epoch": 2.72,
+ "learning_rate": 4.549593722844492e-06,
+ "loss": 0.5503,
+ "step": 160
+ },
+ {
+ "epoch": 2.74,
+ "learning_rate": 4.002373205607723e-06,
+ "loss": 0.605,
+ "step": 161
+ },
+ {
+ "epoch": 2.76,
+ "learning_rate": 3.4895346858066724e-06,
+ "loss": 0.6888,
+ "step": 162
+ },
+ {
+ "epoch": 2.77,
+ "learning_rate": 3.011261838947277e-06,
+ "loss": 0.668,
+ "step": 163
+ },
+ {
+ "epoch": 2.79,
+ "learning_rate": 2.5677259606786684e-06,
+ "loss": 0.5715,
+ "step": 164
+ },
+ {
+ "epoch": 2.81,
+ "learning_rate": 2.159085905442737e-06,
+ "loss": 0.6772,
+ "step": 165
+ },
+ {
+ "epoch": 2.83,
+ "learning_rate": 1.7854880295797405e-06,
+ "loss": 0.5826,
+ "step": 166
+ },
+ {
+ "epoch": 2.84,
+ "learning_rate": 1.4470661389100804e-06,
+ "loss": 0.7116,
+ "step": 167
+ },
+ {
+ "epoch": 2.86,
+ "learning_rate": 1.143941440811147e-06,
+ "loss": 0.512,
+ "step": 168
+ },
+ {
+ "epoch": 2.88,
+ "learning_rate": 8.762225008062674e-07,
+ "loss": 0.6045,
+ "step": 169
+ },
+ {
+ "epoch": 2.89,
+ "learning_rate": 6.440052036815081e-07,
+ "loss": 0.5852,
+ "step": 170
+ },
+ {
+ "epoch": 2.91,
+ "learning_rate": 4.4737271914411236e-07,
+ "loss": 0.5565,
+ "step": 171
+ },
+ {
+ "epoch": 2.93,
+ "learning_rate": 2.86395472034795e-07,
+ "loss": 0.7202,
+ "step": 172
+ },
+ {
+ "epoch": 2.94,
+ "learning_rate": 1.611311171048735e-07,
+ "loss": 0.6077,
+ "step": 173
+ },
+ {
+ "epoch": 2.96,
+ "learning_rate": 7.162451836685291e-08,
+ "loss": 0.5935,
+ "step": 174
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 174,
+ "num_train_epochs": 3,
+ "save_steps": 500,
+ "total_flos": 7493194676305920.0,
+ "trial_name": null,
+ "trial_params": null
+ }
checkpoint-174/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:559e05578fbf1f6e78fc368cebf479663b2065af16b04f27bd99e7e2b2e8f04f
+ size 4155
checkpoint-58/README.md ADDED
@@ -0,0 +1,21 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: True
+ - load_in_4bit: False
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+ ### Framework versions
+
+
+ - PEFT 0.5.0.dev0
checkpoint-58/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+ "auto_mapping": null,
+ "base_model_name_or_path": "openlm-research/open_llama_3b",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16,
+ "lora_dropout": 0.0,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "gate_proj",
+ "down_proj",
+ "up_proj",
+ "q_proj",
+ "v_proj",
+ "k_proj",
+ "o_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
checkpoint-58/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:08d8d95a2be0f5363874736804a98386ca5f228eb66423900564477cac9f0882
+ size 50982397
checkpoint-58/adapter_model/README.md ADDED
@@ -0,0 +1,21 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: True
+ - load_in_4bit: False
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+ ### Framework versions
+
+
+ - PEFT 0.5.0.dev0
checkpoint-58/adapter_model/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+ "auto_mapping": null,
+ "base_model_name_or_path": "openlm-research/open_llama_3b",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16,
+ "lora_dropout": 0.0,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "gate_proj",
+ "down_proj",
+ "up_proj",
+ "q_proj",
+ "v_proj",
+ "k_proj",
+ "o_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
checkpoint-58/adapter_model/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:08d8d95a2be0f5363874736804a98386ca5f228eb66423900564477cac9f0882
+ size 50982397
checkpoint-58/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:691d6dcb5ae651c580cda18c098a72e195894258965523266c8ca073247ea722
+ size 25871055
checkpoint-58/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1784c9e20ffdc46b706882695c2108245d7626a328b6d70a37d079ad1fbbc989
+ size 14575
checkpoint-58/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b8f691665837dc2e76de0eb8d9e5e7aedb01a74e8a8842c8acb9b4a4a0ae6a4f
+ size 627
checkpoint-58/trainer_state.json ADDED
@@ -0,0 +1,375 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.9872340425531915,
+ "eval_steps": 50,
+ "global_step": 58,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.02,
+ "learning_rate": 2e-05,
+ "loss": 1.6896,
+ "step": 1
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 4e-05,
+ "loss": 1.632,
+ "step": 2
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 6e-05,
+ "loss": 1.7197,
+ "step": 3
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 8e-05,
+ "loss": 1.6988,
+ "step": 4
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 0.0001,
+ "loss": 1.4945,
+ "step": 5
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 0.00012,
+ "loss": 1.4763,
+ "step": 6
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 0.00014,
+ "loss": 1.4645,
+ "step": 7
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 0.00016,
+ "loss": 1.1569,
+ "step": 8
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 0.00018,
+ "loss": 1.4501,
+ "step": 9
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 0.0002,
+ "loss": 1.1972,
+ "step": 10
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 0.00019998209226697376,
+ "loss": 1.1479,
+ "step": 11
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 0.00019992837548163316,
+ "loss": 1.1791,
+ "step": 12
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 0.00019983886888289514,
+ "loss": 1.1471,
+ "step": 13
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 0.00019971360452796522,
+ "loss": 0.9662,
+ "step": 14
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 0.0001995526272808559,
+ "loss": 1.2166,
+ "step": 15
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 0.0001993559947963185,
+ "loss": 1.1023,
+ "step": 16
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 0.00019912377749919374,
+ "loss": 0.9719,
+ "step": 17
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 0.00019885605855918885,
+ "loss": 0.9122,
+ "step": 18
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 0.00019855293386108992,
+ "loss": 0.8808,
+ "step": 19
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 0.00019821451197042026,
+ "loss": 1.0081,
+ "step": 20
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 0.00019784091409455728,
+ "loss": 1.0147,
+ "step": 21
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 0.00019743227403932134,
+ "loss": 0.8792,
+ "step": 22
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 0.00019698873816105273,
+ "loss": 0.8762,
+ "step": 23
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 0.00019651046531419332,
+ "loss": 0.7916,
+ "step": 24
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 0.0001959976267943923,
+ "loss": 0.8967,
+ "step": 25
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 0.0001954504062771555,
+ "loss": 1.0575,
+ "step": 26
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 0.00019486899975206166,
+ "loss": 0.9374,
+ "step": 27
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 0.00019425361545256727,
+ "loss": 0.8697,
+ "step": 28
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 0.00019360447378142728,
+ "loss": 0.788,
+ "step": 29
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 0.00019292180723175654,
+ "loss": 0.8215,
+ "step": 30
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 0.00019220586030376134,
+ "loss": 0.8943,
+ "step": 31
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 0.00019145688941717075,
+ "loss": 0.7825,
+ "step": 32
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 0.00019067516281939825,
+ "loss": 0.9878,
+ "step": 33
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 0.00018986096048946824,
+ "loss": 0.8517,
+ "step": 34
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 0.00018901457403773967,
+ "loss": 0.8132,
+ "step": 35
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 0.00018813630660146488,
+ "loss": 0.9143,
+ "step": 36
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 0.0001872264727362194,
+ "loss": 0.8659,
+ "step": 37
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 0.00018628539830324229,
+ "loss": 0.7977,
+ "step": 38
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 0.00018531342035272766,
+ "loss": 0.8188,
+ "step": 39
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 0.00018431088700310844,
+ "loss": 0.871,
+ "step": 40
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 0.00018327815731637612,
+ "loss": 0.6939,
+ "step": 41
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 0.00018221560116948103,
+ "loss": 0.8147,
+ "step": 42
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 0.00018112359912185924,
+ "loss": 0.8898,
+ "step": 43
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 0.00018000254227913348,
+ "loss": 0.8074,
+ "step": 44
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 0.0001788528321530366,
+ "loss": 0.8351,
+ "step": 45
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 0.00017767488051760857,
+ "loss": 0.8811,
+ "step": 46
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 0.00017646910926171747,
+ "loss": 0.792,
+ "step": 47
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 0.00017523595023795813,
+ "loss": 0.8558,
+ "step": 48
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 0.0001739758451079821,
+ "loss": 0.8308,
+ "step": 49
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 0.00017268924518431438,
+ "loss": 0.8504,
+ "step": 50
+ },
+ {
+ "epoch": 0.85,
+ "eval_loss": 0.8740054368972778,
+ "eval_runtime": 0.8959,
+ "eval_samples_per_second": 22.324,
+ "eval_steps_per_second": 5.581,
+ "step": 50
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 0.0001713766112687139,
+ "loss": 0.83,
+ "step": 51
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 0.0001700384134871351,
+ "loss": 0.7499,
+ "step": 52
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 0.00016867513112135013,
+ "loss": 0.8685,
+ "step": 53
+ },
+ {
+ "epoch": 0.92,
+ "learning_rate": 0.0001672872524372919,
+ "loss": 0.807,
+ "step": 54
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 0.00016587527451017938,
+ "loss": 0.8627,
+ "step": 55
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 0.0001644397030464877,
+ "loss": 0.7606,
+ "step": 56
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 0.00016298105220282713,
+ "loss": 0.8284,
+ "step": 57
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 0.00016149984440179537,
+ "loss": 0.8585,
+ "step": 58
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 174,
+ "num_train_epochs": 3,
+ "save_steps": 500,
+ "total_flos": 2526774948986880.0,
+ "trial_name": null,
+ "trial_params": null
+ }
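The `log_history` above traces the schedule directly: linear warmup from 2e-5 to the 2e-4 peak over the first 10 steps, then cosine decay. Fitting the post-warmup values (e.g. step 11's 1.99982e-4 and step 50's 1.72689e-4) gives a decay denominator of 166 steps; a small sketch that reproduces the logged curve under that inference:

```python
# Sketch: reproducing the logged learning-rate curve. The 10-step warmup
# and 2e-4 peak are read off the log; the 166-step cosine denominator is
# inferred by fitting the post-warmup entries.
import math

def lr_at(step: int, peak: float = 2e-4, warmup: int = 10,
          decay_steps: int = 166) -> float:
    if step <= warmup:
        return peak * step / warmup  # steps 1..10: 2e-05, 4e-05, ..., 2e-04
    progress = (step - warmup) / decay_steps
    return peak * 0.5 * (1.0 + math.cos(math.pi * progress))

print(lr_at(11))  # ~0.00019998209..., matching the step-11 entry
print(lr_at(50))  # ~0.00017268924..., matching the step-50 entry
```

Note also that `global_step: 58` corresponds to `epoch: 0.9872` with `max_steps: 174` over 3 epochs, so the checkpoints at steps 58, 117, and 174 line up with epoch boundaries; `save_steps: 500` never fires within a 174-step run.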
checkpoint-58/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:559e05578fbf1f6e78fc368cebf479663b2065af16b04f27bd99e7e2b2e8f04f
+ size 4155
merged/config.json ADDED
@@ -0,0 +1,26 @@
+ {
+ "_name_or_path": "openlm-research/open_llama_3b",
+ "architectures": [
+ "LlamaForCausalLM"
+ ],
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "hidden_act": "silu",
+ "hidden_size": 3200,
+ "initializer_range": 0.02,
+ "intermediate_size": 8640,
+ "max_position_embeddings": 2048,
+ "model_type": "llama",
+ "num_attention_heads": 32,
+ "num_hidden_layers": 26,
+ "num_key_value_heads": 32,
+ "pad_token_id": 0,
+ "pretraining_tp": 1,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float16",
+ "transformers_version": "4.32.0.dev0",
+ "use_cache": false,
+ "vocab_size": 32000
+ }
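`merged/` is the adapter folded back into the base weights as a standalone fp16 checkpoint; the `pytorch_model.bin` size below (~6.85 GB) matches a 3B-parameter model at 2 bytes per weight. A sketch of how such a folder is typically produced with PEFT (the adapter path is a placeholder, since the repo id isn't shown here):

```python
# Sketch: merging the LoRA adapter into the dense weights to produce merged/.
# The base is loaded in fp16 rather than 8-bit, since int8 weights cannot
# be merged in place.
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM

base = AutoModelForCausalLM.from_pretrained(
    "openlm-research/open_llama_3b", torch_dtype=torch.float16
)
model = PeftModel.from_pretrained(base, "path/to/adapter")  # placeholder path
model = model.merge_and_unload()  # folds each LoRA delta into its target weight
model.save_pretrained("merged")
```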
merged/generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "_from_model_config": true,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "pad_token_id": 0,
+ "transformers_version": "4.32.0.dev0"
+ }
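`generation_config.json` pins the token ids inference depends on (bos 1, eos 2, pad 0), mirroring `config.json`. A minimal inference sketch against the merged folder (the prompt text is illustrative only):

```python
# Sketch: generating from the merged checkpoint.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("merged")
model = AutoModelForCausalLM.from_pretrained(
    "merged", torch_dtype=torch.float16, device_map="auto"
)

inputs = tok("The capital of France is", return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=32)
print(tok.decode(out[0], skip_special_tokens=True))
```

(`use_cache` is false in `config.json` only because it was saved from a training run; the generation config above does not set it, so `generate` typically uses the KV cache as usual.)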
merged/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e56879fbda600867b4539a13e2ec8d555c248048096af0ccda294049d65e12c9
+ size 6853024617
merged/special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+ {
+ "bos_token": "<s>",
+ "eos_token": "</s>",
+ "pad_token": "[PAD]",
+ "unk_token": "<unk>"
+ }
merged/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ab1b681ec7fc02fed5edd3026687d7a692a918c4dd8e150ca2e3994a6229843b
+ size 534194
merged/tokenizer_config.json ADDED
@@ -0,0 +1,36 @@
+ {
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "bos_token": {
+ "__type": "AddedToken",
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "clean_up_tokenization_spaces": false,
+ "eos_token": {
+ "__type": "AddedToken",
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "legacy": null,
+ "model_max_length": 2048,
+ "pad_token": null,
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "trust_remote_code": false,
+ "unk_token": {
+ "__type": "AddedToken",
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "use_fast": true
+ }
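One wrinkle across the tokenizer files: `special_tokens_map.json` records a `[PAD]` pad token, while `tokenizer_config.json` leaves `pad_token` null and `config.json` pins `pad_token_id` to 0 (the `<unk>` slot). How that combination resolves can vary by `transformers` version, so when padding matters (e.g. batched inference), pinning it explicitly is the safe move (a sketch):

```python
# Sketch: make the pad token explicit when reloading, since the files
# above disagree about it.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("merged")
if tok.pad_token is None:
    tok.pad_token = tok.unk_token  # id 0, matching pad_token_id in config.json
```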
special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+ {
+ "bos_token": "<s>",
+ "eos_token": "</s>",
+ "pad_token": "[PAD]",
+ "unk_token": "<unk>"
+ }
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ab1b681ec7fc02fed5edd3026687d7a692a918c4dd8e150ca2e3994a6229843b
+ size 534194
tokenizer_config.json ADDED
@@ -0,0 +1,36 @@
+ {
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "bos_token": {
+ "__type": "AddedToken",
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "clean_up_tokenization_spaces": false,
+ "eos_token": {
+ "__type": "AddedToken",
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "legacy": null,
+ "model_max_length": 2048,
+ "pad_token": null,
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "trust_remote_code": false,
+ "unk_token": {
+ "__type": "AddedToken",
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "use_fast": true
+ }