Incomplete, committed on
Commit
a493281
·
verified ·
1 Parent(s): c0116b5

End of training

Browse files
README.md CHANGED
@@ -3,9 +3,10 @@ library_name: peft
3
  license: llama3.1
4
  base_model: meta-llama/Llama-3.1-8B-Instruct
5
  tags:
 
 
6
  - trl
7
  - dpo
8
- - llama-factory
9
  - generated_from_trainer
10
  model-index:
11
  - name: Llama-3.1-8B-Instruct_dpo_sg_values
@@ -17,7 +18,7 @@ should probably proofread and complete it, then remove this comment. -->
17
 
18
  # Llama-3.1-8B-Instruct_dpo_sg_values
19
 
20
- This model is a fine-tuned version of [meta-llama/Llama-3.1-8B-Instruct](https://huggingface.co/meta-llama/Llama-3.1-8B-Instruct) on an unknown dataset.
21
 
22
  ## Model description
23
 
 
3
  license: llama3.1
4
  base_model: meta-llama/Llama-3.1-8B-Instruct
5
  tags:
6
+ - llama-factory
7
+ - lora
8
  - trl
9
  - dpo
 
10
  - generated_from_trainer
11
  model-index:
12
  - name: Llama-3.1-8B-Instruct_dpo_sg_values
 
18
 
19
  # Llama-3.1-8B-Instruct_dpo_sg_values
20
 
21
+ This model is a fine-tuned version of [meta-llama/Llama-3.1-8B-Instruct](https://huggingface.co/meta-llama/Llama-3.1-8B-Instruct) on the dpo_sg_values dataset.
22
 
23
  ## Model description
24
 
all_results.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 0.9996261682242991,
3
+ "total_flos": 1.8353312732676096e+17,
4
+ "train_loss": 0.2906364963049307,
5
+ "train_runtime": 4209.7283,
6
+ "train_samples_per_second": 2.542,
7
+ "train_steps_per_second": 0.318
8
+ }
train_results.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 0.9996261682242991,
3
+ "total_flos": 1.8353312732676096e+17,
4
+ "train_loss": 0.2906364963049307,
5
+ "train_runtime": 4209.7283,
6
+ "train_samples_per_second": 2.542,
7
+ "train_steps_per_second": 0.318
8
+ }
trainer_state.json ADDED
@@ -0,0 +1,327 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 0.9996261682242991,
5
+ "eval_steps": 134,
6
+ "global_step": 1337,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.05009345794392523,
13
+ "grad_norm": 1.3687894344329834,
14
+ "learning_rate": 5e-07,
15
+ "logits/chosen": -0.6576648950576782,
16
+ "logits/rejected": -0.6672008633613586,
17
+ "logps/chosen": -2.227020502090454,
18
+ "logps/rejected": -5.950113296508789,
19
+ "loss": 0.6927,
20
+ "rewards/accuracies": 0.5,
21
+ "rewards/chosen": 0.0005052318447269499,
22
+ "rewards/margins": 0.0008711821283213794,
23
+ "rewards/rejected": -0.0003659502835944295,
24
+ "step": 67
25
+ },
26
+ {
27
+ "epoch": 0.10018691588785046,
28
+ "grad_norm": 2.2226035594940186,
29
+ "learning_rate": 1e-06,
30
+ "logits/chosen": -0.643012285232544,
31
+ "logits/rejected": -0.6527656316757202,
32
+ "logps/chosen": -2.1846628189086914,
33
+ "logps/rejected": -6.394010066986084,
34
+ "loss": 0.6883,
35
+ "rewards/accuracies": 0.7276118993759155,
36
+ "rewards/chosen": -0.0008213059627451003,
37
+ "rewards/margins": 0.00972337368875742,
38
+ "rewards/rejected": -0.01054468099027872,
39
+ "step": 134
40
+ },
41
+ {
42
+ "epoch": 0.1502803738317757,
43
+ "grad_norm": 2.133711576461792,
44
+ "learning_rate": 9.443059019118869e-07,
45
+ "logits/chosen": -0.6600874662399292,
46
+ "logits/rejected": -0.6701396703720093,
47
+ "logps/chosen": -2.2199361324310303,
48
+ "logps/rejected": -7.209847450256348,
49
+ "loss": 0.6524,
50
+ "rewards/accuracies": 0.8563432693481445,
51
+ "rewards/chosen": -0.0032122363336384296,
52
+ "rewards/margins": 0.08549854904413223,
53
+ "rewards/rejected": -0.08871078491210938,
54
+ "step": 201
55
+ },
56
+ {
57
+ "epoch": 0.20037383177570092,
58
+ "grad_norm": 3.0340538024902344,
59
+ "learning_rate": 8.886118038237738e-07,
60
+ "logits/chosen": -0.6653879284858704,
61
+ "logits/rejected": -0.677828848361969,
62
+ "logps/chosen": -2.533212184906006,
63
+ "logps/rejected": -10.03713321685791,
64
+ "loss": 0.5533,
65
+ "rewards/accuracies": 0.8488805890083313,
66
+ "rewards/chosen": -0.02541249990463257,
67
+ "rewards/margins": 0.32844945788383484,
68
+ "rewards/rejected": -0.3538619577884674,
69
+ "step": 268
70
+ },
71
+ {
72
+ "epoch": 0.2504672897196262,
73
+ "grad_norm": 2.396055221557617,
74
+ "learning_rate": 8.329177057356608e-07,
75
+ "logits/chosen": -0.6770755052566528,
76
+ "logits/rejected": -0.6937317848205566,
77
+ "logps/chosen": -3.257599353790283,
78
+ "logps/rejected": -14.972702980041504,
79
+ "loss": 0.4397,
80
+ "rewards/accuracies": 0.8526118993759155,
81
+ "rewards/chosen": -0.10955464839935303,
82
+ "rewards/margins": 0.7154061198234558,
83
+ "rewards/rejected": -0.8249607682228088,
84
+ "step": 335
85
+ },
86
+ {
87
+ "epoch": 0.3005607476635514,
88
+ "grad_norm": 2.1619019508361816,
89
+ "learning_rate": 7.772236076475478e-07,
90
+ "logits/chosen": -0.7039727568626404,
91
+ "logits/rejected": -0.7222999334335327,
92
+ "logps/chosen": -3.5498507022857666,
93
+ "logps/rejected": -20.35832405090332,
94
+ "loss": 0.3224,
95
+ "rewards/accuracies": 0.9029850363731384,
96
+ "rewards/chosen": -0.15284313261508942,
97
+ "rewards/margins": 1.2502268552780151,
98
+ "rewards/rejected": -1.4030699729919434,
99
+ "step": 402
100
+ },
101
+ {
102
+ "epoch": 0.3506542056074766,
103
+ "grad_norm": 4.72946834564209,
104
+ "learning_rate": 7.215295095594347e-07,
105
+ "logits/chosen": -0.7181271910667419,
106
+ "logits/rejected": -0.7374780774116516,
107
+ "logps/chosen": -5.401639461517334,
108
+ "logps/rejected": -25.78311538696289,
109
+ "loss": 0.2782,
110
+ "rewards/accuracies": 0.9048507213592529,
111
+ "rewards/chosen": -0.3204249441623688,
112
+ "rewards/margins": 1.6534833908081055,
113
+ "rewards/rejected": -1.9739083051681519,
114
+ "step": 469
115
+ },
116
+ {
117
+ "epoch": 0.40074766355140184,
118
+ "grad_norm": 0.9766824245452881,
119
+ "learning_rate": 6.658354114713217e-07,
120
+ "logits/chosen": -0.756517231464386,
121
+ "logits/rejected": -0.7776155471801758,
122
+ "logps/chosen": -5.15533971786499,
123
+ "logps/rejected": -30.16547203063965,
124
+ "loss": 0.2283,
125
+ "rewards/accuracies": 0.9123134016990662,
126
+ "rewards/chosen": -0.3061079978942871,
127
+ "rewards/margins": 2.072856903076172,
128
+ "rewards/rejected": -2.37896466255188,
129
+ "step": 536
130
+ },
131
+ {
132
+ "epoch": 0.4508411214953271,
133
+ "grad_norm": 4.220199108123779,
134
+ "learning_rate": 6.101413133832086e-07,
135
+ "logits/chosen": -0.7682312726974487,
136
+ "logits/rejected": -0.7901257872581482,
137
+ "logps/chosen": -6.049707412719727,
138
+ "logps/rejected": -31.82540512084961,
139
+ "loss": 0.2276,
140
+ "rewards/accuracies": 0.9048507213592529,
141
+ "rewards/chosen": -0.37660378217697144,
142
+ "rewards/margins": 2.2089908123016357,
143
+ "rewards/rejected": -2.585594415664673,
144
+ "step": 603
145
+ },
146
+ {
147
+ "epoch": 0.5009345794392523,
148
+ "grad_norm": 6.615551471710205,
149
+ "learning_rate": 5.544472152950955e-07,
150
+ "logits/chosen": -0.7774823904037476,
151
+ "logits/rejected": -0.8002229332923889,
152
+ "logps/chosen": -6.285848617553711,
153
+ "logps/rejected": -35.35755157470703,
154
+ "loss": 0.1959,
155
+ "rewards/accuracies": 0.9197760820388794,
156
+ "rewards/chosen": -0.40754997730255127,
157
+ "rewards/margins": 2.4701759815216064,
158
+ "rewards/rejected": -2.8777260780334473,
159
+ "step": 670
160
+ },
161
+ {
162
+ "epoch": 0.5510280373831775,
163
+ "grad_norm": 3.6465256214141846,
164
+ "learning_rate": 4.987531172069825e-07,
165
+ "logits/chosen": -0.7746462821960449,
166
+ "logits/rejected": -0.7996317148208618,
167
+ "logps/chosen": -7.126999378204346,
168
+ "logps/rejected": -37.35933303833008,
169
+ "loss": 0.1813,
170
+ "rewards/accuracies": 0.9309701323509216,
171
+ "rewards/chosen": -0.4826628267765045,
172
+ "rewards/margins": 2.6208343505859375,
173
+ "rewards/rejected": -3.10349702835083,
174
+ "step": 737
175
+ },
176
+ {
177
+ "epoch": 0.6011214953271028,
178
+ "grad_norm": 9.422818183898926,
179
+ "learning_rate": 4.4305901911886947e-07,
180
+ "logits/chosen": -0.7760910391807556,
181
+ "logits/rejected": -0.802481472492218,
182
+ "logps/chosen": -6.253903388977051,
183
+ "logps/rejected": -37.6789436340332,
184
+ "loss": 0.1769,
185
+ "rewards/accuracies": 0.9402984976768494,
186
+ "rewards/chosen": -0.3954426348209381,
187
+ "rewards/margins": 2.7266414165496826,
188
+ "rewards/rejected": -3.122084140777588,
189
+ "step": 804
190
+ },
191
+ {
192
+ "epoch": 0.6512149532710281,
193
+ "grad_norm": 6.800887107849121,
194
+ "learning_rate": 3.873649210307564e-07,
195
+ "logits/chosen": -0.7794383764266968,
196
+ "logits/rejected": -0.8046081066131592,
197
+ "logps/chosen": -6.785355567932129,
198
+ "logps/rejected": -38.823604583740234,
199
+ "loss": 0.1737,
200
+ "rewards/accuracies": 0.9291044473648071,
201
+ "rewards/chosen": -0.43714043498039246,
202
+ "rewards/margins": 2.833800792694092,
203
+ "rewards/rejected": -3.2709412574768066,
204
+ "step": 871
205
+ },
206
+ {
207
+ "epoch": 0.7013084112149532,
208
+ "grad_norm": 4.8703718185424805,
209
+ "learning_rate": 3.3167082294264335e-07,
210
+ "logits/chosen": -0.7899920344352722,
211
+ "logits/rejected": -0.8165501952171326,
212
+ "logps/chosen": -7.20438814163208,
213
+ "logps/rejected": -39.9392204284668,
214
+ "loss": 0.1891,
215
+ "rewards/accuracies": 0.9272387623786926,
216
+ "rewards/chosen": -0.47980982065200806,
217
+ "rewards/margins": 2.8853275775909424,
218
+ "rewards/rejected": -3.3651373386383057,
219
+ "step": 938
220
+ },
221
+ {
222
+ "epoch": 0.7514018691588785,
223
+ "grad_norm": 0.2955164611339569,
224
+ "learning_rate": 2.7597672485453034e-07,
225
+ "logits/chosen": -0.7796534895896912,
226
+ "logits/rejected": -0.8110276460647583,
227
+ "logps/chosen": -4.945315361022949,
228
+ "logps/rejected": -41.536590576171875,
229
+ "loss": 0.1215,
230
+ "rewards/accuracies": 0.9570895433425903,
231
+ "rewards/chosen": -0.2934176027774811,
232
+ "rewards/margins": 3.1789510250091553,
233
+ "rewards/rejected": -3.4723687171936035,
234
+ "step": 1005
235
+ },
236
+ {
237
+ "epoch": 0.8014953271028037,
238
+ "grad_norm": 6.8209075927734375,
239
+ "learning_rate": 2.2028262676641728e-07,
240
+ "logits/chosen": -0.7921628952026367,
241
+ "logits/rejected": -0.8196142315864563,
242
+ "logps/chosen": -6.230679512023926,
243
+ "logps/rejected": -41.968841552734375,
244
+ "loss": 0.1502,
245
+ "rewards/accuracies": 0.9477611780166626,
246
+ "rewards/chosen": -0.4138028919696808,
247
+ "rewards/margins": 3.128687858581543,
248
+ "rewards/rejected": -3.5424907207489014,
249
+ "step": 1072
250
+ },
251
+ {
252
+ "epoch": 0.851588785046729,
253
+ "grad_norm": 6.143770217895508,
254
+ "learning_rate": 1.6458852867830422e-07,
255
+ "logits/chosen": -0.7991640567779541,
256
+ "logits/rejected": -0.8256679773330688,
257
+ "logps/chosen": -6.564340114593506,
258
+ "logps/rejected": -42.35133361816406,
259
+ "loss": 0.1459,
260
+ "rewards/accuracies": 0.9402984976768494,
261
+ "rewards/chosen": -0.44414621591567993,
262
+ "rewards/margins": 3.182438611984253,
263
+ "rewards/rejected": -3.626584529876709,
264
+ "step": 1139
265
+ },
266
+ {
267
+ "epoch": 0.9016822429906542,
268
+ "grad_norm": 5.307474613189697,
269
+ "learning_rate": 1.0889443059019118e-07,
270
+ "logits/chosen": -0.7985506057739258,
271
+ "logits/rejected": -0.8279980421066284,
272
+ "logps/chosen": -5.217541217803955,
273
+ "logps/rejected": -43.41652297973633,
274
+ "loss": 0.1275,
275
+ "rewards/accuracies": 0.9496268630027771,
276
+ "rewards/chosen": -0.3293641209602356,
277
+ "rewards/margins": 3.362889289855957,
278
+ "rewards/rejected": -3.692253351211548,
279
+ "step": 1206
280
+ },
281
+ {
282
+ "epoch": 0.9517757009345794,
283
+ "grad_norm": 5.2688984870910645,
284
+ "learning_rate": 5.320033250207814e-08,
285
+ "logits/chosen": -0.7938746809959412,
286
+ "logits/rejected": -0.8232108354568481,
287
+ "logps/chosen": -5.571805477142334,
288
+ "logps/rejected": -43.23577880859375,
289
+ "loss": 0.1313,
290
+ "rewards/accuracies": 0.9589552283287048,
291
+ "rewards/chosen": -0.33297136425971985,
292
+ "rewards/margins": 3.3113536834716797,
293
+ "rewards/rejected": -3.644325017929077,
294
+ "step": 1273
295
+ },
296
+ {
297
+ "epoch": 0.9996261682242991,
298
+ "step": 1337,
299
+ "total_flos": 1.8353312732676096e+17,
300
+ "train_loss": 0.2906364963049307,
301
+ "train_runtime": 4209.7283,
302
+ "train_samples_per_second": 2.542,
303
+ "train_steps_per_second": 0.318
304
+ }
305
+ ],
306
+ "logging_steps": 67,
307
+ "max_steps": 1337,
308
+ "num_input_tokens_seen": 0,
309
+ "num_train_epochs": 1,
310
+ "save_steps": 500,
311
+ "stateful_callbacks": {
312
+ "TrainerControl": {
313
+ "args": {
314
+ "should_epoch_stop": false,
315
+ "should_evaluate": false,
316
+ "should_log": false,
317
+ "should_save": true,
318
+ "should_training_stop": true
319
+ },
320
+ "attributes": {}
321
+ }
322
+ },
323
+ "total_flos": 1.8353312732676096e+17,
324
+ "train_batch_size": 2,
325
+ "trial_name": null,
326
+ "trial_params": null
327
+ }
training_loss.png ADDED
training_rewards_accuracies.png ADDED