{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.6281078251766553,
  "eval_steps": 400,
  "global_step": 300,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.010468463752944255,
      "grad_norm": 593.169353105105,
      "learning_rate": 3.125e-08,
      "loss": 124.0676,
      "rewards/accuracies": 0.4312500059604645,
      "rewards/chosen": -3.0608115196228027,
      "rewards/margins": -0.4392074644565582,
      "rewards/rejected": -2.6216037273406982,
      "step": 5
    },
    {
      "epoch": 0.02093692750588851,
      "grad_norm": 548.9400555797266,
      "learning_rate": 6.25e-08,
      "loss": 130.3453,
      "rewards/accuracies": 0.5062500238418579,
      "rewards/chosen": -2.993642807006836,
      "rewards/margins": -0.3124019205570221,
      "rewards/rejected": -2.6812407970428467,
      "step": 10
    },
    {
      "epoch": 0.031405391258832765,
      "grad_norm": 704.2858617618903,
      "learning_rate": 9.375e-08,
      "loss": 125.1116,
      "rewards/accuracies": 0.5062500238418579,
      "rewards/chosen": -2.592876672744751,
      "rewards/margins": -0.21878299117088318,
      "rewards/rejected": -2.374093770980835,
      "step": 15
    },
    {
      "epoch": 0.04187385501177702,
      "grad_norm": 722.3141898590777,
      "learning_rate": 1.25e-07,
      "loss": 123.8169,
      "rewards/accuracies": 0.4000000059604645,
      "rewards/chosen": -3.1637253761291504,
      "rewards/margins": -0.7121697664260864,
      "rewards/rejected": -2.4515557289123535,
      "step": 20
    },
    {
      "epoch": 0.05234231876472128,
      "grad_norm": 601.8286611145769,
      "learning_rate": 1.5625e-07,
      "loss": 119.0515,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -2.760319232940674,
      "rewards/margins": -0.1618678867816925,
      "rewards/rejected": -2.5984511375427246,
      "step": 25
    },
    {
      "epoch": 0.06281078251766553,
      "grad_norm": 553078.6522561809,
      "learning_rate": 1.875e-07,
      "loss": 126.5249,
      "rewards/accuracies": 0.48124998807907104,
      "rewards/chosen": -2.58254075050354,
      "rewards/margins": -0.2875169515609741,
      "rewards/rejected": -2.2950234413146973,
      "step": 30
    },
    {
      "epoch": 0.07327924627060979,
      "grad_norm": 452.74589821293057,
      "learning_rate": 2.1874999999999997e-07,
      "loss": 131.5394,
      "rewards/accuracies": 0.4937500059604645,
      "rewards/chosen": -2.9655470848083496,
      "rewards/margins": -0.2771367132663727,
      "rewards/rejected": -2.6884102821350098,
      "step": 35
    },
    {
      "epoch": 0.08374771002355404,
      "grad_norm": 576.7615571641037,
      "learning_rate": 2.5e-07,
      "loss": 122.3868,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": -2.83229398727417,
      "rewards/margins": -0.11761734634637833,
      "rewards/rejected": -2.71467661857605,
      "step": 40
    },
    {
      "epoch": 0.0942161737764983,
      "grad_norm": 865.0302510462254,
      "learning_rate": 2.8125e-07,
      "loss": 125.0924,
      "rewards/accuracies": 0.4437499940395355,
      "rewards/chosen": -2.822023391723633,
      "rewards/margins": -0.1981198638677597,
      "rewards/rejected": -2.623903274536133,
      "step": 45
    },
    {
      "epoch": 0.10468463752944256,
      "grad_norm": 456.96059937844296,
      "learning_rate": 3.125e-07,
      "loss": 115.896,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -2.5448055267333984,
      "rewards/margins": 0.30294448137283325,
      "rewards/rejected": -2.847749948501587,
      "step": 50
    },
    {
      "epoch": 0.11515310128238682,
      "grad_norm": 452.1527178293467,
      "learning_rate": 3.4374999999999994e-07,
      "loss": 119.0052,
      "rewards/accuracies": 0.4437499940395355,
      "rewards/chosen": -3.0407936573028564,
      "rewards/margins": -0.31493765115737915,
      "rewards/rejected": -2.725855827331543,
      "step": 55
    },
    {
      "epoch": 0.12562156503533106,
      "grad_norm": 694.2012955877877,
      "learning_rate": 3.75e-07,
      "loss": 125.2438,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -2.8351409435272217,
      "rewards/margins": -0.34137240052223206,
      "rewards/rejected": -2.4937686920166016,
      "step": 60
    },
    {
      "epoch": 0.1360900287882753,
      "grad_norm": 38333.04765141771,
      "learning_rate": 4.0625000000000003e-07,
      "loss": 116.4045,
      "rewards/accuracies": 0.48124998807907104,
      "rewards/chosen": -3.2388644218444824,
      "rewards/margins": -0.13560602068901062,
      "rewards/rejected": -3.1032581329345703,
      "step": 65
    },
    {
      "epoch": 0.14655849254121958,
      "grad_norm": 3057.251910698748,
      "learning_rate": 4.3749999999999994e-07,
      "loss": 120.8262,
      "rewards/accuracies": 0.48124998807907104,
      "rewards/chosen": -2.921536922454834,
      "rewards/margins": -0.23646602034568787,
      "rewards/rejected": -2.6850709915161133,
      "step": 70
    },
    {
      "epoch": 0.15702695629416383,
      "grad_norm": 564.0200282953291,
      "learning_rate": 4.6874999999999996e-07,
      "loss": 113.6765,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -3.173576831817627,
      "rewards/margins": -0.23844210803508759,
      "rewards/rejected": -2.9351348876953125,
      "step": 75
    },
    {
      "epoch": 0.16749542004710807,
      "grad_norm": 3677.4811770963215,
      "learning_rate": 5e-07,
      "loss": 119.9495,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -2.5878632068634033,
      "rewards/margins": 0.04798383638262749,
      "rewards/rejected": -2.6358470916748047,
      "step": 80
    },
    {
      "epoch": 0.17796388380005235,
      "grad_norm": 2606.920467755308,
      "learning_rate": 5.3125e-07,
      "loss": 116.3195,
      "rewards/accuracies": 0.48124998807907104,
      "rewards/chosen": -3.0098717212677,
      "rewards/margins": -0.18883180618286133,
      "rewards/rejected": -2.8210396766662598,
      "step": 85
    },
    {
      "epoch": 0.1884323475529966,
      "grad_norm": 541.7406315411172,
      "learning_rate": 5.625e-07,
      "loss": 120.4695,
      "rewards/accuracies": 0.4312500059604645,
      "rewards/chosen": -3.168757200241089,
      "rewards/margins": -0.5669826865196228,
      "rewards/rejected": -2.6017744541168213,
      "step": 90
    },
    {
      "epoch": 0.19890081130594087,
      "grad_norm": 23078.97685832551,
      "learning_rate": 5.9375e-07,
      "loss": 14460.1375,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -2.794193744659424,
      "rewards/margins": 0.09161223471164703,
      "rewards/rejected": -2.88580584526062,
      "step": 95
    },
    {
      "epoch": 0.2093692750588851,
      "grad_norm": 780.8702285562881,
      "learning_rate": 5.999678242522831e-07,
      "loss": 121.0745,
      "rewards/accuracies": 0.4937500059604645,
      "rewards/chosen": -3.182065486907959,
      "rewards/margins": -0.2521217167377472,
      "rewards/rejected": -2.9299440383911133,
      "step": 100
    },
    {
      "epoch": 0.21983773881182936,
      "grad_norm": 6602253074.10331,
      "learning_rate": 5.998371221059621e-07,
      "loss": 60582.2688,
      "rewards/accuracies": 0.5062500238418579,
      "rewards/chosen": -3.2734615802764893,
      "rewards/margins": -0.1844327747821808,
      "rewards/rejected": -3.089028835296631,
      "step": 105
    },
    {
      "epoch": 0.23030620256477363,
      "grad_norm": 8875.788468209948,
      "learning_rate": 5.996059263493219e-07,
      "loss": 174.1966,
      "rewards/accuracies": 0.44999998807907104,
      "rewards/chosen": -3.194960594177246,
      "rewards/margins": -0.4171196520328522,
      "rewards/rejected": -2.7778408527374268,
      "step": 110
    },
    {
      "epoch": 0.24077466631771788,
      "grad_norm": 108204.03617629108,
      "learning_rate": 5.992743144700869e-07,
      "loss": 116.7799,
      "rewards/accuracies": 0.46875,
      "rewards/chosen": -3.3109021186828613,
      "rewards/margins": -0.11277161538600922,
      "rewards/rejected": -3.1981301307678223,
      "step": 115
    },
    {
      "epoch": 0.2512431300706621,
      "grad_norm": 7059.818834658902,
      "learning_rate": 5.988423976115163e-07,
      "loss": 5014.2578,
      "rewards/accuracies": 0.46875,
      "rewards/chosen": -3.1635494232177734,
      "rewards/margins": -0.12058776617050171,
      "rewards/rejected": -3.042961359024048,
      "step": 120
    },
    {
      "epoch": 0.26171159382360637,
      "grad_norm": 1129.3881271781593,
      "learning_rate": 5.983103205351532e-07,
      "loss": 115.2389,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -3.1486246585845947,
      "rewards/margins": 0.13379120826721191,
      "rewards/rejected": -3.2824161052703857,
      "step": 125
    },
    {
      "epoch": 0.2721800575765506,
      "grad_norm": 12292.768269150538,
      "learning_rate": 5.976782615723061e-07,
      "loss": 1857.4367,
      "rewards/accuracies": 0.53125,
      "rewards/chosen": -3.6433582305908203,
      "rewards/margins": -0.09961327910423279,
      "rewards/rejected": -3.5437445640563965,
      "step": 130
    },
    {
      "epoch": 0.2826485213294949,
      "grad_norm": 1427.4537532483553,
      "learning_rate": 5.969464325642798e-07,
      "loss": 777.1236,
      "rewards/accuracies": 0.543749988079071,
      "rewards/chosen": -3.222156524658203,
      "rewards/margins": 0.17556187510490417,
      "rewards/rejected": -3.397718906402588,
      "step": 135
    },
    {
      "epoch": 0.29311698508243916,
      "grad_norm": 1363.6683253204878,
      "learning_rate": 5.961150787913738e-07,
      "loss": 112.3685,
      "rewards/accuracies": 0.48124998807907104,
      "rewards/chosen": -3.6276211738586426,
      "rewards/margins": -0.25700777769088745,
      "rewards/rejected": -3.3706130981445312,
      "step": 140
    },
    {
      "epoch": 0.3035854488353834,
      "grad_norm": 384148.96614330093,
      "learning_rate": 5.951844788906746e-07,
      "loss": 250560.75,
      "rewards/accuracies": 0.4437499940395355,
      "rewards/chosen": -3.4407284259796143,
      "rewards/margins": -0.5382715463638306,
      "rewards/rejected": -2.902456760406494,
      "step": 145
    },
    {
      "epoch": 0.31405391258832765,
      "grad_norm": 1107.0521725751462,
      "learning_rate": 5.941549447626671e-07,
      "loss": 1637.1979,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -3.568209171295166,
      "rewards/margins": 0.11036701500415802,
      "rewards/rejected": -3.6785759925842285,
      "step": 150
    },
    {
      "epoch": 0.3245223763412719,
      "grad_norm": 2872.1199690802287,
      "learning_rate": 5.930268214666979e-07,
      "loss": 108.2358,
      "rewards/accuracies": 0.543749988079071,
      "rewards/chosen": -3.5584754943847656,
      "rewards/margins": 0.09230315685272217,
      "rewards/rejected": -3.650778293609619,
      "step": 155
    },
    {
      "epoch": 0.33499084009421615,
      "grad_norm": 11786.541153253775,
      "learning_rate": 5.918004871053251e-07,
      "loss": 108.8458,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -3.5462379455566406,
      "rewards/margins": 0.0011147856712341309,
      "rewards/rejected": -3.5473525524139404,
      "step": 160
    },
    {
      "epoch": 0.34545930384716045,
      "grad_norm": 9463.059096379828,
      "learning_rate": 5.904763526975934e-07,
      "loss": 109.3413,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -3.705939531326294,
      "rewards/margins": -0.08292806148529053,
      "rewards/rejected": -3.623011350631714,
      "step": 165
    },
    {
      "epoch": 0.3559277676001047,
      "grad_norm": 3646.416064715657,
      "learning_rate": 5.890548620412763e-07,
      "loss": 107.8934,
      "rewards/accuracies": 0.46875,
      "rewards/chosen": -3.977457046508789,
      "rewards/margins": -0.3523085415363312,
      "rewards/rejected": -3.6251487731933594,
      "step": 170
    },
    {
      "epoch": 0.36639623135304894,
      "grad_norm": 16128.4409869884,
      "learning_rate": 5.875364915641322e-07,
      "loss": 104.2532,
      "rewards/accuracies": 0.53125,
      "rewards/chosen": -4.031493186950684,
      "rewards/margins": -0.0011030196910724044,
      "rewards/rejected": -4.030389785766602,
      "step": 175
    },
    {
      "epoch": 0.3768646951059932,
      "grad_norm": 3466.2786999376217,
      "learning_rate": 5.859217501642258e-07,
      "loss": 94.8179,
      "rewards/accuracies": 0.5687500238418579,
      "rewards/chosen": -3.672518491744995,
      "rewards/margins": 0.14635656774044037,
      "rewards/rejected": -3.8188750743865967,
      "step": 180
    },
    {
      "epoch": 0.38733315885893743,
      "grad_norm": 599025903.193245,
      "learning_rate": 5.842111790393642e-07,
      "loss": 877.2918,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": -3.8185837268829346,
      "rewards/margins": 0.026774680241942406,
      "rewards/rejected": -3.845358371734619,
      "step": 185
    },
    {
      "epoch": 0.39780162261188173,
      "grad_norm": 3160.8519199140915,
      "learning_rate": 5.824053515057091e-07,
      "loss": 104.2913,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -3.647284746170044,
      "rewards/margins": 0.01915259286761284,
      "rewards/rejected": -3.6664366722106934,
      "step": 190
    },
    {
      "epoch": 0.408270086364826,
      "grad_norm": 2388.0156485727625,
      "learning_rate": 5.805048728056245e-07,
      "loss": 91.419,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -3.7797279357910156,
      "rewards/margins": 0.11267204582691193,
      "rewards/rejected": -3.8924002647399902,
      "step": 195
    },
    {
      "epoch": 0.4187385501177702,
      "grad_norm": 4402.18438060161,
      "learning_rate": 5.785103799048218e-07,
      "loss": 747998310.4,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -4.032955169677734,
      "rewards/margins": -0.02619754709303379,
      "rewards/rejected": -4.006758213043213,
      "step": 200
    },
    {
      "epoch": 0.42920701387071447,
      "grad_norm": 6962.862335261392,
      "learning_rate": 5.764225412788754e-07,
      "loss": 98.2311,
      "rewards/accuracies": 0.5687500238418579,
      "rewards/chosen": -4.277915954589844,
      "rewards/margins": 0.07609972357749939,
      "rewards/rejected": -4.354015827178955,
      "step": 205
    },
    {
      "epoch": 0.4396754776236587,
      "grad_norm": 7244.6911777886135,
      "learning_rate": 5.742420566891749e-07,
      "loss": 93.3634,
      "rewards/accuracies": 0.5687500238418579,
      "rewards/chosen": -3.5269923210144043,
      "rewards/margins": 0.3214404582977295,
      "rewards/rejected": -3.848433017730713,
      "step": 210
    },
    {
      "epoch": 0.45014394137660296,
      "grad_norm": 3123.685458090081,
      "learning_rate": 5.719696569483936e-07,
      "loss": 89.675,
      "rewards/accuracies": 0.543749988079071,
      "rewards/chosen": -3.8964924812316895,
      "rewards/margins": 0.18084397912025452,
      "rewards/rejected": -4.077336311340332,
      "step": 215
    },
    {
      "epoch": 0.46061240512954726,
      "grad_norm": 2152.0086272100416,
      "learning_rate": 5.696061036755478e-07,
      "loss": 106.4152,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -4.532608985900879,
      "rewards/margins": -0.047448158264160156,
      "rewards/rejected": -4.485161781311035,
      "step": 220
    },
    {
      "epoch": 0.4710808688824915,
      "grad_norm": 2592.99066686016,
      "learning_rate": 5.671521890407327e-07,
      "loss": 94.0079,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -4.172202110290527,
      "rewards/margins": 0.39335229992866516,
      "rewards/rejected": -4.565554618835449,
      "step": 225
    },
    {
      "epoch": 0.48154933263543576,
      "grad_norm": 7858.776511813048,
      "learning_rate": 5.64608735499618e-07,
      "loss": 88.0795,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -4.502499580383301,
      "rewards/margins": 0.30180323123931885,
      "rewards/rejected": -4.804303169250488,
      "step": 230
    },
    {
      "epoch": 0.49201779638838,
      "grad_norm": 1382.8034293376832,
      "learning_rate": 5.619765955177932e-07,
      "loss": 93.8786,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -4.486794948577881,
      "rewards/margins": 0.32596614956855774,
      "rewards/rejected": -4.812760829925537,
      "step": 235
    },
    {
      "epoch": 0.5024862601413242,
      "grad_norm": 4928.227610002493,
      "learning_rate": 5.592566512850545e-07,
      "loss": 94.1374,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -4.479926109313965,
      "rewards/margins": 0.3255046606063843,
      "rewards/rejected": -4.805431365966797,
      "step": 240
    },
    {
      "epoch": 0.5129547238942685,
      "grad_norm": 1395.0486891907535,
      "learning_rate": 5.564498144197293e-07,
      "loss": 97.436,
      "rewards/accuracies": 0.59375,
      "rewards/chosen": -4.590472221374512,
      "rewards/margins": 0.24266347289085388,
      "rewards/rejected": -4.83313512802124,
      "step": 245
    },
    {
      "epoch": 0.5234231876472127,
      "grad_norm": 1576.6760282156056,
      "learning_rate": 5.535570256631384e-07,
      "loss": 87.0744,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -4.965784072875977,
      "rewards/margins": 0.13402244448661804,
      "rewards/rejected": -5.099806308746338,
      "step": 250
    },
    {
      "epoch": 0.533891651400157,
      "grad_norm": 2053.0087890177797,
      "learning_rate": 5.505792545642954e-07,
      "loss": 95.9195,
      "rewards/accuracies": 0.4937500059604645,
      "rewards/chosen": -4.659627437591553,
      "rewards/margins": -0.013512322679162025,
      "rewards/rejected": -4.646115303039551,
      "step": 255
    },
    {
      "epoch": 0.5443601151531012,
      "grad_norm": 2545.9069911001143,
      "learning_rate": 5.475174991549528e-07,
      "loss": 94.0787,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -5.084115028381348,
      "rewards/margins": 0.1735546588897705,
      "rewards/rejected": -5.257669925689697,
      "step": 260
    },
    {
      "epoch": 0.5548285789060455,
      "grad_norm": 2099.8332950134763,
      "learning_rate": 5.443727856151007e-07,
      "loss": 82.0312,
      "rewards/accuracies": 0.581250011920929,
      "rewards/chosen": -5.3893656730651855,
      "rewards/margins": 0.2717081904411316,
      "rewards/rejected": -5.661074161529541,
      "step": 265
    },
    {
      "epoch": 0.5652970426589898,
      "grad_norm": 2084.0841217249636,
      "learning_rate": 5.411461679290317e-07,
      "loss": 87.9464,
      "rewards/accuracies": 0.6187499761581421,
      "rewards/chosen": -4.924943447113037,
      "rewards/margins": 0.4480757713317871,
      "rewards/rejected": -5.373019218444824,
      "step": 270
    },
    {
      "epoch": 0.575765506411934,
      "grad_norm": 5465.824792138828,
      "learning_rate": 5.378387275320869e-07,
      "loss": 81.2104,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -4.579545021057129,
      "rewards/margins": 0.3236454427242279,
      "rewards/rejected": -4.903190612792969,
      "step": 275
    },
    {
      "epoch": 0.5862339701648783,
      "grad_norm": 2418.7040228663905,
      "learning_rate": 5.34451572948201e-07,
      "loss": 86.9114,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -5.001852989196777,
      "rewards/margins": 0.3705536723136902,
      "rewards/rejected": -5.372406482696533,
      "step": 280
    },
    {
      "epoch": 0.5967024339178225,
      "grad_norm": 3506.716092691064,
      "learning_rate": 5.309858394183691e-07,
      "loss": 85.3161,
      "rewards/accuracies": 0.5687500238418579,
      "rewards/chosen": -4.8695902824401855,
      "rewards/margins": 0.26995033025741577,
      "rewards/rejected": -5.139540672302246,
      "step": 285
    },
    {
      "epoch": 0.6071708976707668,
      "grad_norm": 1696.185949418209,
      "learning_rate": 5.274426885201582e-07,
      "loss": 93.2688,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -5.414190292358398,
      "rewards/margins": 0.10774167627096176,
      "rewards/rejected": -5.521932601928711,
      "step": 290
    },
    {
      "epoch": 0.6176393614237111,
      "grad_norm": 7077.764617811095,
      "learning_rate": 5.238233077783925e-07,
      "loss": 76.6937,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -4.4273905754089355,
      "rewards/margins": 0.37667155265808105,
      "rewards/rejected": -4.804062843322754,
      "step": 295
    },
    {
      "epoch": 0.6281078251766553,
      "grad_norm": 5057.201800303853,
      "learning_rate": 5.201289102671411e-07,
      "loss": 82.1286,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -4.985184669494629,
      "rewards/margins": 0.5431011915206909,
      "rewards/rejected": -5.528285026550293,
      "step": 300
    }
  ],
  "logging_steps": 5,
  "max_steps": 954,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}