{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9996261682242991,
"eval_steps": 134,
"global_step": 1337,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05009345794392523,
"grad_norm": 1.3687894344329834,
"learning_rate": 5e-07,
"logits/chosen": -0.6576648950576782,
"logits/rejected": -0.6672008633613586,
"logps/chosen": -2.227020502090454,
"logps/rejected": -5.950113296508789,
"loss": 0.6927,
"rewards/accuracies": 0.5,
"rewards/chosen": 0.0005052318447269499,
"rewards/margins": 0.0008711821283213794,
"rewards/rejected": -0.0003659502835944295,
"step": 67
},
{
"epoch": 0.10018691588785046,
"grad_norm": 2.2226035594940186,
"learning_rate": 1e-06,
"logits/chosen": -0.643012285232544,
"logits/rejected": -0.6527656316757202,
"logps/chosen": -2.1846628189086914,
"logps/rejected": -6.394010066986084,
"loss": 0.6883,
"rewards/accuracies": 0.7276118993759155,
"rewards/chosen": -0.0008213059627451003,
"rewards/margins": 0.00972337368875742,
"rewards/rejected": -0.01054468099027872,
"step": 134
},
{
"epoch": 0.1502803738317757,
"grad_norm": 2.133711576461792,
"learning_rate": 9.443059019118869e-07,
"logits/chosen": -0.6600874662399292,
"logits/rejected": -0.6701396703720093,
"logps/chosen": -2.2199361324310303,
"logps/rejected": -7.209847450256348,
"loss": 0.6524,
"rewards/accuracies": 0.8563432693481445,
"rewards/chosen": -0.0032122363336384296,
"rewards/margins": 0.08549854904413223,
"rewards/rejected": -0.08871078491210938,
"step": 201
},
{
"epoch": 0.20037383177570092,
"grad_norm": 3.0340538024902344,
"learning_rate": 8.886118038237738e-07,
"logits/chosen": -0.6653879284858704,
"logits/rejected": -0.677828848361969,
"logps/chosen": -2.533212184906006,
"logps/rejected": -10.03713321685791,
"loss": 0.5533,
"rewards/accuracies": 0.8488805890083313,
"rewards/chosen": -0.02541249990463257,
"rewards/margins": 0.32844945788383484,
"rewards/rejected": -0.3538619577884674,
"step": 268
},
{
"epoch": 0.2504672897196262,
"grad_norm": 2.396055221557617,
"learning_rate": 8.329177057356608e-07,
"logits/chosen": -0.6770755052566528,
"logits/rejected": -0.6937317848205566,
"logps/chosen": -3.257599353790283,
"logps/rejected": -14.972702980041504,
"loss": 0.4397,
"rewards/accuracies": 0.8526118993759155,
"rewards/chosen": -0.10955464839935303,
"rewards/margins": 0.7154061198234558,
"rewards/rejected": -0.8249607682228088,
"step": 335
},
{
"epoch": 0.3005607476635514,
"grad_norm": 2.1619019508361816,
"learning_rate": 7.772236076475478e-07,
"logits/chosen": -0.7039727568626404,
"logits/rejected": -0.7222999334335327,
"logps/chosen": -3.5498507022857666,
"logps/rejected": -20.35832405090332,
"loss": 0.3224,
"rewards/accuracies": 0.9029850363731384,
"rewards/chosen": -0.15284313261508942,
"rewards/margins": 1.2502268552780151,
"rewards/rejected": -1.4030699729919434,
"step": 402
},
{
"epoch": 0.3506542056074766,
"grad_norm": 4.72946834564209,
"learning_rate": 7.215295095594347e-07,
"logits/chosen": -0.7181271910667419,
"logits/rejected": -0.7374780774116516,
"logps/chosen": -5.401639461517334,
"logps/rejected": -25.78311538696289,
"loss": 0.2782,
"rewards/accuracies": 0.9048507213592529,
"rewards/chosen": -0.3204249441623688,
"rewards/margins": 1.6534833908081055,
"rewards/rejected": -1.9739083051681519,
"step": 469
},
{
"epoch": 0.40074766355140184,
"grad_norm": 0.9766824245452881,
"learning_rate": 6.658354114713217e-07,
"logits/chosen": -0.756517231464386,
"logits/rejected": -0.7776155471801758,
"logps/chosen": -5.15533971786499,
"logps/rejected": -30.16547203063965,
"loss": 0.2283,
"rewards/accuracies": 0.9123134016990662,
"rewards/chosen": -0.3061079978942871,
"rewards/margins": 2.072856903076172,
"rewards/rejected": -2.37896466255188,
"step": 536
},
{
"epoch": 0.4508411214953271,
"grad_norm": 4.220199108123779,
"learning_rate": 6.101413133832086e-07,
"logits/chosen": -0.7682312726974487,
"logits/rejected": -0.7901257872581482,
"logps/chosen": -6.049707412719727,
"logps/rejected": -31.82540512084961,
"loss": 0.2276,
"rewards/accuracies": 0.9048507213592529,
"rewards/chosen": -0.37660378217697144,
"rewards/margins": 2.2089908123016357,
"rewards/rejected": -2.585594415664673,
"step": 603
},
{
"epoch": 0.5009345794392523,
"grad_norm": 6.615551471710205,
"learning_rate": 5.544472152950955e-07,
"logits/chosen": -0.7774823904037476,
"logits/rejected": -0.8002229332923889,
"logps/chosen": -6.285848617553711,
"logps/rejected": -35.35755157470703,
"loss": 0.1959,
"rewards/accuracies": 0.9197760820388794,
"rewards/chosen": -0.40754997730255127,
"rewards/margins": 2.4701759815216064,
"rewards/rejected": -2.8777260780334473,
"step": 670
},
{
"epoch": 0.5510280373831775,
"grad_norm": 3.6465256214141846,
"learning_rate": 4.987531172069825e-07,
"logits/chosen": -0.7746462821960449,
"logits/rejected": -0.7996317148208618,
"logps/chosen": -7.126999378204346,
"logps/rejected": -37.35933303833008,
"loss": 0.1813,
"rewards/accuracies": 0.9309701323509216,
"rewards/chosen": -0.4826628267765045,
"rewards/margins": 2.6208343505859375,
"rewards/rejected": -3.10349702835083,
"step": 737
},
{
"epoch": 0.6011214953271028,
"grad_norm": 9.422818183898926,
"learning_rate": 4.4305901911886947e-07,
"logits/chosen": -0.7760910391807556,
"logits/rejected": -0.802481472492218,
"logps/chosen": -6.253903388977051,
"logps/rejected": -37.6789436340332,
"loss": 0.1769,
"rewards/accuracies": 0.9402984976768494,
"rewards/chosen": -0.3954426348209381,
"rewards/margins": 2.7266414165496826,
"rewards/rejected": -3.122084140777588,
"step": 804
},
{
"epoch": 0.6512149532710281,
"grad_norm": 6.800887107849121,
"learning_rate": 3.873649210307564e-07,
"logits/chosen": -0.7794383764266968,
"logits/rejected": -0.8046081066131592,
"logps/chosen": -6.785355567932129,
"logps/rejected": -38.823604583740234,
"loss": 0.1737,
"rewards/accuracies": 0.9291044473648071,
"rewards/chosen": -0.43714043498039246,
"rewards/margins": 2.833800792694092,
"rewards/rejected": -3.2709412574768066,
"step": 871
},
{
"epoch": 0.7013084112149532,
"grad_norm": 4.8703718185424805,
"learning_rate": 3.3167082294264335e-07,
"logits/chosen": -0.7899920344352722,
"logits/rejected": -0.8165501952171326,
"logps/chosen": -7.20438814163208,
"logps/rejected": -39.9392204284668,
"loss": 0.1891,
"rewards/accuracies": 0.9272387623786926,
"rewards/chosen": -0.47980982065200806,
"rewards/margins": 2.8853275775909424,
"rewards/rejected": -3.3651373386383057,
"step": 938
},
{
"epoch": 0.7514018691588785,
"grad_norm": 0.2955164611339569,
"learning_rate": 2.7597672485453034e-07,
"logits/chosen": -0.7796534895896912,
"logits/rejected": -0.8110276460647583,
"logps/chosen": -4.945315361022949,
"logps/rejected": -41.536590576171875,
"loss": 0.1215,
"rewards/accuracies": 0.9570895433425903,
"rewards/chosen": -0.2934176027774811,
"rewards/margins": 3.1789510250091553,
"rewards/rejected": -3.4723687171936035,
"step": 1005
},
{
"epoch": 0.8014953271028037,
"grad_norm": 6.8209075927734375,
"learning_rate": 2.2028262676641728e-07,
"logits/chosen": -0.7921628952026367,
"logits/rejected": -0.8196142315864563,
"logps/chosen": -6.230679512023926,
"logps/rejected": -41.968841552734375,
"loss": 0.1502,
"rewards/accuracies": 0.9477611780166626,
"rewards/chosen": -0.4138028919696808,
"rewards/margins": 3.128687858581543,
"rewards/rejected": -3.5424907207489014,
"step": 1072
},
{
"epoch": 0.851588785046729,
"grad_norm": 6.143770217895508,
"learning_rate": 1.6458852867830422e-07,
"logits/chosen": -0.7991640567779541,
"logits/rejected": -0.8256679773330688,
"logps/chosen": -6.564340114593506,
"logps/rejected": -42.35133361816406,
"loss": 0.1459,
"rewards/accuracies": 0.9402984976768494,
"rewards/chosen": -0.44414621591567993,
"rewards/margins": 3.182438611984253,
"rewards/rejected": -3.626584529876709,
"step": 1139
},
{
"epoch": 0.9016822429906542,
"grad_norm": 5.307474613189697,
"learning_rate": 1.0889443059019118e-07,
"logits/chosen": -0.7985506057739258,
"logits/rejected": -0.8279980421066284,
"logps/chosen": -5.217541217803955,
"logps/rejected": -43.41652297973633,
"loss": 0.1275,
"rewards/accuracies": 0.9496268630027771,
"rewards/chosen": -0.3293641209602356,
"rewards/margins": 3.362889289855957,
"rewards/rejected": -3.692253351211548,
"step": 1206
},
{
"epoch": 0.9517757009345794,
"grad_norm": 5.2688984870910645,
"learning_rate": 5.320033250207814e-08,
"logits/chosen": -0.7938746809959412,
"logits/rejected": -0.8232108354568481,
"logps/chosen": -5.571805477142334,
"logps/rejected": -43.23577880859375,
"loss": 0.1313,
"rewards/accuracies": 0.9589552283287048,
"rewards/chosen": -0.33297136425971985,
"rewards/margins": 3.3113536834716797,
"rewards/rejected": -3.644325017929077,
"step": 1273
},
{
"epoch": 0.9996261682242991,
"step": 1337,
"total_flos": 1.8353312732676096e+17,
"train_loss": 0.2906364963049307,
"train_runtime": 4209.7283,
"train_samples_per_second": 2.542,
"train_steps_per_second": 0.318
}
],
"logging_steps": 67,
"max_steps": 1337,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.8353312732676096e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}