{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9984301412872841,
  "eval_steps": 100,
  "global_step": 159,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 3.125e-07,
      "logits/chosen": -2.6547884941101074,
      "logits/rejected": -2.5468616485595703,
      "logps/chosen": -285.215576171875,
      "logps/rejected": -258.8069152832031,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.06,
      "learning_rate": 3.125e-06,
      "logits/chosen": -2.5754165649414062,
      "logits/rejected": -2.5153017044067383,
      "logps/chosen": -295.195556640625,
      "logps/rejected": -293.4971008300781,
      "loss": 0.6929,
      "rewards/accuracies": 0.53125,
      "rewards/chosen": -0.00017905117420013994,
      "rewards/margins": 0.0014612883096560836,
      "rewards/rejected": -0.001640339381992817,
      "step": 10
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.9903533134293035e-06,
      "logits/chosen": -2.5756356716156006,
      "logits/rejected": -2.5089950561523438,
      "logps/chosen": -291.3115539550781,
      "logps/rejected": -287.61431884765625,
      "loss": 0.6851,
      "rewards/accuracies": 0.628125011920929,
      "rewards/chosen": -0.061528198421001434,
      "rewards/margins": 0.019807469099760056,
      "rewards/rejected": -0.08133566379547119,
      "step": 20
    },
    {
      "epoch": 0.19,
      "learning_rate": 4.882681251368549e-06,
      "logits/chosen": -2.6055867671966553,
      "logits/rejected": -2.5165815353393555,
      "logps/chosen": -283.285888671875,
      "logps/rejected": -284.13726806640625,
      "loss": 0.6711,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": -0.06080762296915054,
      "rewards/margins": 0.05497027188539505,
      "rewards/rejected": -0.115777887403965,
      "step": 30
    },
    {
      "epoch": 0.25,
      "learning_rate": 4.660472094042121e-06,
      "logits/chosen": -2.5286662578582764,
      "logits/rejected": -2.4570558071136475,
      "logps/chosen": -309.78094482421875,
      "logps/rejected": -316.354736328125,
      "loss": 0.6628,
      "rewards/accuracies": 0.6343749761581421,
      "rewards/chosen": -0.09531812369823456,
      "rewards/margins": 0.07932458072900772,
      "rewards/rejected": -0.17464269697666168,
      "step": 40
    },
    {
      "epoch": 0.31,
      "learning_rate": 4.33440758555951e-06,
      "logits/chosen": -2.564868211746216,
      "logits/rejected": -2.4932563304901123,
      "logps/chosen": -324.691162109375,
      "logps/rejected": -325.16461181640625,
      "loss": 0.6505,
      "rewards/accuracies": 0.659375011920929,
      "rewards/chosen": -0.2168252170085907,
      "rewards/margins": 0.10023742914199829,
      "rewards/rejected": -0.317062646150589,
      "step": 50
    },
    {
      "epoch": 0.38,
      "learning_rate": 3.92016186682789e-06,
      "logits/chosen": -2.571204423904419,
      "logits/rejected": -2.4519267082214355,
      "logps/chosen": -334.2891845703125,
      "logps/rejected": -332.8462219238281,
      "loss": 0.6419,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.19313496351242065,
      "rewards/margins": 0.12622517347335815,
      "rewards/rejected": -0.3193601667881012,
      "step": 60
    },
    {
      "epoch": 0.44,
      "learning_rate": 3.437648009023905e-06,
      "logits/chosen": -2.497055768966675,
      "logits/rejected": -2.415569543838501,
      "logps/chosen": -311.2139587402344,
      "logps/rejected": -327.3946838378906,
      "loss": 0.6447,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -0.23866800963878632,
      "rewards/margins": 0.13377706706523895,
      "rewards/rejected": -0.37244507670402527,
      "step": 70
    },
    {
      "epoch": 0.5,
      "learning_rate": 2.9100607788275547e-06,
      "logits/chosen": -2.4995005130767822,
      "logits/rejected": -2.3666462898254395,
      "logps/chosen": -337.9864196777344,
      "logps/rejected": -344.3404541015625,
      "loss": 0.6348,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.33487486839294434,
      "rewards/margins": 0.14860984683036804,
      "rewards/rejected": -0.48348474502563477,
      "step": 80
    },
    {
      "epoch": 0.57,
      "learning_rate": 2.3627616503391813e-06,
      "logits/chosen": -2.476839065551758,
      "logits/rejected": -2.4053282737731934,
      "logps/chosen": -312.2710266113281,
      "logps/rejected": -342.49163818359375,
      "loss": 0.6268,
      "rewards/accuracies": 0.671875,
      "rewards/chosen": -0.27689170837402344,
      "rewards/margins": 0.19764092564582825,
      "rewards/rejected": -0.4745326042175293,
      "step": 90
    },
    {
      "epoch": 0.63,
      "learning_rate": 1.8220596619089576e-06,
      "logits/chosen": -2.4677796363830566,
      "logits/rejected": -2.409015655517578,
      "logps/chosen": -310.72576904296875,
      "logps/rejected": -347.15087890625,
      "loss": 0.6262,
      "rewards/accuracies": 0.653124988079071,
      "rewards/chosen": -0.29292207956314087,
      "rewards/margins": 0.17808273434638977,
      "rewards/rejected": -0.47100481390953064,
      "step": 100
    },
    {
      "epoch": 0.63,
      "eval_logits/chosen": -2.482710123062134,
      "eval_logits/rejected": -2.386967658996582,
      "eval_logps/chosen": -331.1879577636719,
      "eval_logps/rejected": -340.92962646484375,
      "eval_loss": 0.6278669238090515,
      "eval_rewards/accuracies": 0.6740000247955322,
      "eval_rewards/chosen": -0.3175167143344879,
      "eval_rewards/margins": 0.1821039915084839,
      "eval_rewards/rejected": -0.4996207058429718,
      "eval_runtime": 384.3163,
      "eval_samples_per_second": 5.204,
      "eval_steps_per_second": 0.651,
      "step": 100
    },
    {
      "epoch": 0.69,
      "learning_rate": 1.3139467229135999e-06,
      "logits/chosen": -2.4531397819519043,
      "logits/rejected": -2.3680739402770996,
      "logps/chosen": -311.6913757324219,
      "logps/rejected": -324.017333984375,
      "loss": 0.6164,
      "rewards/accuracies": 0.690625011920929,
      "rewards/chosen": -0.3056855797767639,
      "rewards/margins": 0.18076516687870026,
      "rewards/rejected": -0.4864506721496582,
      "step": 110
    },
    {
      "epoch": 0.75,
      "learning_rate": 8.628481651367876e-07,
      "logits/chosen": -2.4865543842315674,
      "logits/rejected": -2.437178611755371,
      "logps/chosen": -330.9514465332031,
      "logps/rejected": -378.96832275390625,
      "loss": 0.623,
      "rewards/accuracies": 0.659375011920929,
      "rewards/chosen": -0.35980406403541565,
      "rewards/margins": 0.21202397346496582,
      "rewards/rejected": -0.5718280076980591,
      "step": 120
    },
    {
      "epoch": 0.82,
      "learning_rate": 4.904486005914027e-07,
      "logits/chosen": -2.5110232830047607,
      "logits/rejected": -2.399928569793701,
      "logps/chosen": -355.139404296875,
      "logps/rejected": -360.427490234375,
      "loss": 0.6234,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.37438246607780457,
      "rewards/margins": 0.19766977429389954,
      "rewards/rejected": -0.5720522999763489,
      "step": 130
    },
    {
      "epoch": 0.88,
      "learning_rate": 2.1464952759020857e-07,
      "logits/chosen": -2.435955286026001,
      "logits/rejected": -2.3834517002105713,
      "logps/chosen": -297.09466552734375,
      "logps/rejected": -351.209716796875,
      "loss": 0.6268,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.33036094903945923,
      "rewards/margins": 0.1953088939189911,
      "rewards/rejected": -0.5256698131561279,
      "step": 140
    },
    {
      "epoch": 0.94,
      "learning_rate": 4.870879364444109e-08,
      "logits/chosen": -2.497584104537964,
      "logits/rejected": -2.3675169944763184,
      "logps/chosen": -344.45770263671875,
      "logps/rejected": -349.06170654296875,
      "loss": 0.6177,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -0.32350295782089233,
      "rewards/margins": 0.21759824454784393,
      "rewards/rejected": -0.5411011576652527,
      "step": 150
    },
    {
      "epoch": 1.0,
      "step": 159,
      "total_flos": 0.0,
      "train_loss": 0.6422481574352432,
      "train_runtime": 7267.2932,
      "train_samples_per_second": 2.804,
      "train_steps_per_second": 0.022
    }
  ],
  "logging_steps": 10,
  "max_steps": 159,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}