{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9879931389365352,
  "eval_steps": 500,
  "global_step": 72,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0137221269296741,
      "grad_norm": 23.312597274780273,
      "learning_rate": 0.0,
      "logits/chosen": -3.0826492309570312,
      "logits/rejected": -3.1289501190185547,
      "logps/chosen": -224.05235290527344,
      "logps/rejected": -239.40707397460938,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.0686106346483705,
      "grad_norm": 27.118257522583008,
      "learning_rate": 2.5e-07,
      "logits/chosen": -2.9749486446380615,
      "logits/rejected": -3.077843427658081,
      "logps/chosen": -187.49241638183594,
      "logps/rejected": -283.5939025878906,
      "loss": 0.6834,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.0007624387508258224,
      "rewards/margins": 0.020107731223106384,
      "rewards/rejected": -0.02087016962468624,
      "step": 5
    },
    {
      "epoch": 0.137221269296741,
      "grad_norm": 21.591798782348633,
      "learning_rate": 4.996988640512931e-07,
      "logits/chosen": -2.9997000694274902,
      "logits/rejected": -3.1022396087646484,
      "logps/chosen": -200.90689086914062,
      "logps/rejected": -292.61444091796875,
      "loss": 0.5854,
      "rewards/accuracies": 0.940625011920929,
      "rewards/chosen": -0.014221695251762867,
      "rewards/margins": 0.24389581382274628,
      "rewards/rejected": -0.2581174969673157,
      "step": 10
    },
    {
      "epoch": 0.2058319039451115,
      "grad_norm": 12.920186996459961,
      "learning_rate": 4.892350839330522e-07,
      "logits/chosen": -2.883420944213867,
      "logits/rejected": -2.9923837184906006,
      "logps/chosen": -175.34622192382812,
      "logps/rejected": -294.7455749511719,
      "loss": 0.4103,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -0.10586054623126984,
      "rewards/margins": 0.8374508619308472,
      "rewards/rejected": -0.943311333656311,
      "step": 15
    },
    {
      "epoch": 0.274442538593482,
      "grad_norm": 14.314050674438477,
      "learning_rate": 4.64432152500068e-07,
      "logits/chosen": -2.740008592605591,
      "logits/rejected": -2.9438371658325195,
      "logps/chosen": -192.83287048339844,
      "logps/rejected": -330.67755126953125,
      "loss": 0.2578,
      "rewards/accuracies": 0.9593750238418579,
      "rewards/chosen": -0.345708966255188,
      "rewards/margins": 1.699308156967163,
      "rewards/rejected": -2.0450172424316406,
      "step": 20
    },
    {
      "epoch": 0.34305317324185247,
      "grad_norm": 10.350852012634277,
      "learning_rate": 4.2677669529663686e-07,
      "logits/chosen": -2.7626330852508545,
      "logits/rejected": -2.9129269123077393,
      "logps/chosen": -187.16873168945312,
      "logps/rejected": -338.9030456542969,
      "loss": 0.2415,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -0.5644805431365967,
      "rewards/margins": 1.97623610496521,
      "rewards/rejected": -2.5407166481018066,
      "step": 25
    },
    {
      "epoch": 0.411663807890223,
      "grad_norm": 12.666106224060059,
      "learning_rate": 3.7852568604830535e-07,
      "logits/chosen": -2.6965413093566895,
      "logits/rejected": -2.8506977558135986,
      "logps/chosen": -190.6026611328125,
      "logps/rejected": -337.8497009277344,
      "loss": 0.2079,
      "rewards/accuracies": 0.953125,
      "rewards/chosen": -0.7271584272384644,
      "rewards/margins": 2.3706743717193604,
      "rewards/rejected": -3.0978329181671143,
      "step": 30
    },
    {
      "epoch": 0.48027444253859347,
      "grad_norm": 7.95162296295166,
      "learning_rate": 3.2257116931361555e-07,
      "logits/chosen": -2.6175200939178467,
      "logits/rejected": -2.8301939964294434,
      "logps/chosen": -177.00990295410156,
      "logps/rejected": -325.2242736816406,
      "loss": 0.1944,
      "rewards/accuracies": 0.956250011920929,
      "rewards/chosen": -0.6376256346702576,
      "rewards/margins": 2.4539847373962402,
      "rewards/rejected": -3.0916104316711426,
      "step": 35
    },
    {
      "epoch": 0.548885077186964,
      "grad_norm": 12.024945259094238,
      "learning_rate": 2.6226691858185454e-07,
      "logits/chosen": -2.6264350414276123,
      "logits/rejected": -2.80271315574646,
      "logps/chosen": -157.8688507080078,
      "logps/rejected": -315.71124267578125,
      "loss": 0.1893,
      "rewards/accuracies": 0.953125,
      "rewards/chosen": -0.7045091986656189,
      "rewards/margins": 2.5911941528320312,
      "rewards/rejected": -3.295703411102295,
      "step": 40
    },
    {
      "epoch": 0.6174957118353345,
      "grad_norm": 12.804183006286621,
      "learning_rate": 2.0122741949596793e-07,
      "logits/chosen": -2.5655877590179443,
      "logits/rejected": -2.7770166397094727,
      "logps/chosen": -186.1898956298828,
      "logps/rejected": -350.7716979980469,
      "loss": 0.1893,
      "rewards/accuracies": 0.953125,
      "rewards/chosen": -0.8316699266433716,
      "rewards/margins": 2.6264994144439697,
      "rewards/rejected": -3.4581692218780518,
      "step": 45
    },
    {
      "epoch": 0.6861063464837049,
      "grad_norm": 8.851630210876465,
      "learning_rate": 1.4311122664242953e-07,
      "logits/chosen": -2.564091205596924,
      "logits/rejected": -2.7729170322418213,
      "logps/chosen": -173.6750030517578,
      "logps/rejected": -336.9339294433594,
      "loss": 0.1599,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -0.8224509358406067,
      "rewards/margins": 2.8290677070617676,
      "rewards/rejected": -3.6515185832977295,
      "step": 50
    },
    {
      "epoch": 0.7547169811320755,
      "grad_norm": 11.309371948242188,
      "learning_rate": 9.140167895908865e-08,
      "logits/chosen": -2.5938456058502197,
      "logits/rejected": -2.785677194595337,
      "logps/chosen": -195.90493774414062,
      "logps/rejected": -358.4869384765625,
      "loss": 0.176,
      "rewards/accuracies": 0.956250011920929,
      "rewards/chosen": -0.9772733449935913,
      "rewards/margins": 2.8666346073150635,
      "rewards/rejected": -3.8439078330993652,
      "step": 55
    },
    {
      "epoch": 0.823327615780446,
      "grad_norm": 11.698320388793945,
      "learning_rate": 4.919811712983879e-08,
      "logits/chosen": -2.6009421348571777,
      "logits/rejected": -2.8167078495025635,
      "logps/chosen": -196.6665802001953,
      "logps/rejected": -381.7016296386719,
      "loss": 0.163,
      "rewards/accuracies": 0.956250011920929,
      "rewards/chosen": -0.9723421931266785,
      "rewards/margins": 3.029435873031616,
      "rewards/rejected": -4.001777648925781,
      "step": 60
    },
    {
      "epoch": 0.8919382504288165,
      "grad_norm": 10.755302429199219,
      "learning_rate": 1.9030116872178314e-08,
      "logits/chosen": -2.5659337043762207,
      "logits/rejected": -2.7652087211608887,
      "logps/chosen": -163.1660614013672,
      "logps/rejected": -347.26629638671875,
      "loss": 0.1707,
      "rewards/accuracies": 0.971875011920929,
      "rewards/chosen": -0.8446305990219116,
      "rewards/margins": 2.903198003768921,
      "rewards/rejected": -3.747828722000122,
      "step": 65
    },
    {
      "epoch": 0.9605488850771869,
      "grad_norm": 11.380764961242676,
      "learning_rate": 2.7058725088047464e-09,
      "logits/chosen": -2.54191255569458,
      "logits/rejected": -2.7878177165985107,
      "logps/chosen": -196.59622192382812,
      "logps/rejected": -356.73626708984375,
      "loss": 0.1788,
      "rewards/accuracies": 0.956250011920929,
      "rewards/chosen": -0.9953180551528931,
      "rewards/margins": 2.8245952129364014,
      "rewards/rejected": -3.819913148880005,
      "step": 70
    },
    {
      "epoch": 0.9879931389365352,
      "step": 72,
      "total_flos": 0.0,
      "train_loss": 0.26905302993125385,
      "train_runtime": 954.9092,
      "train_samples_per_second": 4.882,
      "train_steps_per_second": 0.075
    }
  ],
  "logging_steps": 5,
  "max_steps": 72,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}