{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 58,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03508771929824561,
      "grad_norm": 2.067296788002129,
      "learning_rate": 0.0,
      "loss": 0.866,
      "step": 1
    },
    {
      "epoch": 0.07017543859649122,
      "grad_norm": 2.290485750060095,
      "learning_rate": 7.142857142857143e-06,
      "loss": 1.043,
      "step": 2
    },
    {
      "epoch": 0.10526315789473684,
      "grad_norm": 1.8761831403153433,
      "learning_rate": 1.4285714285714285e-05,
      "loss": 0.9406,
      "step": 3
    },
    {
      "epoch": 0.14035087719298245,
      "grad_norm": 1.5348726385452636,
      "learning_rate": 2.1428571428571428e-05,
      "loss": 0.9753,
      "step": 4
    },
    {
      "epoch": 0.17543859649122806,
      "grad_norm": 2.451336617862282,
      "learning_rate": 2.857142857142857e-05,
      "loss": 0.9902,
      "step": 5
    },
    {
      "epoch": 0.21052631578947367,
      "grad_norm": 3.321340013917274,
      "learning_rate": 3.571428571428572e-05,
      "loss": 0.9761,
      "step": 6
    },
    {
      "epoch": 0.24561403508771928,
      "grad_norm": 2.813148280382836,
      "learning_rate": 4.2857142857142856e-05,
      "loss": 0.9736,
      "step": 7
    },
    {
      "epoch": 0.2807017543859649,
      "grad_norm": 1.5924965375006837,
      "learning_rate": 5e-05,
      "loss": 0.9838,
      "step": 8
    },
    {
      "epoch": 0.3157894736842105,
      "grad_norm": 1.5690611350244767,
      "learning_rate": 4.9993025930300686e-05,
      "loss": 0.9958,
      "step": 9
    },
    {
      "epoch": 0.3508771929824561,
      "grad_norm": 1.3166502579359358,
      "learning_rate": 4.99721076122146e-05,
      "loss": 0.9588,
      "step": 10
    },
    {
      "epoch": 0.38596491228070173,
      "grad_norm": 1.136691779583638,
      "learning_rate": 4.9937256716606394e-05,
      "loss": 0.8503,
      "step": 11
    },
    {
      "epoch": 0.42105263157894735,
      "grad_norm": 0.8350469358137226,
      "learning_rate": 4.9888492687682096e-05,
      "loss": 0.8724,
      "step": 12
    },
    {
      "epoch": 0.45614035087719296,
      "grad_norm": 0.7872759895415901,
      "learning_rate": 4.982584273214061e-05,
      "loss": 0.9391,
      "step": 13
    },
    {
      "epoch": 0.49122807017543857,
      "grad_norm": 0.7631657638189762,
      "learning_rate": 4.9749341803994465e-05,
      "loss": 0.8446,
      "step": 14
    },
    {
      "epoch": 0.5263157894736842,
      "grad_norm": 0.6807894928709346,
      "learning_rate": 4.965903258506806e-05,
      "loss": 0.8727,
      "step": 15
    },
    {
      "epoch": 0.5614035087719298,
      "grad_norm": 0.5683155258085846,
      "learning_rate": 4.955496546118439e-05,
      "loss": 0.8189,
      "step": 16
    },
    {
      "epoch": 0.5964912280701754,
      "grad_norm": 0.7801939464768173,
      "learning_rate": 4.9437198494053464e-05,
      "loss": 0.9303,
      "step": 17
    },
    {
      "epoch": 0.631578947368421,
      "grad_norm": 0.5867664533954665,
      "learning_rate": 4.9305797388878264e-05,
      "loss": 0.83,
      "step": 18
    },
    {
      "epoch": 0.6666666666666666,
      "grad_norm": 0.5567512617299766,
      "learning_rate": 4.916083545769607e-05,
      "loss": 0.8938,
      "step": 19
    },
    {
      "epoch": 0.7017543859649122,
      "grad_norm": 0.5135799351938194,
      "learning_rate": 4.9002393578475816e-05,
      "loss": 0.8907,
      "step": 20
    },
    {
      "epoch": 0.7368421052631579,
      "grad_norm": 0.5604879970565443,
      "learning_rate": 4.883056014999423e-05,
      "loss": 0.8582,
      "step": 21
    },
    {
      "epoch": 0.7719298245614035,
      "grad_norm": 0.5008974828940795,
      "learning_rate": 4.864543104251587e-05,
      "loss": 0.8132,
      "step": 22
    },
    {
      "epoch": 0.8070175438596491,
      "grad_norm": 0.5351207184045396,
      "learning_rate": 4.8447109544304636e-05,
      "loss": 0.8775,
      "step": 23
    },
    {
      "epoch": 0.8421052631578947,
      "grad_norm": 0.48534815393327974,
      "learning_rate": 4.823570630399665e-05,
      "loss": 0.8478,
      "step": 24
    },
    {
      "epoch": 0.8771929824561403,
      "grad_norm": 0.4766396695563186,
      "learning_rate": 4.8011339268866505e-05,
      "loss": 0.8363,
      "step": 25
    },
    {
      "epoch": 0.9122807017543859,
      "grad_norm": 0.4856803005330466,
      "learning_rate": 4.7774133619021514e-05,
      "loss": 0.9035,
      "step": 26
    },
    {
      "epoch": 0.9473684210526315,
      "grad_norm": 0.3964013618666619,
      "learning_rate": 4.752422169756048e-05,
      "loss": 0.8033,
      "step": 27
    },
    {
      "epoch": 0.9824561403508771,
      "grad_norm": 0.48912255110703906,
      "learning_rate": 4.726174293673612e-05,
      "loss": 0.8316,
      "step": 28
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.48912255110703906,
      "learning_rate": 4.698684378016222e-05,
      "loss": 0.6913,
      "step": 29
    },
    {
      "epoch": 1.0350877192982457,
      "grad_norm": 0.9512574190630522,
      "learning_rate": 4.669967760110908e-05,
      "loss": 0.7426,
      "step": 30
    },
    {
      "epoch": 1.0701754385964912,
      "grad_norm": 0.5119153708536178,
      "learning_rate": 4.6400404616932505e-05,
      "loss": 0.705,
      "step": 31
    },
    {
      "epoch": 1.1052631578947367,
      "grad_norm": 0.5326897543607073,
      "learning_rate": 4.608919179968457e-05,
      "loss": 0.7502,
      "step": 32
    },
    {
      "epoch": 1.1403508771929824,
      "grad_norm": 0.7107030254650039,
      "learning_rate": 4.576621278295558e-05,
      "loss": 0.7215,
      "step": 33
    },
    {
      "epoch": 1.1754385964912282,
      "grad_norm": 0.6935703731339729,
      "learning_rate": 4.5431647764999455e-05,
      "loss": 0.6888,
      "step": 34
    },
    {
      "epoch": 1.2105263157894737,
      "grad_norm": 0.5468190267311956,
      "learning_rate": 4.5085683408196535e-05,
      "loss": 0.7342,
      "step": 35
    },
    {
      "epoch": 1.2456140350877192,
      "grad_norm": 0.6166217447509906,
      "learning_rate": 4.4728512734909844e-05,
      "loss": 0.7456,
      "step": 36
    },
    {
      "epoch": 1.280701754385965,
      "grad_norm": 0.9130217986036923,
      "learning_rate": 4.436033501979299e-05,
      "loss": 0.7106,
      "step": 37
    },
    {
      "epoch": 1.3157894736842106,
      "grad_norm": 0.5281267490436047,
      "learning_rate": 4.398135567860972e-05,
      "loss": 0.693,
      "step": 38
    },
    {
      "epoch": 1.3508771929824561,
      "grad_norm": 0.5213754312271124,
      "learning_rate": 4.3591786153627247e-05,
      "loss": 0.6418,
      "step": 39
    },
    {
      "epoch": 1.3859649122807016,
      "grad_norm": 0.5481623741259232,
      "learning_rate": 4.319184379564716e-05,
      "loss": 0.7452,
      "step": 40
    },
    {
      "epoch": 1.4210526315789473,
      "grad_norm": 0.6014133516410636,
      "learning_rate": 4.2781751742739885e-05,
      "loss": 0.686,
      "step": 41
    },
    {
      "epoch": 1.456140350877193,
      "grad_norm": 0.44416005870003245,
      "learning_rate": 4.2361738795750214e-05,
      "loss": 0.6433,
      "step": 42
    },
    {
      "epoch": 1.4912280701754386,
      "grad_norm": 0.46835862575073184,
      "learning_rate": 4.193203929064353e-05,
      "loss": 0.6381,
      "step": 43
    },
    {
      "epoch": 1.526315789473684,
      "grad_norm": 0.5015932878379395,
      "learning_rate": 4.1492892967763686e-05,
      "loss": 0.7615,
      "step": 44
    },
    {
      "epoch": 1.5614035087719298,
      "grad_norm": 0.49117089644567374,
      "learning_rate": 4.1044544838075794e-05,
      "loss": 0.7258,
      "step": 45
    },
    {
      "epoch": 1.5964912280701755,
      "grad_norm": 0.4310052429661563,
      "learning_rate": 4.058724504646834e-05,
      "loss": 0.6558,
      "step": 46
    },
    {
      "epoch": 1.631578947368421,
      "grad_norm": 0.46975098597348564,
      "learning_rate": 4.012124873219094e-05,
      "loss": 0.6587,
      "step": 47
    },
    {
      "epoch": 1.6666666666666665,
      "grad_norm": 0.46564012035747604,
      "learning_rate": 3.964681588650562e-05,
      "loss": 0.7071,
      "step": 48
    },
    {
      "epoch": 1.7017543859649122,
      "grad_norm": 0.4292070243922818,
      "learning_rate": 3.916421120763106e-05,
      "loss": 0.7292,
      "step": 49
    },
    {
      "epoch": 1.736842105263158,
      "grad_norm": 0.4700066192248402,
      "learning_rate": 3.867370395306068e-05,
      "loss": 0.7118,
      "step": 50
    },
    {
      "epoch": 1.7719298245614035,
      "grad_norm": 0.41414590310746596,
      "learning_rate": 3.817556778933698e-05,
      "loss": 0.7187,
      "step": 51
    },
    {
      "epoch": 1.807017543859649,
      "grad_norm": 0.4934164034770572,
      "learning_rate": 3.7670080639366004e-05,
      "loss": 0.7425,
      "step": 52
    },
    {
      "epoch": 1.8421052631578947,
      "grad_norm": 0.4258237994121863,
      "learning_rate": 3.715752452735704e-05,
      "loss": 0.5943,
      "step": 53
    },
    {
      "epoch": 1.8771929824561404,
      "grad_norm": 0.5078698766882597,
      "learning_rate": 3.6638185421474084e-05,
      "loss": 0.7346,
      "step": 54
    },
    {
      "epoch": 1.912280701754386,
      "grad_norm": 0.44841215920564664,
      "learning_rate": 3.61123530742869e-05,
      "loss": 0.6742,
      "step": 55
    },
    {
      "epoch": 1.9473684210526314,
      "grad_norm": 0.39580481306947396,
      "learning_rate": 3.5580320861110625e-05,
      "loss": 0.7273,
      "step": 56
    },
    {
      "epoch": 1.9824561403508771,
      "grad_norm": 0.48313542261877046,
      "learning_rate": 3.504238561632424e-05,
      "loss": 0.732,
      "step": 57
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.8524767663864421,
      "learning_rate": 3.4498847467759e-05,
      "loss": 0.6104,
      "step": 58
    }
  ],
  "logging_steps": 1,
  "max_steps": 140,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 25934832402432.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}