{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 57,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.017543859649122806,
      "grad_norm": 2.410881066137832,
      "learning_rate": 0.0,
      "loss": 0.8483,
      "step": 1
    },
    {
      "epoch": 0.03508771929824561,
      "grad_norm": 2.665228254124179,
      "learning_rate": 3.3333333333333333e-06,
      "loss": 0.8763,
      "step": 2
    },
    {
      "epoch": 0.05263157894736842,
      "grad_norm": 2.3728587057049992,
      "learning_rate": 6.666666666666667e-06,
      "loss": 1.019,
      "step": 3
    },
    {
      "epoch": 0.07017543859649122,
      "grad_norm": 1.9987528382054482,
      "learning_rate": 1e-05,
      "loss": 1.0451,
      "step": 4
    },
    {
      "epoch": 0.08771929824561403,
      "grad_norm": 1.2387448314677751,
      "learning_rate": 1.3333333333333333e-05,
      "loss": 0.8298,
      "step": 5
    },
    {
      "epoch": 0.10526315789473684,
      "grad_norm": 1.4006709843865188,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 0.9668,
      "step": 6
    },
    {
      "epoch": 0.12280701754385964,
      "grad_norm": 2.1941304727738995,
      "learning_rate": 2e-05,
      "loss": 0.9639,
      "step": 7
    },
    {
      "epoch": 0.14035087719298245,
      "grad_norm": 2.0965952504080145,
      "learning_rate": 2.3333333333333336e-05,
      "loss": 0.906,
      "step": 8
    },
    {
      "epoch": 0.15789473684210525,
      "grad_norm": 2.0779003780741325,
      "learning_rate": 2.6666666666666667e-05,
      "loss": 0.9331,
      "step": 9
    },
    {
      "epoch": 0.17543859649122806,
      "grad_norm": 1.914852860029578,
      "learning_rate": 3e-05,
      "loss": 0.9615,
      "step": 10
    },
    {
      "epoch": 0.19298245614035087,
      "grad_norm": 1.5453059456697873,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.9976,
      "step": 11
    },
    {
      "epoch": 0.21052631578947367,
      "grad_norm": 1.2149278389301041,
      "learning_rate": 3.6666666666666666e-05,
      "loss": 0.8731,
      "step": 12
    },
    {
      "epoch": 0.22807017543859648,
      "grad_norm": 9.574453131193065,
      "learning_rate": 4e-05,
      "loss": 0.9048,
      "step": 13
    },
    {
      "epoch": 0.24561403508771928,
      "grad_norm": 1.7542365583306754,
      "learning_rate": 4.3333333333333334e-05,
      "loss": 0.9045,
      "step": 14
    },
    {
      "epoch": 0.2631578947368421,
      "grad_norm": 1.5687648722942888,
      "learning_rate": 4.666666666666667e-05,
      "loss": 0.9642,
      "step": 15
    },
    {
      "epoch": 0.2807017543859649,
      "grad_norm": 1.1629052446813282,
      "learning_rate": 5e-05,
      "loss": 0.9157,
      "step": 16
    },
    {
      "epoch": 0.2982456140350877,
      "grad_norm": 1.0662859779498828,
      "learning_rate": 4.999830770009406e-05,
      "loss": 1.0076,
      "step": 17
    },
    {
      "epoch": 0.3157894736842105,
      "grad_norm": 1.1540729762939772,
      "learning_rate": 4.9993231029486544e-05,
      "loss": 0.8898,
      "step": 18
    },
    {
      "epoch": 0.3333333333333333,
      "grad_norm": 2.284686909029557,
      "learning_rate": 4.99847706754774e-05,
      "loss": 0.9111,
      "step": 19
    },
    {
      "epoch": 0.3508771929824561,
      "grad_norm": 0.9299877447474001,
      "learning_rate": 4.997292778346312e-05,
      "loss": 0.9567,
      "step": 20
    },
    {
      "epoch": 0.3684210526315789,
      "grad_norm": 0.7891834706045853,
      "learning_rate": 4.995770395678171e-05,
      "loss": 0.8907,
      "step": 21
    },
    {
      "epoch": 0.38596491228070173,
      "grad_norm": 0.7738739475483105,
      "learning_rate": 4.993910125649561e-05,
      "loss": 0.7582,
      "step": 22
    },
    {
      "epoch": 0.40350877192982454,
      "grad_norm": 0.7564543923604894,
      "learning_rate": 4.9917122201112656e-05,
      "loss": 0.893,
      "step": 23
    },
    {
      "epoch": 0.42105263157894735,
      "grad_norm": 0.7725302637179225,
      "learning_rate": 4.989176976624511e-05,
      "loss": 0.794,
      "step": 24
    },
    {
      "epoch": 0.43859649122807015,
      "grad_norm": 0.7154589746662706,
      "learning_rate": 4.9863047384206835e-05,
      "loss": 0.927,
      "step": 25
    },
    {
      "epoch": 0.45614035087719296,
      "grad_norm": 0.748200539385192,
      "learning_rate": 4.983095894354858e-05,
      "loss": 0.8946,
      "step": 26
    },
    {
      "epoch": 0.47368421052631576,
      "grad_norm": 0.6877177188589038,
      "learning_rate": 4.979550878853154e-05,
      "loss": 0.8108,
      "step": 27
    },
    {
      "epoch": 0.49122807017543857,
      "grad_norm": 0.6905557124467382,
      "learning_rate": 4.975670171853926e-05,
      "loss": 0.8808,
      "step": 28
    },
    {
      "epoch": 0.5087719298245614,
      "grad_norm": 0.6261501879052016,
      "learning_rate": 4.971454298742779e-05,
      "loss": 0.902,
      "step": 29
    },
    {
      "epoch": 0.5263157894736842,
      "grad_norm": 0.6935833833701227,
      "learning_rate": 4.966903830281449e-05,
      "loss": 0.7902,
      "step": 30
    },
    {
      "epoch": 0.543859649122807,
      "grad_norm": 0.7380295796572786,
      "learning_rate": 4.962019382530521e-05,
      "loss": 0.8427,
      "step": 31
    },
    {
      "epoch": 0.5614035087719298,
      "grad_norm": 0.6686373976763716,
      "learning_rate": 4.9568016167660334e-05,
      "loss": 0.7824,
      "step": 32
    },
    {
      "epoch": 0.5789473684210527,
      "grad_norm": 0.7284830674570405,
      "learning_rate": 4.951251239389948e-05,
      "loss": 0.8758,
      "step": 33
    },
    {
      "epoch": 0.5964912280701754,
      "grad_norm": 0.7667345279664498,
      "learning_rate": 4.9453690018345144e-05,
      "loss": 0.9769,
      "step": 34
    },
    {
      "epoch": 0.6140350877192983,
      "grad_norm": 0.7186885267427574,
      "learning_rate": 4.939155700460536e-05,
      "loss": 0.8222,
      "step": 35
    },
    {
      "epoch": 0.631578947368421,
      "grad_norm": 0.7129526253734785,
      "learning_rate": 4.9326121764495596e-05,
      "loss": 0.8398,
      "step": 36
    },
    {
      "epoch": 0.6491228070175439,
      "grad_norm": 0.6763307332151739,
      "learning_rate": 4.925739315689991e-05,
      "loss": 0.881,
      "step": 37
    },
    {
      "epoch": 0.6666666666666666,
      "grad_norm": 0.7596612492978558,
      "learning_rate": 4.9185380486571595e-05,
      "loss": 0.8883,
      "step": 38
    },
    {
      "epoch": 0.6842105263157895,
      "grad_norm": 0.574360173449421,
      "learning_rate": 4.9110093502873476e-05,
      "loss": 0.9078,
      "step": 39
    },
    {
      "epoch": 0.7017543859649122,
      "grad_norm": 0.5045882171460956,
      "learning_rate": 4.9031542398457974e-05,
      "loss": 0.8722,
      "step": 40
    },
    {
      "epoch": 0.7192982456140351,
      "grad_norm": 0.6267476132219085,
      "learning_rate": 4.894973780788722e-05,
      "loss": 0.883,
      "step": 41
    },
    {
      "epoch": 0.7368421052631579,
      "grad_norm": 0.6021189923969766,
      "learning_rate": 4.88646908061933e-05,
      "loss": 0.8144,
      "step": 42
    },
    {
      "epoch": 0.7543859649122807,
      "grad_norm": 0.6014066609170303,
      "learning_rate": 4.877641290737884e-05,
      "loss": 0.8496,
      "step": 43
    },
    {
      "epoch": 0.7719298245614035,
      "grad_norm": 0.6014121657776419,
      "learning_rate": 4.868491606285823e-05,
      "loss": 0.768,
      "step": 44
    },
    {
      "epoch": 0.7894736842105263,
      "grad_norm": 0.5324268865416073,
      "learning_rate": 4.859021265983959e-05,
      "loss": 0.8825,
      "step": 45
    },
    {
      "epoch": 0.8070175438596491,
      "grad_norm": 0.6348338184686191,
      "learning_rate": 4.849231551964771e-05,
      "loss": 0.8587,
      "step": 46
    },
    {
      "epoch": 0.8245614035087719,
      "grad_norm": 0.6361465840640576,
      "learning_rate": 4.839123789598829e-05,
      "loss": 0.868,
      "step": 47
    },
    {
      "epoch": 0.8421052631578947,
      "grad_norm": 0.5806423652725062,
      "learning_rate": 4.828699347315356e-05,
      "loss": 0.8445,
      "step": 48
    },
    {
      "epoch": 0.8596491228070176,
      "grad_norm": 0.6650303211642944,
      "learning_rate": 4.817959636416969e-05,
      "loss": 0.9439,
      "step": 49
    },
    {
      "epoch": 0.8771929824561403,
      "grad_norm": 0.49618932027271617,
      "learning_rate": 4.806906110888606e-05,
      "loss": 0.676,
      "step": 50
    },
    {
      "epoch": 0.8947368421052632,
      "grad_norm": 0.7201128419875511,
      "learning_rate": 4.7955402672006854e-05,
      "loss": 0.9129,
      "step": 51
    },
    {
      "epoch": 0.9122807017543859,
      "grad_norm": 0.7979009332190922,
      "learning_rate": 4.783863644106502e-05,
      "loss": 0.8844,
      "step": 52
    },
    {
      "epoch": 0.9298245614035088,
      "grad_norm": 0.5780879587849941,
      "learning_rate": 4.771877822433911e-05,
      "loss": 0.835,
      "step": 53
    },
    {
      "epoch": 0.9473684210526315,
      "grad_norm": 0.5125413888902927,
      "learning_rate": 4.759584424871302e-05,
      "loss": 0.7854,
      "step": 54
    },
    {
      "epoch": 0.9649122807017544,
      "grad_norm": 0.6493310136858548,
      "learning_rate": 4.7469851157479177e-05,
      "loss": 0.8246,
      "step": 55
    },
    {
      "epoch": 0.9824561403508771,
      "grad_norm": 0.5420670379906682,
      "learning_rate": 4.734081600808531e-05,
      "loss": 0.8357,
      "step": 56
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.8014271486806656,
      "learning_rate": 4.7208756269825104e-05,
      "loss": 0.7012,
      "step": 57
    }
  ],
  "logging_steps": 1,
  "max_steps": 285,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 13124895178752.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}