{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 58,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03508771929824561,
      "grad_norm": 2.067214871055153,
      "learning_rate": 0.0,
      "loss": 0.866,
      "step": 1
    },
    {
      "epoch": 0.07017543859649122,
      "grad_norm": 2.2902974488934356,
      "learning_rate": 4.2857142857142855e-06,
      "loss": 1.043,
      "step": 2
    },
    {
      "epoch": 0.10526315789473684,
      "grad_norm": 1.8959065809264741,
      "learning_rate": 8.571428571428571e-06,
      "loss": 0.9429,
      "step": 3
    },
    {
      "epoch": 0.14035087719298245,
      "grad_norm": 1.7531397239789308,
      "learning_rate": 1.2857142857142857e-05,
      "loss": 0.9852,
      "step": 4
    },
    {
      "epoch": 0.17543859649122806,
      "grad_norm": 1.9134009040538502,
      "learning_rate": 1.7142857142857142e-05,
      "loss": 0.9898,
      "step": 5
    },
    {
      "epoch": 0.21052631578947367,
      "grad_norm": 2.4981051959996017,
      "learning_rate": 2.1428571428571428e-05,
      "loss": 0.9729,
      "step": 6
    },
    {
      "epoch": 0.24561403508771928,
      "grad_norm": 2.1259191076410326,
      "learning_rate": 2.5714285714285714e-05,
      "loss": 0.9501,
      "step": 7
    },
    {
      "epoch": 0.2807017543859649,
      "grad_norm": 2.6083028457024127,
      "learning_rate": 3e-05,
      "loss": 0.9957,
      "step": 8
    },
    {
      "epoch": 0.3157894736842105,
      "grad_norm": 2.2174618708678193,
      "learning_rate": 2.999581555818041e-05,
      "loss": 1.003,
      "step": 9
    },
    {
      "epoch": 0.3508771929824561,
      "grad_norm": 1.310512297596868,
      "learning_rate": 2.9983264567328756e-05,
      "loss": 0.9666,
      "step": 10
    },
    {
      "epoch": 0.38596491228070173,
      "grad_norm": 1.0417774774374975,
      "learning_rate": 2.9962354029963835e-05,
      "loss": 0.8532,
      "step": 11
    },
    {
      "epoch": 0.42105263157894735,
      "grad_norm": 1.096319575622801,
      "learning_rate": 2.9933095612609253e-05,
      "loss": 0.8722,
      "step": 12
    },
    {
      "epoch": 0.45614035087719296,
      "grad_norm": 1.051828485887936,
      "learning_rate": 2.989550563928436e-05,
      "loss": 0.94,
      "step": 13
    },
    {
      "epoch": 0.49122807017543857,
      "grad_norm": 0.7880955122292899,
      "learning_rate": 2.9849605082396678e-05,
      "loss": 0.8454,
      "step": 14
    },
    {
      "epoch": 0.5263157894736842,
      "grad_norm": 0.6655179120990646,
      "learning_rate": 2.9795419551040836e-05,
      "loss": 0.8716,
      "step": 15
    },
    {
      "epoch": 0.5614035087719298,
      "grad_norm": 0.6709188694271632,
      "learning_rate": 2.973297927671063e-05,
      "loss": 0.8177,
      "step": 16
    },
    {
      "epoch": 0.5964912280701754,
      "grad_norm": 0.7681555199843141,
      "learning_rate": 2.966231909643208e-05,
      "loss": 0.9286,
      "step": 17
    },
    {
      "epoch": 0.631578947368421,
      "grad_norm": 0.6474330407996667,
      "learning_rate": 2.958347843332696e-05,
      "loss": 0.8274,
      "step": 18
    },
    {
      "epoch": 0.6666666666666666,
      "grad_norm": 0.5325702575478014,
      "learning_rate": 2.949650127461764e-05,
      "loss": 0.8929,
      "step": 19
    },
    {
      "epoch": 0.7017543859649122,
      "grad_norm": 0.5849013227799386,
      "learning_rate": 2.940143614708549e-05,
      "loss": 0.891,
      "step": 20
    },
    {
      "epoch": 0.7368421052631579,
      "grad_norm": 0.6549654089627338,
      "learning_rate": 2.9298336089996538e-05,
      "loss": 0.8568,
      "step": 21
    },
    {
      "epoch": 0.7719298245614035,
      "grad_norm": 0.5258630198755742,
      "learning_rate": 2.9187258625509518e-05,
      "loss": 0.8109,
      "step": 22
    },
    {
      "epoch": 0.8070175438596491,
      "grad_norm": 0.4998629934118094,
      "learning_rate": 2.906826572658278e-05,
      "loss": 0.875,
      "step": 23
    },
    {
      "epoch": 0.8421052631578947,
      "grad_norm": 0.42855717754389155,
      "learning_rate": 2.8941423782397987e-05,
      "loss": 0.8454,
      "step": 24
    },
    {
      "epoch": 0.8771929824561403,
      "grad_norm": 0.50628059004859,
      "learning_rate": 2.8806803561319903e-05,
      "loss": 0.8334,
      "step": 25
    },
    {
      "epoch": 0.9122807017543859,
      "grad_norm": 0.5060863985075719,
      "learning_rate": 2.866448017141291e-05,
      "loss": 0.8989,
      "step": 26
    },
    {
      "epoch": 0.9473684210526315,
      "grad_norm": 0.8609761967398525,
      "learning_rate": 2.8514533018536286e-05,
      "loss": 0.7991,
      "step": 27
    },
    {
      "epoch": 0.9824561403508771,
      "grad_norm": 0.48863408976007106,
      "learning_rate": 2.835704576204167e-05,
      "loss": 0.8282,
      "step": 28
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.48863408976007106,
      "learning_rate": 2.8192106268097336e-05,
      "loss": 0.7073,
      "step": 29
    },
    {
      "epoch": 1.0350877192982457,
      "grad_norm": 0.9713567127839504,
      "learning_rate": 2.801980656066545e-05,
      "loss": 0.7714,
      "step": 30
    },
    {
      "epoch": 1.0701754385964912,
      "grad_norm": 0.4951617245737733,
      "learning_rate": 2.78402427701595e-05,
      "loss": 0.7343,
      "step": 31
    },
    {
      "epoch": 1.1052631578947367,
      "grad_norm": 0.561308739752077,
      "learning_rate": 2.7653515079810744e-05,
      "loss": 0.7805,
      "step": 32
    },
    {
      "epoch": 1.1403508771929824,
      "grad_norm": 0.6696065745196884,
      "learning_rate": 2.7459727669773344e-05,
      "loss": 0.7562,
      "step": 33
    },
    {
      "epoch": 1.1754385964912282,
      "grad_norm": 0.574338497323505,
      "learning_rate": 2.725898865899967e-05,
      "loss": 0.7163,
      "step": 34
    },
    {
      "epoch": 1.2105263157894737,
      "grad_norm": 0.5954448022407949,
      "learning_rate": 2.705141004491792e-05,
      "loss": 0.7716,
      "step": 35
    },
    {
      "epoch": 1.2456140350877192,
      "grad_norm": 0.4473898865990269,
      "learning_rate": 2.6837107640945904e-05,
      "loss": 0.7742,
      "step": 36
    },
    {
      "epoch": 1.280701754385965,
      "grad_norm": 0.7727798282627385,
      "learning_rate": 2.6616201011875792e-05,
      "loss": 0.744,
      "step": 37
    },
    {
      "epoch": 1.3157894736842106,
      "grad_norm": 0.5121520923399718,
      "learning_rate": 2.638881340716583e-05,
      "loss": 0.7212,
      "step": 38
    },
    {
      "epoch": 1.3508771929824561,
      "grad_norm": 0.4705537944247171,
      "learning_rate": 2.6155071692176348e-05,
      "loss": 0.674,
      "step": 39
    },
    {
      "epoch": 1.3859649122807016,
      "grad_norm": 0.571084271601031,
      "learning_rate": 2.5915106277388293e-05,
      "loss": 0.7776,
      "step": 40
    },
    {
      "epoch": 1.4210526315789473,
      "grad_norm": 0.5850344726559537,
      "learning_rate": 2.566905104564393e-05,
      "loss": 0.7177,
      "step": 41
    },
    {
      "epoch": 1.456140350877193,
      "grad_norm": 0.4332588522858084,
      "learning_rate": 2.541704327745013e-05,
      "loss": 0.6776,
      "step": 42
    },
    {
      "epoch": 1.4912280701754386,
      "grad_norm": 0.4393188232149638,
      "learning_rate": 2.5159223574386117e-05,
      "loss": 0.6675,
      "step": 43
    },
    {
      "epoch": 1.526315789473684,
      "grad_norm": 0.4979789836021164,
      "learning_rate": 2.489573578065821e-05,
      "loss": 0.8005,
      "step": 44
    },
    {
      "epoch": 1.5614035087719298,
      "grad_norm": 0.45732271681245434,
      "learning_rate": 2.4626726902845477e-05,
      "loss": 0.7579,
      "step": 45
    },
    {
      "epoch": 1.5964912280701755,
      "grad_norm": 0.45795659974157954,
      "learning_rate": 2.4352347027881003e-05,
      "loss": 0.6847,
      "step": 46
    },
    {
      "epoch": 1.631578947368421,
      "grad_norm": 0.3706866470068484,
      "learning_rate": 2.4072749239314565e-05,
      "loss": 0.6851,
      "step": 47
    },
    {
      "epoch": 1.6666666666666665,
      "grad_norm": 0.3822700465065106,
      "learning_rate": 2.3788089531903372e-05,
      "loss": 0.7357,
      "step": 48
    },
    {
      "epoch": 1.7017543859649122,
      "grad_norm": 0.4388840978268458,
      "learning_rate": 2.3498526724578637e-05,
      "loss": 0.7545,
      "step": 49
    },
    {
      "epoch": 1.736842105263158,
      "grad_norm": 0.5096281415627839,
      "learning_rate": 2.320422237183641e-05,
      "loss": 0.7449,
      "step": 50
    },
    {
      "epoch": 1.7719298245614035,
      "grad_norm": 0.4149064055586781,
      "learning_rate": 2.2905340673602184e-05,
      "loss": 0.7481,
      "step": 51
    },
    {
      "epoch": 1.807017543859649,
      "grad_norm": 0.45646999516691433,
      "learning_rate": 2.26020483836196e-05,
      "loss": 0.7732,
      "step": 52
    },
    {
      "epoch": 1.8421052631578947,
      "grad_norm": 0.45834783579018146,
      "learning_rate": 2.229451471641422e-05,
      "loss": 0.6229,
      "step": 53
    },
    {
      "epoch": 1.8771929824561404,
      "grad_norm": 0.47503100724025626,
      "learning_rate": 2.198291125288445e-05,
      "loss": 0.7608,
      "step": 54
    },
    {
      "epoch": 1.912280701754386,
      "grad_norm": 0.4978417847863162,
      "learning_rate": 2.166741184457214e-05,
      "loss": 0.7096,
      "step": 55
    },
    {
      "epoch": 1.9473684210526314,
      "grad_norm": 0.3815502550665043,
      "learning_rate": 2.1348192516666376e-05,
      "loss": 0.7613,
      "step": 56
    },
    {
      "epoch": 1.9824561403508771,
      "grad_norm": 0.5283326744530167,
      "learning_rate": 2.1025431369794546e-05,
      "loss": 0.7659,
      "step": 57
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.7991429287166618,
      "learning_rate": 2.0699308480655397e-05,
      "loss": 0.6749,
      "step": 58
    }
  ],
  "logging_steps": 1,
  "max_steps": 140,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 25934832402432.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}