{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 35,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.028985507246376812,
      "grad_norm": 11.274454167048727,
      "learning_rate": 0.0,
      "loss": 0.6357,
      "step": 1
    },
    {
      "epoch": 0.057971014492753624,
      "grad_norm": 10.143348640958312,
      "learning_rate": 9.090909090909091e-07,
      "loss": 0.6666,
      "step": 2
    },
    {
      "epoch": 0.08695652173913043,
      "grad_norm": 9.086409087474683,
      "learning_rate": 1.8181818181818183e-06,
      "loss": 0.6451,
      "step": 3
    },
    {
      "epoch": 0.11594202898550725,
      "grad_norm": 8.70666602748876,
      "learning_rate": 2.7272727272727272e-06,
      "loss": 0.6048,
      "step": 4
    },
    {
      "epoch": 0.14492753623188406,
      "grad_norm": 8.976482480167713,
      "learning_rate": 3.6363636363636366e-06,
      "loss": 0.5937,
      "step": 5
    },
    {
      "epoch": 0.17391304347826086,
      "grad_norm": 6.543196052759934,
      "learning_rate": 4.5454545454545455e-06,
      "loss": 0.4943,
      "step": 6
    },
    {
      "epoch": 0.2028985507246377,
      "grad_norm": 7.511256155710179,
      "learning_rate": 5.4545454545454545e-06,
      "loss": 0.4247,
      "step": 7
    },
    {
      "epoch": 0.2318840579710145,
      "grad_norm": 3.274069972289533,
      "learning_rate": 6.363636363636364e-06,
      "loss": 0.302,
      "step": 8
    },
    {
      "epoch": 0.2608695652173913,
      "grad_norm": 5.507527578193957,
      "learning_rate": 7.272727272727273e-06,
      "loss": 0.2751,
      "step": 9
    },
    {
      "epoch": 0.2898550724637681,
      "grad_norm": 3.651481150079746,
      "learning_rate": 8.181818181818183e-06,
      "loss": 0.2311,
      "step": 10
    },
    {
      "epoch": 0.3188405797101449,
      "grad_norm": 2.9570009811876314,
      "learning_rate": 9.090909090909091e-06,
      "loss": 0.2133,
      "step": 11
    },
    {
      "epoch": 0.34782608695652173,
      "grad_norm": 2.5680951348234355,
      "learning_rate": 1e-05,
      "loss": 0.2021,
      "step": 12
    },
    {
      "epoch": 0.37681159420289856,
      "grad_norm": 2.4726534615132394,
      "learning_rate": 9.997020702755353e-06,
      "loss": 0.2086,
      "step": 13
    },
    {
      "epoch": 0.4057971014492754,
      "grad_norm": 2.0168121750509207,
      "learning_rate": 9.98808636150624e-06,
      "loss": 0.1844,
      "step": 14
    },
    {
      "epoch": 0.43478260869565216,
      "grad_norm": 1.988964439347895,
      "learning_rate": 9.973207623475964e-06,
      "loss": 0.1561,
      "step": 15
    },
    {
      "epoch": 0.463768115942029,
      "grad_norm": 1.703404936865681,
      "learning_rate": 9.952402219937817e-06,
      "loss": 0.1561,
      "step": 16
    },
    {
      "epoch": 0.4927536231884058,
      "grad_norm": 1.4224019007386586,
      "learning_rate": 9.925694945084369e-06,
      "loss": 0.1359,
      "step": 17
    },
    {
      "epoch": 0.5217391304347826,
      "grad_norm": 1.390079768168245,
      "learning_rate": 9.893117626479778e-06,
      "loss": 0.1422,
      "step": 18
    },
    {
      "epoch": 0.5507246376811594,
      "grad_norm": 1.363268530198362,
      "learning_rate": 9.854709087130261e-06,
      "loss": 0.1448,
      "step": 19
    },
    {
      "epoch": 0.5797101449275363,
      "grad_norm": 1.2167548650541202,
      "learning_rate": 9.810515099218004e-06,
      "loss": 0.1276,
      "step": 20
    },
    {
      "epoch": 0.6086956521739131,
      "grad_norm": 1.2149476390607494,
      "learning_rate": 9.76058832955357e-06,
      "loss": 0.118,
      "step": 21
    },
    {
      "epoch": 0.6376811594202898,
      "grad_norm": 1.1518569512633139,
      "learning_rate": 9.704988276811883e-06,
      "loss": 0.1088,
      "step": 22
    },
    {
      "epoch": 0.6666666666666666,
      "grad_norm": 1.1392991979040483,
      "learning_rate": 9.643781200626512e-06,
      "loss": 0.1287,
      "step": 23
    },
    {
      "epoch": 0.6956521739130435,
      "grad_norm": 1.2433820634735937,
      "learning_rate": 9.577040042626832e-06,
      "loss": 0.1186,
      "step": 24
    },
    {
      "epoch": 0.7246376811594203,
      "grad_norm": 1.230070806896417,
      "learning_rate": 9.504844339512096e-06,
      "loss": 0.1212,
      "step": 25
    },
    {
      "epoch": 0.7536231884057971,
      "grad_norm": 1.276827967155928,
      "learning_rate": 9.427280128266049e-06,
      "loss": 0.1173,
      "step": 26
    },
    {
      "epoch": 0.782608695652174,
      "grad_norm": 1.1539744752759111,
      "learning_rate": 9.344439843625034e-06,
      "loss": 0.1145,
      "step": 27
    },
    {
      "epoch": 0.8115942028985508,
      "grad_norm": 1.101305700093271,
      "learning_rate": 9.256422207921757e-06,
      "loss": 0.1246,
      "step": 28
    },
    {
      "epoch": 0.8405797101449275,
      "grad_norm": 1.0720557642935318,
      "learning_rate": 9.163332113436031e-06,
      "loss": 0.118,
      "step": 29
    },
    {
      "epoch": 0.8695652173913043,
      "grad_norm": 1.0709210914227136,
      "learning_rate": 9.065280497392663e-06,
      "loss": 0.1097,
      "step": 30
    },
    {
      "epoch": 0.8985507246376812,
      "grad_norm": 1.3387140042516312,
      "learning_rate": 8.962384209755453e-06,
      "loss": 0.119,
      "step": 31
    },
    {
      "epoch": 0.927536231884058,
      "grad_norm": 1.2000636204494934,
      "learning_rate": 8.854765873974898e-06,
      "loss": 0.1103,
      "step": 32
    },
    {
      "epoch": 0.9565217391304348,
      "grad_norm": 1.1660635680582712,
      "learning_rate": 8.742553740855507e-06,
      "loss": 0.1168,
      "step": 33
    },
    {
      "epoch": 0.9855072463768116,
      "grad_norm": 1.0295558925452102,
      "learning_rate": 8.625881535716883e-06,
      "loss": 0.1047,
      "step": 34
    },
    {
      "epoch": 1.0,
      "grad_norm": 1.0295558925452102,
      "learning_rate": 8.504888299030748e-06,
      "loss": 0.0989,
      "step": 35
    }
  ],
  "logging_steps": 1,
  "max_steps": 102,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3487724470272.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}