{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 9.943714821763603,
  "global_step": 26500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.38,
      "learning_rate": 9.628378378378378e-07,
      "loss": 10.819,
      "step": 1000
    },
    {
      "epoch": 0.75,
      "learning_rate": 9.253003003003003e-07,
      "loss": 10.816,
      "step": 2000
    },
    {
      "epoch": 1.13,
      "learning_rate": 8.877627627627627e-07,
      "loss": 10.8125,
      "step": 3000
    },
    {
      "epoch": 1.5,
      "learning_rate": 8.502252252252253e-07,
      "loss": 10.8092,
      "step": 4000
    },
    {
      "epoch": 1.88,
      "learning_rate": 8.126876876876877e-07,
      "loss": 10.8057,
      "step": 5000
    },
    {
      "epoch": 2.25,
      "learning_rate": 7.751501501501501e-07,
      "loss": 10.8023,
      "step": 6000
    },
    {
      "epoch": 2.63,
      "learning_rate": 7.376126126126125e-07,
      "loss": 10.7993,
      "step": 7000
    },
    {
      "epoch": 3.0,
      "learning_rate": 7.00075075075075e-07,
      "loss": 10.7962,
      "step": 8000
    },
    {
      "epoch": 3.38,
      "learning_rate": 6.625375375375375e-07,
      "loss": 10.7936,
      "step": 9000
    },
    {
      "epoch": 3.75,
      "learning_rate": 6.249999999999999e-07,
      "loss": 10.7913,
      "step": 10000
    },
    {
      "epoch": 4.13,
      "learning_rate": 5.874624624624625e-07,
      "loss": 10.7887,
      "step": 11000
    },
    {
      "epoch": 4.5,
      "learning_rate": 5.499249249249249e-07,
      "loss": 10.7866,
      "step": 12000
    },
    {
      "epoch": 4.88,
      "learning_rate": 5.123873873873874e-07,
      "loss": 10.7845,
      "step": 13000
    },
    {
      "epoch": 5.25,
      "learning_rate": 4.7484984984984984e-07,
      "loss": 10.7826,
      "step": 14000
    },
    {
      "epoch": 5.63,
      "learning_rate": 4.373123123123123e-07,
      "loss": 10.7809,
      "step": 15000
    },
    {
      "epoch": 6.0,
      "learning_rate": 3.997747747747748e-07,
      "loss": 10.7793,
      "step": 16000
    },
    {
      "epoch": 6.38,
      "learning_rate": 3.6223723723723723e-07,
      "loss": 10.7777,
      "step": 17000
    },
    {
      "epoch": 6.75,
      "learning_rate": 3.246996996996997e-07,
      "loss": 10.7764,
      "step": 18000
    },
    {
      "epoch": 7.13,
      "learning_rate": 2.871621621621622e-07,
      "loss": 10.7754,
      "step": 19000
    },
    {
      "epoch": 7.5,
      "learning_rate": 2.496246246246246e-07,
      "loss": 10.7741,
      "step": 20000
    },
    {
      "epoch": 7.88,
      "learning_rate": 2.120870870870871e-07,
      "loss": 10.7732,
      "step": 21000
    },
    {
      "epoch": 8.26,
      "learning_rate": 1.7454954954954954e-07,
      "loss": 10.7725,
      "step": 22000
    },
    {
      "epoch": 8.63,
      "learning_rate": 1.37012012012012e-07,
      "loss": 10.7719,
      "step": 23000
    },
    {
      "epoch": 9.01,
      "learning_rate": 9.947447447447447e-08,
      "loss": 10.7713,
      "step": 24000
    },
    {
      "epoch": 9.38,
      "learning_rate": 6.193693693693693e-08,
      "loss": 10.7709,
      "step": 25000
    },
    {
      "epoch": 9.76,
      "learning_rate": 2.4399399399399398e-08,
      "loss": 10.7708,
      "step": 26000
    }
  ],
  "max_steps": 26650,
  "num_train_epochs": 10,
  "total_flos": 98983723008.0,
  "trial_name": null,
  "trial_params": null
}