{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.166666666666667,
  "eval_steps": 100,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.20833333333333334,
      "grad_norm": 14.965083122253418,
      "learning_rate": 4.991638098272951e-05,
      "loss": 4.5817,
      "num_input_tokens_seen": 70400,
      "step": 5
    },
    {
      "epoch": 0.4166666666666667,
      "grad_norm": 1.2255456447601318,
      "learning_rate": 4.966608330212198e-05,
      "loss": 0.4906,
      "num_input_tokens_seen": 142296,
      "step": 10
    },
    {
      "epoch": 0.625,
      "grad_norm": 0.9692479372024536,
      "learning_rate": 4.9250781329863606e-05,
      "loss": 0.5919,
      "num_input_tokens_seen": 215224,
      "step": 15
    },
    {
      "epoch": 0.8333333333333334,
      "grad_norm": 1.2537702322006226,
      "learning_rate": 4.867325323737765e-05,
      "loss": 0.3977,
      "num_input_tokens_seen": 285568,
      "step": 20
    },
    {
      "epoch": 1.0416666666666667,
      "grad_norm": 5.391773700714111,
      "learning_rate": 4.793736241118728e-05,
      "loss": 0.4036,
      "num_input_tokens_seen": 357968,
      "step": 25
    },
    {
      "epoch": 1.25,
      "grad_norm": 0.7211081981658936,
      "learning_rate": 4.7048031608708876e-05,
      "loss": 0.3966,
      "num_input_tokens_seen": 428848,
      "step": 30
    },
    {
      "epoch": 1.4583333333333333,
      "grad_norm": 1.8750863075256348,
      "learning_rate": 4.601121002736095e-05,
      "loss": 0.3386,
      "num_input_tokens_seen": 500536,
      "step": 35
    },
    {
      "epoch": 1.6666666666666665,
      "grad_norm": 3.4102139472961426,
      "learning_rate": 4.4833833507280884e-05,
      "loss": 0.3656,
      "num_input_tokens_seen": 572648,
      "step": 40
    },
    {
      "epoch": 1.875,
      "grad_norm": 2.648550510406494,
      "learning_rate": 4.352377813387398e-05,
      "loss": 0.3556,
      "num_input_tokens_seen": 643624,
      "step": 45
    },
    {
      "epoch": 2.0833333333333335,
      "grad_norm": 0.8133513331413269,
      "learning_rate": 4.208980755057178e-05,
      "loss": 0.2927,
      "num_input_tokens_seen": 715504,
      "step": 50
    },
    {
      "epoch": 2.2916666666666665,
      "grad_norm": 2.6871180534362793,
      "learning_rate": 4.054151433425194e-05,
      "loss": 0.2644,
      "num_input_tokens_seen": 786328,
      "step": 55
    },
    {
      "epoch": 2.5,
      "grad_norm": 1.71990966796875,
      "learning_rate": 3.888925582549006e-05,
      "loss": 0.2624,
      "num_input_tokens_seen": 856784,
      "step": 60
    },
    {
      "epoch": 2.7083333333333335,
      "grad_norm": 1.2586452960968018,
      "learning_rate": 3.7144084842908505e-05,
      "loss": 0.2458,
      "num_input_tokens_seen": 929768,
      "step": 65
    },
    {
      "epoch": 2.9166666666666665,
      "grad_norm": 1.3015620708465576,
      "learning_rate": 3.5317675745109866e-05,
      "loss": 0.2809,
      "num_input_tokens_seen": 1001312,
      "step": 70
    },
    {
      "epoch": 3.125,
      "grad_norm": 1.136038899421692,
      "learning_rate": 3.34222463348055e-05,
      "loss": 0.2873,
      "num_input_tokens_seen": 1074016,
      "step": 75
    },
    {
      "epoch": 3.3333333333333335,
      "grad_norm": 1.0206663608551025,
      "learning_rate": 3.147047612756302e-05,
      "loss": 0.2201,
      "num_input_tokens_seen": 1144184,
      "step": 80
    },
    {
      "epoch": 3.5416666666666665,
      "grad_norm": 1.0465854406356812,
      "learning_rate": 2.9475421531915827e-05,
      "loss": 0.1891,
      "num_input_tokens_seen": 1215680,
      "step": 85
    },
    {
      "epoch": 3.75,
      "grad_norm": 2.669116497039795,
      "learning_rate": 2.7450428508239024e-05,
      "loss": 0.1966,
      "num_input_tokens_seen": 1287848,
      "step": 90
    },
    {
      "epoch": 3.9583333333333335,
      "grad_norm": 2.8238487243652344,
      "learning_rate": 2.5409043290662173e-05,
      "loss": 0.1773,
      "num_input_tokens_seen": 1359184,
      "step": 95
    },
    {
      "epoch": 4.166666666666667,
      "grad_norm": 4.206888198852539,
      "learning_rate": 2.3364921769246423e-05,
      "loss": 0.1521,
      "num_input_tokens_seen": 1431816,
      "step": 100
    },
    {
      "epoch": 4.166666666666667,
      "eval_loss": 0.3628197908401489,
      "eval_runtime": 109.019,
      "eval_samples_per_second": 0.881,
      "eval_steps_per_second": 0.44,
      "num_input_tokens_seen": 1431816,
      "step": 100
    }
  ],
  "logging_steps": 5,
  "max_steps": 192,
  "num_input_tokens_seen": 1431816,
  "num_train_epochs": 8,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 101273320292352.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}