{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9937106918238994,
  "eval_steps": 500,
  "global_step": 79,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.012578616352201259,
      "grad_norm": 26.6982970213865,
      "learning_rate": 4.998023493068255e-06,
      "loss": 1.3146,
      "step": 1
    },
    {
      "epoch": 0.06289308176100629,
      "grad_norm": 3.1460504222727472,
      "learning_rate": 4.950743417011591e-06,
      "loss": 0.554,
      "step": 5
    },
    {
      "epoch": 0.12578616352201258,
      "grad_norm": 1.3494928101992825,
      "learning_rate": 4.804914636820517e-06,
      "loss": 0.2696,
      "step": 10
    },
    {
      "epoch": 0.18867924528301888,
      "grad_norm": 1.2056844525108215,
      "learning_rate": 4.568260081357644e-06,
      "loss": 0.2379,
      "step": 15
    },
    {
      "epoch": 0.25157232704402516,
      "grad_norm": 1.1277496301496466,
      "learning_rate": 4.250105186423564e-06,
      "loss": 0.2051,
      "step": 20
    },
    {
      "epoch": 0.31446540880503143,
      "grad_norm": 1.2877818137628803,
      "learning_rate": 3.862986930406669e-06,
      "loss": 0.1791,
      "step": 25
    },
    {
      "epoch": 0.37735849056603776,
      "grad_norm": 1.4749165374704138,
      "learning_rate": 3.4221598113100196e-06,
      "loss": 0.1596,
      "step": 30
    },
    {
      "epoch": 0.44025157232704404,
      "grad_norm": 1.146174412242993,
      "learning_rate": 2.9449947391938768e-06,
      "loss": 0.13,
      "step": 35
    },
    {
      "epoch": 0.5031446540880503,
      "grad_norm": 1.2096807547032962,
      "learning_rate": 2.4502945308373246e-06,
      "loss": 0.1285,
      "step": 40
    },
    {
      "epoch": 0.5660377358490566,
      "grad_norm": 1.028526765918339,
      "learning_rate": 1.957552979734205e-06,
      "loss": 0.1132,
      "step": 45
    },
    {
      "epoch": 0.6289308176100629,
      "grad_norm": 1.221137786110913,
      "learning_rate": 1.4861866979675155e-06,
      "loss": 0.1102,
      "step": 50
    },
    {
      "epoch": 0.6918238993710691,
      "grad_norm": 1.0978213686215132,
      "learning_rate": 1.0547699994378787e-06,
      "loss": 0.1011,
      "step": 55
    },
    {
      "epoch": 0.7547169811320755,
      "grad_norm": 0.912807545184827,
      "learning_rate": 6.803029740762648e-07,
      "loss": 0.0973,
      "step": 60
    },
    {
      "epoch": 0.8176100628930818,
      "grad_norm": 1.1228289473397595,
      "learning_rate": 3.7754159477158994e-07,
      "loss": 0.0907,
      "step": 65
    },
    {
      "epoch": 0.8805031446540881,
      "grad_norm": 1.079722393089992,
      "learning_rate": 1.584162543281806e-07,
      "loss": 0.0867,
      "step": 70
    },
    {
      "epoch": 0.9433962264150944,
      "grad_norm": 0.9543542794294788,
      "learning_rate": 3.15616451591666e-08,
      "loss": 0.0848,
      "step": 75
    },
    {
      "epoch": 0.9937106918238994,
      "step": 79,
      "total_flos": 10436299407360.0,
      "train_loss": 0.17545194791842111,
      "train_runtime": 544.5229,
      "train_samples_per_second": 4.668,
      "train_steps_per_second": 0.145
    }
  ],
  "logging_steps": 5,
  "max_steps": 79,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 10436299407360.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}