{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.0,
  "eval_steps": 1000,
  "global_step": 3528,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05668934240362812,
      "grad_norm": 0.3524502481986371,
      "learning_rate": 2.8328611898017e-06,
      "loss": 0.361,
      "step": 100
    },
    {
      "epoch": 0.11337868480725624,
      "grad_norm": 0.3997035804333986,
      "learning_rate": 5.6657223796034e-06,
      "loss": 0.3349,
      "step": 200
    },
    {
      "epoch": 0.17006802721088435,
      "grad_norm": 0.38751841177102114,
      "learning_rate": 8.4985835694051e-06,
      "loss": 0.332,
      "step": 300
    },
    {
      "epoch": 0.22675736961451248,
      "grad_norm": 0.44836449253905825,
      "learning_rate": 9.994594078481824e-06,
      "loss": 0.3352,
      "step": 400
    },
    {
      "epoch": 0.2834467120181406,
      "grad_norm": 0.4021994062977158,
      "learning_rate": 9.947201553489385e-06,
      "loss": 0.3324,
      "step": 500
    },
    {
      "epoch": 0.3401360544217687,
      "grad_norm": 0.3947990104528409,
      "learning_rate": 9.851412130607201e-06,
      "loss": 0.3352,
      "step": 600
    },
    {
      "epoch": 0.3968253968253968,
      "grad_norm": 0.4273504082938427,
      "learning_rate": 9.70816288726541e-06,
      "loss": 0.3357,
      "step": 700
    },
    {
      "epoch": 0.45351473922902497,
      "grad_norm": 0.3916187294777505,
      "learning_rate": 9.518855185209205e-06,
      "loss": 0.3386,
      "step": 800
    },
    {
      "epoch": 0.5102040816326531,
      "grad_norm": 0.3417478841692644,
      "learning_rate": 9.285340961422978e-06,
      "loss": 0.3369,
      "step": 900
    },
    {
      "epoch": 0.5668934240362812,
      "grad_norm": 0.36668292635175,
      "learning_rate": 9.009904611220361e-06,
      "loss": 0.3386,
      "step": 1000
    },
    {
      "epoch": 0.5668934240362812,
      "eval_loss": 0.3266855776309967,
      "eval_runtime": 713.495,
      "eval_samples_per_second": 4.395,
      "eval_steps_per_second": 0.549,
      "step": 1000
    },
    {
      "epoch": 0.6235827664399093,
      "grad_norm": 0.49562486933370287,
      "learning_rate": 8.695240640732093e-06,
      "loss": 0.3351,
      "step": 1100
    },
    {
      "epoch": 0.6802721088435374,
      "grad_norm": 0.29570204380494114,
      "learning_rate": 8.344427307410323e-06,
      "loss": 0.3376,
      "step": 1200
    },
    {
      "epoch": 0.7369614512471655,
      "grad_norm": 0.4078184129717466,
      "learning_rate": 7.960896506415892e-06,
      "loss": 0.335,
      "step": 1300
    },
    {
      "epoch": 0.7936507936507936,
      "grad_norm": 0.3889595466840503,
      "learning_rate": 7.548400197480501e-06,
      "loss": 0.3347,
      "step": 1400
    },
    {
      "epoch": 0.8503401360544217,
      "grad_norm": 0.3485999541903875,
      "learning_rate": 7.110973700679067e-06,
      "loss": 0.3346,
      "step": 1500
    },
    {
      "epoch": 0.9070294784580499,
      "grad_norm": 0.3508468605704029,
      "learning_rate": 6.652896220178041e-06,
      "loss": 0.3377,
      "step": 1600
    },
    {
      "epoch": 0.963718820861678,
      "grad_norm": 0.3277279426137875,
      "learning_rate": 6.178648982143307e-06,
      "loss": 0.3284,
      "step": 1700
    },
    {
      "epoch": 1.0204081632653061,
      "grad_norm": 0.3946171462399535,
      "learning_rate": 5.692871396331168e-06,
      "loss": 0.3144,
      "step": 1800
    },
    {
      "epoch": 1.0770975056689343,
      "grad_norm": 0.36094113773422365,
      "learning_rate": 5.2003156702196275e-06,
      "loss": 0.276,
      "step": 1900
    },
    {
      "epoch": 1.1337868480725624,
      "grad_norm": 0.38888742960958356,
      "learning_rate": 4.705800319675462e-06,
      "loss": 0.2706,
      "step": 2000
    },
    {
      "epoch": 1.1337868480725624,
      "eval_loss": 0.3267841041088104,
      "eval_runtime": 713.4781,
      "eval_samples_per_second": 4.395,
      "eval_steps_per_second": 0.549,
      "step": 2000
    },
    {
      "epoch": 1.1904761904761905,
      "grad_norm": 0.3794229011992927,
      "learning_rate": 4.214163030947442e-06,
      "loss": 0.2785,
      "step": 2100
    },
    {
      "epoch": 1.2471655328798186,
      "grad_norm": 0.37402259299734353,
      "learning_rate": 3.730213335121796e-06,
      "loss": 0.2756,
      "step": 2200
    },
    {
      "epoch": 1.3038548752834467,
      "grad_norm": 0.3529299203177358,
      "learning_rate": 3.2586855580106876e-06,
      "loss": 0.2774,
      "step": 2300
    },
    {
      "epoch": 1.3605442176870748,
      "grad_norm": 0.3225567379873416,
      "learning_rate": 2.8041925057499253e-06,
      "loss": 0.2741,
      "step": 2400
    },
    {
      "epoch": 1.417233560090703,
      "grad_norm": 0.3853580675773505,
      "learning_rate": 2.371180339184999e-06,
      "loss": 0.2697,
      "step": 2500
    },
    {
      "epoch": 1.473922902494331,
      "grad_norm": 0.37229687058731975,
      "learning_rate": 1.963885078494927e-06,
      "loss": 0.2683,
      "step": 2600
    },
    {
      "epoch": 1.5306122448979593,
      "grad_norm": 0.38174797365201735,
      "learning_rate": 1.5862911635553614e-06,
      "loss": 0.2728,
      "step": 2700
    },
    {
      "epoch": 1.5873015873015874,
      "grad_norm": 0.43914992324605495,
      "learning_rate": 1.2420924754316915e-06,
      "loss": 0.2756,
      "step": 2800
    },
    {
      "epoch": 1.6439909297052155,
      "grad_norm": 0.3851735442216275,
      "learning_rate": 9.346562003165193e-07,
      "loss": 0.271,
      "step": 2900
    },
    {
      "epoch": 1.7006802721088436,
      "grad_norm": 0.463016547902216,
      "learning_rate": 6.669898894190413e-07,
      "loss": 0.2689,
      "step": 3000
    },
    {
      "epoch": 1.7006802721088436,
      "eval_loss": 0.32290545105934143,
      "eval_runtime": 713.868,
      "eval_samples_per_second": 4.393,
      "eval_steps_per_second": 0.549,
      "step": 3000
    },
    {
      "epoch": 1.7573696145124718,
      "grad_norm": 0.38528285238831594,
      "learning_rate": 4.417120370490169e-07,
      "loss": 0.2701,
      "step": 3100
    },
    {
      "epoch": 1.8140589569160999,
      "grad_norm": 0.4044768062755781,
      "learning_rate": 2.6102646472050463e-07,
      "loss": 0.2683,
      "step": 3200
    },
    {
      "epoch": 1.870748299319728,
      "grad_norm": 0.36435819425940585,
      "learning_rate": 1.2670076186756973e-07,
      "loss": 0.2732,
      "step": 3300
    },
    {
      "epoch": 1.927437641723356,
      "grad_norm": 0.38843363773407674,
      "learning_rate": 4.004899407954621e-08,
      "loss": 0.27,
      "step": 3400
    },
    {
      "epoch": 1.9841269841269842,
      "grad_norm": 0.4701515632923394,
      "learning_rate": 1.9188480157023014e-09,
      "loss": 0.2657,
      "step": 3500
    },
    {
      "epoch": 2.0,
      "step": 3528,
      "total_flos": 2304752043950080.0,
      "train_loss": 0.30443412133084946,
      "train_runtime": 78479.1521,
      "train_samples_per_second": 0.719,
      "train_steps_per_second": 0.045
    }
  ],
  "logging_steps": 100,
  "max_steps": 3528,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2304752043950080.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}