{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 10.0,
  "eval_steps": 200,
  "global_step": 3330,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.6006006006006006,
      "grad_norm": 10.51630973815918,
      "learning_rate": 1.1951951951951951e-05,
      "loss": 2.6385,
      "step": 200
    },
    {
      "epoch": 0.6006006006006006,
      "eval_train_loss": 2.488961696624756,
      "eval_train_runtime": 15.8109,
      "eval_train_samples_per_second": 338.943,
      "eval_train_steps_per_second": 10.626,
      "step": 200
    },
    {
      "epoch": 1.2012012012012012,
      "grad_norm": 9.947551727294922,
      "learning_rate": 1.9559559559559563e-05,
      "loss": 2.3324,
      "step": 400
    },
    {
      "epoch": 1.2012012012012012,
      "eval_train_loss": 2.419891119003296,
      "eval_train_runtime": 15.4172,
      "eval_train_samples_per_second": 347.6,
      "eval_train_steps_per_second": 10.897,
      "step": 400
    },
    {
      "epoch": 1.8018018018018018,
      "grad_norm": 9.27906322479248,
      "learning_rate": 1.8224891558224895e-05,
      "loss": 2.1772,
      "step": 600
    },
    {
      "epoch": 1.8018018018018018,
      "eval_train_loss": 2.389059543609619,
      "eval_train_runtime": 15.629,
      "eval_train_samples_per_second": 342.889,
      "eval_train_steps_per_second": 10.749,
      "step": 600
    },
    {
      "epoch": 2.4024024024024024,
      "grad_norm": 8.641522407531738,
      "learning_rate": 1.6890223556890223e-05,
      "loss": 2.0635,
      "step": 800
    },
    {
      "epoch": 2.4024024024024024,
      "eval_train_loss": 2.369140148162842,
      "eval_train_runtime": 15.6802,
      "eval_train_samples_per_second": 341.768,
      "eval_train_steps_per_second": 10.714,
      "step": 800
    },
    {
      "epoch": 3.003003003003003,
      "grad_norm": 9.14696216583252,
      "learning_rate": 1.555555555555556e-05,
      "loss": 1.9915,
      "step": 1000
    },
    {
      "epoch": 3.003003003003003,
      "eval_train_loss": 2.360917806625366,
      "eval_train_runtime": 15.1166,
      "eval_train_samples_per_second": 354.511,
      "eval_train_steps_per_second": 11.114,
      "step": 1000
    },
    {
      "epoch": 3.6036036036036037,
      "grad_norm": 8.530783653259277,
      "learning_rate": 1.4220887554220888e-05,
      "loss": 1.9008,
      "step": 1200
    },
    {
      "epoch": 3.6036036036036037,
      "eval_train_loss": 2.3688642978668213,
      "eval_train_runtime": 15.4199,
      "eval_train_samples_per_second": 347.539,
      "eval_train_steps_per_second": 10.895,
      "step": 1200
    },
    {
      "epoch": 4.2042042042042045,
      "grad_norm": 10.91430377960205,
      "learning_rate": 1.288621955288622e-05,
      "loss": 1.8603,
      "step": 1400
    },
    {
      "epoch": 4.2042042042042045,
      "eval_train_loss": 2.3849704265594482,
      "eval_train_runtime": 15.4842,
      "eval_train_samples_per_second": 346.094,
      "eval_train_steps_per_second": 10.85,
      "step": 1400
    },
    {
      "epoch": 4.804804804804805,
      "grad_norm": 7.67103910446167,
      "learning_rate": 1.1551551551551552e-05,
      "loss": 1.8421,
      "step": 1600
    },
    {
      "epoch": 4.804804804804805,
      "eval_train_loss": 2.3467628955841064,
      "eval_train_runtime": 15.716,
      "eval_train_samples_per_second": 340.99,
      "eval_train_steps_per_second": 10.69,
      "step": 1600
    },
    {
      "epoch": 5.405405405405405,
      "grad_norm": 10.279878616333008,
      "learning_rate": 1.0216883550216886e-05,
      "loss": 1.785,
      "step": 1800
    },
    {
      "epoch": 5.405405405405405,
      "eval_train_loss": 2.3648931980133057,
      "eval_train_runtime": 15.8675,
      "eval_train_samples_per_second": 337.735,
      "eval_train_steps_per_second": 10.588,
      "step": 1800
    },
    {
      "epoch": 6.006006006006006,
      "grad_norm": 9.513252258300781,
      "learning_rate": 8.882215548882216e-06,
      "loss": 1.786,
      "step": 2000
    },
    {
      "epoch": 6.006006006006006,
      "eval_train_loss": 2.378257989883423,
      "eval_train_runtime": 16.2336,
      "eval_train_samples_per_second": 330.118,
      "eval_train_steps_per_second": 10.349,
      "step": 2000
    },
    {
      "epoch": 6.606606606606607,
      "grad_norm": 11.29470157623291,
      "learning_rate": 7.547547547547548e-06,
      "loss": 1.7331,
      "step": 2200
    },
    {
      "epoch": 6.606606606606607,
      "eval_train_loss": 2.3782169818878174,
      "eval_train_runtime": 15.9451,
      "eval_train_samples_per_second": 336.091,
      "eval_train_steps_per_second": 10.536,
      "step": 2200
    },
    {
      "epoch": 7.207207207207207,
      "grad_norm": 10.022029876708984,
      "learning_rate": 6.21287954621288e-06,
      "loss": 1.7062,
      "step": 2400
    },
    {
      "epoch": 7.207207207207207,
      "eval_train_loss": 2.3825714588165283,
      "eval_train_runtime": 15.9203,
      "eval_train_samples_per_second": 336.615,
      "eval_train_steps_per_second": 10.553,
      "step": 2400
    },
    {
      "epoch": 7.807807807807808,
      "grad_norm": 10.572230339050293,
      "learning_rate": 4.878211544878212e-06,
      "loss": 1.6929,
      "step": 2600
    },
    {
      "epoch": 7.807807807807808,
      "eval_train_loss": 2.3926050662994385,
      "eval_train_runtime": 15.9657,
      "eval_train_samples_per_second": 335.657,
      "eval_train_steps_per_second": 10.523,
      "step": 2600
    },
    {
      "epoch": 8.408408408408409,
      "grad_norm": 10.39648723602295,
      "learning_rate": 3.5435435435435437e-06,
      "loss": 1.6618,
      "step": 2800
    },
    {
      "epoch": 8.408408408408409,
      "eval_train_loss": 2.4069101810455322,
      "eval_train_runtime": 15.7461,
      "eval_train_samples_per_second": 340.337,
      "eval_train_steps_per_second": 10.669,
      "step": 2800
    },
    {
      "epoch": 9.00900900900901,
      "grad_norm": 12.46285343170166,
      "learning_rate": 2.2088755422088755e-06,
      "loss": 1.6348,
      "step": 3000
    },
    {
      "epoch": 9.00900900900901,
      "eval_train_loss": 2.415452480316162,
      "eval_train_runtime": 15.6461,
      "eval_train_samples_per_second": 342.513,
      "eval_train_steps_per_second": 10.737,
      "step": 3000
    },
    {
      "epoch": 9.60960960960961,
      "grad_norm": 11.425995826721191,
      "learning_rate": 8.742075408742076e-07,
      "loss": 1.6553,
      "step": 3200
    },
    {
      "epoch": 9.60960960960961,
      "eval_train_loss": 2.40596079826355,
      "eval_train_runtime": 15.9222,
      "eval_train_samples_per_second": 336.573,
      "eval_train_steps_per_second": 10.551,
      "step": 3200
    }
  ],
  "logging_steps": 200,
  "max_steps": 3330,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 2000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}