|
{ |
|
"best_metric": 0.08294033469935763, |
|
"best_model_checkpoint": "/bartabsa-reproduce/outputs/gpt22gpt2_42_std_paper/checkpoint-8000", |
|
"epoch": 2.999832822513235, |
|
"eval_steps": 2000, |
|
"global_step": 26916, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.05572582892170521, |
|
"grad_norm": 2.4936771392822266, |
|
"learning_rate": 2.5e-05, |
|
"loss": 3.555, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 0.11145165784341042, |
|
"grad_norm": 1.7875964641571045, |
|
"learning_rate": 5e-05, |
|
"loss": 2.9668, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 0.16717748676511562, |
|
"grad_norm": 1.4690954685211182, |
|
"learning_rate": 4.9035344960642076e-05, |
|
"loss": 2.8594, |
|
"step": 1500 |
|
}, |
|
{ |
|
"epoch": 0.22290331568682084, |
|
"grad_norm": 1.5910253524780273, |
|
"learning_rate": 4.807068992128415e-05, |
|
"loss": 2.7391, |
|
"step": 2000 |
|
}, |
|
{ |
|
"epoch": 0.22290331568682084, |
|
"eval_loss": 2.587376117706299, |
|
"eval_rouge1": 0.15890436461961194, |
|
"eval_rouge2": 0.026691787346904763, |
|
"eval_rougeL": 0.11299625068223401, |
|
"eval_rougeLsum": 0.15080434380335367, |
|
"eval_runtime": 6206.3629, |
|
"eval_samples_per_second": 2.154, |
|
"eval_steps_per_second": 0.135, |
|
"step": 2000 |
|
}, |
|
{ |
|
"epoch": 0.27862914460852606, |
|
"grad_norm": 1.6178048849105835, |
|
"learning_rate": 4.7106034881926225e-05, |
|
"loss": 2.6739, |
|
"step": 2500 |
|
}, |
|
{ |
|
"epoch": 0.33435497353023125, |
|
"grad_norm": 1.9126936197280884, |
|
"learning_rate": 4.61413798425683e-05, |
|
"loss": 2.6193, |
|
"step": 3000 |
|
}, |
|
{ |
|
"epoch": 0.3900808024519365, |
|
"grad_norm": 1.5849372148513794, |
|
"learning_rate": 4.517672480321037e-05, |
|
"loss": 2.5813, |
|
"step": 3500 |
|
}, |
|
{ |
|
"epoch": 0.4458066313736417, |
|
"grad_norm": 5.0478129386901855, |
|
"learning_rate": 4.421206976385245e-05, |
|
"loss": 2.5412, |
|
"step": 4000 |
|
}, |
|
{ |
|
"epoch": 0.4458066313736417, |
|
"eval_loss": 2.4282822608947754, |
|
"eval_rouge1": 0.26692339384492103, |
|
"eval_rouge2": 0.06788970022541349, |
|
"eval_rougeL": 0.16057716127224064, |
|
"eval_rougeLsum": 0.24985170034027104, |
|
"eval_runtime": 6305.6599, |
|
"eval_samples_per_second": 2.12, |
|
"eval_steps_per_second": 0.133, |
|
"step": 4000 |
|
}, |
|
{ |
|
"epoch": 0.5015324602953469, |
|
"grad_norm": 1.589734673500061, |
|
"learning_rate": 4.324741472449452e-05, |
|
"loss": 2.5082, |
|
"step": 4500 |
|
}, |
|
{ |
|
"epoch": 0.5572582892170521, |
|
"grad_norm": 1.5139461755752563, |
|
"learning_rate": 4.2282759685136595e-05, |
|
"loss": 2.4859, |
|
"step": 5000 |
|
}, |
|
{ |
|
"epoch": 0.6129841181387573, |
|
"grad_norm": 1.4664047956466675, |
|
"learning_rate": 4.131810464577867e-05, |
|
"loss": 2.4547, |
|
"step": 5500 |
|
}, |
|
{ |
|
"epoch": 0.6687099470604625, |
|
"grad_norm": 1.7493504285812378, |
|
"learning_rate": 4.035344960642074e-05, |
|
"loss": 2.4358, |
|
"step": 6000 |
|
}, |
|
{ |
|
"epoch": 0.6687099470604625, |
|
"eval_loss": 2.345005512237549, |
|
"eval_rouge1": 0.2730819963251183, |
|
"eval_rouge2": 0.07305508048248258, |
|
"eval_rougeL": 0.1637512659125704, |
|
"eval_rougeLsum": 0.25593027675446556, |
|
"eval_runtime": 6286.1193, |
|
"eval_samples_per_second": 2.127, |
|
"eval_steps_per_second": 0.133, |
|
"step": 6000 |
|
}, |
|
{ |
|
"epoch": 0.7244357759821677, |
|
"grad_norm": 1.757111668586731, |
|
"learning_rate": 3.938879456706282e-05, |
|
"loss": 2.4158, |
|
"step": 6500 |
|
}, |
|
{ |
|
"epoch": 0.780161604903873, |
|
"grad_norm": 1.46837317943573, |
|
"learning_rate": 3.84241395277049e-05, |
|
"loss": 2.3926, |
|
"step": 7000 |
|
}, |
|
{ |
|
"epoch": 0.8358874338255782, |
|
"grad_norm": 1.4906083345413208, |
|
"learning_rate": 3.745948448834697e-05, |
|
"loss": 2.3803, |
|
"step": 7500 |
|
}, |
|
{ |
|
"epoch": 0.8916132627472834, |
|
"grad_norm": 1.6719425916671753, |
|
"learning_rate": 3.6494829448989046e-05, |
|
"loss": 2.3592, |
|
"step": 8000 |
|
}, |
|
{ |
|
"epoch": 0.8916132627472834, |
|
"eval_loss": 2.261608600616455, |
|
"eval_rouge1": 0.29601660868967444, |
|
"eval_rouge2": 0.08294033469935763, |
|
"eval_rougeL": 0.17290133105856215, |
|
"eval_rougeLsum": 0.2765445625043713, |
|
"eval_runtime": 6302.0707, |
|
"eval_samples_per_second": 2.121, |
|
"eval_steps_per_second": 0.133, |
|
"step": 8000 |
|
}, |
|
{ |
|
"epoch": 0.9473390916689886, |
|
"grad_norm": 1.585368275642395, |
|
"learning_rate": 3.553017440963112e-05, |
|
"loss": 2.3381, |
|
"step": 8500 |
|
}, |
|
{ |
|
"epoch": 1.0030649205906939, |
|
"grad_norm": null,
|
"learning_rate": 3.4565519370273194e-05, |
|
"loss": 2.7416, |
|
"step": 9000 |
|
}, |
|
{ |
|
"epoch": 1.058790749512399, |
|
"grad_norm": null,
|
"learning_rate": 3.360086433091527e-05, |
|
"loss": 0.0, |
|
"step": 9500 |
|
}, |
|
{ |
|
"epoch": 1.1145165784341042, |
|
"grad_norm": null,
|
"learning_rate": 3.263620929155734e-05, |
|
"loss": 0.0, |
|
"step": 10000 |
|
}, |
|
{ |
|
"epoch": 1.1145165784341042, |
|
"eval_loss": null,
|
"eval_rouge1": 0.0, |
|
"eval_rouge2": 0.0, |
|
"eval_rougeL": 0.0, |
|
"eval_rougeLsum": 0.0, |
|
"eval_runtime": 5771.2576, |
|
"eval_samples_per_second": 2.316, |
|
"eval_steps_per_second": 0.145, |
|
"step": 10000 |
|
}, |
|
{ |
|
"epoch": 1.1702424073558095, |
|
"grad_norm": null,
|
"learning_rate": 3.1671554252199416e-05, |
|
"loss": 0.0, |
|
"step": 10500 |
|
}, |
|
{ |
|
"epoch": 1.2259682362775146, |
|
"grad_norm": null,
|
"learning_rate": 3.070689921284149e-05, |
|
"loss": 0.0, |
|
"step": 11000 |
|
}, |
|
{ |
|
"epoch": 1.28169406519922, |
|
"grad_norm": null,
|
"learning_rate": 2.9742244173483564e-05, |
|
"loss": 0.0, |
|
"step": 11500 |
|
}, |
|
{ |
|
"epoch": 1.337419894120925, |
|
"grad_norm": null,
|
"learning_rate": 2.8777589134125638e-05, |
|
"loss": 0.0, |
|
"step": 12000 |
|
}, |
|
{ |
|
"epoch": 1.337419894120925, |
|
"eval_loss": null,
|
"eval_rouge1": 0.0, |
|
"eval_rouge2": 0.0, |
|
"eval_rougeL": 0.0, |
|
"eval_rougeLsum": 0.0, |
|
"eval_runtime": 5768.1931, |
|
"eval_samples_per_second": 2.318, |
|
"eval_steps_per_second": 0.145, |
|
"step": 12000 |
|
}, |
|
{ |
|
"epoch": 1.3931457230426303, |
|
"grad_norm": null,
|
"learning_rate": 2.7812934094767712e-05, |
|
"loss": 0.0, |
|
"step": 12500 |
|
}, |
|
{ |
|
"epoch": 1.4488715519643356, |
|
"grad_norm": null,
|
"learning_rate": 2.6848279055409786e-05, |
|
"loss": 0.0, |
|
"step": 13000 |
|
}, |
|
{ |
|
"epoch": 1.5045973808860407, |
|
"grad_norm": null,
|
"learning_rate": 2.588362401605186e-05, |
|
"loss": 0.0, |
|
"step": 13500 |
|
}, |
|
{ |
|
"epoch": 1.5603232098077457, |
|
"grad_norm": null,
|
"learning_rate": 2.4918968976693934e-05, |
|
"loss": 0.0, |
|
"step": 14000 |
|
}, |
|
{ |
|
"epoch": 1.5603232098077457, |
|
"eval_loss": null,
|
"eval_rouge1": 0.0, |
|
"eval_rouge2": 0.0, |
|
"eval_rougeL": 0.0, |
|
"eval_rougeLsum": 0.0, |
|
"eval_runtime": 5772.8437, |
|
"eval_samples_per_second": 2.316, |
|
"eval_steps_per_second": 0.145, |
|
"step": 14000 |
|
}, |
|
{ |
|
"epoch": 1.616049038729451, |
|
"grad_norm": null,
|
"learning_rate": 2.3954313937336008e-05, |
|
"loss": 0.0, |
|
"step": 14500 |
|
}, |
|
{ |
|
"epoch": 1.6717748676511563, |
|
"grad_norm": null,
|
"learning_rate": 2.2989658897978082e-05, |
|
"loss": 0.0, |
|
"step": 15000 |
|
}, |
|
{ |
|
"epoch": 1.7275006965728616, |
|
"grad_norm": null,
|
"learning_rate": 2.2025003858620156e-05, |
|
"loss": 0.0, |
|
"step": 15500 |
|
}, |
|
{ |
|
"epoch": 1.7832265254945667, |
|
"grad_norm": null,
|
"learning_rate": 2.1060348819262234e-05, |
|
"loss": 0.0, |
|
"step": 16000 |
|
}, |
|
{ |
|
"epoch": 1.7832265254945667, |
|
"eval_loss": null,
|
"eval_rouge1": 0.0, |
|
"eval_rouge2": 0.0, |
|
"eval_rougeL": 0.0, |
|
"eval_rougeLsum": 0.0, |
|
"eval_runtime": 5773.9768, |
|
"eval_samples_per_second": 2.315, |
|
"eval_steps_per_second": 0.145, |
|
"step": 16000 |
|
}, |
|
{ |
|
"epoch": 1.8389523544162718, |
|
"grad_norm": null,
|
"learning_rate": 2.0095693779904308e-05, |
|
"loss": 0.0, |
|
"step": 16500 |
|
}, |
|
{ |
|
"epoch": 1.894678183337977, |
|
"grad_norm": null,
|
"learning_rate": 1.9131038740546382e-05, |
|
"loss": 0.0, |
|
"step": 17000 |
|
}, |
|
{ |
|
"epoch": 1.9504040122596824, |
|
"grad_norm": null,
|
"learning_rate": 1.8166383701188456e-05, |
|
"loss": 0.0, |
|
"step": 17500 |
|
}, |
|
{ |
|
"epoch": 2.0061298411813877, |
|
"grad_norm": null,
|
"learning_rate": 1.720172866183053e-05, |
|
"loss": 0.0, |
|
"step": 18000 |
|
}, |
|
{ |
|
"epoch": 2.0061298411813877, |
|
"eval_loss": null,
|
"eval_rouge1": 0.0, |
|
"eval_rouge2": 0.0, |
|
"eval_rougeL": 0.0, |
|
"eval_rougeLsum": 0.0, |
|
"eval_runtime": 5771.1419, |
|
"eval_samples_per_second": 2.316, |
|
"eval_steps_per_second": 0.145, |
|
"step": 18000 |
|
}, |
|
{ |
|
"epoch": 2.0618556701030926, |
|
"grad_norm": null,
|
"learning_rate": 1.6237073622472604e-05, |
|
"loss": 0.0, |
|
"step": 18500 |
|
}, |
|
{ |
|
"epoch": 2.117581499024798, |
|
"grad_norm": null,
|
"learning_rate": 1.5272418583114678e-05, |
|
"loss": 0.0, |
|
"step": 19000 |
|
}, |
|
{ |
|
"epoch": 2.173307327946503, |
|
"grad_norm": null,
|
"learning_rate": 1.4307763543756752e-05, |
|
"loss": 0.0, |
|
"step": 19500 |
|
}, |
|
{ |
|
"epoch": 2.2290331568682085, |
|
"grad_norm": null,
|
"learning_rate": 1.3343108504398828e-05, |
|
"loss": 0.0, |
|
"step": 20000 |
|
}, |
|
{ |
|
"epoch": 2.2290331568682085, |
|
"eval_loss": null,
|
"eval_rouge1": 0.0, |
|
"eval_rouge2": 0.0, |
|
"eval_rougeL": 0.0, |
|
"eval_rougeLsum": 0.0, |
|
"eval_runtime": 5768.4551, |
|
"eval_samples_per_second": 2.317, |
|
"eval_steps_per_second": 0.145, |
|
"step": 20000 |
|
}, |
|
{ |
|
"epoch": 2.2847589857899138, |
|
"grad_norm": null,
|
"learning_rate": 1.2378453465040902e-05, |
|
"loss": 0.0, |
|
"step": 20500 |
|
}, |
|
{ |
|
"epoch": 2.340484814711619, |
|
"grad_norm": null,
|
"learning_rate": 1.1413798425682977e-05, |
|
"loss": 0.0, |
|
"step": 21000 |
|
}, |
|
{ |
|
"epoch": 2.396210643633324, |
|
"grad_norm": null,
|
"learning_rate": 1.0449143386325052e-05, |
|
"loss": 0.0, |
|
"step": 21500 |
|
}, |
|
{ |
|
"epoch": 2.4519364725550292, |
|
"grad_norm": null,
|
"learning_rate": 9.484488346967126e-06, |
|
"loss": 0.0, |
|
"step": 22000 |
|
}, |
|
{ |
|
"epoch": 2.4519364725550292, |
|
"eval_loss": null,
|
"eval_rouge1": 0.0, |
|
"eval_rouge2": 0.0, |
|
"eval_rougeL": 0.0, |
|
"eval_rougeLsum": 0.0, |
|
"eval_runtime": 5770.7442, |
|
"eval_samples_per_second": 2.317, |
|
"eval_steps_per_second": 0.145, |
|
"step": 22000 |
|
}, |
|
{ |
|
"epoch": 2.5076623014767345, |
|
"grad_norm": null,
|
"learning_rate": 8.5198333076092e-06, |
|
"loss": 0.0, |
|
"step": 22500 |
|
}, |
|
{ |
|
"epoch": 2.56338813039844, |
|
"grad_norm": null,
|
"learning_rate": 7.5551782682512745e-06, |
|
"loss": 0.0, |
|
"step": 23000 |
|
}, |
|
{ |
|
"epoch": 2.6191139593201447, |
|
"grad_norm": null,
|
"learning_rate": 6.5905232288933485e-06, |
|
"loss": 0.0, |
|
"step": 23500 |
|
}, |
|
{ |
|
"epoch": 2.67483978824185, |
|
"grad_norm": null,
|
"learning_rate": 5.6258681895354226e-06, |
|
"loss": 0.0, |
|
"step": 24000 |
|
}, |
|
{ |
|
"epoch": 2.67483978824185, |
|
"eval_loss": null,
|
"eval_rouge1": 0.0, |
|
"eval_rouge2": 0.0, |
|
"eval_rougeL": 0.0, |
|
"eval_rougeLsum": 0.0, |
|
"eval_runtime": 5767.1561, |
|
"eval_samples_per_second": 2.318, |
|
"eval_steps_per_second": 0.145, |
|
"step": 24000 |
|
}, |
|
{ |
|
"epoch": 2.7305656171635553, |
|
"grad_norm": null,
|
"learning_rate": 4.661213150177497e-06, |
|
"loss": 0.0, |
|
"step": 24500 |
|
}, |
|
{ |
|
"epoch": 2.7862914460852606, |
|
"grad_norm": null,
|
"learning_rate": 3.6965581108195706e-06, |
|
"loss": 0.0, |
|
"step": 25000 |
|
}, |
|
{ |
|
"epoch": 2.842017275006966, |
|
"grad_norm": null,
|
"learning_rate": 2.7319030714616455e-06, |
|
"loss": 0.0, |
|
"step": 25500 |
|
}, |
|
{ |
|
"epoch": 2.897743103928671, |
|
"grad_norm": null,
|
"learning_rate": 1.7672480321037198e-06, |
|
"loss": 0.0, |
|
"step": 26000 |
|
}, |
|
{ |
|
"epoch": 2.897743103928671, |
|
"eval_loss": null,
|
"eval_rouge1": 0.0, |
|
"eval_rouge2": 0.0, |
|
"eval_rougeL": 0.0, |
|
"eval_rougeLsum": 0.0, |
|
"eval_runtime": 5768.7671, |
|
"eval_samples_per_second": 2.317, |
|
"eval_steps_per_second": 0.145, |
|
"step": 26000 |
|
}, |
|
{ |
|
"epoch": 2.953468932850376, |
|
"grad_norm": null,
|
"learning_rate": 8.025929927457941e-07, |
|
"loss": 0.0, |
|
"step": 26500 |
|
}, |
|
{ |
|
"epoch": 2.999832822513235, |
|
"step": 26916, |
|
"total_flos": 4.3736503104621773e+18, |
|
"train_loss": 0.8739807110765321, |
|
"train_runtime": 102261.0562, |
|
"train_samples_per_second": 8.423, |
|
"train_steps_per_second": 0.263 |
|
} |
|
], |
|
"logging_steps": 500, |
|
"max_steps": 26916, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 3, |
|
"save_steps": 2000, |
|
"stateful_callbacks": { |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": true |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 4.3736503104621773e+18, |
|
"train_batch_size": 8, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|