{
"best_metric": 1.0298420190811157,
"best_model_checkpoint": "data/Llama-31-8B_task-2_120-samples_config-1_full/checkpoint-88",
"epoch": 15.0,
"eval_steps": 500,
"global_step": 165,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.09090909090909091,
"grad_norm": 0.49713072180747986,
"learning_rate": 1.818181818181818e-06,
"loss": 1.5048,
"step": 1
},
{
"epoch": 0.18181818181818182,
"grad_norm": 0.6580377817153931,
"learning_rate": 3.636363636363636e-06,
"loss": 1.7142,
"step": 2
},
{
"epoch": 0.36363636363636365,
"grad_norm": 0.4897933304309845,
"learning_rate": 7.272727272727272e-06,
"loss": 1.5422,
"step": 4
},
{
"epoch": 0.5454545454545454,
"grad_norm": 0.507178783416748,
"learning_rate": 1.0909090909090909e-05,
"loss": 1.5741,
"step": 6
},
{
"epoch": 0.7272727272727273,
"grad_norm": 0.48809972405433655,
"learning_rate": 1.4545454545454545e-05,
"loss": 1.6288,
"step": 8
},
{
"epoch": 0.9090909090909091,
"grad_norm": 0.4498646557331085,
"learning_rate": 1.8181818181818182e-05,
"loss": 1.5352,
"step": 10
},
{
"epoch": 1.0,
"eval_loss": 1.53848397731781,
"eval_runtime": 24.3714,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 11
},
{
"epoch": 1.0909090909090908,
"grad_norm": 0.5035683512687683,
"learning_rate": 2.1818181818181818e-05,
"loss": 1.5785,
"step": 12
},
{
"epoch": 1.2727272727272727,
"grad_norm": 0.37113550305366516,
"learning_rate": 2.5454545454545454e-05,
"loss": 1.4958,
"step": 14
},
{
"epoch": 1.4545454545454546,
"grad_norm": 0.470533162355423,
"learning_rate": 2.909090909090909e-05,
"loss": 1.492,
"step": 16
},
{
"epoch": 1.6363636363636362,
"grad_norm": 0.3534870445728302,
"learning_rate": 3.272727272727273e-05,
"loss": 1.5078,
"step": 18
},
{
"epoch": 1.8181818181818183,
"grad_norm": 0.37165534496307373,
"learning_rate": 3.6363636363636364e-05,
"loss": 1.4209,
"step": 20
},
{
"epoch": 2.0,
"grad_norm": 0.2557329833507538,
"learning_rate": 4e-05,
"loss": 1.3167,
"step": 22
},
{
"epoch": 2.0,
"eval_loss": 1.3841285705566406,
"eval_runtime": 24.3704,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 22
},
{
"epoch": 2.1818181818181817,
"grad_norm": 0.225205197930336,
"learning_rate": 4.3636363636363636e-05,
"loss": 1.3669,
"step": 24
},
{
"epoch": 2.3636363636363638,
"grad_norm": 0.2652900218963623,
"learning_rate": 4.7272727272727275e-05,
"loss": 1.3475,
"step": 26
},
{
"epoch": 2.5454545454545454,
"grad_norm": 0.23897986114025116,
"learning_rate": 5.090909090909091e-05,
"loss": 1.2816,
"step": 28
},
{
"epoch": 2.7272727272727275,
"grad_norm": 0.2523474395275116,
"learning_rate": 5.4545454545454546e-05,
"loss": 1.2912,
"step": 30
},
{
"epoch": 2.909090909090909,
"grad_norm": 0.2484261691570282,
"learning_rate": 5.818181818181818e-05,
"loss": 1.226,
"step": 32
},
{
"epoch": 3.0,
"eval_loss": 1.2298388481140137,
"eval_runtime": 24.3715,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 33
},
{
"epoch": 3.090909090909091,
"grad_norm": 0.2355325073003769,
"learning_rate": 6.181818181818182e-05,
"loss": 1.2254,
"step": 34
},
{
"epoch": 3.2727272727272725,
"grad_norm": 0.23926451802253723,
"learning_rate": 6.545454545454546e-05,
"loss": 1.1508,
"step": 36
},
{
"epoch": 3.4545454545454546,
"grad_norm": 0.36814334988594055,
"learning_rate": 6.90909090909091e-05,
"loss": 1.142,
"step": 38
},
{
"epoch": 3.6363636363636362,
"grad_norm": 0.25748685002326965,
"learning_rate": 7.272727272727273e-05,
"loss": 1.0562,
"step": 40
},
{
"epoch": 3.8181818181818183,
"grad_norm": 0.4333396852016449,
"learning_rate": 7.636363636363637e-05,
"loss": 1.0066,
"step": 42
},
{
"epoch": 4.0,
"grad_norm": 0.16409271955490112,
"learning_rate": 8e-05,
"loss": 1.1287,
"step": 44
},
{
"epoch": 4.0,
"eval_loss": 1.0978490114212036,
"eval_runtime": 24.3666,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 44
},
{
"epoch": 4.181818181818182,
"grad_norm": 0.16672073304653168,
"learning_rate": 8.363636363636364e-05,
"loss": 1.0102,
"step": 46
},
{
"epoch": 4.363636363636363,
"grad_norm": 0.16206879913806915,
"learning_rate": 8.727272727272727e-05,
"loss": 1.0474,
"step": 48
},
{
"epoch": 4.545454545454545,
"grad_norm": 0.1528177112340927,
"learning_rate": 9.090909090909092e-05,
"loss": 1.0141,
"step": 50
},
{
"epoch": 4.7272727272727275,
"grad_norm": 0.1910155862569809,
"learning_rate": 9.454545454545455e-05,
"loss": 1.0145,
"step": 52
},
{
"epoch": 4.909090909090909,
"grad_norm": 0.1659548580646515,
"learning_rate": 9.818181818181818e-05,
"loss": 1.06,
"step": 54
},
{
"epoch": 5.0,
"eval_loss": 1.0666165351867676,
"eval_runtime": 24.3738,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 55
},
{
"epoch": 5.090909090909091,
"grad_norm": 0.16574902832508087,
"learning_rate": 9.999899300364532e-05,
"loss": 0.9889,
"step": 56
},
{
"epoch": 5.2727272727272725,
"grad_norm": 0.18105646967887878,
"learning_rate": 9.99909372761763e-05,
"loss": 1.0223,
"step": 58
},
{
"epoch": 5.454545454545454,
"grad_norm": 0.176856130361557,
"learning_rate": 9.997482711915927e-05,
"loss": 0.9484,
"step": 60
},
{
"epoch": 5.636363636363637,
"grad_norm": 0.18571056425571442,
"learning_rate": 9.99506651282272e-05,
"loss": 0.9777,
"step": 62
},
{
"epoch": 5.818181818181818,
"grad_norm": 0.18585607409477234,
"learning_rate": 9.991845519630678e-05,
"loss": 0.9662,
"step": 64
},
{
"epoch": 6.0,
"grad_norm": 0.1818024069070816,
"learning_rate": 9.987820251299122e-05,
"loss": 1.019,
"step": 66
},
{
"epoch": 6.0,
"eval_loss": 1.0471864938735962,
"eval_runtime": 24.3649,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 66
},
{
"epoch": 6.181818181818182,
"grad_norm": 0.2027784138917923,
"learning_rate": 9.982991356370404e-05,
"loss": 0.9194,
"step": 68
},
{
"epoch": 6.363636363636363,
"grad_norm": 0.2038961946964264,
"learning_rate": 9.977359612865423e-05,
"loss": 0.9722,
"step": 70
},
{
"epoch": 6.545454545454545,
"grad_norm": 0.19705639779567719,
"learning_rate": 9.970925928158274e-05,
"loss": 0.8931,
"step": 72
},
{
"epoch": 6.7272727272727275,
"grad_norm": 0.244102343916893,
"learning_rate": 9.963691338830044e-05,
"loss": 0.9479,
"step": 74
},
{
"epoch": 6.909090909090909,
"grad_norm": 0.18375569581985474,
"learning_rate": 9.955657010501806e-05,
"loss": 0.9816,
"step": 76
},
{
"epoch": 7.0,
"eval_loss": 1.0346657037734985,
"eval_runtime": 24.3789,
"eval_samples_per_second": 0.984,
"eval_steps_per_second": 0.984,
"step": 77
},
{
"epoch": 7.090909090909091,
"grad_norm": 0.1986258625984192,
"learning_rate": 9.946824237646824e-05,
"loss": 1.0092,
"step": 78
},
{
"epoch": 7.2727272727272725,
"grad_norm": 0.23222436010837555,
"learning_rate": 9.937194443381972e-05,
"loss": 0.8659,
"step": 80
},
{
"epoch": 7.454545454545454,
"grad_norm": 0.24806098639965057,
"learning_rate": 9.926769179238466e-05,
"loss": 0.8708,
"step": 82
},
{
"epoch": 7.636363636363637,
"grad_norm": 0.24764443933963776,
"learning_rate": 9.915550124911866e-05,
"loss": 0.9041,
"step": 84
},
{
"epoch": 7.818181818181818,
"grad_norm": 0.268614798784256,
"learning_rate": 9.903539087991462e-05,
"loss": 0.8901,
"step": 86
},
{
"epoch": 8.0,
"grad_norm": 0.26300254464149475,
"learning_rate": 9.890738003669029e-05,
"loss": 0.9461,
"step": 88
},
{
"epoch": 8.0,
"eval_loss": 1.0298420190811157,
"eval_runtime": 24.3698,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 88
},
{
"epoch": 8.181818181818182,
"grad_norm": 0.25266140699386597,
"learning_rate": 9.877148934427037e-05,
"loss": 0.8964,
"step": 90
},
{
"epoch": 8.363636363636363,
"grad_norm": 1.057136058807373,
"learning_rate": 9.862774069706346e-05,
"loss": 0.8322,
"step": 92
},
{
"epoch": 8.545454545454545,
"grad_norm": 0.33643069863319397,
"learning_rate": 9.847615725553456e-05,
"loss": 0.8889,
"step": 94
},
{
"epoch": 8.727272727272727,
"grad_norm": 0.30312344431877136,
"learning_rate": 9.831676344247342e-05,
"loss": 0.789,
"step": 96
},
{
"epoch": 8.909090909090908,
"grad_norm": 0.3003259301185608,
"learning_rate": 9.814958493905963e-05,
"loss": 0.8633,
"step": 98
},
{
"epoch": 9.0,
"eval_loss": 1.0388423204421997,
"eval_runtime": 24.3676,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 99
},
{
"epoch": 9.090909090909092,
"grad_norm": 0.3024749159812927,
"learning_rate": 9.797464868072488e-05,
"loss": 0.873,
"step": 100
},
{
"epoch": 9.272727272727273,
"grad_norm": 0.3733336627483368,
"learning_rate": 9.779198285281325e-05,
"loss": 0.7714,
"step": 102
},
{
"epoch": 9.454545454545455,
"grad_norm": 0.420226126909256,
"learning_rate": 9.760161688604008e-05,
"loss": 0.7677,
"step": 104
},
{
"epoch": 9.636363636363637,
"grad_norm": 0.3949902653694153,
"learning_rate": 9.740358145174998e-05,
"loss": 0.8519,
"step": 106
},
{
"epoch": 9.818181818181818,
"grad_norm": 0.41283905506134033,
"learning_rate": 9.719790845697533e-05,
"loss": 0.7942,
"step": 108
},
{
"epoch": 10.0,
"grad_norm": 0.4529339075088501,
"learning_rate": 9.698463103929542e-05,
"loss": 0.7599,
"step": 110
},
{
"epoch": 10.0,
"eval_loss": 1.059997797012329,
"eval_runtime": 24.3707,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 110
},
{
"epoch": 10.181818181818182,
"grad_norm": 0.4940122663974762,
"learning_rate": 9.676378356149734e-05,
"loss": 0.7012,
"step": 112
},
{
"epoch": 10.363636363636363,
"grad_norm": 0.4954428970813751,
"learning_rate": 9.653540160603956e-05,
"loss": 0.7028,
"step": 114
},
{
"epoch": 10.545454545454545,
"grad_norm": 0.49719637632369995,
"learning_rate": 9.629952196931901e-05,
"loss": 0.711,
"step": 116
},
{
"epoch": 10.727272727272727,
"grad_norm": 0.6597090363502502,
"learning_rate": 9.60561826557425e-05,
"loss": 0.6915,
"step": 118
},
{
"epoch": 10.909090909090908,
"grad_norm": 0.5840014219284058,
"learning_rate": 9.580542287160348e-05,
"loss": 0.7543,
"step": 120
},
{
"epoch": 11.0,
"eval_loss": 1.1064456701278687,
"eval_runtime": 24.3667,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 121
},
{
"epoch": 11.090909090909092,
"grad_norm": 0.6744766235351562,
"learning_rate": 9.554728301876526e-05,
"loss": 0.684,
"step": 122
},
{
"epoch": 11.272727272727273,
"grad_norm": 0.7541329860687256,
"learning_rate": 9.528180468815155e-05,
"loss": 0.6297,
"step": 124
},
{
"epoch": 11.454545454545455,
"grad_norm": 0.6155569553375244,
"learning_rate": 9.50090306530454e-05,
"loss": 0.6376,
"step": 126
},
{
"epoch": 11.636363636363637,
"grad_norm": 0.5912525653839111,
"learning_rate": 9.472900486219769e-05,
"loss": 0.6693,
"step": 128
},
{
"epoch": 11.818181818181818,
"grad_norm": 0.7152079939842224,
"learning_rate": 9.444177243274618e-05,
"loss": 0.6336,
"step": 130
},
{
"epoch": 12.0,
"grad_norm": 0.6318918466567993,
"learning_rate": 9.414737964294636e-05,
"loss": 0.5739,
"step": 132
},
{
"epoch": 12.0,
"eval_loss": 1.159354567527771,
"eval_runtime": 24.3607,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 132
},
{
"epoch": 12.181818181818182,
"grad_norm": 0.6897273659706116,
"learning_rate": 9.384587392471515e-05,
"loss": 0.5698,
"step": 134
},
{
"epoch": 12.363636363636363,
"grad_norm": 0.8041525483131409,
"learning_rate": 9.353730385598887e-05,
"loss": 0.5094,
"step": 136
},
{
"epoch": 12.545454545454545,
"grad_norm": 0.6993588209152222,
"learning_rate": 9.322171915289635e-05,
"loss": 0.5329,
"step": 138
},
{
"epoch": 12.727272727272727,
"grad_norm": 0.8220525979995728,
"learning_rate": 9.289917066174886e-05,
"loss": 0.5145,
"step": 140
},
{
"epoch": 12.909090909090908,
"grad_norm": 0.6971898078918457,
"learning_rate": 9.256971035084785e-05,
"loss": 0.5887,
"step": 142
},
{
"epoch": 13.0,
"eval_loss": 1.194595456123352,
"eval_runtime": 24.3612,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 143
},
{
"epoch": 13.090909090909092,
"grad_norm": 0.9051700830459595,
"learning_rate": 9.223339130211192e-05,
"loss": 0.4197,
"step": 144
},
{
"epoch": 13.272727272727273,
"grad_norm": 1.2017525434494019,
"learning_rate": 9.189026770252436e-05,
"loss": 0.4788,
"step": 146
},
{
"epoch": 13.454545454545455,
"grad_norm": 0.8263213634490967,
"learning_rate": 9.154039483540273e-05,
"loss": 0.5146,
"step": 148
},
{
"epoch": 13.636363636363637,
"grad_norm": 0.9688396453857422,
"learning_rate": 9.118382907149165e-05,
"loss": 0.459,
"step": 150
},
{
"epoch": 13.818181818181818,
"grad_norm": 0.8769497871398926,
"learning_rate": 9.082062785988049e-05,
"loss": 0.425,
"step": 152
},
{
"epoch": 14.0,
"grad_norm": 0.868798017501831,
"learning_rate": 9.045084971874738e-05,
"loss": 0.3635,
"step": 154
},
{
"epoch": 14.0,
"eval_loss": 1.3156319856643677,
"eval_runtime": 24.3601,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 154
},
{
"epoch": 14.181818181818182,
"grad_norm": 0.8629772067070007,
"learning_rate": 9.007455422593077e-05,
"loss": 0.3637,
"step": 156
},
{
"epoch": 14.363636363636363,
"grad_norm": 1.1226879358291626,
"learning_rate": 8.969180200933047e-05,
"loss": 0.3725,
"step": 158
},
{
"epoch": 14.545454545454545,
"grad_norm": 0.8470020294189453,
"learning_rate": 8.930265473713938e-05,
"loss": 0.4403,
"step": 160
},
{
"epoch": 14.727272727272727,
"grad_norm": 1.1817563772201538,
"learning_rate": 8.890717510790763e-05,
"loss": 0.3476,
"step": 162
},
{
"epoch": 14.909090909090908,
"grad_norm": 0.9473496675491333,
"learning_rate": 8.850542684044078e-05,
"loss": 0.2782,
"step": 164
},
{
"epoch": 15.0,
"eval_loss": 1.3628578186035156,
"eval_runtime": 24.3663,
"eval_samples_per_second": 0.985,
"eval_steps_per_second": 0.985,
"step": 165
},
{
"epoch": 15.0,
"step": 165,
"total_flos": 1.2546788345407078e+17,
"train_loss": 0.9098414410244334,
"train_runtime": 4509.3746,
"train_samples_per_second": 0.976,
"train_steps_per_second": 0.122
}
],
"logging_steps": 2,
"max_steps": 550,
"num_input_tokens_seen": 0,
"num_train_epochs": 50,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 7,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.2546788345407078e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
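
The JSON above is the Trainer state saved alongside the checkpoint: the best eval_loss (1.0298) is reached at epoch 8 / step 88, and the seven following evaluations (epochs 9 through 15) never improve on it, which matches the EarlyStoppingCallback patience of 7 and explains why training ends at epoch 15 of the configured 50. A minimal sketch of how such a file could be inspected, using only the standard library; the file name "trainer_state.json" is assumed to be the local copy of the state shown above.

# sketch_inspect_trainer_state.py -- illustrative only, not part of the checkpoint
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Pull (epoch, eval_loss) pairs out of log_history; training-loss entries lack eval_loss.
evals = [(e["epoch"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

best_epoch, best_loss = min(evals, key=lambda pair: pair[1])
print(f"best eval_loss {best_loss:.4f} at epoch {best_epoch:g}")
print(f"best checkpoint: {state['best_model_checkpoint']}")

# Re-derive the early-stopping condition: count consecutive evaluations that
# fail to improve on the running best (threshold is 0.0 in this run).
patience = state["stateful_callbacks"]["EarlyStoppingCallback"]["args"]["early_stopping_patience"]
running_best = float("inf")
non_improving = 0
for epoch, loss in evals:
    if loss < running_best:
        running_best = loss
        non_improving = 0
    else:
        non_improving += 1
print(f"consecutive non-improving evals: {non_improving} (patience = {patience})")

For this state the script would report 7 consecutive non-improving evaluations against a patience of 7, consistent with "should_training_stop": true in TrainerControl.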