{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.0,
"eval_steps": 500,
"global_step": 116,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03508771929824561,
"grad_norm": 2.067214871055153,
"learning_rate": 0.0,
"loss": 0.866,
"step": 1
},
{
"epoch": 0.07017543859649122,
"grad_norm": 2.2902974488934356,
"learning_rate": 4.2857142857142855e-06,
"loss": 1.043,
"step": 2
},
{
"epoch": 0.10526315789473684,
"grad_norm": 1.8959065809264741,
"learning_rate": 8.571428571428571e-06,
"loss": 0.9429,
"step": 3
},
{
"epoch": 0.14035087719298245,
"grad_norm": 1.7531397239789308,
"learning_rate": 1.2857142857142857e-05,
"loss": 0.9852,
"step": 4
},
{
"epoch": 0.17543859649122806,
"grad_norm": 1.9134009040538502,
"learning_rate": 1.7142857142857142e-05,
"loss": 0.9898,
"step": 5
},
{
"epoch": 0.21052631578947367,
"grad_norm": 2.4981051959996017,
"learning_rate": 2.1428571428571428e-05,
"loss": 0.9729,
"step": 6
},
{
"epoch": 0.24561403508771928,
"grad_norm": 2.1259191076410326,
"learning_rate": 2.5714285714285714e-05,
"loss": 0.9501,
"step": 7
},
{
"epoch": 0.2807017543859649,
"grad_norm": 2.6083028457024127,
"learning_rate": 3e-05,
"loss": 0.9957,
"step": 8
},
{
"epoch": 0.3157894736842105,
"grad_norm": 2.2174618708678193,
"learning_rate": 2.999581555818041e-05,
"loss": 1.003,
"step": 9
},
{
"epoch": 0.3508771929824561,
"grad_norm": 1.310512297596868,
"learning_rate": 2.9983264567328756e-05,
"loss": 0.9666,
"step": 10
},
{
"epoch": 0.38596491228070173,
"grad_norm": 1.0417774774374975,
"learning_rate": 2.9962354029963835e-05,
"loss": 0.8532,
"step": 11
},
{
"epoch": 0.42105263157894735,
"grad_norm": 1.096319575622801,
"learning_rate": 2.9933095612609253e-05,
"loss": 0.8722,
"step": 12
},
{
"epoch": 0.45614035087719296,
"grad_norm": 1.051828485887936,
"learning_rate": 2.989550563928436e-05,
"loss": 0.94,
"step": 13
},
{
"epoch": 0.49122807017543857,
"grad_norm": 0.7880955122292899,
"learning_rate": 2.9849605082396678e-05,
"loss": 0.8454,
"step": 14
},
{
"epoch": 0.5263157894736842,
"grad_norm": 0.6655179120990646,
"learning_rate": 2.9795419551040836e-05,
"loss": 0.8716,
"step": 15
},
{
"epoch": 0.5614035087719298,
"grad_norm": 0.6709188694271632,
"learning_rate": 2.973297927671063e-05,
"loss": 0.8177,
"step": 16
},
{
"epoch": 0.5964912280701754,
"grad_norm": 0.7681555199843141,
"learning_rate": 2.966231909643208e-05,
"loss": 0.9286,
"step": 17
},
{
"epoch": 0.631578947368421,
"grad_norm": 0.6474330407996667,
"learning_rate": 2.958347843332696e-05,
"loss": 0.8274,
"step": 18
},
{
"epoch": 0.6666666666666666,
"grad_norm": 0.5325702575478014,
"learning_rate": 2.949650127461764e-05,
"loss": 0.8929,
"step": 19
},
{
"epoch": 0.7017543859649122,
"grad_norm": 0.5849013227799386,
"learning_rate": 2.940143614708549e-05,
"loss": 0.891,
"step": 20
},
{
"epoch": 0.7368421052631579,
"grad_norm": 0.6549654089627338,
"learning_rate": 2.9298336089996538e-05,
"loss": 0.8568,
"step": 21
},
{
"epoch": 0.7719298245614035,
"grad_norm": 0.5258630198755742,
"learning_rate": 2.9187258625509518e-05,
"loss": 0.8109,
"step": 22
},
{
"epoch": 0.8070175438596491,
"grad_norm": 0.4998629934118094,
"learning_rate": 2.906826572658278e-05,
"loss": 0.875,
"step": 23
},
{
"epoch": 0.8421052631578947,
"grad_norm": 0.42855717754389155,
"learning_rate": 2.8941423782397987e-05,
"loss": 0.8454,
"step": 24
},
{
"epoch": 0.8771929824561403,
"grad_norm": 0.50628059004859,
"learning_rate": 2.8806803561319903e-05,
"loss": 0.8334,
"step": 25
},
{
"epoch": 0.9122807017543859,
"grad_norm": 0.5060863985075719,
"learning_rate": 2.866448017141291e-05,
"loss": 0.8989,
"step": 26
},
{
"epoch": 0.9473684210526315,
"grad_norm": 0.8609761967398525,
"learning_rate": 2.8514533018536286e-05,
"loss": 0.7991,
"step": 27
},
{
"epoch": 0.9824561403508771,
"grad_norm": 0.48863408976007106,
"learning_rate": 2.835704576204167e-05,
"loss": 0.8282,
"step": 28
},
{
"epoch": 1.0,
"grad_norm": 0.48863408976007106,
"learning_rate": 2.8192106268097336e-05,
"loss": 0.7073,
"step": 29
},
{
"epoch": 1.0350877192982457,
"grad_norm": 0.9713567127839504,
"learning_rate": 2.801980656066545e-05,
"loss": 0.7714,
"step": 30
},
{
"epoch": 1.0701754385964912,
"grad_norm": 0.4951617245737733,
"learning_rate": 2.78402427701595e-05,
"loss": 0.7343,
"step": 31
},
{
"epoch": 1.1052631578947367,
"grad_norm": 0.561308739752077,
"learning_rate": 2.7653515079810744e-05,
"loss": 0.7805,
"step": 32
},
{
"epoch": 1.1403508771929824,
"grad_norm": 0.6696065745196884,
"learning_rate": 2.7459727669773344e-05,
"loss": 0.7562,
"step": 33
},
{
"epoch": 1.1754385964912282,
"grad_norm": 0.574338497323505,
"learning_rate": 2.725898865899967e-05,
"loss": 0.7163,
"step": 34
},
{
"epoch": 1.2105263157894737,
"grad_norm": 0.5954448022407949,
"learning_rate": 2.705141004491792e-05,
"loss": 0.7716,
"step": 35
},
{
"epoch": 1.2456140350877192,
"grad_norm": 0.4473898865990269,
"learning_rate": 2.6837107640945904e-05,
"loss": 0.7742,
"step": 36
},
{
"epoch": 1.280701754385965,
"grad_norm": 0.7727798282627385,
"learning_rate": 2.6616201011875792e-05,
"loss": 0.744,
"step": 37
},
{
"epoch": 1.3157894736842106,
"grad_norm": 0.5121520923399718,
"learning_rate": 2.638881340716583e-05,
"loss": 0.7212,
"step": 38
},
{
"epoch": 1.3508771929824561,
"grad_norm": 0.4705537944247171,
"learning_rate": 2.6155071692176348e-05,
"loss": 0.674,
"step": 39
},
{
"epoch": 1.3859649122807016,
"grad_norm": 0.571084271601031,
"learning_rate": 2.5915106277388293e-05,
"loss": 0.7776,
"step": 40
},
{
"epoch": 1.4210526315789473,
"grad_norm": 0.5850344726559537,
"learning_rate": 2.566905104564393e-05,
"loss": 0.7177,
"step": 41
},
{
"epoch": 1.456140350877193,
"grad_norm": 0.4332588522858084,
"learning_rate": 2.541704327745013e-05,
"loss": 0.6776,
"step": 42
},
{
"epoch": 1.4912280701754386,
"grad_norm": 0.4393188232149638,
"learning_rate": 2.5159223574386117e-05,
"loss": 0.6675,
"step": 43
},
{
"epoch": 1.526315789473684,
"grad_norm": 0.4979789836021164,
"learning_rate": 2.489573578065821e-05,
"loss": 0.8005,
"step": 44
},
{
"epoch": 1.5614035087719298,
"grad_norm": 0.45732271681245434,
"learning_rate": 2.4626726902845477e-05,
"loss": 0.7579,
"step": 45
},
{
"epoch": 1.5964912280701755,
"grad_norm": 0.45795659974157954,
"learning_rate": 2.4352347027881003e-05,
"loss": 0.6847,
"step": 46
},
{
"epoch": 1.631578947368421,
"grad_norm": 0.3706866470068484,
"learning_rate": 2.4072749239314565e-05,
"loss": 0.6851,
"step": 47
},
{
"epoch": 1.6666666666666665,
"grad_norm": 0.3822700465065106,
"learning_rate": 2.3788089531903372e-05,
"loss": 0.7357,
"step": 48
},
{
"epoch": 1.7017543859649122,
"grad_norm": 0.4388840978268458,
"learning_rate": 2.3498526724578637e-05,
"loss": 0.7545,
"step": 49
},
{
"epoch": 1.736842105263158,
"grad_norm": 0.5096281415627839,
"learning_rate": 2.320422237183641e-05,
"loss": 0.7449,
"step": 50
},
{
"epoch": 1.7719298245614035,
"grad_norm": 0.4149064055586781,
"learning_rate": 2.2905340673602184e-05,
"loss": 0.7481,
"step": 51
},
{
"epoch": 1.807017543859649,
"grad_norm": 0.45646999516691433,
"learning_rate": 2.26020483836196e-05,
"loss": 0.7732,
"step": 52
},
{
"epoch": 1.8421052631578947,
"grad_norm": 0.45834783579018146,
"learning_rate": 2.229451471641422e-05,
"loss": 0.6229,
"step": 53
},
{
"epoch": 1.8771929824561404,
"grad_norm": 0.47503100724025626,
"learning_rate": 2.198291125288445e-05,
"loss": 0.7608,
"step": 54
},
{
"epoch": 1.912280701754386,
"grad_norm": 0.4978417847863162,
"learning_rate": 2.166741184457214e-05,
"loss": 0.7096,
"step": 55
},
{
"epoch": 1.9473684210526314,
"grad_norm": 0.3815502550665043,
"learning_rate": 2.1348192516666376e-05,
"loss": 0.7613,
"step": 56
},
{
"epoch": 1.9824561403508771,
"grad_norm": 0.5283326744530167,
"learning_rate": 2.1025431369794546e-05,
"loss": 0.7659,
"step": 57
},
{
"epoch": 2.0,
"grad_norm": 0.7991429287166618,
"learning_rate": 2.0699308480655397e-05,
"loss": 0.6749,
"step": 58
},
{
"epoch": 2.0350877192982457,
"grad_norm": 0.7075576798543697,
"learning_rate": 2.03700058015497e-05,
"loss": 0.6011,
"step": 59
},
{
"epoch": 2.0701754385964914,
"grad_norm": 0.6128577868137788,
"learning_rate": 2.0037707058864343e-05,
"loss": 0.7135,
"step": 60
},
{
"epoch": 2.1052631578947367,
"grad_norm": 2.7353412863881106,
"learning_rate": 1.9702597650566723e-05,
"loss": 0.6262,
"step": 61
},
{
"epoch": 2.1403508771929824,
"grad_norm": 1.436411571175325,
"learning_rate": 1.936486454276647e-05,
"loss": 0.6019,
"step": 62
},
{
"epoch": 2.175438596491228,
"grad_norm": 0.7724991117675305,
"learning_rate": 1.9024696165402272e-05,
"loss": 0.6402,
"step": 63
},
{
"epoch": 2.2105263157894735,
"grad_norm": 0.8129381352505199,
"learning_rate": 1.8682282307111988e-05,
"loss": 0.6407,
"step": 64
},
{
"epoch": 2.245614035087719,
"grad_norm": 0.4291210901719085,
"learning_rate": 1.8337814009344716e-05,
"loss": 0.5708,
"step": 65
},
{
"epoch": 2.280701754385965,
"grad_norm": 0.4866938146227632,
"learning_rate": 1.7991483459773887e-05,
"loss": 0.612,
"step": 66
},
{
"epoch": 2.3157894736842106,
"grad_norm": 0.501246587872298,
"learning_rate": 1.7643483885070827e-05,
"loss": 0.588,
"step": 67
},
{
"epoch": 2.3508771929824563,
"grad_norm": 0.6083185314221561,
"learning_rate": 1.729400944309869e-05,
"loss": 0.6506,
"step": 68
},
{
"epoch": 2.3859649122807016,
"grad_norm": 0.47109461910264055,
"learning_rate": 1.6943255114586788e-05,
"loss": 0.6001,
"step": 69
},
{
"epoch": 2.4210526315789473,
"grad_norm": 0.9612069814317359,
"learning_rate": 1.659141659434587e-05,
"loss": 0.6855,
"step": 70
},
{
"epoch": 2.456140350877193,
"grad_norm": 0.4415318960259018,
"learning_rate": 1.623869018208499e-05,
"loss": 0.6185,
"step": 71
},
{
"epoch": 2.4912280701754383,
"grad_norm": 0.5141343063620322,
"learning_rate": 1.5885272672890842e-05,
"loss": 0.625,
"step": 72
},
{
"epoch": 2.526315789473684,
"grad_norm": 0.5270279056557339,
"learning_rate": 1.553136124743081e-05,
"loss": 0.5617,
"step": 73
},
{
"epoch": 2.56140350877193,
"grad_norm": 0.466590856309372,
"learning_rate": 1.517715336194077e-05,
"loss": 0.6277,
"step": 74
},
{
"epoch": 2.5964912280701755,
"grad_norm": 0.43235236729979676,
"learning_rate": 1.4822846638059234e-05,
"loss": 0.6195,
"step": 75
},
{
"epoch": 2.6315789473684212,
"grad_norm": 0.491374794528347,
"learning_rate": 1.4468638752569193e-05,
"loss": 0.6175,
"step": 76
},
{
"epoch": 2.6666666666666665,
"grad_norm": 0.4192646842845411,
"learning_rate": 1.411472732710916e-05,
"loss": 0.6506,
"step": 77
},
{
"epoch": 2.7017543859649122,
"grad_norm": 0.40467023638867167,
"learning_rate": 1.3761309817915017e-05,
"loss": 0.637,
"step": 78
},
{
"epoch": 2.736842105263158,
"grad_norm": 0.43210383757416787,
"learning_rate": 1.340858340565413e-05,
"loss": 0.623,
"step": 79
},
{
"epoch": 2.7719298245614032,
"grad_norm": 0.3261551587036006,
"learning_rate": 1.3056744885413216e-05,
"loss": 0.5767,
"step": 80
},
{
"epoch": 2.807017543859649,
"grad_norm": 0.3518292750647773,
"learning_rate": 1.2705990556901311e-05,
"loss": 0.5364,
"step": 81
},
{
"epoch": 2.8421052631578947,
"grad_norm": 0.40707731514091194,
"learning_rate": 1.2356516114929176e-05,
"loss": 0.6132,
"step": 82
},
{
"epoch": 2.8771929824561404,
"grad_norm": 0.3942472555532828,
"learning_rate": 1.2008516540226115e-05,
"loss": 0.6193,
"step": 83
},
{
"epoch": 2.912280701754386,
"grad_norm": 0.44241351169227155,
"learning_rate": 1.1662185990655285e-05,
"loss": 0.6401,
"step": 84
},
{
"epoch": 2.9473684210526314,
"grad_norm": 0.3874748773873833,
"learning_rate": 1.1317717692888014e-05,
"loss": 0.5637,
"step": 85
},
{
"epoch": 2.982456140350877,
"grad_norm": 0.34302393254475577,
"learning_rate": 1.0975303834597734e-05,
"loss": 0.6325,
"step": 86
},
{
"epoch": 3.0,
"grad_norm": 0.34302393254475577,
"learning_rate": 1.0635135457233533e-05,
"loss": 0.4186,
"step": 87
},
{
"epoch": 3.0350877192982457,
"grad_norm": 0.9844351160592908,
"learning_rate": 1.0297402349433286e-05,
"loss": 0.5259,
"step": 88
},
{
"epoch": 3.0701754385964914,
"grad_norm": 0.9168409207310226,
"learning_rate": 9.96229294113566e-06,
"loss": 0.5808,
"step": 89
},
{
"epoch": 3.1052631578947367,
"grad_norm": 0.4844166706459174,
"learning_rate": 9.629994198450305e-06,
"loss": 0.5523,
"step": 90
},
{
"epoch": 3.1403508771929824,
"grad_norm": 1.1469572697445642,
"learning_rate": 9.300691519344602e-06,
"loss": 0.5111,
"step": 91
},
{
"epoch": 3.175438596491228,
"grad_norm": 1.1765186720119694,
"learning_rate": 8.974568630205462e-06,
"loss": 0.5249,
"step": 92
},
{
"epoch": 3.2105263157894735,
"grad_norm": 0.8176252024624937,
"learning_rate": 8.651807483333627e-06,
"loss": 0.5864,
"step": 93
},
{
"epoch": 3.245614035087719,
"grad_norm": 0.5279006826410574,
"learning_rate": 8.332588155427869e-06,
"loss": 0.496,
"step": 94
},
{
"epoch": 3.280701754385965,
"grad_norm": 0.6870894874287676,
"learning_rate": 8.017088747115554e-06,
"loss": 0.4908,
"step": 95
},
{
"epoch": 3.3157894736842106,
"grad_norm": 0.8638224803482439,
"learning_rate": 7.70548528358578e-06,
"loss": 0.5443,
"step": 96
},
{
"epoch": 3.3508771929824563,
"grad_norm": 0.6334251823605639,
"learning_rate": 7.397951616380401e-06,
"loss": 0.5157,
"step": 97
},
{
"epoch": 3.3859649122807016,
"grad_norm": 0.5006252100198026,
"learning_rate": 7.094659326397818e-06,
"loss": 0.4591,
"step": 98
},
{
"epoch": 3.4210526315789473,
"grad_norm": 0.580855649746264,
"learning_rate": 6.795777628163599e-06,
"loss": 0.4592,
"step": 99
},
{
"epoch": 3.456140350877193,
"grad_norm": 0.4864958551783637,
"learning_rate": 6.50147327542137e-06,
"loss": 0.4456,
"step": 100
},
{
"epoch": 3.4912280701754383,
"grad_norm": 0.4386151451501962,
"learning_rate": 6.211910468096631e-06,
"loss": 0.4912,
"step": 101
},
{
"epoch": 3.526315789473684,
"grad_norm": 0.5204884142263023,
"learning_rate": 5.927250760685441e-06,
"loss": 0.4882,
"step": 102
},
{
"epoch": 3.56140350877193,
"grad_norm": 0.4591459898899543,
"learning_rate": 5.647652972118998e-06,
"loss": 0.5029,
"step": 103
},
{
"epoch": 3.5964912280701755,
"grad_norm": 0.46162973899727977,
"learning_rate": 5.37327309715453e-06,
"loss": 0.4663,
"step": 104
},
{
"epoch": 3.6315789473684212,
"grad_norm": 0.4860430732891522,
"learning_rate": 5.104264219341793e-06,
"loss": 0.4381,
"step": 105
},
{
"epoch": 3.6666666666666665,
"grad_norm": 0.4382286914097587,
"learning_rate": 4.840776425613887e-06,
"loss": 0.4718,
"step": 106
},
{
"epoch": 3.7017543859649122,
"grad_norm": 0.3669218088879972,
"learning_rate": 4.5829567225498696e-06,
"loss": 0.4853,
"step": 107
},
{
"epoch": 3.736842105263158,
"grad_norm": 0.4914583934045724,
"learning_rate": 4.330948954356076e-06,
"loss": 0.5486,
"step": 108
},
{
"epoch": 3.7719298245614032,
"grad_norm": 0.4105131971302891,
"learning_rate": 4.084893722611706e-06,
"loss": 0.5743,
"step": 109
},
{
"epoch": 3.807017543859649,
"grad_norm": 0.4293923567900349,
"learning_rate": 3.844928307823655e-06,
"loss": 0.5066,
"step": 110
},
{
"epoch": 3.8421052631578947,
"grad_norm": 0.42234992523360537,
"learning_rate": 3.6111865928341723e-06,
"loss": 0.5164,
"step": 111
},
{
"epoch": 3.8771929824561404,
"grad_norm": 0.3550274138683157,
"learning_rate": 3.3837989881242142e-06,
"loss": 0.4636,
"step": 112
},
{
"epoch": 3.912280701754386,
"grad_norm": 0.3696171628607218,
"learning_rate": 3.162892359054098e-06,
"loss": 0.4773,
"step": 113
},
{
"epoch": 3.9473684210526314,
"grad_norm": 0.3651667188910503,
"learning_rate": 2.948589955082085e-06,
"loss": 0.5031,
"step": 114
},
{
"epoch": 3.982456140350877,
"grad_norm": 0.33560060580788487,
"learning_rate": 2.74101134100033e-06,
"loss": 0.5182,
"step": 115
},
{
"epoch": 4.0,
"grad_norm": 0.6522055904321776,
"learning_rate": 2.540272330226658e-06,
"loss": 0.4388,
"step": 116
}
],
"logging_steps": 1,
"max_steps": 140,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 52227047817216.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}