ste_s1_sl16k_bs32_lr3e-5_ckpt87 / trainer_state.json
{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 87,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03508771929824561,
"grad_norm": 2.0672840185973786,
"learning_rate": 0.0,
"loss": 0.866,
"step": 1
},
{
"epoch": 0.07017543859649122,
"grad_norm": 2.2899858891181903,
"learning_rate": 4.2857142857142855e-06,
"loss": 1.043,
"step": 2
},
{
"epoch": 0.10526315789473684,
"grad_norm": 1.910333546900785,
"learning_rate": 8.571428571428571e-06,
"loss": 0.9429,
"step": 3
},
{
"epoch": 0.14035087719298245,
"grad_norm": 1.624979132025543,
"learning_rate": 1.2857142857142857e-05,
"loss": 0.985,
"step": 4
},
{
"epoch": 0.17543859649122806,
"grad_norm": 1.3301367316845518,
"learning_rate": 1.7142857142857142e-05,
"loss": 0.9899,
"step": 5
},
{
"epoch": 0.21052631578947367,
"grad_norm": 2.192370335122931,
"learning_rate": 2.1428571428571428e-05,
"loss": 0.9655,
"step": 6
},
{
"epoch": 0.24561403508771928,
"grad_norm": 2.2285319009652462,
"learning_rate": 2.5714285714285714e-05,
"loss": 0.9456,
"step": 7
},
{
"epoch": 0.2807017543859649,
"grad_norm": 2.541438185031773,
"learning_rate": 3e-05,
"loss": 0.9954,
"step": 8
},
{
"epoch": 0.3157894736842105,
"grad_norm": 2.2177826928862956,
"learning_rate": 2.999581555818041e-05,
"loss": 1.0037,
"step": 9
},
{
"epoch": 0.3508771929824561,
"grad_norm": 1.4333465946127133,
"learning_rate": 2.9983264567328756e-05,
"loss": 0.9675,
"step": 10
},
{
"epoch": 0.38596491228070173,
"grad_norm": 1.0448310378956214,
"learning_rate": 2.9962354029963835e-05,
"loss": 0.8559,
"step": 11
},
{
"epoch": 0.42105263157894735,
"grad_norm": 1.0781647166558237,
"learning_rate": 2.9933095612609253e-05,
"loss": 0.8733,
"step": 12
},
{
"epoch": 0.45614035087719296,
"grad_norm": 1.007650519854371,
"learning_rate": 2.989550563928436e-05,
"loss": 0.9403,
"step": 13
},
{
"epoch": 0.49122807017543857,
"grad_norm": 0.7907009024129646,
"learning_rate": 2.9849605082396678e-05,
"loss": 0.8458,
"step": 14
},
{
"epoch": 0.5263157894736842,
"grad_norm": 0.7042592030399148,
"learning_rate": 2.9795419551040836e-05,
"loss": 0.8727,
"step": 15
},
{
"epoch": 0.5614035087719298,
"grad_norm": 0.6419420174929784,
"learning_rate": 2.973297927671063e-05,
"loss": 0.8186,
"step": 16
},
{
"epoch": 0.5964912280701754,
"grad_norm": 0.8076296952096572,
"learning_rate": 2.966231909643208e-05,
"loss": 0.9296,
"step": 17
},
{
"epoch": 0.631578947368421,
"grad_norm": 0.6598191860375567,
"learning_rate": 2.958347843332696e-05,
"loss": 0.8284,
"step": 18
},
{
"epoch": 0.6666666666666666,
"grad_norm": 0.5560706149108875,
"learning_rate": 2.949650127461764e-05,
"loss": 0.8931,
"step": 19
},
{
"epoch": 0.7017543859649122,
"grad_norm": 0.5418452815767641,
"learning_rate": 2.940143614708549e-05,
"loss": 0.8911,
"step": 20
},
{
"epoch": 0.7368421052631579,
"grad_norm": 0.6442251215106719,
"learning_rate": 2.9298336089996538e-05,
"loss": 0.857,
"step": 21
},
{
"epoch": 0.7719298245614035,
"grad_norm": 0.5601338870306712,
"learning_rate": 2.9187258625509518e-05,
"loss": 0.8112,
"step": 22
},
{
"epoch": 0.8070175438596491,
"grad_norm": 0.5147949384274566,
"learning_rate": 2.906826572658278e-05,
"loss": 0.8752,
"step": 23
},
{
"epoch": 0.8421052631578947,
"grad_norm": 0.414227052576231,
"learning_rate": 2.8941423782397987e-05,
"loss": 0.8455,
"step": 24
},
{
"epoch": 0.8771929824561403,
"grad_norm": 0.4724293164215497,
"learning_rate": 2.8806803561319903e-05,
"loss": 0.8332,
"step": 25
},
{
"epoch": 0.9122807017543859,
"grad_norm": 0.5244546146099837,
"learning_rate": 2.866448017141291e-05,
"loss": 0.8992,
"step": 26
},
{
"epoch": 0.9473684210526315,
"grad_norm": 0.49846452691797277,
"learning_rate": 2.8514533018536286e-05,
"loss": 0.7989,
"step": 27
},
{
"epoch": 0.9824561403508771,
"grad_norm": 0.5139730942456431,
"learning_rate": 2.835704576204167e-05,
"loss": 0.8281,
"step": 28
},
{
"epoch": 1.0,
"grad_norm": 0.5139730942456431,
"learning_rate": 2.8192106268097336e-05,
"loss": 0.7089,
"step": 29
},
{
"epoch": 1.0350877192982457,
"grad_norm": 0.8522153801917955,
"learning_rate": 2.801980656066545e-05,
"loss": 0.7715,
"step": 30
},
{
"epoch": 1.0701754385964912,
"grad_norm": 0.48621319476048247,
"learning_rate": 2.78402427701595e-05,
"loss": 0.7326,
"step": 31
},
{
"epoch": 1.1052631578947367,
"grad_norm": 0.4931003602154137,
"learning_rate": 2.7653515079810744e-05,
"loss": 0.7796,
"step": 32
},
{
"epoch": 1.1403508771929824,
"grad_norm": 0.6820149173810659,
"learning_rate": 2.7459727669773344e-05,
"loss": 0.7558,
"step": 33
},
{
"epoch": 1.1754385964912282,
"grad_norm": 0.6039546286330506,
"learning_rate": 2.725898865899967e-05,
"loss": 0.7164,
"step": 34
},
{
"epoch": 1.2105263157894737,
"grad_norm": 0.49413279486249445,
"learning_rate": 2.705141004491792e-05,
"loss": 0.771,
"step": 35
},
{
"epoch": 1.2456140350877192,
"grad_norm": 0.46540726916290787,
"learning_rate": 2.6837107640945904e-05,
"loss": 0.7745,
"step": 36
},
{
"epoch": 1.280701754385965,
"grad_norm": 0.7142115362037698,
"learning_rate": 2.6616201011875792e-05,
"loss": 0.7457,
"step": 37
},
{
"epoch": 1.3157894736842106,
"grad_norm": 0.5357068778365942,
"learning_rate": 2.638881340716583e-05,
"loss": 0.7222,
"step": 38
},
{
"epoch": 1.3508771929824561,
"grad_norm": 0.46939662640391816,
"learning_rate": 2.6155071692176348e-05,
"loss": 0.6748,
"step": 39
},
{
"epoch": 1.3859649122807016,
"grad_norm": 0.537500418669669,
"learning_rate": 2.5915106277388293e-05,
"loss": 0.7777,
"step": 40
},
{
"epoch": 1.4210526315789473,
"grad_norm": 0.59073194769042,
"learning_rate": 2.566905104564393e-05,
"loss": 0.7173,
"step": 41
},
{
"epoch": 1.456140350877193,
"grad_norm": 0.42637743115128474,
"learning_rate": 2.541704327745013e-05,
"loss": 0.6769,
"step": 42
},
{
"epoch": 1.4912280701754386,
"grad_norm": 0.4344573872029452,
"learning_rate": 2.5159223574386117e-05,
"loss": 0.6656,
"step": 43
},
{
"epoch": 1.526315789473684,
"grad_norm": 0.5018768196524176,
"learning_rate": 2.489573578065821e-05,
"loss": 0.802,
"step": 44
},
{
"epoch": 1.5614035087719298,
"grad_norm": 0.4529698728985265,
"learning_rate": 2.4626726902845477e-05,
"loss": 0.7586,
"step": 45
},
{
"epoch": 1.5964912280701755,
"grad_norm": 0.48235341277848787,
"learning_rate": 2.4352347027881003e-05,
"loss": 0.6852,
"step": 46
},
{
"epoch": 1.631578947368421,
"grad_norm": 0.3627100867848393,
"learning_rate": 2.4072749239314565e-05,
"loss": 0.6858,
"step": 47
},
{
"epoch": 1.6666666666666665,
"grad_norm": 0.38664865341383037,
"learning_rate": 2.3788089531903372e-05,
"loss": 0.7365,
"step": 48
},
{
"epoch": 1.7017543859649122,
"grad_norm": 0.4545364316322588,
"learning_rate": 2.3498526724578637e-05,
"loss": 0.7538,
"step": 49
},
{
"epoch": 1.736842105263158,
"grad_norm": 0.518944898034962,
"learning_rate": 2.320422237183641e-05,
"loss": 0.7454,
"step": 50
},
{
"epoch": 1.7719298245614035,
"grad_norm": 0.4067307322789782,
"learning_rate": 2.2905340673602184e-05,
"loss": 0.7482,
"step": 51
},
{
"epoch": 1.807017543859649,
"grad_norm": 0.4862538497731571,
"learning_rate": 2.26020483836196e-05,
"loss": 0.7729,
"step": 52
},
{
"epoch": 1.8421052631578947,
"grad_norm": 0.46087867219067935,
"learning_rate": 2.229451471641422e-05,
"loss": 0.6216,
"step": 53
},
{
"epoch": 1.8771929824561404,
"grad_norm": 0.5430309866121377,
"learning_rate": 2.198291125288445e-05,
"loss": 0.7631,
"step": 54
},
{
"epoch": 1.912280701754386,
"grad_norm": 0.5141331795467418,
"learning_rate": 2.166741184457214e-05,
"loss": 0.7101,
"step": 55
},
{
"epoch": 1.9473684210526314,
"grad_norm": 0.39081537987082116,
"learning_rate": 2.1348192516666376e-05,
"loss": 0.7608,
"step": 56
},
{
"epoch": 1.9824561403508771,
"grad_norm": 0.4994585530441829,
"learning_rate": 2.1025431369794546e-05,
"loss": 0.7654,
"step": 57
},
{
"epoch": 2.0,
"grad_norm": 0.8215961436633803,
"learning_rate": 2.0699308480655397e-05,
"loss": 0.674,
"step": 58
},
{
"epoch": 2.0350877192982457,
"grad_norm": 0.7120328127826715,
"learning_rate": 2.03700058015497e-05,
"loss": 0.6013,
"step": 59
},
{
"epoch": 2.0701754385964914,
"grad_norm": 0.8161065595108032,
"learning_rate": 2.0037707058864343e-05,
"loss": 0.7149,
"step": 60
},
{
"epoch": 2.1052631578947367,
"grad_norm": 1.8952005651095847,
"learning_rate": 1.9702597650566723e-05,
"loss": 0.6234,
"step": 61
},
{
"epoch": 2.1403508771929824,
"grad_norm": 0.5215133657210498,
"learning_rate": 1.936486454276647e-05,
"loss": 0.6001,
"step": 62
},
{
"epoch": 2.175438596491228,
"grad_norm": 0.8401793670285913,
"learning_rate": 1.9024696165402272e-05,
"loss": 0.6392,
"step": 63
},
{
"epoch": 2.2105263157894735,
"grad_norm": 0.7463732624054893,
"learning_rate": 1.8682282307111988e-05,
"loss": 0.6392,
"step": 64
},
{
"epoch": 2.245614035087719,
"grad_norm": 0.449660878951785,
"learning_rate": 1.8337814009344716e-05,
"loss": 0.5725,
"step": 65
},
{
"epoch": 2.280701754385965,
"grad_norm": 0.6166851563782507,
"learning_rate": 1.7991483459773887e-05,
"loss": 0.6117,
"step": 66
},
{
"epoch": 2.3157894736842106,
"grad_norm": 0.4765184980194071,
"learning_rate": 1.7643483885070827e-05,
"loss": 0.5877,
"step": 67
},
{
"epoch": 2.3508771929824563,
"grad_norm": 0.539140038588759,
"learning_rate": 1.729400944309869e-05,
"loss": 0.6497,
"step": 68
},
{
"epoch": 2.3859649122807016,
"grad_norm": 0.48435908336158434,
"learning_rate": 1.6943255114586788e-05,
"loss": 0.6004,
"step": 69
},
{
"epoch": 2.4210526315789473,
"grad_norm": 1.0565779360065954,
"learning_rate": 1.659141659434587e-05,
"loss": 0.6898,
"step": 70
},
{
"epoch": 2.456140350877193,
"grad_norm": 0.44520837075665026,
"learning_rate": 1.623869018208499e-05,
"loss": 0.6189,
"step": 71
},
{
"epoch": 2.4912280701754383,
"grad_norm": 0.5141213138770342,
"learning_rate": 1.5885272672890842e-05,
"loss": 0.6262,
"step": 72
},
{
"epoch": 2.526315789473684,
"grad_norm": 0.5046502197354534,
"learning_rate": 1.553136124743081e-05,
"loss": 0.5617,
"step": 73
},
{
"epoch": 2.56140350877193,
"grad_norm": 0.47228102896457386,
"learning_rate": 1.517715336194077e-05,
"loss": 0.6288,
"step": 74
},
{
"epoch": 2.5964912280701755,
"grad_norm": 0.3945749736245955,
"learning_rate": 1.4822846638059234e-05,
"loss": 0.6189,
"step": 75
},
{
"epoch": 2.6315789473684212,
"grad_norm": 0.47554210947045383,
"learning_rate": 1.4468638752569193e-05,
"loss": 0.6187,
"step": 76
},
{
"epoch": 2.6666666666666665,
"grad_norm": 0.4168918402865985,
"learning_rate": 1.411472732710916e-05,
"loss": 0.6511,
"step": 77
},
{
"epoch": 2.7017543859649122,
"grad_norm": 0.3784629074937835,
"learning_rate": 1.3761309817915017e-05,
"loss": 0.6384,
"step": 78
},
{
"epoch": 2.736842105263158,
"grad_norm": 0.3766919801894043,
"learning_rate": 1.340858340565413e-05,
"loss": 0.6241,
"step": 79
},
{
"epoch": 2.7719298245614032,
"grad_norm": 0.3304739692544291,
"learning_rate": 1.3056744885413216e-05,
"loss": 0.5765,
"step": 80
},
{
"epoch": 2.807017543859649,
"grad_norm": 0.3593006941475982,
"learning_rate": 1.2705990556901311e-05,
"loss": 0.5384,
"step": 81
},
{
"epoch": 2.8421052631578947,
"grad_norm": 0.3790675720863581,
"learning_rate": 1.2356516114929176e-05,
"loss": 0.6123,
"step": 82
},
{
"epoch": 2.8771929824561404,
"grad_norm": 0.38906936709954903,
"learning_rate": 1.2008516540226115e-05,
"loss": 0.6196,
"step": 83
},
{
"epoch": 2.912280701754386,
"grad_norm": 0.38879375158868906,
"learning_rate": 1.1662185990655285e-05,
"loss": 0.6376,
"step": 84
},
{
"epoch": 2.9473684210526314,
"grad_norm": 0.37164660785811104,
"learning_rate": 1.1317717692888014e-05,
"loss": 0.5652,
"step": 85
},
{
"epoch": 2.982456140350877,
"grad_norm": 0.3398419044284849,
"learning_rate": 1.0975303834597734e-05,
"loss": 0.6338,
"step": 86
},
{
"epoch": 3.0,
"grad_norm": 0.3398419044284849,
"learning_rate": 1.0635135457233533e-05,
"loss": 0.4196,
"step": 87
}
],
"logging_steps": 1,
"max_steps": 140,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 39139677044736.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
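
A minimal sketch of how the log_history above can be inspected, assuming the file has been downloaded locally as trainer_state.json and that matplotlib is available; neither the path nor the plotting choice comes from the checkpoint itself. The recorded learning rates are consistent with a short linear warmup followed by a cosine-style decay toward max_steps = 140, although the schedule name is not stored in this file.

# Sketch only: load this trainer_state.json and plot loss / learning rate.
# "trainer_state.json" is an assumed local path; adjust it as needed.
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

history = state["log_history"]            # one dict per logged optimizer step
steps = [h["step"] for h in history]
loss = [h["loss"] for h in history]
lr = [h["learning_rate"] for h in history]

print(f"logged steps: {len(history)} of max_steps={state['max_steps']}")
print(f"final epoch: {state['epoch']}, final loss: {loss[-1]:.4f}")

# Two stacked panels: training loss on top, learning-rate schedule below.
fig, (ax_loss, ax_lr) = plt.subplots(2, 1, sharex=True, figsize=(6, 6))
ax_loss.plot(steps, loss)
ax_loss.set_ylabel("training loss")
ax_lr.plot(steps, lr)
ax_lr.set_ylabel("learning rate")
ax_lr.set_xlabel("global step")
fig.tight_layout()
plt.show()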