{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.842105263157895,
"eval_steps": 500,
"global_step": 140,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03508771929824561,
"grad_norm": 2.0672840185973786,
"learning_rate": 0.0,
"loss": 0.866,
"step": 1
},
{
"epoch": 0.07017543859649122,
"grad_norm": 2.2899858891181903,
"learning_rate": 4.2857142857142855e-06,
"loss": 1.043,
"step": 2
},
{
"epoch": 0.10526315789473684,
"grad_norm": 1.910333546900785,
"learning_rate": 8.571428571428571e-06,
"loss": 0.9429,
"step": 3
},
{
"epoch": 0.14035087719298245,
"grad_norm": 1.624979132025543,
"learning_rate": 1.2857142857142857e-05,
"loss": 0.985,
"step": 4
},
{
"epoch": 0.17543859649122806,
"grad_norm": 1.3301367316845518,
"learning_rate": 1.7142857142857142e-05,
"loss": 0.9899,
"step": 5
},
{
"epoch": 0.21052631578947367,
"grad_norm": 2.192370335122931,
"learning_rate": 2.1428571428571428e-05,
"loss": 0.9655,
"step": 6
},
{
"epoch": 0.24561403508771928,
"grad_norm": 2.2285319009652462,
"learning_rate": 2.5714285714285714e-05,
"loss": 0.9456,
"step": 7
},
{
"epoch": 0.2807017543859649,
"grad_norm": 2.541438185031773,
"learning_rate": 3e-05,
"loss": 0.9954,
"step": 8
},
{
"epoch": 0.3157894736842105,
"grad_norm": 2.2177826928862956,
"learning_rate": 2.999581555818041e-05,
"loss": 1.0037,
"step": 9
},
{
"epoch": 0.3508771929824561,
"grad_norm": 1.4333465946127133,
"learning_rate": 2.9983264567328756e-05,
"loss": 0.9675,
"step": 10
},
{
"epoch": 0.38596491228070173,
"grad_norm": 1.0448310378956214,
"learning_rate": 2.9962354029963835e-05,
"loss": 0.8559,
"step": 11
},
{
"epoch": 0.42105263157894735,
"grad_norm": 1.0781647166558237,
"learning_rate": 2.9933095612609253e-05,
"loss": 0.8733,
"step": 12
},
{
"epoch": 0.45614035087719296,
"grad_norm": 1.007650519854371,
"learning_rate": 2.989550563928436e-05,
"loss": 0.9403,
"step": 13
},
{
"epoch": 0.49122807017543857,
"grad_norm": 0.7907009024129646,
"learning_rate": 2.9849605082396678e-05,
"loss": 0.8458,
"step": 14
},
{
"epoch": 0.5263157894736842,
"grad_norm": 0.7042592030399148,
"learning_rate": 2.9795419551040836e-05,
"loss": 0.8727,
"step": 15
},
{
"epoch": 0.5614035087719298,
"grad_norm": 0.6419420174929784,
"learning_rate": 2.973297927671063e-05,
"loss": 0.8186,
"step": 16
},
{
"epoch": 0.5964912280701754,
"grad_norm": 0.8076296952096572,
"learning_rate": 2.966231909643208e-05,
"loss": 0.9296,
"step": 17
},
{
"epoch": 0.631578947368421,
"grad_norm": 0.6598191860375567,
"learning_rate": 2.958347843332696e-05,
"loss": 0.8284,
"step": 18
},
{
"epoch": 0.6666666666666666,
"grad_norm": 0.5560706149108875,
"learning_rate": 2.949650127461764e-05,
"loss": 0.8931,
"step": 19
},
{
"epoch": 0.7017543859649122,
"grad_norm": 0.5418452815767641,
"learning_rate": 2.940143614708549e-05,
"loss": 0.8911,
"step": 20
},
{
"epoch": 0.7368421052631579,
"grad_norm": 0.6442251215106719,
"learning_rate": 2.9298336089996538e-05,
"loss": 0.857,
"step": 21
},
{
"epoch": 0.7719298245614035,
"grad_norm": 0.5601338870306712,
"learning_rate": 2.9187258625509518e-05,
"loss": 0.8112,
"step": 22
},
{
"epoch": 0.8070175438596491,
"grad_norm": 0.5147949384274566,
"learning_rate": 2.906826572658278e-05,
"loss": 0.8752,
"step": 23
},
{
"epoch": 0.8421052631578947,
"grad_norm": 0.414227052576231,
"learning_rate": 2.8941423782397987e-05,
"loss": 0.8455,
"step": 24
},
{
"epoch": 0.8771929824561403,
"grad_norm": 0.4724293164215497,
"learning_rate": 2.8806803561319903e-05,
"loss": 0.8332,
"step": 25
},
{
"epoch": 0.9122807017543859,
"grad_norm": 0.5244546146099837,
"learning_rate": 2.866448017141291e-05,
"loss": 0.8992,
"step": 26
},
{
"epoch": 0.9473684210526315,
"grad_norm": 0.49846452691797277,
"learning_rate": 2.8514533018536286e-05,
"loss": 0.7989,
"step": 27
},
{
"epoch": 0.9824561403508771,
"grad_norm": 0.5139730942456431,
"learning_rate": 2.835704576204167e-05,
"loss": 0.8281,
"step": 28
},
{
"epoch": 1.0,
"grad_norm": 0.5139730942456431,
"learning_rate": 2.8192106268097336e-05,
"loss": 0.7089,
"step": 29
},
{
"epoch": 1.0350877192982457,
"grad_norm": 0.8522153801917955,
"learning_rate": 2.801980656066545e-05,
"loss": 0.7715,
"step": 30
},
{
"epoch": 1.0701754385964912,
"grad_norm": 0.48621319476048247,
"learning_rate": 2.78402427701595e-05,
"loss": 0.7326,
"step": 31
},
{
"epoch": 1.1052631578947367,
"grad_norm": 0.4931003602154137,
"learning_rate": 2.7653515079810744e-05,
"loss": 0.7796,
"step": 32
},
{
"epoch": 1.1403508771929824,
"grad_norm": 0.6820149173810659,
"learning_rate": 2.7459727669773344e-05,
"loss": 0.7558,
"step": 33
},
{
"epoch": 1.1754385964912282,
"grad_norm": 0.6039546286330506,
"learning_rate": 2.725898865899967e-05,
"loss": 0.7164,
"step": 34
},
{
"epoch": 1.2105263157894737,
"grad_norm": 0.49413279486249445,
"learning_rate": 2.705141004491792e-05,
"loss": 0.771,
"step": 35
},
{
"epoch": 1.2456140350877192,
"grad_norm": 0.46540726916290787,
"learning_rate": 2.6837107640945904e-05,
"loss": 0.7745,
"step": 36
},
{
"epoch": 1.280701754385965,
"grad_norm": 0.7142115362037698,
"learning_rate": 2.6616201011875792e-05,
"loss": 0.7457,
"step": 37
},
{
"epoch": 1.3157894736842106,
"grad_norm": 0.5357068778365942,
"learning_rate": 2.638881340716583e-05,
"loss": 0.7222,
"step": 38
},
{
"epoch": 1.3508771929824561,
"grad_norm": 0.46939662640391816,
"learning_rate": 2.6155071692176348e-05,
"loss": 0.6748,
"step": 39
},
{
"epoch": 1.3859649122807016,
"grad_norm": 0.537500418669669,
"learning_rate": 2.5915106277388293e-05,
"loss": 0.7777,
"step": 40
},
{
"epoch": 1.4210526315789473,
"grad_norm": 0.59073194769042,
"learning_rate": 2.566905104564393e-05,
"loss": 0.7173,
"step": 41
},
{
"epoch": 1.456140350877193,
"grad_norm": 0.42637743115128474,
"learning_rate": 2.541704327745013e-05,
"loss": 0.6769,
"step": 42
},
{
"epoch": 1.4912280701754386,
"grad_norm": 0.4344573872029452,
"learning_rate": 2.5159223574386117e-05,
"loss": 0.6656,
"step": 43
},
{
"epoch": 1.526315789473684,
"grad_norm": 0.5018768196524176,
"learning_rate": 2.489573578065821e-05,
"loss": 0.802,
"step": 44
},
{
"epoch": 1.5614035087719298,
"grad_norm": 0.4529698728985265,
"learning_rate": 2.4626726902845477e-05,
"loss": 0.7586,
"step": 45
},
{
"epoch": 1.5964912280701755,
"grad_norm": 0.48235341277848787,
"learning_rate": 2.4352347027881003e-05,
"loss": 0.6852,
"step": 46
},
{
"epoch": 1.631578947368421,
"grad_norm": 0.3627100867848393,
"learning_rate": 2.4072749239314565e-05,
"loss": 0.6858,
"step": 47
},
{
"epoch": 1.6666666666666665,
"grad_norm": 0.38664865341383037,
"learning_rate": 2.3788089531903372e-05,
"loss": 0.7365,
"step": 48
},
{
"epoch": 1.7017543859649122,
"grad_norm": 0.4545364316322588,
"learning_rate": 2.3498526724578637e-05,
"loss": 0.7538,
"step": 49
},
{
"epoch": 1.736842105263158,
"grad_norm": 0.518944898034962,
"learning_rate": 2.320422237183641e-05,
"loss": 0.7454,
"step": 50
},
{
"epoch": 1.7719298245614035,
"grad_norm": 0.4067307322789782,
"learning_rate": 2.2905340673602184e-05,
"loss": 0.7482,
"step": 51
},
{
"epoch": 1.807017543859649,
"grad_norm": 0.4862538497731571,
"learning_rate": 2.26020483836196e-05,
"loss": 0.7729,
"step": 52
},
{
"epoch": 1.8421052631578947,
"grad_norm": 0.46087867219067935,
"learning_rate": 2.229451471641422e-05,
"loss": 0.6216,
"step": 53
},
{
"epoch": 1.8771929824561404,
"grad_norm": 0.5430309866121377,
"learning_rate": 2.198291125288445e-05,
"loss": 0.7631,
"step": 54
},
{
"epoch": 1.912280701754386,
"grad_norm": 0.5141331795467418,
"learning_rate": 2.166741184457214e-05,
"loss": 0.7101,
"step": 55
},
{
"epoch": 1.9473684210526314,
"grad_norm": 0.39081537987082116,
"learning_rate": 2.1348192516666376e-05,
"loss": 0.7608,
"step": 56
},
{
"epoch": 1.9824561403508771,
"grad_norm": 0.4994585530441829,
"learning_rate": 2.1025431369794546e-05,
"loss": 0.7654,
"step": 57
},
{
"epoch": 2.0,
"grad_norm": 0.8215961436633803,
"learning_rate": 2.0699308480655397e-05,
"loss": 0.674,
"step": 58
},
{
"epoch": 2.0350877192982457,
"grad_norm": 0.7120328127826715,
"learning_rate": 2.03700058015497e-05,
"loss": 0.6013,
"step": 59
},
{
"epoch": 2.0701754385964914,
"grad_norm": 0.8161065595108032,
"learning_rate": 2.0037707058864343e-05,
"loss": 0.7149,
"step": 60
},
{
"epoch": 2.1052631578947367,
"grad_norm": 1.8952005651095847,
"learning_rate": 1.9702597650566723e-05,
"loss": 0.6234,
"step": 61
},
{
"epoch": 2.1403508771929824,
"grad_norm": 0.5215133657210498,
"learning_rate": 1.936486454276647e-05,
"loss": 0.6001,
"step": 62
},
{
"epoch": 2.175438596491228,
"grad_norm": 0.8401793670285913,
"learning_rate": 1.9024696165402272e-05,
"loss": 0.6392,
"step": 63
},
{
"epoch": 2.2105263157894735,
"grad_norm": 0.7463732624054893,
"learning_rate": 1.8682282307111988e-05,
"loss": 0.6392,
"step": 64
},
{
"epoch": 2.245614035087719,
"grad_norm": 0.449660878951785,
"learning_rate": 1.8337814009344716e-05,
"loss": 0.5725,
"step": 65
},
{
"epoch": 2.280701754385965,
"grad_norm": 0.6166851563782507,
"learning_rate": 1.7991483459773887e-05,
"loss": 0.6117,
"step": 66
},
{
"epoch": 2.3157894736842106,
"grad_norm": 0.4765184980194071,
"learning_rate": 1.7643483885070827e-05,
"loss": 0.5877,
"step": 67
},
{
"epoch": 2.3508771929824563,
"grad_norm": 0.539140038588759,
"learning_rate": 1.729400944309869e-05,
"loss": 0.6497,
"step": 68
},
{
"epoch": 2.3859649122807016,
"grad_norm": 0.48435908336158434,
"learning_rate": 1.6943255114586788e-05,
"loss": 0.6004,
"step": 69
},
{
"epoch": 2.4210526315789473,
"grad_norm": 1.0565779360065954,
"learning_rate": 1.659141659434587e-05,
"loss": 0.6898,
"step": 70
},
{
"epoch": 2.456140350877193,
"grad_norm": 0.44520837075665026,
"learning_rate": 1.623869018208499e-05,
"loss": 0.6189,
"step": 71
},
{
"epoch": 2.4912280701754383,
"grad_norm": 0.5141213138770342,
"learning_rate": 1.5885272672890842e-05,
"loss": 0.6262,
"step": 72
},
{
"epoch": 2.526315789473684,
"grad_norm": 0.5046502197354534,
"learning_rate": 1.553136124743081e-05,
"loss": 0.5617,
"step": 73
},
{
"epoch": 2.56140350877193,
"grad_norm": 0.47228102896457386,
"learning_rate": 1.517715336194077e-05,
"loss": 0.6288,
"step": 74
},
{
"epoch": 2.5964912280701755,
"grad_norm": 0.3945749736245955,
"learning_rate": 1.4822846638059234e-05,
"loss": 0.6189,
"step": 75
},
{
"epoch": 2.6315789473684212,
"grad_norm": 0.47554210947045383,
"learning_rate": 1.4468638752569193e-05,
"loss": 0.6187,
"step": 76
},
{
"epoch": 2.6666666666666665,
"grad_norm": 0.4168918402865985,
"learning_rate": 1.411472732710916e-05,
"loss": 0.6511,
"step": 77
},
{
"epoch": 2.7017543859649122,
"grad_norm": 0.3784629074937835,
"learning_rate": 1.3761309817915017e-05,
"loss": 0.6384,
"step": 78
},
{
"epoch": 2.736842105263158,
"grad_norm": 0.3766919801894043,
"learning_rate": 1.340858340565413e-05,
"loss": 0.6241,
"step": 79
},
{
"epoch": 2.7719298245614032,
"grad_norm": 0.3304739692544291,
"learning_rate": 1.3056744885413216e-05,
"loss": 0.5765,
"step": 80
},
{
"epoch": 2.807017543859649,
"grad_norm": 0.3593006941475982,
"learning_rate": 1.2705990556901311e-05,
"loss": 0.5384,
"step": 81
},
{
"epoch": 2.8421052631578947,
"grad_norm": 0.3790675720863581,
"learning_rate": 1.2356516114929176e-05,
"loss": 0.6123,
"step": 82
},
{
"epoch": 2.8771929824561404,
"grad_norm": 0.38906936709954903,
"learning_rate": 1.2008516540226115e-05,
"loss": 0.6196,
"step": 83
},
{
"epoch": 2.912280701754386,
"grad_norm": 0.38879375158868906,
"learning_rate": 1.1662185990655285e-05,
"loss": 0.6376,
"step": 84
},
{
"epoch": 2.9473684210526314,
"grad_norm": 0.37164660785811104,
"learning_rate": 1.1317717692888014e-05,
"loss": 0.5652,
"step": 85
},
{
"epoch": 2.982456140350877,
"grad_norm": 0.3398419044284849,
"learning_rate": 1.0975303834597734e-05,
"loss": 0.6338,
"step": 86
},
{
"epoch": 3.0,
"grad_norm": 0.3398419044284849,
"learning_rate": 1.0635135457233533e-05,
"loss": 0.4196,
"step": 87
},
{
"epoch": 3.0350877192982457,
"grad_norm": 1.0137232981739484,
"learning_rate": 1.0297402349433286e-05,
"loss": 0.5244,
"step": 88
},
{
"epoch": 3.0701754385964914,
"grad_norm": 1.00282256056707,
"learning_rate": 9.96229294113566e-06,
"loss": 0.5768,
"step": 89
},
{
"epoch": 3.1052631578947367,
"grad_norm": 0.7359804333242452,
"learning_rate": 9.629994198450305e-06,
"loss": 0.5524,
"step": 90
},
{
"epoch": 3.1403508771929824,
"grad_norm": 2.2445121599018316,
"learning_rate": 9.300691519344602e-06,
"loss": 0.5148,
"step": 91
},
{
"epoch": 3.175438596491228,
"grad_norm": 1.8476077615083395,
"learning_rate": 8.974568630205462e-06,
"loss": 0.5253,
"step": 92
},
{
"epoch": 3.2105263157894735,
"grad_norm": 0.9919680932647257,
"learning_rate": 8.651807483333627e-06,
"loss": 0.5889,
"step": 93
},
{
"epoch": 3.245614035087719,
"grad_norm": 0.6006306613920271,
"learning_rate": 8.332588155427869e-06,
"loss": 0.4976,
"step": 94
},
{
"epoch": 3.280701754385965,
"grad_norm": 0.8381288364191982,
"learning_rate": 8.017088747115554e-06,
"loss": 0.4911,
"step": 95
},
{
"epoch": 3.3157894736842106,
"grad_norm": 1.019844749447574,
"learning_rate": 7.70548528358578e-06,
"loss": 0.545,
"step": 96
},
{
"epoch": 3.3508771929824563,
"grad_norm": 0.686676528584022,
"learning_rate": 7.397951616380401e-06,
"loss": 0.5178,
"step": 97
},
{
"epoch": 3.3859649122807016,
"grad_norm": 0.44425862963385576,
"learning_rate": 7.094659326397818e-06,
"loss": 0.4589,
"step": 98
},
{
"epoch": 3.4210526315789473,
"grad_norm": 0.5838625291964793,
"learning_rate": 6.795777628163599e-06,
"loss": 0.458,
"step": 99
},
{
"epoch": 3.456140350877193,
"grad_norm": 0.5930203797342937,
"learning_rate": 6.50147327542137e-06,
"loss": 0.446,
"step": 100
},
{
"epoch": 3.4912280701754383,
"grad_norm": 0.5269168747999409,
"learning_rate": 6.211910468096631e-06,
"loss": 0.4934,
"step": 101
},
{
"epoch": 3.526315789473684,
"grad_norm": 0.5670642434002793,
"learning_rate": 5.927250760685441e-06,
"loss": 0.4873,
"step": 102
},
{
"epoch": 3.56140350877193,
"grad_norm": 0.4247200179986956,
"learning_rate": 5.647652972118998e-06,
"loss": 0.5014,
"step": 103
},
{
"epoch": 3.5964912280701755,
"grad_norm": 0.4430935108406106,
"learning_rate": 5.37327309715453e-06,
"loss": 0.4655,
"step": 104
},
{
"epoch": 3.6315789473684212,
"grad_norm": 0.5635679456028786,
"learning_rate": 5.104264219341793e-06,
"loss": 0.4347,
"step": 105
},
{
"epoch": 3.6666666666666665,
"grad_norm": 0.5287787974058867,
"learning_rate": 4.840776425613887e-06,
"loss": 0.4702,
"step": 106
},
{
"epoch": 3.7017543859649122,
"grad_norm": 0.39061619403610864,
"learning_rate": 4.5829567225498696e-06,
"loss": 0.4811,
"step": 107
},
{
"epoch": 3.736842105263158,
"grad_norm": 0.460388428002953,
"learning_rate": 4.330948954356076e-06,
"loss": 0.5509,
"step": 108
},
{
"epoch": 3.7719298245614032,
"grad_norm": 0.4278739474644464,
"learning_rate": 4.084893722611706e-06,
"loss": 0.5717,
"step": 109
},
{
"epoch": 3.807017543859649,
"grad_norm": 0.45200972992342586,
"learning_rate": 3.844928307823655e-06,
"loss": 0.5056,
"step": 110
},
{
"epoch": 3.8421052631578947,
"grad_norm": 0.4253590718408458,
"learning_rate": 3.6111865928341723e-06,
"loss": 0.5168,
"step": 111
},
{
"epoch": 3.8771929824561404,
"grad_norm": 0.32918346887072003,
"learning_rate": 3.3837989881242142e-06,
"loss": 0.4625,
"step": 112
},
{
"epoch": 3.912280701754386,
"grad_norm": 0.3745055783395016,
"learning_rate": 3.162892359054098e-06,
"loss": 0.4761,
"step": 113
},
{
"epoch": 3.9473684210526314,
"grad_norm": 0.4208498044542645,
"learning_rate": 2.948589955082085e-06,
"loss": 0.501,
"step": 114
},
{
"epoch": 3.982456140350877,
"grad_norm": 0.35518617384373624,
"learning_rate": 2.74101134100033e-06,
"loss": 0.518,
"step": 115
},
{
"epoch": 4.0,
"grad_norm": 0.6359258653193457,
"learning_rate": 2.540272330226658e-06,
"loss": 0.4388,
"step": 116
},
{
"epoch": 4.035087719298246,
"grad_norm": 0.9344007231992887,
"learning_rate": 2.3464849201892596e-06,
"loss": 0.4476,
"step": 117
},
{
"epoch": 4.0701754385964914,
"grad_norm": 0.7231074120032394,
"learning_rate": 2.1597572298405e-06,
"loss": 0.4268,
"step": 118
},
{
"epoch": 4.105263157894737,
"grad_norm": 0.4903922122084103,
"learning_rate": 1.980193439334554e-06,
"loss": 0.4815,
"step": 119
},
{
"epoch": 4.140350877192983,
"grad_norm": 0.5916262133643917,
"learning_rate": 1.8078937319026655e-06,
"loss": 0.4353,
"step": 120
},
{
"epoch": 4.175438596491228,
"grad_norm": 0.34841237956938415,
"learning_rate": 1.6429542379583313e-06,
"loss": 0.4176,
"step": 121
},
{
"epoch": 4.2105263157894735,
"grad_norm": 0.4947947773800059,
"learning_rate": 1.4854669814637145e-06,
"loss": 0.4334,
"step": 122
},
{
"epoch": 4.245614035087719,
"grad_norm": 0.4962400757050512,
"learning_rate": 1.3355198285870935e-06,
"loss": 0.4354,
"step": 123
},
{
"epoch": 4.280701754385965,
"grad_norm": 0.6480795706970779,
"learning_rate": 1.1931964386800991e-06,
"loss": 0.4221,
"step": 124
},
{
"epoch": 4.315789473684211,
"grad_norm": 0.5789623920753147,
"learning_rate": 1.0585762176020148e-06,
"loss": 0.4818,
"step": 125
},
{
"epoch": 4.350877192982456,
"grad_norm": 0.515709293090189,
"learning_rate": 9.317342734172213e-07,
"loss": 0.4548,
"step": 126
},
{
"epoch": 4.385964912280702,
"grad_norm": 0.6538071676655299,
"learning_rate": 8.127413744904805e-07,
"loss": 0.4475,
"step": 127
},
{
"epoch": 4.421052631578947,
"grad_norm": 0.41858161288361395,
"learning_rate": 7.016639100034627e-07,
"loss": 0.4535,
"step": 128
},
{
"epoch": 4.456140350877193,
"grad_norm": 0.38988164233302713,
"learning_rate": 5.985638529145115e-07,
"loss": 0.4714,
"step": 129
},
{
"epoch": 4.491228070175438,
"grad_norm": 0.4008059598246354,
"learning_rate": 5.034987253823614e-07,
"loss": 0.5021,
"step": 130
},
{
"epoch": 4.526315789473684,
"grad_norm": 0.3728752330965698,
"learning_rate": 4.165215666730415e-07,
"loss": 0.4285,
"step": 131
},
{
"epoch": 4.56140350877193,
"grad_norm": 0.33158568625741724,
"learning_rate": 3.376809035679218e-07,
"loss": 0.4621,
"step": 132
},
{
"epoch": 4.5964912280701755,
"grad_norm": 0.30943024786404216,
"learning_rate": 2.670207232893684e-07,
"loss": 0.3947,
"step": 133
},
{
"epoch": 4.631578947368421,
"grad_norm": 0.3543355424963944,
"learning_rate": 2.0458044895916516e-07,
"loss": 0.3815,
"step": 134
},
{
"epoch": 4.666666666666667,
"grad_norm": 0.3281884846876981,
"learning_rate": 1.503949176033259e-07,
"loss": 0.437,
"step": 135
},
{
"epoch": 4.701754385964913,
"grad_norm": 0.32730709189350027,
"learning_rate": 1.04494360715639e-07,
"loss": 0.4172,
"step": 136
},
{
"epoch": 4.7368421052631575,
"grad_norm": 0.34332536619087617,
"learning_rate": 6.690438739074767e-08,
"loss": 0.4577,
"step": 137
},
{
"epoch": 4.771929824561403,
"grad_norm": 0.32103263244796076,
"learning_rate": 3.764597003616421e-08,
"loss": 0.4919,
"step": 138
},
{
"epoch": 4.807017543859649,
"grad_norm": 0.3426179471144698,
"learning_rate": 1.6735432671243223e-08,
"loss": 0.4387,
"step": 139
},
{
"epoch": 4.842105263157895,
"grad_norm": 0.3081417797666205,
"learning_rate": 4.184441819588547e-09,
"loss": 0.4253,
"step": 140
}
],
"logging_steps": 1,
"max_steps": 140,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 63037458677760.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}