{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.9941650458460685,
"eval_steps": 500,
"global_step": 1347,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0022228396776882466,
"grad_norm": 0.31254997849464417,
"learning_rate": 2.2222222222222223e-05,
"loss": 1.8161,
"step": 1
},
{
"epoch": 0.004445679355376493,
"grad_norm": 0.3643212616443634,
"learning_rate": 4.4444444444444447e-05,
"loss": 1.8137,
"step": 2
},
{
"epoch": 0.0066685190330647405,
"grad_norm": 0.3575803339481354,
"learning_rate": 6.666666666666667e-05,
"loss": 1.7748,
"step": 3
},
{
"epoch": 0.008891358710752986,
"grad_norm": 0.31876519322395325,
"learning_rate": 8.888888888888889e-05,
"loss": 1.7532,
"step": 4
},
{
"epoch": 0.011114198388441235,
"grad_norm": 0.3756287693977356,
"learning_rate": 0.0001111111111111111,
"loss": 1.7228,
"step": 5
},
{
"epoch": 0.013337038066129481,
"grad_norm": 0.39089882373809814,
"learning_rate": 0.00013333333333333334,
"loss": 2.0789,
"step": 6
},
{
"epoch": 0.015559877743817728,
"grad_norm": 0.3725750148296356,
"learning_rate": 0.00015555555555555556,
"loss": 1.71,
"step": 7
},
{
"epoch": 0.017782717421505972,
"grad_norm": 0.3968382179737091,
"learning_rate": 0.00017777777777777779,
"loss": 1.8689,
"step": 8
},
{
"epoch": 0.02000555709919422,
"grad_norm": 0.3096616566181183,
"learning_rate": 0.0002,
"loss": 1.9166,
"step": 9
},
{
"epoch": 0.02222839677688247,
"grad_norm": 0.40228065848350525,
"learning_rate": 0.0002222222222222222,
"loss": 1.7125,
"step": 10
},
{
"epoch": 0.024451236454570716,
"grad_norm": 0.3223334848880768,
"learning_rate": 0.0002444444444444445,
"loss": 1.929,
"step": 11
},
{
"epoch": 0.026674076132258962,
"grad_norm": 0.28250545263290405,
"learning_rate": 0.0002666666666666667,
"loss": 1.6014,
"step": 12
},
{
"epoch": 0.02889691580994721,
"grad_norm": 0.3532133400440216,
"learning_rate": 0.0002888888888888889,
"loss": 1.8351,
"step": 13
},
{
"epoch": 0.031119755487635455,
"grad_norm": 0.28702786564826965,
"learning_rate": 0.0003111111111111111,
"loss": 1.7652,
"step": 14
},
{
"epoch": 0.0333425951653237,
"grad_norm": 0.30105912685394287,
"learning_rate": 0.0003333333333333333,
"loss": 1.6464,
"step": 15
},
{
"epoch": 0.035565434843011945,
"grad_norm": 0.2549647092819214,
"learning_rate": 0.00035555555555555557,
"loss": 1.7022,
"step": 16
},
{
"epoch": 0.037788274520700195,
"grad_norm": 0.280660480260849,
"learning_rate": 0.00037777777777777777,
"loss": 1.7242,
"step": 17
},
{
"epoch": 0.04001111419838844,
"grad_norm": 0.3075920641422272,
"learning_rate": 0.0004,
"loss": 1.5918,
"step": 18
},
{
"epoch": 0.04223395387607669,
"grad_norm": 0.2627553939819336,
"learning_rate": 0.00042222222222222227,
"loss": 1.6612,
"step": 19
},
{
"epoch": 0.04445679355376494,
"grad_norm": 0.24408374726772308,
"learning_rate": 0.0004444444444444444,
"loss": 1.7682,
"step": 20
},
{
"epoch": 0.04667963323145318,
"grad_norm": 0.27137765288352966,
"learning_rate": 0.00046666666666666666,
"loss": 1.6334,
"step": 21
},
{
"epoch": 0.04890247290914143,
"grad_norm": 0.25347405672073364,
"learning_rate": 0.000488888888888889,
"loss": 1.8713,
"step": 22
},
{
"epoch": 0.051125312586829674,
"grad_norm": 0.21500591933727264,
"learning_rate": 0.0005111111111111112,
"loss": 1.637,
"step": 23
},
{
"epoch": 0.053348152264517924,
"grad_norm": 0.23220418393611908,
"learning_rate": 0.0005333333333333334,
"loss": 1.6928,
"step": 24
},
{
"epoch": 0.05557099194220617,
"grad_norm": 0.21109041571617126,
"learning_rate": 0.0005555555555555556,
"loss": 1.5009,
"step": 25
},
{
"epoch": 0.05779383161989442,
"grad_norm": 0.19518163800239563,
"learning_rate": 0.0005777777777777778,
"loss": 1.5758,
"step": 26
},
{
"epoch": 0.06001667129758266,
"grad_norm": 0.19399304687976837,
"learning_rate": 0.0006000000000000001,
"loss": 1.6354,
"step": 27
},
{
"epoch": 0.06223951097527091,
"grad_norm": 0.2238541692495346,
"learning_rate": 0.0006222222222222223,
"loss": 1.4809,
"step": 28
},
{
"epoch": 0.06446235065295916,
"grad_norm": 0.17391888797283173,
"learning_rate": 0.0006444444444444444,
"loss": 1.3821,
"step": 29
},
{
"epoch": 0.0666851903306474,
"grad_norm": 0.17666837573051453,
"learning_rate": 0.0006666666666666666,
"loss": 1.3057,
"step": 30
},
{
"epoch": 0.06890803000833565,
"grad_norm": 0.19777463376522064,
"learning_rate": 0.000688888888888889,
"loss": 1.617,
"step": 31
},
{
"epoch": 0.07113086968602389,
"grad_norm": 0.16105899214744568,
"learning_rate": 0.0007111111111111111,
"loss": 1.5269,
"step": 32
},
{
"epoch": 0.07335370936371215,
"grad_norm": 0.17360083758831024,
"learning_rate": 0.0007333333333333333,
"loss": 1.4468,
"step": 33
},
{
"epoch": 0.07557654904140039,
"grad_norm": 0.1594723016023636,
"learning_rate": 0.0007555555555555555,
"loss": 1.4326,
"step": 34
},
{
"epoch": 0.07779938871908863,
"grad_norm": 0.1737031638622284,
"learning_rate": 0.0007777777777777777,
"loss": 1.405,
"step": 35
},
{
"epoch": 0.08002222839677688,
"grad_norm": 0.1713709533214569,
"learning_rate": 0.0008,
"loss": 1.1766,
"step": 36
},
{
"epoch": 0.08224506807446513,
"grad_norm": 0.1633702665567398,
"learning_rate": 0.0008222222222222222,
"loss": 1.3118,
"step": 37
},
{
"epoch": 0.08446790775215338,
"grad_norm": 0.17267894744873047,
"learning_rate": 0.0008444444444444445,
"loss": 1.3091,
"step": 38
},
{
"epoch": 0.08669074742984162,
"grad_norm": 0.14479492604732513,
"learning_rate": 0.0008666666666666666,
"loss": 1.378,
"step": 39
},
{
"epoch": 0.08891358710752988,
"grad_norm": 0.13730104267597198,
"learning_rate": 0.0008888888888888888,
"loss": 1.244,
"step": 40
},
{
"epoch": 0.09113642678521812,
"grad_norm": 0.17174997925758362,
"learning_rate": 0.0009111111111111111,
"loss": 1.3365,
"step": 41
},
{
"epoch": 0.09335926646290636,
"grad_norm": 0.13429133594036102,
"learning_rate": 0.0009333333333333333,
"loss": 1.0797,
"step": 42
},
{
"epoch": 0.0955821061405946,
"grad_norm": 0.1332227885723114,
"learning_rate": 0.0009555555555555556,
"loss": 1.3447,
"step": 43
},
{
"epoch": 0.09780494581828286,
"grad_norm": 0.10597763955593109,
"learning_rate": 0.000977777777777778,
"loss": 1.2595,
"step": 44
},
{
"epoch": 0.1000277854959711,
"grad_norm": 0.14317654073238373,
"learning_rate": 0.001,
"loss": 1.2789,
"step": 45
},
{
"epoch": 0.10225062517365935,
"grad_norm": 0.13641610741615295,
"learning_rate": 0.0010222222222222223,
"loss": 1.3444,
"step": 46
},
{
"epoch": 0.10447346485134759,
"grad_norm": 0.125591441988945,
"learning_rate": 0.0010444444444444444,
"loss": 1.1095,
"step": 47
},
{
"epoch": 0.10669630452903585,
"grad_norm": 0.14460553228855133,
"learning_rate": 0.0010666666666666667,
"loss": 1.2323,
"step": 48
},
{
"epoch": 0.10891914420672409,
"grad_norm": 0.12514247000217438,
"learning_rate": 0.001088888888888889,
"loss": 1.1664,
"step": 49
},
{
"epoch": 0.11114198388441233,
"grad_norm": 0.1322765350341797,
"learning_rate": 0.0011111111111111111,
"loss": 1.3675,
"step": 50
},
{
"epoch": 0.11336482356210058,
"grad_norm": 0.14224638044834137,
"learning_rate": 0.0011333333333333334,
"loss": 1.24,
"step": 51
},
{
"epoch": 0.11558766323978883,
"grad_norm": 0.11452258378267288,
"learning_rate": 0.0011555555555555555,
"loss": 1.3013,
"step": 52
},
{
"epoch": 0.11781050291747708,
"grad_norm": 0.1195889487862587,
"learning_rate": 0.0011777777777777778,
"loss": 1.1503,
"step": 53
},
{
"epoch": 0.12003334259516532,
"grad_norm": 0.1277323216199875,
"learning_rate": 0.0012000000000000001,
"loss": 1.1958,
"step": 54
},
{
"epoch": 0.12225618227285356,
"grad_norm": 0.09402500092983246,
"learning_rate": 0.0012222222222222222,
"loss": 0.9762,
"step": 55
},
{
"epoch": 0.12447902195054182,
"grad_norm": 0.10695713013410568,
"learning_rate": 0.0012444444444444445,
"loss": 1.1013,
"step": 56
},
{
"epoch": 0.12670186162823005,
"grad_norm": 0.10557325184345245,
"learning_rate": 0.0012666666666666666,
"loss": 1.0793,
"step": 57
},
{
"epoch": 0.12892470130591832,
"grad_norm": 0.12055651843547821,
"learning_rate": 0.001288888888888889,
"loss": 1.2221,
"step": 58
},
{
"epoch": 0.13114754098360656,
"grad_norm": 0.08459929376840591,
"learning_rate": 0.0013111111111111112,
"loss": 1.0976,
"step": 59
},
{
"epoch": 0.1333703806612948,
"grad_norm": 0.11813811212778091,
"learning_rate": 0.0013333333333333333,
"loss": 1.1168,
"step": 60
},
{
"epoch": 0.13559322033898305,
"grad_norm": 0.1075403243303299,
"learning_rate": 0.0013555555555555556,
"loss": 0.9292,
"step": 61
},
{
"epoch": 0.1378160600166713,
"grad_norm": 0.07610759884119034,
"learning_rate": 0.001377777777777778,
"loss": 1.0947,
"step": 62
},
{
"epoch": 0.14003889969435954,
"grad_norm": 0.09791035205125809,
"learning_rate": 0.0014,
"loss": 1.1571,
"step": 63
},
{
"epoch": 0.14226173937204778,
"grad_norm": 0.09495139867067337,
"learning_rate": 0.0014222222222222223,
"loss": 0.8295,
"step": 64
},
{
"epoch": 0.14448457904973605,
"grad_norm": 0.07287945598363876,
"learning_rate": 0.0014444444444444444,
"loss": 0.9453,
"step": 65
},
{
"epoch": 0.1467074187274243,
"grad_norm": 0.08833761513233185,
"learning_rate": 0.0014666666666666667,
"loss": 1.1003,
"step": 66
},
{
"epoch": 0.14893025840511254,
"grad_norm": 0.09454136341810226,
"learning_rate": 0.001488888888888889,
"loss": 1.1327,
"step": 67
},
{
"epoch": 0.15115309808280078,
"grad_norm": 0.09609611332416534,
"learning_rate": 0.001511111111111111,
"loss": 0.9176,
"step": 68
},
{
"epoch": 0.15337593776048902,
"grad_norm": 0.10027166455984116,
"learning_rate": 0.0015333333333333332,
"loss": 1.0967,
"step": 69
},
{
"epoch": 0.15559877743817727,
"grad_norm": 0.08003437519073486,
"learning_rate": 0.0015555555555555555,
"loss": 0.9908,
"step": 70
},
{
"epoch": 0.1578216171158655,
"grad_norm": 0.06798948347568512,
"learning_rate": 0.0015777777777777778,
"loss": 0.8607,
"step": 71
},
{
"epoch": 0.16004445679355375,
"grad_norm": 0.07019151747226715,
"learning_rate": 0.0016,
"loss": 0.9327,
"step": 72
},
{
"epoch": 0.16226729647124202,
"grad_norm": 0.09492333978414536,
"learning_rate": 0.0016222222222222222,
"loss": 0.8007,
"step": 73
},
{
"epoch": 0.16449013614893027,
"grad_norm": 0.06816477328538895,
"learning_rate": 0.0016444444444444445,
"loss": 0.8318,
"step": 74
},
{
"epoch": 0.1667129758266185,
"grad_norm": 0.05683727562427521,
"learning_rate": 0.0016666666666666668,
"loss": 0.816,
"step": 75
},
{
"epoch": 0.16893581550430675,
"grad_norm": 0.06219564750790596,
"learning_rate": 0.001688888888888889,
"loss": 0.8008,
"step": 76
},
{
"epoch": 0.171158655181995,
"grad_norm": 0.06426230072975159,
"learning_rate": 0.0017111111111111114,
"loss": 1.0033,
"step": 77
},
{
"epoch": 0.17338149485968324,
"grad_norm": 0.057185135781764984,
"learning_rate": 0.0017333333333333333,
"loss": 0.9249,
"step": 78
},
{
"epoch": 0.17560433453737148,
"grad_norm": 0.0551241934299469,
"learning_rate": 0.0017555555555555556,
"loss": 0.891,
"step": 79
},
{
"epoch": 0.17782717421505975,
"grad_norm": 0.05191711336374283,
"learning_rate": 0.0017777777777777776,
"loss": 0.9672,
"step": 80
},
{
"epoch": 0.180050013892748,
"grad_norm": 0.06342898309230804,
"learning_rate": 0.0018,
"loss": 0.9372,
"step": 81
},
{
"epoch": 0.18227285357043624,
"grad_norm": 0.05517818406224251,
"learning_rate": 0.0018222222222222223,
"loss": 0.8477,
"step": 82
},
{
"epoch": 0.18449569324812448,
"grad_norm": 0.058783963322639465,
"learning_rate": 0.0018444444444444446,
"loss": 1.0301,
"step": 83
},
{
"epoch": 0.18671853292581272,
"grad_norm": 0.05436277762055397,
"learning_rate": 0.0018666666666666666,
"loss": 0.8878,
"step": 84
},
{
"epoch": 0.18894137260350097,
"grad_norm": 0.041651614010334015,
"learning_rate": 0.001888888888888889,
"loss": 0.9357,
"step": 85
},
{
"epoch": 0.1911642122811892,
"grad_norm": 0.050092216581106186,
"learning_rate": 0.0019111111111111113,
"loss": 0.9938,
"step": 86
},
{
"epoch": 0.19338705195887745,
"grad_norm": 0.053640950471162796,
"learning_rate": 0.0019333333333333336,
"loss": 1.0159,
"step": 87
},
{
"epoch": 0.19560989163656572,
"grad_norm": 0.05056702345609665,
"learning_rate": 0.001955555555555556,
"loss": 0.9827,
"step": 88
},
{
"epoch": 0.19783273131425397,
"grad_norm": 0.04525972530245781,
"learning_rate": 0.0019777777777777775,
"loss": 0.7511,
"step": 89
},
{
"epoch": 0.2000555709919422,
"grad_norm": 0.04984632506966591,
"learning_rate": 0.002,
"loss": 0.8568,
"step": 90
},
{
"epoch": 0.20227841066963045,
"grad_norm": 0.04132724925875664,
"learning_rate": 0.002022222222222222,
"loss": 0.9806,
"step": 91
},
{
"epoch": 0.2045012503473187,
"grad_norm": 0.039617739617824554,
"learning_rate": 0.0020444444444444447,
"loss": 0.8346,
"step": 92
},
{
"epoch": 0.20672409002500694,
"grad_norm": 0.03755780681967735,
"learning_rate": 0.0020666666666666667,
"loss": 0.813,
"step": 93
},
{
"epoch": 0.20894692970269518,
"grad_norm": 0.038126129657030106,
"learning_rate": 0.002088888888888889,
"loss": 0.9437,
"step": 94
},
{
"epoch": 0.21116976938038343,
"grad_norm": 0.03865043818950653,
"learning_rate": 0.0021111111111111113,
"loss": 0.7994,
"step": 95
},
{
"epoch": 0.2133926090580717,
"grad_norm": 0.038602616637945175,
"learning_rate": 0.0021333333333333334,
"loss": 0.9596,
"step": 96
},
{
"epoch": 0.21561544873575994,
"grad_norm": 0.03432125225663185,
"learning_rate": 0.0021555555555555555,
"loss": 0.7337,
"step": 97
},
{
"epoch": 0.21783828841344818,
"grad_norm": 0.04551972821354866,
"learning_rate": 0.002177777777777778,
"loss": 0.9933,
"step": 98
},
{
"epoch": 0.22006112809113643,
"grad_norm": 0.03250821307301521,
"learning_rate": 0.0021999999999999997,
"loss": 0.8251,
"step": 99
},
{
"epoch": 0.22228396776882467,
"grad_norm": 0.03453311696648598,
"learning_rate": 0.0022222222222222222,
"loss": 0.8074,
"step": 100
},
{
"epoch": 0.2245068074465129,
"grad_norm": 0.041663624346256256,
"learning_rate": 0.0022444444444444443,
"loss": 0.7755,
"step": 101
},
{
"epoch": 0.22672964712420116,
"grad_norm": 0.035251155495643616,
"learning_rate": 0.002266666666666667,
"loss": 0.9015,
"step": 102
},
{
"epoch": 0.22895248680188943,
"grad_norm": 0.026816092431545258,
"learning_rate": 0.002288888888888889,
"loss": 0.6089,
"step": 103
},
{
"epoch": 0.23117532647957767,
"grad_norm": 0.025371363386511803,
"learning_rate": 0.002311111111111111,
"loss": 0.6579,
"step": 104
},
{
"epoch": 0.2333981661572659,
"grad_norm": 0.030030515044927597,
"learning_rate": 0.0023333333333333335,
"loss": 0.7587,
"step": 105
},
{
"epoch": 0.23562100583495416,
"grad_norm": 0.03042268380522728,
"learning_rate": 0.0023555555555555556,
"loss": 0.673,
"step": 106
},
{
"epoch": 0.2378438455126424,
"grad_norm": 0.03374524414539337,
"learning_rate": 0.002377777777777778,
"loss": 0.7638,
"step": 107
},
{
"epoch": 0.24006668519033064,
"grad_norm": 0.05823088809847832,
"learning_rate": 0.0024000000000000002,
"loss": 1.0445,
"step": 108
},
{
"epoch": 0.24228952486801889,
"grad_norm": 0.02552204020321369,
"learning_rate": 0.0024222222222222223,
"loss": 0.758,
"step": 109
},
{
"epoch": 0.24451236454570713,
"grad_norm": 0.030858848243951797,
"learning_rate": 0.0024444444444444444,
"loss": 0.8163,
"step": 110
},
{
"epoch": 0.2467352042233954,
"grad_norm": 0.030134430155158043,
"learning_rate": 0.0024666666666666665,
"loss": 0.8197,
"step": 111
},
{
"epoch": 0.24895804390108364,
"grad_norm": 0.03654756769537926,
"learning_rate": 0.002488888888888889,
"loss": 0.8028,
"step": 112
},
{
"epoch": 0.2511808835787719,
"grad_norm": 0.04132033884525299,
"learning_rate": 0.002511111111111111,
"loss": 0.8087,
"step": 113
},
{
"epoch": 0.2534037232564601,
"grad_norm": 0.02729404903948307,
"learning_rate": 0.002533333333333333,
"loss": 0.7327,
"step": 114
},
{
"epoch": 0.25562656293414837,
"grad_norm": 0.037324242293834686,
"learning_rate": 0.0025555555555555557,
"loss": 0.8203,
"step": 115
},
{
"epoch": 0.25784940261183664,
"grad_norm": 0.045340005308389664,
"learning_rate": 0.002577777777777778,
"loss": 1.1208,
"step": 116
},
{
"epoch": 0.26007224228952486,
"grad_norm": 0.03202158212661743,
"learning_rate": 0.0026000000000000003,
"loss": 0.7704,
"step": 117
},
{
"epoch": 0.26229508196721313,
"grad_norm": 0.031156064942479134,
"learning_rate": 0.0026222222222222224,
"loss": 0.6212,
"step": 118
},
{
"epoch": 0.26451792164490134,
"grad_norm": 0.0396842360496521,
"learning_rate": 0.0026444444444444445,
"loss": 0.8821,
"step": 119
},
{
"epoch": 0.2667407613225896,
"grad_norm": 0.024765778332948685,
"learning_rate": 0.0026666666666666666,
"loss": 0.7624,
"step": 120
},
{
"epoch": 0.26896360100027783,
"grad_norm": 0.0588197335600853,
"learning_rate": 0.0026888888888888887,
"loss": 0.7491,
"step": 121
},
{
"epoch": 0.2711864406779661,
"grad_norm": 0.04317701607942581,
"learning_rate": 0.002711111111111111,
"loss": 0.9822,
"step": 122
},
{
"epoch": 0.27340928035565437,
"grad_norm": 0.023474140092730522,
"learning_rate": 0.0027333333333333333,
"loss": 0.6543,
"step": 123
},
{
"epoch": 0.2756321200333426,
"grad_norm": 0.03395792841911316,
"learning_rate": 0.002755555555555556,
"loss": 0.9753,
"step": 124
},
{
"epoch": 0.27785495971103086,
"grad_norm": 0.03774833306670189,
"learning_rate": 0.002777777777777778,
"loss": 0.7568,
"step": 125
},
{
"epoch": 0.2800777993887191,
"grad_norm": 0.019662071019411087,
"learning_rate": 0.0028,
"loss": 0.683,
"step": 126
},
{
"epoch": 0.28230063906640734,
"grad_norm": 0.02699323743581772,
"learning_rate": 0.0028222222222222225,
"loss": 0.7626,
"step": 127
},
{
"epoch": 0.28452347874409556,
"grad_norm": 0.02954452484846115,
"learning_rate": 0.0028444444444444446,
"loss": 0.8323,
"step": 128
},
{
"epoch": 0.28674631842178383,
"grad_norm": 0.021271225064992905,
"learning_rate": 0.0028666666666666667,
"loss": 0.7864,
"step": 129
},
{
"epoch": 0.2889691580994721,
"grad_norm": 0.02944800816476345,
"learning_rate": 0.0028888888888888888,
"loss": 0.9257,
"step": 130
},
{
"epoch": 0.2911919977771603,
"grad_norm": 0.029844852164387703,
"learning_rate": 0.002911111111111111,
"loss": 0.7552,
"step": 131
},
{
"epoch": 0.2934148374548486,
"grad_norm": 0.023612311109900475,
"learning_rate": 0.0029333333333333334,
"loss": 0.6756,
"step": 132
},
{
"epoch": 0.2956376771325368,
"grad_norm": 0.017975427210330963,
"learning_rate": 0.0029555555555555555,
"loss": 0.7141,
"step": 133
},
{
"epoch": 0.2978605168102251,
"grad_norm": 0.038071002811193466,
"learning_rate": 0.002977777777777778,
"loss": 0.9247,
"step": 134
},
{
"epoch": 0.3000833564879133,
"grad_norm": 0.027641968801617622,
"learning_rate": 0.003,
"loss": 0.6383,
"step": 135
},
{
"epoch": 0.30230619616560156,
"grad_norm": 0.02428288944065571,
"learning_rate": 0.0029975247524752476,
"loss": 0.803,
"step": 136
},
{
"epoch": 0.30452903584328983,
"grad_norm": 0.055082373321056366,
"learning_rate": 0.002995049504950495,
"loss": 0.6262,
"step": 137
},
{
"epoch": 0.30675187552097805,
"grad_norm": 0.019244015216827393,
"learning_rate": 0.0029925742574257426,
"loss": 0.7297,
"step": 138
},
{
"epoch": 0.3089747151986663,
"grad_norm": 0.0185896847397089,
"learning_rate": 0.00299009900990099,
"loss": 0.775,
"step": 139
},
{
"epoch": 0.31119755487635453,
"grad_norm": 0.04311481863260269,
"learning_rate": 0.0029876237623762377,
"loss": 0.9192,
"step": 140
},
{
"epoch": 0.3134203945540428,
"grad_norm": 0.02153279259800911,
"learning_rate": 0.002985148514851485,
"loss": 0.7181,
"step": 141
},
{
"epoch": 0.315643234231731,
"grad_norm": 0.030037125572562218,
"learning_rate": 0.0029826732673267327,
"loss": 0.7947,
"step": 142
},
{
"epoch": 0.3178660739094193,
"grad_norm": 0.03192693367600441,
"learning_rate": 0.0029801980198019802,
"loss": 1.0311,
"step": 143
},
{
"epoch": 0.3200889135871075,
"grad_norm": 0.021701497957110405,
"learning_rate": 0.0029777227722772278,
"loss": 0.6511,
"step": 144
},
{
"epoch": 0.3223117532647958,
"grad_norm": 0.025493718683719635,
"learning_rate": 0.0029752475247524753,
"loss": 0.6231,
"step": 145
},
{
"epoch": 0.32453459294248405,
"grad_norm": 0.02308356948196888,
"learning_rate": 0.002972772277227723,
"loss": 0.7476,
"step": 146
},
{
"epoch": 0.32675743262017226,
"grad_norm": 0.01986018754541874,
"learning_rate": 0.0029702970297029703,
"loss": 0.7325,
"step": 147
},
{
"epoch": 0.32898027229786053,
"grad_norm": 0.017985232174396515,
"learning_rate": 0.002967821782178218,
"loss": 0.7233,
"step": 148
},
{
"epoch": 0.33120311197554875,
"grad_norm": 0.01952328160405159,
"learning_rate": 0.0029653465346534654,
"loss": 0.6821,
"step": 149
},
{
"epoch": 0.333425951653237,
"grad_norm": 0.020746653899550438,
"learning_rate": 0.002962871287128713,
"loss": 0.8254,
"step": 150
},
{
"epoch": 0.33564879133092523,
"grad_norm": 0.018172353506088257,
"learning_rate": 0.0029603960396039604,
"loss": 0.6146,
"step": 151
},
{
"epoch": 0.3378716310086135,
"grad_norm": 0.01994176022708416,
"learning_rate": 0.002957920792079208,
"loss": 0.8184,
"step": 152
},
{
"epoch": 0.3400944706863018,
"grad_norm": 0.026261357590556145,
"learning_rate": 0.0029554455445544555,
"loss": 0.7213,
"step": 153
},
{
"epoch": 0.34231731036399,
"grad_norm": 0.019761715084314346,
"learning_rate": 0.002952970297029703,
"loss": 0.7215,
"step": 154
},
{
"epoch": 0.34454015004167826,
"grad_norm": 0.022664040327072144,
"learning_rate": 0.0029504950495049505,
"loss": 0.8718,
"step": 155
},
{
"epoch": 0.3467629897193665,
"grad_norm": 0.027874581515789032,
"learning_rate": 0.002948019801980198,
"loss": 0.8164,
"step": 156
},
{
"epoch": 0.34898582939705475,
"grad_norm": 0.02273603528738022,
"learning_rate": 0.0029455445544554456,
"loss": 0.7851,
"step": 157
},
{
"epoch": 0.35120866907474296,
"grad_norm": 0.0187845416367054,
"learning_rate": 0.002943069306930693,
"loss": 0.7604,
"step": 158
},
{
"epoch": 0.35343150875243123,
"grad_norm": 0.019273824989795685,
"learning_rate": 0.0029405940594059406,
"loss": 0.5622,
"step": 159
},
{
"epoch": 0.3556543484301195,
"grad_norm": 0.02631755918264389,
"learning_rate": 0.002938118811881188,
"loss": 0.8661,
"step": 160
},
{
"epoch": 0.3578771881078077,
"grad_norm": 0.022346017882227898,
"learning_rate": 0.0029356435643564356,
"loss": 0.6781,
"step": 161
},
{
"epoch": 0.360100027785496,
"grad_norm": 0.021816574037075043,
"learning_rate": 0.002933168316831683,
"loss": 0.9022,
"step": 162
},
{
"epoch": 0.3623228674631842,
"grad_norm": 0.021531615406274796,
"learning_rate": 0.002930693069306931,
"loss": 0.6871,
"step": 163
},
{
"epoch": 0.3645457071408725,
"grad_norm": 0.022265218198299408,
"learning_rate": 0.002928217821782178,
"loss": 0.9247,
"step": 164
},
{
"epoch": 0.3667685468185607,
"grad_norm": 0.01669679768383503,
"learning_rate": 0.0029257425742574257,
"loss": 0.6005,
"step": 165
},
{
"epoch": 0.36899138649624896,
"grad_norm": 0.020891698077321053,
"learning_rate": 0.0029232673267326737,
"loss": 0.7063,
"step": 166
},
{
"epoch": 0.3712142261739372,
"grad_norm": 0.01642497256398201,
"learning_rate": 0.002920792079207921,
"loss": 0.7598,
"step": 167
},
{
"epoch": 0.37343706585162545,
"grad_norm": 0.022815469652414322,
"learning_rate": 0.0029183168316831683,
"loss": 0.7631,
"step": 168
},
{
"epoch": 0.3756599055293137,
"grad_norm": 0.02138301357626915,
"learning_rate": 0.0029158415841584163,
"loss": 0.7098,
"step": 169
},
{
"epoch": 0.37788274520700194,
"grad_norm": 0.022588403895497322,
"learning_rate": 0.0029133663366336634,
"loss": 0.8482,
"step": 170
},
{
"epoch": 0.3801055848846902,
"grad_norm": 0.017089666798710823,
"learning_rate": 0.002910891089108911,
"loss": 0.6223,
"step": 171
},
{
"epoch": 0.3823284245623784,
"grad_norm": 0.014961940236389637,
"learning_rate": 0.002908415841584159,
"loss": 0.6889,
"step": 172
},
{
"epoch": 0.3845512642400667,
"grad_norm": 0.022101087495684624,
"learning_rate": 0.002905940594059406,
"loss": 0.7824,
"step": 173
},
{
"epoch": 0.3867741039177549,
"grad_norm": 0.020935313776135445,
"learning_rate": 0.0029034653465346534,
"loss": 0.7214,
"step": 174
},
{
"epoch": 0.3889969435954432,
"grad_norm": 0.027509761974215508,
"learning_rate": 0.0029009900990099014,
"loss": 0.7776,
"step": 175
},
{
"epoch": 0.39121978327313145,
"grad_norm": 0.03217627480626106,
"learning_rate": 0.0028985148514851485,
"loss": 0.7934,
"step": 176
},
{
"epoch": 0.39344262295081966,
"grad_norm": 0.01634255051612854,
"learning_rate": 0.002896039603960396,
"loss": 0.6374,
"step": 177
},
{
"epoch": 0.39566546262850794,
"grad_norm": 0.016692742705345154,
"learning_rate": 0.002893564356435644,
"loss": 0.6864,
"step": 178
},
{
"epoch": 0.39788830230619615,
"grad_norm": 0.019375508651137352,
"learning_rate": 0.002891089108910891,
"loss": 0.7127,
"step": 179
},
{
"epoch": 0.4001111419838844,
"grad_norm": 0.02516164816915989,
"learning_rate": 0.0028886138613861386,
"loss": 0.6287,
"step": 180
},
{
"epoch": 0.40233398166157264,
"grad_norm": 0.024371830746531487,
"learning_rate": 0.0028861386138613865,
"loss": 0.6914,
"step": 181
},
{
"epoch": 0.4045568213392609,
"grad_norm": 0.023201432079076767,
"learning_rate": 0.0028836633663366336,
"loss": 0.8256,
"step": 182
},
{
"epoch": 0.4067796610169492,
"grad_norm": 0.03344796970486641,
"learning_rate": 0.002881188118811881,
"loss": 0.7973,
"step": 183
},
{
"epoch": 0.4090025006946374,
"grad_norm": 0.019620005041360855,
"learning_rate": 0.002878712871287129,
"loss": 0.7596,
"step": 184
},
{
"epoch": 0.41122534037232567,
"grad_norm": 0.017026478424668312,
"learning_rate": 0.002876237623762376,
"loss": 0.7195,
"step": 185
},
{
"epoch": 0.4134481800500139,
"grad_norm": 0.01848086714744568,
"learning_rate": 0.0028737623762376237,
"loss": 0.887,
"step": 186
},
{
"epoch": 0.41567101972770215,
"grad_norm": 0.02092432789504528,
"learning_rate": 0.0028712871287128717,
"loss": 0.7142,
"step": 187
},
{
"epoch": 0.41789385940539037,
"grad_norm": 0.060593266040086746,
"learning_rate": 0.0028688118811881188,
"loss": 0.6788,
"step": 188
},
{
"epoch": 0.42011669908307864,
"grad_norm": 0.018896559253335,
"learning_rate": 0.0028663366336633663,
"loss": 0.5818,
"step": 189
},
{
"epoch": 0.42233953876076685,
"grad_norm": 0.015938229858875275,
"learning_rate": 0.0028638613861386142,
"loss": 0.6667,
"step": 190
},
{
"epoch": 0.4245623784384551,
"grad_norm": 0.04060890153050423,
"learning_rate": 0.0028613861386138613,
"loss": 0.7224,
"step": 191
},
{
"epoch": 0.4267852181161434,
"grad_norm": 0.019955582916736603,
"learning_rate": 0.002858910891089109,
"loss": 0.7129,
"step": 192
},
{
"epoch": 0.4290080577938316,
"grad_norm": 0.02846820093691349,
"learning_rate": 0.002856435643564357,
"loss": 0.8338,
"step": 193
},
{
"epoch": 0.4312308974715199,
"grad_norm": 0.024327551946043968,
"learning_rate": 0.002853960396039604,
"loss": 0.7844,
"step": 194
},
{
"epoch": 0.4334537371492081,
"grad_norm": 0.022156810387969017,
"learning_rate": 0.0028514851485148514,
"loss": 0.7783,
"step": 195
},
{
"epoch": 0.43567657682689637,
"grad_norm": 0.020973121747374535,
"learning_rate": 0.0028490099009900994,
"loss": 0.7924,
"step": 196
},
{
"epoch": 0.4378994165045846,
"grad_norm": 0.02271064557135105,
"learning_rate": 0.0028465346534653465,
"loss": 0.8475,
"step": 197
},
{
"epoch": 0.44012225618227285,
"grad_norm": 0.017409365624189377,
"learning_rate": 0.002844059405940594,
"loss": 0.6592,
"step": 198
},
{
"epoch": 0.4423450958599611,
"grad_norm": 0.016952740028500557,
"learning_rate": 0.002841584158415842,
"loss": 0.7419,
"step": 199
},
{
"epoch": 0.44456793553764934,
"grad_norm": 0.022074569016695023,
"learning_rate": 0.002839108910891089,
"loss": 0.9592,
"step": 200
},
{
"epoch": 0.4467907752153376,
"grad_norm": 0.019633406773209572,
"learning_rate": 0.0028366336633663366,
"loss": 0.846,
"step": 201
},
{
"epoch": 0.4490136148930258,
"grad_norm": 0.02506411075592041,
"learning_rate": 0.0028341584158415845,
"loss": 0.8544,
"step": 202
},
{
"epoch": 0.4512364545707141,
"grad_norm": 0.011820493265986443,
"learning_rate": 0.0028316831683168316,
"loss": 0.6832,
"step": 203
},
{
"epoch": 0.4534592942484023,
"grad_norm": 0.013546622358262539,
"learning_rate": 0.002829207920792079,
"loss": 0.7245,
"step": 204
},
{
"epoch": 0.4556821339260906,
"grad_norm": 0.016001511365175247,
"learning_rate": 0.002826732673267327,
"loss": 0.6386,
"step": 205
},
{
"epoch": 0.45790497360377885,
"grad_norm": 0.016753453761339188,
"learning_rate": 0.002824257425742574,
"loss": 0.681,
"step": 206
},
{
"epoch": 0.46012781328146707,
"grad_norm": 0.021365856751799583,
"learning_rate": 0.0028217821782178217,
"loss": 0.7229,
"step": 207
},
{
"epoch": 0.46235065295915534,
"grad_norm": 0.016552921384572983,
"learning_rate": 0.0028193069306930696,
"loss": 0.741,
"step": 208
},
{
"epoch": 0.46457349263684355,
"grad_norm": 0.0160622950643301,
"learning_rate": 0.0028168316831683167,
"loss": 0.8179,
"step": 209
},
{
"epoch": 0.4667963323145318,
"grad_norm": 0.01873830333352089,
"learning_rate": 0.0028143564356435643,
"loss": 0.7208,
"step": 210
},
{
"epoch": 0.46901917199222004,
"grad_norm": 0.01617845520377159,
"learning_rate": 0.002811881188118812,
"loss": 0.7291,
"step": 211
},
{
"epoch": 0.4712420116699083,
"grad_norm": 0.016890326514840126,
"learning_rate": 0.0028094059405940593,
"loss": 0.7388,
"step": 212
},
{
"epoch": 0.4734648513475965,
"grad_norm": 0.01602407731115818,
"learning_rate": 0.0028069306930693073,
"loss": 0.6657,
"step": 213
},
{
"epoch": 0.4756876910252848,
"grad_norm": 0.02520548366010189,
"learning_rate": 0.0028044554455445548,
"loss": 0.7801,
"step": 214
},
{
"epoch": 0.47791053070297307,
"grad_norm": 0.017677821218967438,
"learning_rate": 0.002801980198019802,
"loss": 0.7508,
"step": 215
},
{
"epoch": 0.4801333703806613,
"grad_norm": 0.01971537061035633,
"learning_rate": 0.00279950495049505,
"loss": 0.8671,
"step": 216
},
{
"epoch": 0.48235621005834955,
"grad_norm": 0.020906252786517143,
"learning_rate": 0.0027970297029702973,
"loss": 0.8528,
"step": 217
},
{
"epoch": 0.48457904973603777,
"grad_norm": 0.020072396844625473,
"learning_rate": 0.0027945544554455444,
"loss": 0.6818,
"step": 218
},
{
"epoch": 0.48680188941372604,
"grad_norm": 0.015342594124376774,
"learning_rate": 0.0027920792079207924,
"loss": 0.7102,
"step": 219
},
{
"epoch": 0.48902472909141426,
"grad_norm": 0.018210800364613533,
"learning_rate": 0.00278960396039604,
"loss": 0.6902,
"step": 220
},
{
"epoch": 0.4912475687691025,
"grad_norm": 0.02456044964492321,
"learning_rate": 0.002787128712871287,
"loss": 0.7501,
"step": 221
},
{
"epoch": 0.4934704084467908,
"grad_norm": 0.010794063098728657,
"learning_rate": 0.002784653465346535,
"loss": 0.6494,
"step": 222
},
{
"epoch": 0.495693248124479,
"grad_norm": 0.014852452091872692,
"learning_rate": 0.0027821782178217825,
"loss": 0.6509,
"step": 223
},
{
"epoch": 0.4979160878021673,
"grad_norm": 0.023020023480057716,
"learning_rate": 0.0027797029702970296,
"loss": 0.7606,
"step": 224
},
{
"epoch": 0.5001389274798556,
"grad_norm": 0.018114762380719185,
"learning_rate": 0.0027772277227722775,
"loss": 0.7541,
"step": 225
},
{
"epoch": 0.5023617671575438,
"grad_norm": 0.018622858449816704,
"learning_rate": 0.002774752475247525,
"loss": 0.8621,
"step": 226
},
{
"epoch": 0.504584606835232,
"grad_norm": 0.018405485898256302,
"learning_rate": 0.002772277227722772,
"loss": 0.6815,
"step": 227
},
{
"epoch": 0.5068074465129202,
"grad_norm": 0.019367920234799385,
"learning_rate": 0.00276980198019802,
"loss": 0.8323,
"step": 228
},
{
"epoch": 0.5090302861906085,
"grad_norm": 0.016442907974123955,
"learning_rate": 0.0027673267326732676,
"loss": 0.6732,
"step": 229
},
{
"epoch": 0.5112531258682967,
"grad_norm": 0.02419120818376541,
"learning_rate": 0.0027648514851485147,
"loss": 0.7696,
"step": 230
},
{
"epoch": 0.513475965545985,
"grad_norm": 0.023511352017521858,
"learning_rate": 0.0027623762376237627,
"loss": 0.6797,
"step": 231
},
{
"epoch": 0.5156988052236733,
"grad_norm": 0.019304659217596054,
"learning_rate": 0.00275990099009901,
"loss": 0.7255,
"step": 232
},
{
"epoch": 0.5179216449013615,
"grad_norm": 0.01737944595515728,
"learning_rate": 0.0027574257425742573,
"loss": 0.6911,
"step": 233
},
{
"epoch": 0.5201444845790497,
"grad_norm": 0.017303744331002235,
"learning_rate": 0.0027549504950495052,
"loss": 0.7673,
"step": 234
},
{
"epoch": 0.5223673242567379,
"grad_norm": 0.018068619072437286,
"learning_rate": 0.0027524752475247528,
"loss": 0.7392,
"step": 235
},
{
"epoch": 0.5245901639344263,
"grad_norm": 0.01970060169696808,
"learning_rate": 0.00275,
"loss": 0.6399,
"step": 236
},
{
"epoch": 0.5268130036121145,
"grad_norm": 0.016325384378433228,
"learning_rate": 0.002747524752475248,
"loss": 0.8503,
"step": 237
},
{
"epoch": 0.5290358432898027,
"grad_norm": 0.020598750561475754,
"learning_rate": 0.002745049504950495,
"loss": 0.7524,
"step": 238
},
{
"epoch": 0.531258682967491,
"grad_norm": 0.016212867572903633,
"learning_rate": 0.0027425742574257424,
"loss": 0.6051,
"step": 239
},
{
"epoch": 0.5334815226451792,
"grad_norm": 0.015269650146365166,
"learning_rate": 0.0027400990099009904,
"loss": 0.642,
"step": 240
},
{
"epoch": 0.5357043623228674,
"grad_norm": 0.023223020136356354,
"learning_rate": 0.0027376237623762375,
"loss": 0.5973,
"step": 241
},
{
"epoch": 0.5379272020005557,
"grad_norm": 0.015060219913721085,
"learning_rate": 0.002735148514851485,
"loss": 0.6679,
"step": 242
},
{
"epoch": 0.540150041678244,
"grad_norm": 0.016781222075223923,
"learning_rate": 0.002732673267326733,
"loss": 0.6876,
"step": 243
},
{
"epoch": 0.5423728813559322,
"grad_norm": 0.01453195046633482,
"learning_rate": 0.00273019801980198,
"loss": 0.6025,
"step": 244
},
{
"epoch": 0.5445957210336204,
"grad_norm": 0.017990389838814735,
"learning_rate": 0.0027277227722772275,
"loss": 0.7093,
"step": 245
},
{
"epoch": 0.5468185607113087,
"grad_norm": 0.013350987806916237,
"learning_rate": 0.0027252475247524755,
"loss": 0.659,
"step": 246
},
{
"epoch": 0.549041400388997,
"grad_norm": 0.016740279272198677,
"learning_rate": 0.0027227722772277226,
"loss": 0.6952,
"step": 247
},
{
"epoch": 0.5512642400666852,
"grad_norm": 0.016187356784939766,
"learning_rate": 0.00272029702970297,
"loss": 0.6865,
"step": 248
},
{
"epoch": 0.5534870797443734,
"grad_norm": 0.015389680862426758,
"learning_rate": 0.002717821782178218,
"loss": 0.7032,
"step": 249
},
{
"epoch": 0.5557099194220617,
"grad_norm": 0.01613827608525753,
"learning_rate": 0.002715346534653465,
"loss": 0.8342,
"step": 250
},
{
"epoch": 0.5579327590997499,
"grad_norm": 0.02487931028008461,
"learning_rate": 0.0027128712871287127,
"loss": 0.7652,
"step": 251
},
{
"epoch": 0.5601555987774381,
"grad_norm": 0.01547365915030241,
"learning_rate": 0.0027103960396039606,
"loss": 0.6371,
"step": 252
},
{
"epoch": 0.5623784384551265,
"grad_norm": 0.08315558731555939,
"learning_rate": 0.0027079207920792077,
"loss": 0.7897,
"step": 253
},
{
"epoch": 0.5646012781328147,
"grad_norm": 0.014980711974203587,
"learning_rate": 0.0027054455445544552,
"loss": 0.8446,
"step": 254
},
{
"epoch": 0.5668241178105029,
"grad_norm": 0.01912595145404339,
"learning_rate": 0.002702970297029703,
"loss": 0.6859,
"step": 255
},
{
"epoch": 0.5690469574881911,
"grad_norm": 0.015234099701046944,
"learning_rate": 0.0027004950495049503,
"loss": 0.6902,
"step": 256
},
{
"epoch": 0.5712697971658794,
"grad_norm": 0.016250934451818466,
"learning_rate": 0.002698019801980198,
"loss": 0.6429,
"step": 257
},
{
"epoch": 0.5734926368435677,
"grad_norm": 0.013874251395463943,
"learning_rate": 0.0026955445544554458,
"loss": 0.6679,
"step": 258
},
{
"epoch": 0.5757154765212559,
"grad_norm": 0.020642723888158798,
"learning_rate": 0.002693069306930693,
"loss": 0.7782,
"step": 259
},
{
"epoch": 0.5779383161989442,
"grad_norm": 0.018470142036676407,
"learning_rate": 0.0026905940594059404,
"loss": 0.726,
"step": 260
},
{
"epoch": 0.5801611558766324,
"grad_norm": 0.01383270975202322,
"learning_rate": 0.0026881188118811883,
"loss": 0.6343,
"step": 261
},
{
"epoch": 0.5823839955543206,
"grad_norm": 0.02608071267604828,
"learning_rate": 0.0026856435643564354,
"loss": 0.7594,
"step": 262
},
{
"epoch": 0.5846068352320088,
"grad_norm": 0.01618632860481739,
"learning_rate": 0.0026831683168316834,
"loss": 0.6656,
"step": 263
},
{
"epoch": 0.5868296749096972,
"grad_norm": 0.022311942651867867,
"learning_rate": 0.002680693069306931,
"loss": 0.9195,
"step": 264
},
{
"epoch": 0.5890525145873854,
"grad_norm": 0.01792202889919281,
"learning_rate": 0.002678217821782178,
"loss": 0.7247,
"step": 265
},
{
"epoch": 0.5912753542650736,
"grad_norm": 0.017917366698384285,
"learning_rate": 0.002675742574257426,
"loss": 0.6302,
"step": 266
},
{
"epoch": 0.5934981939427619,
"grad_norm": 0.022937409579753876,
"learning_rate": 0.0026732673267326735,
"loss": 0.9526,
"step": 267
},
{
"epoch": 0.5957210336204501,
"grad_norm": 0.015674270689487457,
"learning_rate": 0.0026707920792079206,
"loss": 0.7029,
"step": 268
},
{
"epoch": 0.5979438732981384,
"grad_norm": 0.01607857085764408,
"learning_rate": 0.0026683168316831685,
"loss": 0.8267,
"step": 269
},
{
"epoch": 0.6001667129758266,
"grad_norm": 0.018459970131516457,
"learning_rate": 0.002665841584158416,
"loss": 0.7411,
"step": 270
},
{
"epoch": 0.6023895526535149,
"grad_norm": 0.01604890450835228,
"learning_rate": 0.002663366336633663,
"loss": 0.7138,
"step": 271
},
{
"epoch": 0.6046123923312031,
"grad_norm": 0.021214580163359642,
"learning_rate": 0.002660891089108911,
"loss": 0.6114,
"step": 272
},
{
"epoch": 0.6068352320088913,
"grad_norm": 0.019329868257045746,
"learning_rate": 0.0026584158415841586,
"loss": 0.723,
"step": 273
},
{
"epoch": 0.6090580716865797,
"grad_norm": 0.012522357515990734,
"learning_rate": 0.0026559405940594057,
"loss": 0.7007,
"step": 274
},
{
"epoch": 0.6112809113642679,
"grad_norm": 0.01763787679374218,
"learning_rate": 0.0026534653465346537,
"loss": 0.7885,
"step": 275
},
{
"epoch": 0.6135037510419561,
"grad_norm": 0.018133271485567093,
"learning_rate": 0.002650990099009901,
"loss": 0.7477,
"step": 276
},
{
"epoch": 0.6157265907196443,
"grad_norm": 0.013666578568518162,
"learning_rate": 0.0026485148514851483,
"loss": 0.6034,
"step": 277
},
{
"epoch": 0.6179494303973326,
"grad_norm": 0.014519906602799892,
"learning_rate": 0.0026460396039603962,
"loss": 0.6982,
"step": 278
},
{
"epoch": 0.6201722700750208,
"grad_norm": 0.016167080029845238,
"learning_rate": 0.0026435643564356437,
"loss": 0.7584,
"step": 279
},
{
"epoch": 0.6223951097527091,
"grad_norm": 0.0192260704934597,
"learning_rate": 0.002641089108910891,
"loss": 0.8423,
"step": 280
},
{
"epoch": 0.6246179494303973,
"grad_norm": 0.02769404463469982,
"learning_rate": 0.002638613861386139,
"loss": 0.7093,
"step": 281
},
{
"epoch": 0.6268407891080856,
"grad_norm": 0.023452669382095337,
"learning_rate": 0.0026361386138613863,
"loss": 0.7986,
"step": 282
},
{
"epoch": 0.6290636287857738,
"grad_norm": 0.02152738720178604,
"learning_rate": 0.0026336633663366334,
"loss": 0.7953,
"step": 283
},
{
"epoch": 0.631286468463462,
"grad_norm": 0.022495241835713387,
"learning_rate": 0.0026311881188118814,
"loss": 0.8022,
"step": 284
},
{
"epoch": 0.6335093081411504,
"grad_norm": 0.01805182732641697,
"learning_rate": 0.002628712871287129,
"loss": 0.6848,
"step": 285
},
{
"epoch": 0.6357321478188386,
"grad_norm": 0.020260373130440712,
"learning_rate": 0.002626237623762376,
"loss": 0.6499,
"step": 286
},
{
"epoch": 0.6379549874965268,
"grad_norm": 0.01385380420833826,
"learning_rate": 0.002623762376237624,
"loss": 0.6989,
"step": 287
},
{
"epoch": 0.640177827174215,
"grad_norm": 0.015470602549612522,
"learning_rate": 0.0026212871287128714,
"loss": 0.745,
"step": 288
},
{
"epoch": 0.6424006668519033,
"grad_norm": 0.011990759521722794,
"learning_rate": 0.0026188118811881185,
"loss": 0.6064,
"step": 289
},
{
"epoch": 0.6446235065295916,
"grad_norm": 0.01651688665151596,
"learning_rate": 0.0026163366336633665,
"loss": 0.6985,
"step": 290
},
{
"epoch": 0.6468463462072798,
"grad_norm": 0.013933480717241764,
"learning_rate": 0.002613861386138614,
"loss": 0.6943,
"step": 291
},
{
"epoch": 0.6490691858849681,
"grad_norm": 0.014627222903072834,
"learning_rate": 0.002611386138613861,
"loss": 0.5978,
"step": 292
},
{
"epoch": 0.6512920255626563,
"grad_norm": 0.016101978719234467,
"learning_rate": 0.002608910891089109,
"loss": 0.7254,
"step": 293
},
{
"epoch": 0.6535148652403445,
"grad_norm": 0.012672492302954197,
"learning_rate": 0.0026064356435643566,
"loss": 0.686,
"step": 294
},
{
"epoch": 0.6557377049180327,
"grad_norm": 0.019365599378943443,
"learning_rate": 0.0026039603960396037,
"loss": 0.6987,
"step": 295
},
{
"epoch": 0.6579605445957211,
"grad_norm": 0.013569362461566925,
"learning_rate": 0.0026014851485148516,
"loss": 0.6362,
"step": 296
},
{
"epoch": 0.6601833842734093,
"grad_norm": 0.016133926808834076,
"learning_rate": 0.002599009900990099,
"loss": 0.6661,
"step": 297
},
{
"epoch": 0.6624062239510975,
"grad_norm": 0.016300931572914124,
"learning_rate": 0.0025965346534653462,
"loss": 0.64,
"step": 298
},
{
"epoch": 0.6646290636287858,
"grad_norm": 0.012371404096484184,
"learning_rate": 0.002594059405940594,
"loss": 0.6825,
"step": 299
},
{
"epoch": 0.666851903306474,
"grad_norm": 0.018244942650198936,
"learning_rate": 0.0025915841584158417,
"loss": 0.735,
"step": 300
},
{
"epoch": 0.6690747429841623,
"grad_norm": 0.05636992305517197,
"learning_rate": 0.002589108910891089,
"loss": 0.7862,
"step": 301
},
{
"epoch": 0.6712975826618505,
"grad_norm": 0.01677551493048668,
"learning_rate": 0.0025866336633663368,
"loss": 0.7861,
"step": 302
},
{
"epoch": 0.6735204223395388,
"grad_norm": 0.01706579327583313,
"learning_rate": 0.0025841584158415843,
"loss": 0.8371,
"step": 303
},
{
"epoch": 0.675743262017227,
"grad_norm": 0.014940536580979824,
"learning_rate": 0.0025816831683168314,
"loss": 0.6754,
"step": 304
},
{
"epoch": 0.6779661016949152,
"grad_norm": 0.013865659013390541,
"learning_rate": 0.0025792079207920793,
"loss": 0.9877,
"step": 305
},
{
"epoch": 0.6801889413726036,
"grad_norm": 0.010642016306519508,
"learning_rate": 0.002576732673267327,
"loss": 0.6201,
"step": 306
},
{
"epoch": 0.6824117810502918,
"grad_norm": 0.01661493070423603,
"learning_rate": 0.002574257425742574,
"loss": 0.6654,
"step": 307
},
{
"epoch": 0.68463462072798,
"grad_norm": 0.018468208611011505,
"learning_rate": 0.002571782178217822,
"loss": 0.7863,
"step": 308
},
{
"epoch": 0.6868574604056682,
"grad_norm": 0.022226519882678986,
"learning_rate": 0.0025693069306930694,
"loss": 0.7191,
"step": 309
},
{
"epoch": 0.6890803000833565,
"grad_norm": 0.019259922206401825,
"learning_rate": 0.0025668316831683165,
"loss": 0.8028,
"step": 310
},
{
"epoch": 0.6913031397610447,
"grad_norm": 0.017490798607468605,
"learning_rate": 0.0025643564356435645,
"loss": 0.7814,
"step": 311
},
{
"epoch": 0.693525979438733,
"grad_norm": 0.02297484688460827,
"learning_rate": 0.002561881188118812,
"loss": 0.6705,
"step": 312
},
{
"epoch": 0.6957488191164213,
"grad_norm": 0.01425850111991167,
"learning_rate": 0.0025594059405940595,
"loss": 0.7235,
"step": 313
},
{
"epoch": 0.6979716587941095,
"grad_norm": 0.012471362948417664,
"learning_rate": 0.002556930693069307,
"loss": 0.7424,
"step": 314
},
{
"epoch": 0.7001944984717977,
"grad_norm": 0.013804659247398376,
"learning_rate": 0.0025544554455445546,
"loss": 0.6745,
"step": 315
},
{
"epoch": 0.7024173381494859,
"grad_norm": 0.01753597892820835,
"learning_rate": 0.002551980198019802,
"loss": 0.7136,
"step": 316
},
{
"epoch": 0.7046401778271743,
"grad_norm": 0.01735616661608219,
"learning_rate": 0.0025495049504950496,
"loss": 0.6987,
"step": 317
},
{
"epoch": 0.7068630175048625,
"grad_norm": 0.013026660308241844,
"learning_rate": 0.002547029702970297,
"loss": 0.5521,
"step": 318
},
{
"epoch": 0.7090858571825507,
"grad_norm": 0.011274688877165318,
"learning_rate": 0.0025445544554455446,
"loss": 0.5929,
"step": 319
},
{
"epoch": 0.711308696860239,
"grad_norm": 0.013325052335858345,
"learning_rate": 0.002542079207920792,
"loss": 0.6723,
"step": 320
},
{
"epoch": 0.7135315365379272,
"grad_norm": 0.012779835611581802,
"learning_rate": 0.0025396039603960397,
"loss": 0.6871,
"step": 321
},
{
"epoch": 0.7157543762156154,
"grad_norm": 0.019513219594955444,
"learning_rate": 0.002537128712871287,
"loss": 0.7549,
"step": 322
},
{
"epoch": 0.7179772158933037,
"grad_norm": 0.014735296368598938,
"learning_rate": 0.0025346534653465347,
"loss": 0.7868,
"step": 323
},
{
"epoch": 0.720200055570992,
"grad_norm": 0.015642767772078514,
"learning_rate": 0.0025321782178217823,
"loss": 0.6612,
"step": 324
},
{
"epoch": 0.7224228952486802,
"grad_norm": 0.013669556006789207,
"learning_rate": 0.0025297029702970298,
"loss": 0.6112,
"step": 325
},
{
"epoch": 0.7246457349263684,
"grad_norm": 0.012594704516232014,
"learning_rate": 0.0025272277227722773,
"loss": 0.7393,
"step": 326
},
{
"epoch": 0.7268685746040566,
"grad_norm": 0.015079697594046593,
"learning_rate": 0.002524752475247525,
"loss": 0.793,
"step": 327
},
{
"epoch": 0.729091414281745,
"grad_norm": 0.013951257802546024,
"learning_rate": 0.0025222772277227723,
"loss": 0.6706,
"step": 328
},
{
"epoch": 0.7313142539594332,
"grad_norm": 0.019177842885255814,
"learning_rate": 0.00251980198019802,
"loss": 0.7779,
"step": 329
},
{
"epoch": 0.7335370936371214,
"grad_norm": 0.013305417262017727,
"learning_rate": 0.0025173267326732674,
"loss": 0.7178,
"step": 330
},
{
"epoch": 0.7357599333148097,
"grad_norm": 0.02146240696310997,
"learning_rate": 0.002514851485148515,
"loss": 0.7293,
"step": 331
},
{
"epoch": 0.7379827729924979,
"grad_norm": 0.013251793570816517,
"learning_rate": 0.0025123762376237624,
"loss": 0.7747,
"step": 332
},
{
"epoch": 0.7402056126701861,
"grad_norm": 0.01089315302670002,
"learning_rate": 0.00250990099009901,
"loss": 0.5445,
"step": 333
},
{
"epoch": 0.7424284523478744,
"grad_norm": 0.01445831824094057,
"learning_rate": 0.0025074257425742575,
"loss": 0.6669,
"step": 334
},
{
"epoch": 0.7446512920255627,
"grad_norm": 0.014526651240885258,
"learning_rate": 0.002504950495049505,
"loss": 0.7194,
"step": 335
},
{
"epoch": 0.7468741317032509,
"grad_norm": 0.018426531925797462,
"learning_rate": 0.0025024752475247525,
"loss": 0.8315,
"step": 336
},
{
"epoch": 0.7490969713809391,
"grad_norm": 0.01586451753973961,
"learning_rate": 0.0025,
"loss": 0.572,
"step": 337
},
{
"epoch": 0.7513198110586274,
"grad_norm": 0.014794967137277126,
"learning_rate": 0.0024975247524752476,
"loss": 0.6875,
"step": 338
},
{
"epoch": 0.7535426507363157,
"grad_norm": 0.013549881055951118,
"learning_rate": 0.002495049504950495,
"loss": 0.6709,
"step": 339
},
{
"epoch": 0.7557654904140039,
"grad_norm": 0.0105968052521348,
"learning_rate": 0.0024925742574257426,
"loss": 0.5964,
"step": 340
},
{
"epoch": 0.7579883300916921,
"grad_norm": 0.015588056296110153,
"learning_rate": 0.00249009900990099,
"loss": 0.6671,
"step": 341
},
{
"epoch": 0.7602111697693804,
"grad_norm": 0.01798832416534424,
"learning_rate": 0.0024876237623762377,
"loss": 0.8818,
"step": 342
},
{
"epoch": 0.7624340094470686,
"grad_norm": 0.013795461505651474,
"learning_rate": 0.002485148514851485,
"loss": 0.5748,
"step": 343
},
{
"epoch": 0.7646568491247568,
"grad_norm": 0.016307495534420013,
"learning_rate": 0.0024826732673267327,
"loss": 0.7323,
"step": 344
},
{
"epoch": 0.7668796888024452,
"grad_norm": 0.012711350806057453,
"learning_rate": 0.0024801980198019802,
"loss": 0.5185,
"step": 345
},
{
"epoch": 0.7691025284801334,
"grad_norm": 0.01902935467660427,
"learning_rate": 0.0024777227722772278,
"loss": 0.7524,
"step": 346
},
{
"epoch": 0.7713253681578216,
"grad_norm": 0.013338466174900532,
"learning_rate": 0.0024752475247524753,
"loss": 0.6606,
"step": 347
},
{
"epoch": 0.7735482078355098,
"grad_norm": 0.017752062529325485,
"learning_rate": 0.002472772277227723,
"loss": 0.85,
"step": 348
},
{
"epoch": 0.7757710475131981,
"grad_norm": 0.013239924795925617,
"learning_rate": 0.0024702970297029703,
"loss": 0.623,
"step": 349
},
{
"epoch": 0.7779938871908864,
"grad_norm": 0.017011214047670364,
"learning_rate": 0.002467821782178218,
"loss": 0.6873,
"step": 350
},
{
"epoch": 0.7802167268685746,
"grad_norm": 0.015225573442876339,
"learning_rate": 0.0024653465346534654,
"loss": 0.6334,
"step": 351
},
{
"epoch": 0.7824395665462629,
"grad_norm": 0.020225588232278824,
"learning_rate": 0.002462871287128713,
"loss": 0.7056,
"step": 352
},
{
"epoch": 0.7846624062239511,
"grad_norm": 0.01229569036513567,
"learning_rate": 0.0024603960396039604,
"loss": 0.7022,
"step": 353
},
{
"epoch": 0.7868852459016393,
"grad_norm": 0.013165128417313099,
"learning_rate": 0.002457920792079208,
"loss": 0.6532,
"step": 354
},
{
"epoch": 0.7891080855793275,
"grad_norm": 0.026388289406895638,
"learning_rate": 0.0024554455445544555,
"loss": 0.6812,
"step": 355
},
{
"epoch": 0.7913309252570159,
"grad_norm": 0.014291019178926945,
"learning_rate": 0.002452970297029703,
"loss": 0.7062,
"step": 356
},
{
"epoch": 0.7935537649347041,
"grad_norm": 0.017437303438782692,
"learning_rate": 0.0024504950495049505,
"loss": 0.7651,
"step": 357
},
{
"epoch": 0.7957766046123923,
"grad_norm": 0.016056453809142113,
"learning_rate": 0.002448019801980198,
"loss": 0.6909,
"step": 358
},
{
"epoch": 0.7979994442900806,
"grad_norm": 0.017870964482426643,
"learning_rate": 0.0024455445544554455,
"loss": 0.541,
"step": 359
},
{
"epoch": 0.8002222839677688,
"grad_norm": 0.014736250974237919,
"learning_rate": 0.002443069306930693,
"loss": 0.7619,
"step": 360
},
{
"epoch": 0.8024451236454571,
"grad_norm": 0.013172541745007038,
"learning_rate": 0.0024405940594059406,
"loss": 0.5607,
"step": 361
},
{
"epoch": 0.8046679633231453,
"grad_norm": 0.017459526658058167,
"learning_rate": 0.002438118811881188,
"loss": 0.7483,
"step": 362
},
{
"epoch": 0.8068908030008336,
"grad_norm": 0.016839707270264626,
"learning_rate": 0.002435643564356436,
"loss": 0.7191,
"step": 363
},
{
"epoch": 0.8091136426785218,
"grad_norm": 0.017826620489358902,
"learning_rate": 0.002433168316831683,
"loss": 0.8137,
"step": 364
},
{
"epoch": 0.81133648235621,
"grad_norm": 0.019586768001317978,
"learning_rate": 0.0024306930693069307,
"loss": 0.8679,
"step": 365
},
{
"epoch": 0.8135593220338984,
"grad_norm": 0.03678233548998833,
"learning_rate": 0.0024282178217821786,
"loss": 0.6402,
"step": 366
},
{
"epoch": 0.8157821617115866,
"grad_norm": 0.02029457874596119,
"learning_rate": 0.0024257425742574257,
"loss": 0.794,
"step": 367
},
{
"epoch": 0.8180050013892748,
"grad_norm": 0.014588624238967896,
"learning_rate": 0.0024232673267326733,
"loss": 0.5804,
"step": 368
},
{
"epoch": 0.820227841066963,
"grad_norm": 0.014623894356191158,
"learning_rate": 0.002420792079207921,
"loss": 0.6849,
"step": 369
},
{
"epoch": 0.8224506807446513,
"grad_norm": 0.014702382497489452,
"learning_rate": 0.0024183168316831683,
"loss": 0.6318,
"step": 370
},
{
"epoch": 0.8246735204223395,
"grad_norm": 0.026238251477479935,
"learning_rate": 0.002415841584158416,
"loss": 0.6903,
"step": 371
},
{
"epoch": 0.8268963601000278,
"grad_norm": 0.017133679240942,
"learning_rate": 0.0024133663366336638,
"loss": 0.6215,
"step": 372
},
{
"epoch": 0.8291191997777161,
"grad_norm": 0.011881059035658836,
"learning_rate": 0.002410891089108911,
"loss": 0.6326,
"step": 373
},
{
"epoch": 0.8313420394554043,
"grad_norm": 0.015931686386466026,
"learning_rate": 0.0024084158415841584,
"loss": 0.5843,
"step": 374
},
{
"epoch": 0.8335648791330925,
"grad_norm": 0.014707183465361595,
"learning_rate": 0.0024059405940594063,
"loss": 0.5787,
"step": 375
},
{
"epoch": 0.8357877188107807,
"grad_norm": 0.015076966024935246,
"learning_rate": 0.0024034653465346534,
"loss": 0.6184,
"step": 376
},
{
"epoch": 0.8380105584884691,
"grad_norm": 0.015617873519659042,
"learning_rate": 0.002400990099009901,
"loss": 0.6095,
"step": 377
},
{
"epoch": 0.8402333981661573,
"grad_norm": 0.016791800037026405,
"learning_rate": 0.002398514851485149,
"loss": 0.7002,
"step": 378
},
{
"epoch": 0.8424562378438455,
"grad_norm": 0.01942884549498558,
"learning_rate": 0.002396039603960396,
"loss": 0.6643,
"step": 379
},
{
"epoch": 0.8446790775215337,
"grad_norm": 0.02692730724811554,
"learning_rate": 0.0023935643564356435,
"loss": 0.6722,
"step": 380
},
{
"epoch": 0.846901917199222,
"grad_norm": 0.016538864001631737,
"learning_rate": 0.0023910891089108915,
"loss": 0.592,
"step": 381
},
{
"epoch": 0.8491247568769102,
"grad_norm": 0.0320366770029068,
"learning_rate": 0.0023886138613861386,
"loss": 0.6845,
"step": 382
},
{
"epoch": 0.8513475965545985,
"grad_norm": 0.011704586446285248,
"learning_rate": 0.002386138613861386,
"loss": 0.6338,
"step": 383
},
{
"epoch": 0.8535704362322868,
"grad_norm": 0.012988455593585968,
"learning_rate": 0.002383663366336634,
"loss": 0.6345,
"step": 384
},
{
"epoch": 0.855793275909975,
"grad_norm": 0.036786969751119614,
"learning_rate": 0.002381188118811881,
"loss": 0.6513,
"step": 385
},
{
"epoch": 0.8580161155876632,
"grad_norm": 0.01267944648861885,
"learning_rate": 0.0023787128712871287,
"loss": 0.6335,
"step": 386
},
{
"epoch": 0.8602389552653514,
"grad_norm": 0.021348008885979652,
"learning_rate": 0.0023762376237623766,
"loss": 0.6572,
"step": 387
},
{
"epoch": 0.8624617949430398,
"grad_norm": 0.016306119039654732,
"learning_rate": 0.0023737623762376237,
"loss": 0.6352,
"step": 388
},
{
"epoch": 0.864684634620728,
"grad_norm": 0.016549356281757355,
"learning_rate": 0.0023712871287128712,
"loss": 0.6428,
"step": 389
},
{
"epoch": 0.8669074742984162,
"grad_norm": 0.01346430741250515,
"learning_rate": 0.002368811881188119,
"loss": 0.6152,
"step": 390
},
{
"epoch": 0.8691303139761045,
"grad_norm": 0.01475781761109829,
"learning_rate": 0.0023663366336633663,
"loss": 0.6869,
"step": 391
},
{
"epoch": 0.8713531536537927,
"grad_norm": 0.021658863872289658,
"learning_rate": 0.002363861386138614,
"loss": 0.8617,
"step": 392
},
{
"epoch": 0.873575993331481,
"grad_norm": 0.014893801882863045,
"learning_rate": 0.0023613861386138617,
"loss": 0.6334,
"step": 393
},
{
"epoch": 0.8757988330091692,
"grad_norm": 0.015763310715556145,
"learning_rate": 0.002358910891089109,
"loss": 0.6145,
"step": 394
},
{
"epoch": 0.8780216726868575,
"grad_norm": 0.01235837209969759,
"learning_rate": 0.0023564356435643564,
"loss": 0.6377,
"step": 395
},
{
"epoch": 0.8802445123645457,
"grad_norm": 0.08386275917291641,
"learning_rate": 0.0023539603960396043,
"loss": 0.6466,
"step": 396
},
{
"epoch": 0.8824673520422339,
"grad_norm": 0.024875586852431297,
"learning_rate": 0.0023514851485148514,
"loss": 0.772,
"step": 397
},
{
"epoch": 0.8846901917199222,
"grad_norm": 0.022778382524847984,
"learning_rate": 0.002349009900990099,
"loss": 0.77,
"step": 398
},
{
"epoch": 0.8869130313976105,
"grad_norm": 0.01597260683774948,
"learning_rate": 0.002346534653465347,
"loss": 0.8057,
"step": 399
},
{
"epoch": 0.8891358710752987,
"grad_norm": 0.023420272395014763,
"learning_rate": 0.002344059405940594,
"loss": 0.6152,
"step": 400
},
{
"epoch": 0.8913587107529869,
"grad_norm": 0.014347629621624947,
"learning_rate": 0.0023415841584158415,
"loss": 0.6851,
"step": 401
},
{
"epoch": 0.8935815504306752,
"grad_norm": 0.015029153786599636,
"learning_rate": 0.0023391089108910895,
"loss": 0.667,
"step": 402
},
{
"epoch": 0.8958043901083634,
"grad_norm": 0.014809265732765198,
"learning_rate": 0.0023366336633663365,
"loss": 0.5953,
"step": 403
},
{
"epoch": 0.8980272297860517,
"grad_norm": 0.015516281127929688,
"learning_rate": 0.002334158415841584,
"loss": 0.7201,
"step": 404
},
{
"epoch": 0.90025006946374,
"grad_norm": 0.016222365200519562,
"learning_rate": 0.002331683168316832,
"loss": 0.6713,
"step": 405
},
{
"epoch": 0.9024729091414282,
"grad_norm": 0.017578039318323135,
"learning_rate": 0.002329207920792079,
"loss": 0.5812,
"step": 406
},
{
"epoch": 0.9046957488191164,
"grad_norm": 0.018017487600445747,
"learning_rate": 0.0023267326732673266,
"loss": 0.6542,
"step": 407
},
{
"epoch": 0.9069185884968046,
"grad_norm": 0.020421525463461876,
"learning_rate": 0.0023242574257425746,
"loss": 0.8204,
"step": 408
},
{
"epoch": 0.909141428174493,
"grad_norm": 0.016109053045511246,
"learning_rate": 0.0023217821782178217,
"loss": 0.6965,
"step": 409
},
{
"epoch": 0.9113642678521812,
"grad_norm": 0.014515829272568226,
"learning_rate": 0.002319306930693069,
"loss": 0.6253,
"step": 410
},
{
"epoch": 0.9135871075298694,
"grad_norm": 0.021663261577486992,
"learning_rate": 0.002316831683168317,
"loss": 0.8357,
"step": 411
},
{
"epoch": 0.9158099472075577,
"grad_norm": 0.014531668275594711,
"learning_rate": 0.0023143564356435642,
"loss": 0.6608,
"step": 412
},
{
"epoch": 0.9180327868852459,
"grad_norm": 0.018239939585328102,
"learning_rate": 0.002311881188118812,
"loss": 0.7747,
"step": 413
},
{
"epoch": 0.9202556265629341,
"grad_norm": 0.012773513793945312,
"learning_rate": 0.0023094059405940597,
"loss": 0.5356,
"step": 414
},
{
"epoch": 0.9224784662406224,
"grad_norm": 0.012568398378789425,
"learning_rate": 0.002306930693069307,
"loss": 0.6577,
"step": 415
},
{
"epoch": 0.9247013059183107,
"grad_norm": 0.017879627645015717,
"learning_rate": 0.0023044554455445548,
"loss": 0.5712,
"step": 416
},
{
"epoch": 0.9269241455959989,
"grad_norm": 0.0374915786087513,
"learning_rate": 0.0023019801980198023,
"loss": 0.6147,
"step": 417
},
{
"epoch": 0.9291469852736871,
"grad_norm": 0.01358667854219675,
"learning_rate": 0.0022995049504950494,
"loss": 0.6152,
"step": 418
},
{
"epoch": 0.9313698249513754,
"grad_norm": 0.013912348076701164,
"learning_rate": 0.0022970297029702973,
"loss": 0.6511,
"step": 419
},
{
"epoch": 0.9335926646290637,
"grad_norm": 0.014229892753064632,
"learning_rate": 0.002294554455445545,
"loss": 0.6068,
"step": 420
},
{
"epoch": 0.9358155043067519,
"grad_norm": 0.017046229913830757,
"learning_rate": 0.002292079207920792,
"loss": 0.7221,
"step": 421
},
{
"epoch": 0.9380383439844401,
"grad_norm": 0.013679401949048042,
"learning_rate": 0.00228960396039604,
"loss": 0.6077,
"step": 422
},
{
"epoch": 0.9402611836621284,
"grad_norm": 0.018443197011947632,
"learning_rate": 0.0022871287128712874,
"loss": 0.7146,
"step": 423
},
{
"epoch": 0.9424840233398166,
"grad_norm": 0.021021803840994835,
"learning_rate": 0.0022846534653465345,
"loss": 0.829,
"step": 424
},
{
"epoch": 0.9447068630175048,
"grad_norm": 0.013253494165837765,
"learning_rate": 0.0022821782178217825,
"loss": 0.6044,
"step": 425
},
{
"epoch": 0.946929702695193,
"grad_norm": 0.015879614278674126,
"learning_rate": 0.00227970297029703,
"loss": 0.6039,
"step": 426
},
{
"epoch": 0.9491525423728814,
"grad_norm": 0.02068069390952587,
"learning_rate": 0.002277227722772277,
"loss": 0.7617,
"step": 427
},
{
"epoch": 0.9513753820505696,
"grad_norm": 0.013184352777898312,
"learning_rate": 0.002274752475247525,
"loss": 0.6523,
"step": 428
},
{
"epoch": 0.9535982217282578,
"grad_norm": 0.0113218380138278,
"learning_rate": 0.0022722772277227726,
"loss": 0.6023,
"step": 429
},
{
"epoch": 0.9558210614059461,
"grad_norm": 0.018204597756266594,
"learning_rate": 0.0022698019801980196,
"loss": 0.6851,
"step": 430
},
{
"epoch": 0.9580439010836344,
"grad_norm": 0.024960225448012352,
"learning_rate": 0.0022673267326732676,
"loss": 0.7379,
"step": 431
},
{
"epoch": 0.9602667407613226,
"grad_norm": 0.01937321573495865,
"learning_rate": 0.002264851485148515,
"loss": 0.6909,
"step": 432
},
{
"epoch": 0.9624895804390108,
"grad_norm": 0.017059147357940674,
"learning_rate": 0.0022623762376237622,
"loss": 0.7239,
"step": 433
},
{
"epoch": 0.9647124201166991,
"grad_norm": 0.011201994493603706,
"learning_rate": 0.00225990099009901,
"loss": 0.5845,
"step": 434
},
{
"epoch": 0.9669352597943873,
"grad_norm": 0.01506833452731371,
"learning_rate": 0.0022574257425742577,
"loss": 0.7139,
"step": 435
},
{
"epoch": 0.9691580994720755,
"grad_norm": 0.017956897616386414,
"learning_rate": 0.002254950495049505,
"loss": 0.6997,
"step": 436
},
{
"epoch": 0.9713809391497639,
"grad_norm": 0.018598267808556557,
"learning_rate": 0.0022524752475247527,
"loss": 0.5882,
"step": 437
},
{
"epoch": 0.9736037788274521,
"grad_norm": 0.016982879489660263,
"learning_rate": 0.0022500000000000003,
"loss": 0.5976,
"step": 438
},
{
"epoch": 0.9758266185051403,
"grad_norm": 0.01876719854772091,
"learning_rate": 0.0022475247524752474,
"loss": 0.7,
"step": 439
},
{
"epoch": 0.9780494581828285,
"grad_norm": 0.020731210708618164,
"learning_rate": 0.0022450495049504953,
"loss": 0.705,
"step": 440
},
{
"epoch": 0.9802722978605168,
"grad_norm": 0.014992648735642433,
"learning_rate": 0.0022425742574257424,
"loss": 0.6393,
"step": 441
},
{
"epoch": 0.982495137538205,
"grad_norm": 0.012561212293803692,
"learning_rate": 0.00224009900990099,
"loss": 0.6634,
"step": 442
},
{
"epoch": 0.9847179772158933,
"grad_norm": 0.019787229597568512,
"learning_rate": 0.002237623762376238,
"loss": 0.6422,
"step": 443
},
{
"epoch": 0.9869408168935816,
"grad_norm": 0.015230356715619564,
"learning_rate": 0.002235148514851485,
"loss": 0.6319,
"step": 444
},
{
"epoch": 0.9891636565712698,
"grad_norm": 0.013144402764737606,
"learning_rate": 0.0022326732673267325,
"loss": 0.5625,
"step": 445
},
{
"epoch": 0.991386496248958,
"grad_norm": 0.010007484816014767,
"learning_rate": 0.0022301980198019804,
"loss": 0.5449,
"step": 446
},
{
"epoch": 0.9936093359266462,
"grad_norm": 0.014984616078436375,
"learning_rate": 0.0022277227722772275,
"loss": 0.7016,
"step": 447
},
{
"epoch": 0.9958321756043346,
"grad_norm": 0.014676968567073345,
"learning_rate": 0.002225247524752475,
"loss": 0.6125,
"step": 448
},
{
"epoch": 0.9980550152820228,
"grad_norm": 0.01267558429390192,
"learning_rate": 0.002222772277227723,
"loss": 0.6128,
"step": 449
},
{
"epoch": 1.000277854959711,
"grad_norm": 0.014883238822221756,
"learning_rate": 0.00222029702970297,
"loss": 0.6702,
"step": 450
},
{
"epoch": 1.0025006946373993,
"grad_norm": 0.020183809101581573,
"learning_rate": 0.0022178217821782176,
"loss": 0.7836,
"step": 451
},
{
"epoch": 1.0047235343150875,
"grad_norm": 0.018987402319908142,
"learning_rate": 0.0022153465346534656,
"loss": 0.6474,
"step": 452
},
{
"epoch": 1.0069463739927758,
"grad_norm": 0.013754444196820259,
"learning_rate": 0.0022128712871287127,
"loss": 0.5332,
"step": 453
},
{
"epoch": 1.009169213670464,
"grad_norm": 0.024167753756046295,
"learning_rate": 0.00221039603960396,
"loss": 0.6738,
"step": 454
},
{
"epoch": 1.0113920533481522,
"grad_norm": 0.013680294156074524,
"learning_rate": 0.002207920792079208,
"loss": 0.6034,
"step": 455
},
{
"epoch": 1.0136148930258404,
"grad_norm": 0.01513979583978653,
"learning_rate": 0.0022054455445544552,
"loss": 0.6736,
"step": 456
},
{
"epoch": 1.0158377327035288,
"grad_norm": 0.01789553090929985,
"learning_rate": 0.0022029702970297028,
"loss": 0.5954,
"step": 457
},
{
"epoch": 1.018060572381217,
"grad_norm": 0.01587206870317459,
"learning_rate": 0.0022004950495049507,
"loss": 0.6374,
"step": 458
},
{
"epoch": 1.0202834120589053,
"grad_norm": 0.013578852638602257,
"learning_rate": 0.002198019801980198,
"loss": 0.6105,
"step": 459
},
{
"epoch": 1.0225062517365935,
"grad_norm": 0.01525961235165596,
"learning_rate": 0.0021955445544554453,
"loss": 0.5187,
"step": 460
},
{
"epoch": 1.0247290914142817,
"grad_norm": 0.01209926512092352,
"learning_rate": 0.0021930693069306933,
"loss": 0.5858,
"step": 461
},
{
"epoch": 1.02695193109197,
"grad_norm": 0.013947585597634315,
"learning_rate": 0.0021905940594059404,
"loss": 0.7486,
"step": 462
},
{
"epoch": 1.0291747707696581,
"grad_norm": 0.013879354111850262,
"learning_rate": 0.002188118811881188,
"loss": 0.5767,
"step": 463
},
{
"epoch": 1.0313976104473466,
"grad_norm": 0.015864117071032524,
"learning_rate": 0.002185643564356436,
"loss": 0.5945,
"step": 464
},
{
"epoch": 1.0336204501250348,
"grad_norm": 0.012343275360763073,
"learning_rate": 0.002183168316831683,
"loss": 0.6243,
"step": 465
},
{
"epoch": 1.035843289802723,
"grad_norm": 0.011329158209264278,
"learning_rate": 0.002180693069306931,
"loss": 0.7206,
"step": 466
},
{
"epoch": 1.0380661294804112,
"grad_norm": 0.014862005598843098,
"learning_rate": 0.0021782178217821784,
"loss": 0.7483,
"step": 467
},
{
"epoch": 1.0402889691580994,
"grad_norm": 0.013391937129199505,
"learning_rate": 0.0021757425742574255,
"loss": 0.7626,
"step": 468
},
{
"epoch": 1.0425118088357876,
"grad_norm": 0.018975989893078804,
"learning_rate": 0.0021732673267326735,
"loss": 0.7409,
"step": 469
},
{
"epoch": 1.0447346485134759,
"grad_norm": 0.01241536159068346,
"learning_rate": 0.002170792079207921,
"loss": 0.753,
"step": 470
},
{
"epoch": 1.0469574881911643,
"grad_norm": 0.015430880710482597,
"learning_rate": 0.002168316831683168,
"loss": 0.776,
"step": 471
},
{
"epoch": 1.0491803278688525,
"grad_norm": 0.014778917655348778,
"learning_rate": 0.002165841584158416,
"loss": 0.7363,
"step": 472
},
{
"epoch": 1.0514031675465407,
"grad_norm": 0.016265999525785446,
"learning_rate": 0.0021633663366336636,
"loss": 0.6905,
"step": 473
},
{
"epoch": 1.053626007224229,
"grad_norm": 0.01429293118417263,
"learning_rate": 0.0021608910891089106,
"loss": 0.6937,
"step": 474
},
{
"epoch": 1.0558488469019172,
"grad_norm": 0.015557540580630302,
"learning_rate": 0.0021584158415841586,
"loss": 0.5898,
"step": 475
},
{
"epoch": 1.0580716865796054,
"grad_norm": 0.010969873517751694,
"learning_rate": 0.002155940594059406,
"loss": 0.6872,
"step": 476
},
{
"epoch": 1.0602945262572936,
"grad_norm": 0.02743585780262947,
"learning_rate": 0.002153465346534653,
"loss": 0.6932,
"step": 477
},
{
"epoch": 1.062517365934982,
"grad_norm": 0.018156539648771286,
"learning_rate": 0.002150990099009901,
"loss": 0.6349,
"step": 478
},
{
"epoch": 1.0647402056126702,
"grad_norm": 0.029770391061902046,
"learning_rate": 0.0021485148514851487,
"loss": 0.8011,
"step": 479
},
{
"epoch": 1.0669630452903585,
"grad_norm": 0.028706863522529602,
"learning_rate": 0.0021460396039603958,
"loss": 0.5518,
"step": 480
},
{
"epoch": 1.0691858849680467,
"grad_norm": 0.01390407606959343,
"learning_rate": 0.0021435643564356437,
"loss": 0.6653,
"step": 481
},
{
"epoch": 1.071408724645735,
"grad_norm": 0.02037845179438591,
"learning_rate": 0.0021410891089108913,
"loss": 0.7506,
"step": 482
},
{
"epoch": 1.073631564323423,
"grad_norm": 0.012380843982100487,
"learning_rate": 0.0021386138613861383,
"loss": 0.5933,
"step": 483
},
{
"epoch": 1.0758544040011113,
"grad_norm": 0.01470144558697939,
"learning_rate": 0.0021361386138613863,
"loss": 0.8263,
"step": 484
},
{
"epoch": 1.0780772436787998,
"grad_norm": 0.015167983248829842,
"learning_rate": 0.002133663366336634,
"loss": 0.7961,
"step": 485
},
{
"epoch": 1.080300083356488,
"grad_norm": 0.016553625464439392,
"learning_rate": 0.002131188118811881,
"loss": 0.6522,
"step": 486
},
{
"epoch": 1.0825229230341762,
"grad_norm": 0.029548676684498787,
"learning_rate": 0.002128712871287129,
"loss": 0.7258,
"step": 487
},
{
"epoch": 1.0847457627118644,
"grad_norm": 0.012250442057847977,
"learning_rate": 0.0021262376237623764,
"loss": 0.5983,
"step": 488
},
{
"epoch": 1.0869686023895526,
"grad_norm": 0.013489637523889542,
"learning_rate": 0.0021237623762376235,
"loss": 0.6837,
"step": 489
},
{
"epoch": 1.0891914420672408,
"grad_norm": 0.01595526933670044,
"learning_rate": 0.0021212871287128714,
"loss": 0.6818,
"step": 490
},
{
"epoch": 1.091414281744929,
"grad_norm": 0.020508253946900368,
"learning_rate": 0.002118811881188119,
"loss": 0.6479,
"step": 491
},
{
"epoch": 1.0936371214226175,
"grad_norm": 0.01163517590612173,
"learning_rate": 0.002116336633663366,
"loss": 0.5976,
"step": 492
},
{
"epoch": 1.0958599611003057,
"grad_norm": 0.015765825286507607,
"learning_rate": 0.002113861386138614,
"loss": 0.6612,
"step": 493
},
{
"epoch": 1.098082800777994,
"grad_norm": 0.018122674897313118,
"learning_rate": 0.0021113861386138615,
"loss": 0.6547,
"step": 494
},
{
"epoch": 1.1003056404556821,
"grad_norm": 0.011971593834459782,
"learning_rate": 0.0021089108910891086,
"loss": 0.5676,
"step": 495
},
{
"epoch": 1.1025284801333703,
"grad_norm": 0.01705964468419552,
"learning_rate": 0.0021064356435643566,
"loss": 0.6342,
"step": 496
},
{
"epoch": 1.1047513198110586,
"grad_norm": 0.018494384363293648,
"learning_rate": 0.002103960396039604,
"loss": 0.7893,
"step": 497
},
{
"epoch": 1.1069741594887468,
"grad_norm": 0.015471387654542923,
"learning_rate": 0.002101485148514851,
"loss": 0.5907,
"step": 498
},
{
"epoch": 1.1091969991664352,
"grad_norm": 0.013633133843541145,
"learning_rate": 0.002099009900990099,
"loss": 0.7585,
"step": 499
},
{
"epoch": 1.1114198388441234,
"grad_norm": 0.013887847773730755,
"learning_rate": 0.0020965346534653467,
"loss": 0.6958,
"step": 500
},
{
"epoch": 1.1136426785218116,
"grad_norm": 0.015942484140396118,
"learning_rate": 0.0020940594059405938,
"loss": 0.7464,
"step": 501
},
{
"epoch": 1.1158655181994999,
"grad_norm": 0.01781686581671238,
"learning_rate": 0.0020915841584158417,
"loss": 0.5278,
"step": 502
},
{
"epoch": 1.118088357877188,
"grad_norm": 0.017188075929880142,
"learning_rate": 0.0020891089108910892,
"loss": 0.6229,
"step": 503
},
{
"epoch": 1.1203111975548763,
"grad_norm": 0.01655665598809719,
"learning_rate": 0.0020866336633663363,
"loss": 0.6761,
"step": 504
},
{
"epoch": 1.1225340372325645,
"grad_norm": 0.01681992970407009,
"learning_rate": 0.0020841584158415843,
"loss": 0.6118,
"step": 505
},
{
"epoch": 1.124756876910253,
"grad_norm": 0.021237896755337715,
"learning_rate": 0.002081683168316832,
"loss": 0.8598,
"step": 506
},
{
"epoch": 1.1269797165879412,
"grad_norm": 0.012300066649913788,
"learning_rate": 0.002079207920792079,
"loss": 0.5905,
"step": 507
},
{
"epoch": 1.1292025562656294,
"grad_norm": 0.018155410885810852,
"learning_rate": 0.002076732673267327,
"loss": 0.6204,
"step": 508
},
{
"epoch": 1.1314253959433176,
"grad_norm": 0.011526006273925304,
"learning_rate": 0.0020742574257425744,
"loss": 0.666,
"step": 509
},
{
"epoch": 1.1336482356210058,
"grad_norm": 0.014204266481101513,
"learning_rate": 0.0020717821782178215,
"loss": 0.6174,
"step": 510
},
{
"epoch": 1.135871075298694,
"grad_norm": 0.015811122953891754,
"learning_rate": 0.0020693069306930694,
"loss": 0.6967,
"step": 511
},
{
"epoch": 1.1380939149763822,
"grad_norm": 0.014839411713182926,
"learning_rate": 0.002066831683168317,
"loss": 0.5885,
"step": 512
},
{
"epoch": 1.1403167546540707,
"grad_norm": 0.015573407523334026,
"learning_rate": 0.002064356435643564,
"loss": 0.6261,
"step": 513
},
{
"epoch": 1.142539594331759,
"grad_norm": 0.01776973530650139,
"learning_rate": 0.002061881188118812,
"loss": 0.8348,
"step": 514
},
{
"epoch": 1.144762434009447,
"grad_norm": 0.02191152609884739,
"learning_rate": 0.0020594059405940595,
"loss": 0.6325,
"step": 515
},
{
"epoch": 1.1469852736871353,
"grad_norm": 0.03398964926600456,
"learning_rate": 0.002056930693069307,
"loss": 0.784,
"step": 516
},
{
"epoch": 1.1492081133648235,
"grad_norm": 0.01424696296453476,
"learning_rate": 0.0020544554455445545,
"loss": 0.7358,
"step": 517
},
{
"epoch": 1.1514309530425118,
"grad_norm": 0.013430099934339523,
"learning_rate": 0.002051980198019802,
"loss": 0.6242,
"step": 518
},
{
"epoch": 1.1536537927202,
"grad_norm": 0.010374044068157673,
"learning_rate": 0.0020495049504950496,
"loss": 0.5702,
"step": 519
},
{
"epoch": 1.1558766323978884,
"grad_norm": 0.0130368173122406,
"learning_rate": 0.002047029702970297,
"loss": 0.6321,
"step": 520
},
{
"epoch": 1.1580994720755766,
"grad_norm": 0.016580238938331604,
"learning_rate": 0.0020445544554455446,
"loss": 0.7411,
"step": 521
},
{
"epoch": 1.1603223117532648,
"grad_norm": 0.015030154958367348,
"learning_rate": 0.002042079207920792,
"loss": 0.572,
"step": 522
},
{
"epoch": 1.162545151430953,
"grad_norm": 0.014109233394265175,
"learning_rate": 0.0020396039603960397,
"loss": 0.7282,
"step": 523
},
{
"epoch": 1.1647679911086413,
"grad_norm": 0.019948862493038177,
"learning_rate": 0.002037128712871287,
"loss": 0.8526,
"step": 524
},
{
"epoch": 1.1669908307863295,
"grad_norm": 0.03067428059875965,
"learning_rate": 0.0020346534653465347,
"loss": 0.77,
"step": 525
},
{
"epoch": 1.1692136704640177,
"grad_norm": 0.014951253309845924,
"learning_rate": 0.0020321782178217822,
"loss": 0.5926,
"step": 526
},
{
"epoch": 1.1714365101417061,
"grad_norm": 0.020071959123015404,
"learning_rate": 0.0020297029702970298,
"loss": 0.6401,
"step": 527
},
{
"epoch": 1.1736593498193943,
"grad_norm": 0.013084011152386665,
"learning_rate": 0.0020272277227722773,
"loss": 0.5778,
"step": 528
},
{
"epoch": 1.1758821894970826,
"grad_norm": 0.012277776375412941,
"learning_rate": 0.002024752475247525,
"loss": 0.6096,
"step": 529
},
{
"epoch": 1.1781050291747708,
"grad_norm": 0.012979078106582165,
"learning_rate": 0.0020222772277227723,
"loss": 0.5634,
"step": 530
},
{
"epoch": 1.180327868852459,
"grad_norm": 0.015513453632593155,
"learning_rate": 0.00201980198019802,
"loss": 0.5527,
"step": 531
},
{
"epoch": 1.1825507085301472,
"grad_norm": 0.020396575331687927,
"learning_rate": 0.0020173267326732674,
"loss": 0.9307,
"step": 532
},
{
"epoch": 1.1847735482078354,
"grad_norm": 0.018580187112092972,
"learning_rate": 0.002014851485148515,
"loss": 0.6786,
"step": 533
},
{
"epoch": 1.1869963878855239,
"grad_norm": 0.015678012743592262,
"learning_rate": 0.0020123762376237624,
"loss": 0.5932,
"step": 534
},
{
"epoch": 1.189219227563212,
"grad_norm": 0.01222003810107708,
"learning_rate": 0.00200990099009901,
"loss": 0.5452,
"step": 535
},
{
"epoch": 1.1914420672409003,
"grad_norm": 0.0165384691208601,
"learning_rate": 0.0020074257425742575,
"loss": 0.655,
"step": 536
},
{
"epoch": 1.1936649069185885,
"grad_norm": 0.01701522432267666,
"learning_rate": 0.002004950495049505,
"loss": 0.512,
"step": 537
},
{
"epoch": 1.1958877465962767,
"grad_norm": 0.014413165859878063,
"learning_rate": 0.0020024752475247525,
"loss": 0.6258,
"step": 538
},
{
"epoch": 1.198110586273965,
"grad_norm": 0.011610453017055988,
"learning_rate": 0.002,
"loss": 0.6445,
"step": 539
},
{
"epoch": 1.2003334259516532,
"grad_norm": 0.016592789441347122,
"learning_rate": 0.0019975247524752476,
"loss": 0.7951,
"step": 540
},
{
"epoch": 1.2025562656293416,
"grad_norm": 0.018560299649834633,
"learning_rate": 0.001995049504950495,
"loss": 0.6788,
"step": 541
},
{
"epoch": 1.2047791053070298,
"grad_norm": 0.0228038989007473,
"learning_rate": 0.0019925742574257426,
"loss": 0.7306,
"step": 542
},
{
"epoch": 1.207001944984718,
"grad_norm": 0.013510987162590027,
"learning_rate": 0.00199009900990099,
"loss": 0.5083,
"step": 543
},
{
"epoch": 1.2092247846624062,
"grad_norm": 0.018051352351903915,
"learning_rate": 0.0019876237623762377,
"loss": 0.6776,
"step": 544
},
{
"epoch": 1.2114476243400945,
"grad_norm": 0.01775428093969822,
"learning_rate": 0.001985148514851485,
"loss": 0.702,
"step": 545
},
{
"epoch": 1.2136704640177827,
"grad_norm": 0.013823853805661201,
"learning_rate": 0.0019826732673267327,
"loss": 0.6253,
"step": 546
},
{
"epoch": 1.2158933036954709,
"grad_norm": 0.019051026552915573,
"learning_rate": 0.0019801980198019802,
"loss": 0.5959,
"step": 547
},
{
"epoch": 1.2181161433731593,
"grad_norm": 0.014647615142166615,
"learning_rate": 0.0019777227722772277,
"loss": 0.4997,
"step": 548
},
{
"epoch": 1.2203389830508475,
"grad_norm": 0.019500313326716423,
"learning_rate": 0.0019752475247524753,
"loss": 0.6763,
"step": 549
},
{
"epoch": 1.2225618227285358,
"grad_norm": 0.018273385241627693,
"learning_rate": 0.001972772277227723,
"loss": 0.5713,
"step": 550
},
{
"epoch": 1.224784662406224,
"grad_norm": 0.01844954676926136,
"learning_rate": 0.0019702970297029703,
"loss": 0.6973,
"step": 551
},
{
"epoch": 1.2270075020839122,
"grad_norm": 0.013612812384963036,
"learning_rate": 0.001967821782178218,
"loss": 0.53,
"step": 552
},
{
"epoch": 1.2292303417616004,
"grad_norm": 0.018429776653647423,
"learning_rate": 0.0019653465346534654,
"loss": 0.6369,
"step": 553
},
{
"epoch": 1.2314531814392886,
"grad_norm": 0.018885424360632896,
"learning_rate": 0.001962871287128713,
"loss": 0.7193,
"step": 554
},
{
"epoch": 1.233676021116977,
"grad_norm": 0.019281677901744843,
"learning_rate": 0.0019603960396039604,
"loss": 0.6858,
"step": 555
},
{
"epoch": 1.2358988607946653,
"grad_norm": 0.01671980880200863,
"learning_rate": 0.001957920792079208,
"loss": 0.708,
"step": 556
},
{
"epoch": 1.2381217004723535,
"grad_norm": 0.015330369584262371,
"learning_rate": 0.0019554455445544554,
"loss": 0.6055,
"step": 557
},
{
"epoch": 1.2403445401500417,
"grad_norm": 0.015489472076296806,
"learning_rate": 0.0019529702970297032,
"loss": 0.6312,
"step": 558
},
{
"epoch": 1.24256737982773,
"grad_norm": 0.01861627586185932,
"learning_rate": 0.0019504950495049505,
"loss": 0.6892,
"step": 559
},
{
"epoch": 1.2447902195054181,
"grad_norm": 0.01912919245660305,
"learning_rate": 0.0019480198019801978,
"loss": 0.5856,
"step": 560
},
{
"epoch": 1.2470130591831063,
"grad_norm": 0.01634219102561474,
"learning_rate": 0.0019455445544554458,
"loss": 0.7168,
"step": 561
},
{
"epoch": 1.2492358988607948,
"grad_norm": 0.01822001487016678,
"learning_rate": 0.001943069306930693,
"loss": 0.5872,
"step": 562
},
{
"epoch": 1.2514587385384828,
"grad_norm": 0.01985522173345089,
"learning_rate": 0.0019405940594059404,
"loss": 0.6333,
"step": 563
},
{
"epoch": 1.2536815782161712,
"grad_norm": 0.016396082937717438,
"learning_rate": 0.0019381188118811883,
"loss": 0.7005,
"step": 564
},
{
"epoch": 1.2559044178938594,
"grad_norm": 0.012350345961749554,
"learning_rate": 0.0019356435643564356,
"loss": 0.6913,
"step": 565
},
{
"epoch": 1.2581272575715476,
"grad_norm": 0.01816396601498127,
"learning_rate": 0.0019331683168316834,
"loss": 0.5174,
"step": 566
},
{
"epoch": 1.2603500972492359,
"grad_norm": 0.01564900204539299,
"learning_rate": 0.0019306930693069309,
"loss": 0.6686,
"step": 567
},
{
"epoch": 1.262572936926924,
"grad_norm": 0.014737764373421669,
"learning_rate": 0.0019282178217821782,
"loss": 0.5977,
"step": 568
},
{
"epoch": 1.2647957766046125,
"grad_norm": 0.02006666548550129,
"learning_rate": 0.001925742574257426,
"loss": 0.6276,
"step": 569
},
{
"epoch": 1.2670186162823005,
"grad_norm": 0.013658919371664524,
"learning_rate": 0.0019232673267326735,
"loss": 0.5861,
"step": 570
},
{
"epoch": 1.269241455959989,
"grad_norm": 0.011005178093910217,
"learning_rate": 0.0019207920792079208,
"loss": 0.8343,
"step": 571
},
{
"epoch": 1.2714642956376772,
"grad_norm": 0.016614580526947975,
"learning_rate": 0.0019183168316831685,
"loss": 0.7405,
"step": 572
},
{
"epoch": 1.2736871353153654,
"grad_norm": 0.013580153696238995,
"learning_rate": 0.001915841584158416,
"loss": 0.5955,
"step": 573
},
{
"epoch": 1.2759099749930536,
"grad_norm": 0.01571527309715748,
"learning_rate": 0.0019133663366336633,
"loss": 0.5994,
"step": 574
},
{
"epoch": 1.2781328146707418,
"grad_norm": 0.01320920791476965,
"learning_rate": 0.001910891089108911,
"loss": 0.5398,
"step": 575
},
{
"epoch": 1.2803556543484302,
"grad_norm": 0.02257618121802807,
"learning_rate": 0.0019084158415841586,
"loss": 0.6662,
"step": 576
},
{
"epoch": 1.2825784940261182,
"grad_norm": 0.020821325480937958,
"learning_rate": 0.001905940594059406,
"loss": 0.6462,
"step": 577
},
{
"epoch": 1.2848013337038067,
"grad_norm": 0.01534060388803482,
"learning_rate": 0.0019034653465346536,
"loss": 0.6875,
"step": 578
},
{
"epoch": 1.2870241733814949,
"grad_norm": 0.01106045302003622,
"learning_rate": 0.0019009900990099012,
"loss": 0.6197,
"step": 579
},
{
"epoch": 1.289247013059183,
"grad_norm": 0.023773077875375748,
"learning_rate": 0.0018985148514851485,
"loss": 0.6881,
"step": 580
},
{
"epoch": 1.2914698527368713,
"grad_norm": 0.014630904421210289,
"learning_rate": 0.0018960396039603962,
"loss": 0.8041,
"step": 581
},
{
"epoch": 1.2936926924145595,
"grad_norm": 0.021165568381547928,
"learning_rate": 0.0018935643564356437,
"loss": 0.7045,
"step": 582
},
{
"epoch": 1.295915532092248,
"grad_norm": 0.015476713888347149,
"learning_rate": 0.001891089108910891,
"loss": 0.5461,
"step": 583
},
{
"epoch": 1.298138371769936,
"grad_norm": 0.013936633244156837,
"learning_rate": 0.0018886138613861388,
"loss": 0.6052,
"step": 584
},
{
"epoch": 1.3003612114476244,
"grad_norm": 0.014164215885102749,
"learning_rate": 0.0018861386138613863,
"loss": 0.8044,
"step": 585
},
{
"epoch": 1.3025840511253126,
"grad_norm": 0.014623990282416344,
"learning_rate": 0.0018836633663366336,
"loss": 0.6213,
"step": 586
},
{
"epoch": 1.3048068908030008,
"grad_norm": 0.015673892572522163,
"learning_rate": 0.0018811881188118813,
"loss": 0.7006,
"step": 587
},
{
"epoch": 1.307029730480689,
"grad_norm": 0.020650116726756096,
"learning_rate": 0.0018787128712871289,
"loss": 0.6096,
"step": 588
},
{
"epoch": 1.3092525701583773,
"grad_norm": 0.019570233300328255,
"learning_rate": 0.0018762376237623762,
"loss": 0.7876,
"step": 589
},
{
"epoch": 1.3114754098360657,
"grad_norm": 0.024105733260512352,
"learning_rate": 0.001873762376237624,
"loss": 0.6646,
"step": 590
},
{
"epoch": 1.3136982495137537,
"grad_norm": 0.014969500713050365,
"learning_rate": 0.0018712871287128712,
"loss": 0.5828,
"step": 591
},
{
"epoch": 1.3159210891914421,
"grad_norm": 0.012277282774448395,
"learning_rate": 0.0018688118811881187,
"loss": 0.5273,
"step": 592
},
{
"epoch": 1.3181439288691303,
"grad_norm": 0.013523784466087818,
"learning_rate": 0.0018663366336633665,
"loss": 0.6394,
"step": 593
},
{
"epoch": 1.3203667685468186,
"grad_norm": 0.022080164402723312,
"learning_rate": 0.0018638613861386138,
"loss": 0.7142,
"step": 594
},
{
"epoch": 1.3225896082245068,
"grad_norm": 0.014192742295563221,
"learning_rate": 0.0018613861386138613,
"loss": 0.56,
"step": 595
},
{
"epoch": 1.324812447902195,
"grad_norm": 0.018689125776290894,
"learning_rate": 0.001858910891089109,
"loss": 0.6791,
"step": 596
},
{
"epoch": 1.3270352875798834,
"grad_norm": 0.01835326850414276,
"learning_rate": 0.0018564356435643563,
"loss": 0.6733,
"step": 597
},
{
"epoch": 1.3292581272575714,
"grad_norm": 0.022089499980211258,
"learning_rate": 0.0018539603960396039,
"loss": 0.6607,
"step": 598
},
{
"epoch": 1.3314809669352599,
"grad_norm": 0.011036070995032787,
"learning_rate": 0.0018514851485148516,
"loss": 0.5217,
"step": 599
},
{
"epoch": 1.333703806612948,
"grad_norm": 0.018533801659941673,
"learning_rate": 0.001849009900990099,
"loss": 0.7096,
"step": 600
},
{
"epoch": 1.3359266462906363,
"grad_norm": 0.017293326556682587,
"learning_rate": 0.0018465346534653464,
"loss": 0.6189,
"step": 601
},
{
"epoch": 1.3381494859683245,
"grad_norm": 0.01351864356547594,
"learning_rate": 0.0018440594059405942,
"loss": 0.7259,
"step": 602
},
{
"epoch": 1.3403723256460127,
"grad_norm": 0.018879210576415062,
"learning_rate": 0.0018415841584158415,
"loss": 0.5974,
"step": 603
},
{
"epoch": 1.3425951653237012,
"grad_norm": 0.023037323728203773,
"learning_rate": 0.001839108910891089,
"loss": 0.6754,
"step": 604
},
{
"epoch": 1.3448180050013891,
"grad_norm": 0.017983853816986084,
"learning_rate": 0.0018366336633663367,
"loss": 0.7237,
"step": 605
},
{
"epoch": 1.3470408446790776,
"grad_norm": 0.01909794472157955,
"learning_rate": 0.001834158415841584,
"loss": 0.7222,
"step": 606
},
{
"epoch": 1.3492636843567658,
"grad_norm": 0.014953205361962318,
"learning_rate": 0.0018316831683168316,
"loss": 0.6433,
"step": 607
},
{
"epoch": 1.351486524034454,
"grad_norm": 0.015649525448679924,
"learning_rate": 0.0018292079207920793,
"loss": 0.7764,
"step": 608
},
{
"epoch": 1.3537093637121422,
"grad_norm": 0.017241261899471283,
"learning_rate": 0.0018267326732673266,
"loss": 0.6004,
"step": 609
},
{
"epoch": 1.3559322033898304,
"grad_norm": 0.016156010329723358,
"learning_rate": 0.0018242574257425741,
"loss": 0.6187,
"step": 610
},
{
"epoch": 1.3581550430675189,
"grad_norm": 0.017765775322914124,
"learning_rate": 0.0018217821782178219,
"loss": 0.7092,
"step": 611
},
{
"epoch": 1.3603778827452069,
"grad_norm": 0.01482133287936449,
"learning_rate": 0.0018193069306930692,
"loss": 0.6921,
"step": 612
},
{
"epoch": 1.3626007224228953,
"grad_norm": 0.0338587686419487,
"learning_rate": 0.0018168316831683167,
"loss": 0.8791,
"step": 613
},
{
"epoch": 1.3648235621005835,
"grad_norm": 0.01598595455288887,
"learning_rate": 0.0018143564356435645,
"loss": 0.6677,
"step": 614
},
{
"epoch": 1.3670464017782717,
"grad_norm": 0.020157670602202415,
"learning_rate": 0.0018118811881188118,
"loss": 0.6577,
"step": 615
},
{
"epoch": 1.36926924145596,
"grad_norm": 0.017931604757905006,
"learning_rate": 0.0018094059405940597,
"loss": 0.5995,
"step": 616
},
{
"epoch": 1.3714920811336482,
"grad_norm": 0.015047606080770493,
"learning_rate": 0.001806930693069307,
"loss": 0.5683,
"step": 617
},
{
"epoch": 1.3737149208113366,
"grad_norm": 0.01254578959196806,
"learning_rate": 0.0018044554455445543,
"loss": 0.5574,
"step": 618
},
{
"epoch": 1.3759377604890246,
"grad_norm": 0.0250204186886549,
"learning_rate": 0.0018019801980198023,
"loss": 0.7746,
"step": 619
},
{
"epoch": 1.378160600166713,
"grad_norm": 0.0146646061912179,
"learning_rate": 0.0017995049504950496,
"loss": 0.5632,
"step": 620
},
{
"epoch": 1.3803834398444013,
"grad_norm": 0.019512226805090904,
"learning_rate": 0.0017970297029702969,
"loss": 0.6738,
"step": 621
},
{
"epoch": 1.3826062795220895,
"grad_norm": 0.013420785777270794,
"learning_rate": 0.0017945544554455448,
"loss": 0.6316,
"step": 622
},
{
"epoch": 1.3848291191997777,
"grad_norm": 0.023486951366066933,
"learning_rate": 0.0017920792079207922,
"loss": 0.5786,
"step": 623
},
{
"epoch": 1.387051958877466,
"grad_norm": 0.013036476448178291,
"learning_rate": 0.0017896039603960395,
"loss": 0.5637,
"step": 624
},
{
"epoch": 1.3892747985551543,
"grad_norm": 0.016444619745016098,
"learning_rate": 0.0017871287128712874,
"loss": 0.5816,
"step": 625
},
{
"epoch": 1.3914976382328423,
"grad_norm": 0.012313454411923885,
"learning_rate": 0.0017846534653465347,
"loss": 0.6912,
"step": 626
},
{
"epoch": 1.3937204779105308,
"grad_norm": 0.029828950762748718,
"learning_rate": 0.001782178217821782,
"loss": 0.5757,
"step": 627
},
{
"epoch": 1.395943317588219,
"grad_norm": 0.019931312650442123,
"learning_rate": 0.00177970297029703,
"loss": 0.833,
"step": 628
},
{
"epoch": 1.3981661572659072,
"grad_norm": 0.023598700761795044,
"learning_rate": 0.0017772277227722773,
"loss": 0.7982,
"step": 629
},
{
"epoch": 1.4003889969435954,
"grad_norm": 0.013721930794417858,
"learning_rate": 0.0017747524752475246,
"loss": 0.5859,
"step": 630
},
{
"epoch": 1.4026118366212836,
"grad_norm": 0.013073272071778774,
"learning_rate": 0.0017722772277227726,
"loss": 0.6802,
"step": 631
},
{
"epoch": 1.404834676298972,
"grad_norm": 0.015466667711734772,
"learning_rate": 0.0017698019801980199,
"loss": 0.6876,
"step": 632
},
{
"epoch": 1.40705751597666,
"grad_norm": 0.013860577717423439,
"learning_rate": 0.0017673267326732672,
"loss": 0.6569,
"step": 633
},
{
"epoch": 1.4092803556543485,
"grad_norm": 0.01486227661371231,
"learning_rate": 0.0017648514851485151,
"loss": 0.6061,
"step": 634
},
{
"epoch": 1.4115031953320367,
"grad_norm": 0.014290432445704937,
"learning_rate": 0.0017623762376237624,
"loss": 0.7512,
"step": 635
},
{
"epoch": 1.413726035009725,
"grad_norm": 0.014747899025678635,
"learning_rate": 0.0017599009900990097,
"loss": 0.6048,
"step": 636
},
{
"epoch": 1.4159488746874132,
"grad_norm": 0.02269178442656994,
"learning_rate": 0.0017574257425742577,
"loss": 0.5836,
"step": 637
},
{
"epoch": 1.4181717143651014,
"grad_norm": 0.019799571484327316,
"learning_rate": 0.001754950495049505,
"loss": 0.7016,
"step": 638
},
{
"epoch": 1.4203945540427896,
"grad_norm": 0.014343987219035625,
"learning_rate": 0.0017524752475247523,
"loss": 0.62,
"step": 639
},
{
"epoch": 1.4226173937204778,
"grad_norm": 0.01983143761754036,
"learning_rate": 0.0017500000000000003,
"loss": 0.5534,
"step": 640
},
{
"epoch": 1.4248402333981662,
"grad_norm": 0.012549810111522675,
"learning_rate": 0.0017475247524752476,
"loss": 0.6738,
"step": 641
},
{
"epoch": 1.4270630730758544,
"grad_norm": 0.019573699682950974,
"learning_rate": 0.0017450495049504949,
"loss": 0.6609,
"step": 642
},
{
"epoch": 1.4292859127535427,
"grad_norm": 0.017040664330124855,
"learning_rate": 0.0017425742574257428,
"loss": 0.5657,
"step": 643
},
{
"epoch": 1.4315087524312309,
"grad_norm": 0.021280160173773766,
"learning_rate": 0.0017400990099009901,
"loss": 0.7476,
"step": 644
},
{
"epoch": 1.433731592108919,
"grad_norm": 0.01822512224316597,
"learning_rate": 0.0017376237623762374,
"loss": 0.566,
"step": 645
},
{
"epoch": 1.4359544317866073,
"grad_norm": 0.015408231876790524,
"learning_rate": 0.0017351485148514854,
"loss": 0.6393,
"step": 646
},
{
"epoch": 1.4381772714642955,
"grad_norm": 0.012197799980640411,
"learning_rate": 0.0017326732673267327,
"loss": 0.6905,
"step": 647
},
{
"epoch": 1.440400111141984,
"grad_norm": 0.014727549627423286,
"learning_rate": 0.00173019801980198,
"loss": 0.6197,
"step": 648
},
{
"epoch": 1.4426229508196722,
"grad_norm": 0.021275896579027176,
"learning_rate": 0.001727722772277228,
"loss": 0.7485,
"step": 649
},
{
"epoch": 1.4448457904973604,
"grad_norm": 0.012766880914568901,
"learning_rate": 0.0017252475247524753,
"loss": 0.6318,
"step": 650
},
{
"epoch": 1.4470686301750486,
"grad_norm": 0.020205190405249596,
"learning_rate": 0.0017227722772277226,
"loss": 0.5067,
"step": 651
},
{
"epoch": 1.4492914698527368,
"grad_norm": 0.014094137586653233,
"learning_rate": 0.0017202970297029705,
"loss": 0.6953,
"step": 652
},
{
"epoch": 1.451514309530425,
"grad_norm": 0.015574413351714611,
"learning_rate": 0.0017178217821782178,
"loss": 0.5785,
"step": 653
},
{
"epoch": 1.4537371492081133,
"grad_norm": 0.01962168887257576,
"learning_rate": 0.0017153465346534651,
"loss": 0.5311,
"step": 654
},
{
"epoch": 1.4559599888858017,
"grad_norm": 0.014445985667407513,
"learning_rate": 0.001712871287128713,
"loss": 0.5768,
"step": 655
},
{
"epoch": 1.45818282856349,
"grad_norm": 0.013283908367156982,
"learning_rate": 0.0017103960396039604,
"loss": 0.6906,
"step": 656
},
{
"epoch": 1.4604056682411781,
"grad_norm": 0.014568808488547802,
"learning_rate": 0.0017079207920792077,
"loss": 0.5427,
"step": 657
},
{
"epoch": 1.4626285079188663,
"grad_norm": 0.01634564995765686,
"learning_rate": 0.0017054455445544557,
"loss": 0.7341,
"step": 658
},
{
"epoch": 1.4648513475965546,
"grad_norm": 0.015426786616444588,
"learning_rate": 0.001702970297029703,
"loss": 0.6963,
"step": 659
},
{
"epoch": 1.4670741872742428,
"grad_norm": 0.017544064670801163,
"learning_rate": 0.0017004950495049503,
"loss": 0.7626,
"step": 660
},
{
"epoch": 1.469297026951931,
"grad_norm": 0.017237959429621696,
"learning_rate": 0.0016980198019801982,
"loss": 0.5935,
"step": 661
},
{
"epoch": 1.4715198666296194,
"grad_norm": 0.019028373062610626,
"learning_rate": 0.0016955445544554455,
"loss": 0.6749,
"step": 662
},
{
"epoch": 1.4737427063073076,
"grad_norm": 0.012185648083686829,
"learning_rate": 0.0016930693069306928,
"loss": 0.6386,
"step": 663
},
{
"epoch": 1.4759655459849959,
"grad_norm": 0.01708870567381382,
"learning_rate": 0.0016905940594059408,
"loss": 0.691,
"step": 664
},
{
"epoch": 1.478188385662684,
"grad_norm": 0.013403747230768204,
"learning_rate": 0.001688118811881188,
"loss": 0.6614,
"step": 665
},
{
"epoch": 1.4804112253403723,
"grad_norm": 0.021224593743681908,
"learning_rate": 0.0016856435643564358,
"loss": 0.7882,
"step": 666
},
{
"epoch": 1.4826340650180605,
"grad_norm": 0.07430857419967651,
"learning_rate": 0.0016831683168316834,
"loss": 0.8449,
"step": 667
},
{
"epoch": 1.4848569046957487,
"grad_norm": 0.01444665901362896,
"learning_rate": 0.0016806930693069307,
"loss": 0.5309,
"step": 668
},
{
"epoch": 1.4870797443734372,
"grad_norm": 0.014784655533730984,
"learning_rate": 0.0016782178217821784,
"loss": 0.6546,
"step": 669
},
{
"epoch": 1.4893025840511254,
"grad_norm": 0.02172185480594635,
"learning_rate": 0.001675742574257426,
"loss": 0.673,
"step": 670
},
{
"epoch": 1.4915254237288136,
"grad_norm": 0.010736542753875256,
"learning_rate": 0.0016732673267326732,
"loss": 0.5521,
"step": 671
},
{
"epoch": 1.4937482634065018,
"grad_norm": 0.013517359271645546,
"learning_rate": 0.001670792079207921,
"loss": 0.6336,
"step": 672
},
{
"epoch": 1.49597110308419,
"grad_norm": 0.021300826221704483,
"learning_rate": 0.0016683168316831685,
"loss": 0.6419,
"step": 673
},
{
"epoch": 1.4981939427618782,
"grad_norm": 0.019188571721315384,
"learning_rate": 0.0016658415841584158,
"loss": 0.6618,
"step": 674
},
{
"epoch": 1.5004167824395664,
"grad_norm": 0.0127353984862566,
"learning_rate": 0.0016633663366336635,
"loss": 0.6328,
"step": 675
},
{
"epoch": 1.5026396221172549,
"grad_norm": 0.012695038691163063,
"learning_rate": 0.001660891089108911,
"loss": 0.5579,
"step": 676
},
{
"epoch": 1.5048624617949429,
"grad_norm": 0.02976105734705925,
"learning_rate": 0.0016584158415841584,
"loss": 0.8424,
"step": 677
},
{
"epoch": 1.5070853014726313,
"grad_norm": 0.016801824793219566,
"learning_rate": 0.0016559405940594061,
"loss": 0.5539,
"step": 678
},
{
"epoch": 1.5093081411503195,
"grad_norm": 0.021268382668495178,
"learning_rate": 0.0016534653465346536,
"loss": 0.7329,
"step": 679
},
{
"epoch": 1.5115309808280077,
"grad_norm": 0.014745811931788921,
"learning_rate": 0.001650990099009901,
"loss": 0.6433,
"step": 680
},
{
"epoch": 1.5137538205056962,
"grad_norm": 0.018007369711995125,
"learning_rate": 0.0016485148514851487,
"loss": 0.5846,
"step": 681
},
{
"epoch": 1.5159766601833842,
"grad_norm": 0.014517346397042274,
"learning_rate": 0.0016460396039603962,
"loss": 0.6494,
"step": 682
},
{
"epoch": 1.5181994998610726,
"grad_norm": 0.011836903169751167,
"learning_rate": 0.0016435643564356435,
"loss": 0.6144,
"step": 683
},
{
"epoch": 1.5204223395387606,
"grad_norm": 0.015668461099267006,
"learning_rate": 0.0016410891089108912,
"loss": 0.697,
"step": 684
},
{
"epoch": 1.522645179216449,
"grad_norm": 0.0159301720559597,
"learning_rate": 0.0016386138613861388,
"loss": 0.6344,
"step": 685
},
{
"epoch": 1.5248680188941373,
"grad_norm": 0.021761801093816757,
"learning_rate": 0.001636138613861386,
"loss": 0.7244,
"step": 686
},
{
"epoch": 1.5270908585718255,
"grad_norm": 0.017973683774471283,
"learning_rate": 0.0016336633663366338,
"loss": 0.6616,
"step": 687
},
{
"epoch": 1.529313698249514,
"grad_norm": 0.03193248063325882,
"learning_rate": 0.0016311881188118813,
"loss": 0.5955,
"step": 688
},
{
"epoch": 1.531536537927202,
"grad_norm": 0.022393716499209404,
"learning_rate": 0.0016287128712871286,
"loss": 0.6299,
"step": 689
},
{
"epoch": 1.5337593776048903,
"grad_norm": 0.022320229560136795,
"learning_rate": 0.0016262376237623764,
"loss": 0.7729,
"step": 690
},
{
"epoch": 1.5359822172825783,
"grad_norm": 0.014977824874222279,
"learning_rate": 0.0016237623762376237,
"loss": 0.6697,
"step": 691
},
{
"epoch": 1.5382050569602668,
"grad_norm": 0.01872638612985611,
"learning_rate": 0.0016212871287128712,
"loss": 0.5893,
"step": 692
},
{
"epoch": 1.540427896637955,
"grad_norm": 0.015523020178079605,
"learning_rate": 0.001618811881188119,
"loss": 0.6822,
"step": 693
},
{
"epoch": 1.5426507363156432,
"grad_norm": 0.014033573679625988,
"learning_rate": 0.0016163366336633663,
"loss": 0.6213,
"step": 694
},
{
"epoch": 1.5448735759933316,
"grad_norm": 0.014586709439754486,
"learning_rate": 0.0016138613861386138,
"loss": 0.7703,
"step": 695
},
{
"epoch": 1.5470964156710196,
"grad_norm": 0.014374952763319016,
"learning_rate": 0.0016113861386138615,
"loss": 0.6198,
"step": 696
},
{
"epoch": 1.549319255348708,
"grad_norm": 0.017982954159379005,
"learning_rate": 0.0016089108910891088,
"loss": 0.6394,
"step": 697
},
{
"epoch": 1.551542095026396,
"grad_norm": 0.014446941204369068,
"learning_rate": 0.0016064356435643563,
"loss": 0.6657,
"step": 698
},
{
"epoch": 1.5537649347040845,
"grad_norm": 0.018195228651165962,
"learning_rate": 0.001603960396039604,
"loss": 0.5129,
"step": 699
},
{
"epoch": 1.5559877743817727,
"grad_norm": 0.01925376057624817,
"learning_rate": 0.0016014851485148514,
"loss": 0.643,
"step": 700
},
{
"epoch": 1.558210614059461,
"grad_norm": 0.036713700741529465,
"learning_rate": 0.001599009900990099,
"loss": 0.6965,
"step": 701
},
{
"epoch": 1.5604334537371494,
"grad_norm": 0.01623792201280594,
"learning_rate": 0.0015965346534653467,
"loss": 0.5376,
"step": 702
},
{
"epoch": 1.5626562934148374,
"grad_norm": 0.0159526988863945,
"learning_rate": 0.001594059405940594,
"loss": 0.6076,
"step": 703
},
{
"epoch": 1.5648791330925258,
"grad_norm": 0.017126891762018204,
"learning_rate": 0.0015915841584158415,
"loss": 0.6329,
"step": 704
},
{
"epoch": 1.5671019727702138,
"grad_norm": 0.019651105627417564,
"learning_rate": 0.0015891089108910892,
"loss": 0.4878,
"step": 705
},
{
"epoch": 1.5693248124479022,
"grad_norm": 0.01890754885971546,
"learning_rate": 0.0015866336633663365,
"loss": 0.677,
"step": 706
},
{
"epoch": 1.5715476521255904,
"grad_norm": 0.014043249189853668,
"learning_rate": 0.001584158415841584,
"loss": 0.5893,
"step": 707
},
{
"epoch": 1.5737704918032787,
"grad_norm": 0.014365495182573795,
"learning_rate": 0.0015816831683168318,
"loss": 0.6114,
"step": 708
},
{
"epoch": 1.575993331480967,
"grad_norm": 0.021364012733101845,
"learning_rate": 0.001579207920792079,
"loss": 0.616,
"step": 709
},
{
"epoch": 1.578216171158655,
"grad_norm": 0.021359330043196678,
"learning_rate": 0.0015767326732673266,
"loss": 0.7248,
"step": 710
},
{
"epoch": 1.5804390108363435,
"grad_norm": 0.01866665668785572,
"learning_rate": 0.0015742574257425744,
"loss": 0.7569,
"step": 711
},
{
"epoch": 1.5826618505140315,
"grad_norm": 0.013409837149083614,
"learning_rate": 0.0015717821782178217,
"loss": 0.5397,
"step": 712
},
{
"epoch": 1.58488469019172,
"grad_norm": 0.014709453098475933,
"learning_rate": 0.0015693069306930692,
"loss": 0.5613,
"step": 713
},
{
"epoch": 1.5871075298694082,
"grad_norm": 0.014858447946608067,
"learning_rate": 0.001566831683168317,
"loss": 0.6546,
"step": 714
},
{
"epoch": 1.5893303695470964,
"grad_norm": 0.01660234108567238,
"learning_rate": 0.0015643564356435642,
"loss": 0.8238,
"step": 715
},
{
"epoch": 1.5915532092247848,
"grad_norm": 0.01475167740136385,
"learning_rate": 0.0015618811881188122,
"loss": 0.6471,
"step": 716
},
{
"epoch": 1.5937760489024728,
"grad_norm": 0.024248579517006874,
"learning_rate": 0.0015594059405940595,
"loss": 0.7741,
"step": 717
},
{
"epoch": 1.5959988885801613,
"grad_norm": 0.02008378878235817,
"learning_rate": 0.0015569306930693068,
"loss": 0.6338,
"step": 718
},
{
"epoch": 1.5982217282578493,
"grad_norm": 0.013149694539606571,
"learning_rate": 0.0015544554455445548,
"loss": 0.5793,
"step": 719
},
{
"epoch": 1.6004445679355377,
"grad_norm": 0.016647828742861748,
"learning_rate": 0.001551980198019802,
"loss": 0.571,
"step": 720
},
{
"epoch": 1.602667407613226,
"grad_norm": 0.020569197833538055,
"learning_rate": 0.0015495049504950494,
"loss": 0.747,
"step": 721
},
{
"epoch": 1.6048902472909141,
"grad_norm": 0.015371223911643028,
"learning_rate": 0.0015470297029702973,
"loss": 0.7479,
"step": 722
},
{
"epoch": 1.6071130869686026,
"grad_norm": 0.018335159868001938,
"learning_rate": 0.0015445544554455446,
"loss": 0.7619,
"step": 723
},
{
"epoch": 1.6093359266462905,
"grad_norm": 0.012718189507722855,
"learning_rate": 0.001542079207920792,
"loss": 0.6172,
"step": 724
},
{
"epoch": 1.611558766323979,
"grad_norm": 0.0307784266769886,
"learning_rate": 0.0015396039603960399,
"loss": 0.6105,
"step": 725
},
{
"epoch": 1.613781606001667,
"grad_norm": 0.017151063308119774,
"learning_rate": 0.0015371287128712872,
"loss": 0.6892,
"step": 726
},
{
"epoch": 1.6160044456793554,
"grad_norm": 0.020293843001127243,
"learning_rate": 0.0015346534653465345,
"loss": 0.6951,
"step": 727
},
{
"epoch": 1.6182272853570436,
"grad_norm": 0.018611643463373184,
"learning_rate": 0.0015321782178217825,
"loss": 0.7882,
"step": 728
},
{
"epoch": 1.6204501250347318,
"grad_norm": 0.0173718873411417,
"learning_rate": 0.0015297029702970298,
"loss": 0.6516,
"step": 729
},
{
"epoch": 1.62267296471242,
"grad_norm": 0.014312409795820713,
"learning_rate": 0.001527227722772277,
"loss": 0.5164,
"step": 730
},
{
"epoch": 1.6248958043901083,
"grad_norm": 0.01838870160281658,
"learning_rate": 0.001524752475247525,
"loss": 0.6303,
"step": 731
},
{
"epoch": 1.6271186440677967,
"grad_norm": 0.013072308152914047,
"learning_rate": 0.0015222772277227723,
"loss": 0.6555,
"step": 732
},
{
"epoch": 1.6293414837454847,
"grad_norm": 0.013176838867366314,
"learning_rate": 0.0015198019801980196,
"loss": 0.6303,
"step": 733
},
{
"epoch": 1.6315643234231731,
"grad_norm": 0.016583677381277084,
"learning_rate": 0.0015173267326732676,
"loss": 0.6323,
"step": 734
},
{
"epoch": 1.6337871631008614,
"grad_norm": 0.015155021101236343,
"learning_rate": 0.001514851485148515,
"loss": 0.6323,
"step": 735
},
{
"epoch": 1.6360100027785496,
"grad_norm": 0.013991404324769974,
"learning_rate": 0.0015123762376237622,
"loss": 0.6144,
"step": 736
},
{
"epoch": 1.6382328424562378,
"grad_norm": 0.021456275135278702,
"learning_rate": 0.0015099009900990102,
"loss": 0.6232,
"step": 737
},
{
"epoch": 1.640455682133926,
"grad_norm": 0.016199328005313873,
"learning_rate": 0.0015074257425742575,
"loss": 0.6021,
"step": 738
},
{
"epoch": 1.6426785218116144,
"grad_norm": 0.02143920212984085,
"learning_rate": 0.0015049504950495048,
"loss": 0.6952,
"step": 739
},
{
"epoch": 1.6449013614893024,
"grad_norm": 0.016033926978707314,
"learning_rate": 0.0015024752475247527,
"loss": 0.7172,
"step": 740
},
{
"epoch": 1.6471242011669909,
"grad_norm": 0.012828083708882332,
"learning_rate": 0.0015,
"loss": 0.6543,
"step": 741
},
{
"epoch": 1.649347040844679,
"grad_norm": 0.0132541349157691,
"learning_rate": 0.0014975247524752476,
"loss": 0.6105,
"step": 742
},
{
"epoch": 1.6515698805223673,
"grad_norm": 0.01240797434002161,
"learning_rate": 0.001495049504950495,
"loss": 0.6074,
"step": 743
},
{
"epoch": 1.6537927202000555,
"grad_norm": 0.0144476518034935,
"learning_rate": 0.0014925742574257426,
"loss": 0.6292,
"step": 744
},
{
"epoch": 1.6560155598777437,
"grad_norm": 0.013765274547040462,
"learning_rate": 0.0014900990099009901,
"loss": 0.6266,
"step": 745
},
{
"epoch": 1.6582383995554322,
"grad_norm": 0.015940168872475624,
"learning_rate": 0.0014876237623762376,
"loss": 0.6191,
"step": 746
},
{
"epoch": 1.6604612392331202,
"grad_norm": 0.013493590988218784,
"learning_rate": 0.0014851485148514852,
"loss": 0.6048,
"step": 747
},
{
"epoch": 1.6626840789108086,
"grad_norm": 0.01261868141591549,
"learning_rate": 0.0014826732673267327,
"loss": 0.6823,
"step": 748
},
{
"epoch": 1.6649069185884968,
"grad_norm": 0.024276770651340485,
"learning_rate": 0.0014801980198019802,
"loss": 0.7041,
"step": 749
},
{
"epoch": 1.667129758266185,
"grad_norm": 0.019383903592824936,
"learning_rate": 0.0014777227722772277,
"loss": 0.7646,
"step": 750
},
{
"epoch": 1.6693525979438733,
"grad_norm": 0.015850719064474106,
"learning_rate": 0.0014752475247524753,
"loss": 0.5642,
"step": 751
},
{
"epoch": 1.6715754376215615,
"grad_norm": 0.014828231185674667,
"learning_rate": 0.0014727722772277228,
"loss": 0.728,
"step": 752
},
{
"epoch": 1.67379827729925,
"grad_norm": 0.014795539900660515,
"learning_rate": 0.0014702970297029703,
"loss": 0.514,
"step": 753
},
{
"epoch": 1.676021116976938,
"grad_norm": 0.03070404753088951,
"learning_rate": 0.0014678217821782178,
"loss": 0.659,
"step": 754
},
{
"epoch": 1.6782439566546263,
"grad_norm": 0.01784861460328102,
"learning_rate": 0.0014653465346534656,
"loss": 0.6316,
"step": 755
},
{
"epoch": 1.6804667963323145,
"grad_norm": 0.020610976964235306,
"learning_rate": 0.0014628712871287129,
"loss": 0.5602,
"step": 756
},
{
"epoch": 1.6826896360100028,
"grad_norm": 0.01749056577682495,
"learning_rate": 0.0014603960396039604,
"loss": 0.7298,
"step": 757
},
{
"epoch": 1.684912475687691,
"grad_norm": 0.019512880593538284,
"learning_rate": 0.0014579207920792081,
"loss": 0.6296,
"step": 758
},
{
"epoch": 1.6871353153653792,
"grad_norm": 0.017783217132091522,
"learning_rate": 0.0014554455445544554,
"loss": 0.6959,
"step": 759
},
{
"epoch": 1.6893581550430676,
"grad_norm": 0.015032476745545864,
"learning_rate": 0.001452970297029703,
"loss": 0.54,
"step": 760
},
{
"epoch": 1.6915809947207556,
"grad_norm": 0.010041123256087303,
"learning_rate": 0.0014504950495049507,
"loss": 0.5299,
"step": 761
},
{
"epoch": 1.693803834398444,
"grad_norm": 0.013333450071513653,
"learning_rate": 0.001448019801980198,
"loss": 0.5786,
"step": 762
},
{
"epoch": 1.6960266740761323,
"grad_norm": 0.017033929005265236,
"learning_rate": 0.0014455445544554455,
"loss": 0.6885,
"step": 763
},
{
"epoch": 1.6982495137538205,
"grad_norm": 0.01851385273039341,
"learning_rate": 0.0014430693069306933,
"loss": 0.8939,
"step": 764
},
{
"epoch": 1.7004723534315087,
"grad_norm": 0.021369075402617455,
"learning_rate": 0.0014405940594059406,
"loss": 0.6529,
"step": 765
},
{
"epoch": 1.702695193109197,
"grad_norm": 0.012632249854505062,
"learning_rate": 0.001438118811881188,
"loss": 0.5608,
"step": 766
},
{
"epoch": 1.7049180327868854,
"grad_norm": 0.01696857064962387,
"learning_rate": 0.0014356435643564358,
"loss": 0.5658,
"step": 767
},
{
"epoch": 1.7071408724645734,
"grad_norm": 0.02316252514719963,
"learning_rate": 0.0014331683168316831,
"loss": 0.6628,
"step": 768
},
{
"epoch": 1.7093637121422618,
"grad_norm": 0.014780178666114807,
"learning_rate": 0.0014306930693069307,
"loss": 0.5469,
"step": 769
},
{
"epoch": 1.71158655181995,
"grad_norm": 0.02393134869635105,
"learning_rate": 0.0014282178217821784,
"loss": 0.6237,
"step": 770
},
{
"epoch": 1.7138093914976382,
"grad_norm": 0.017437728121876717,
"learning_rate": 0.0014257425742574257,
"loss": 0.598,
"step": 771
},
{
"epoch": 1.7160322311753264,
"grad_norm": 0.014657423831522465,
"learning_rate": 0.0014232673267326732,
"loss": 0.6574,
"step": 772
},
{
"epoch": 1.7182550708530147,
"grad_norm": 0.014511947520077229,
"learning_rate": 0.001420792079207921,
"loss": 0.4105,
"step": 773
},
{
"epoch": 1.720477910530703,
"grad_norm": 0.0161074697971344,
"learning_rate": 0.0014183168316831683,
"loss": 0.5853,
"step": 774
},
{
"epoch": 1.722700750208391,
"grad_norm": 0.015577802434563637,
"learning_rate": 0.0014158415841584158,
"loss": 0.7821,
"step": 775
},
{
"epoch": 1.7249235898860795,
"grad_norm": 0.013394194655120373,
"learning_rate": 0.0014133663366336635,
"loss": 0.6393,
"step": 776
},
{
"epoch": 1.7271464295637677,
"grad_norm": 0.011438438668847084,
"learning_rate": 0.0014108910891089108,
"loss": 0.5918,
"step": 777
},
{
"epoch": 1.729369269241456,
"grad_norm": 0.01416991837322712,
"learning_rate": 0.0014084158415841584,
"loss": 0.5726,
"step": 778
},
{
"epoch": 1.7315921089191442,
"grad_norm": 0.011613192036747932,
"learning_rate": 0.001405940594059406,
"loss": 0.6207,
"step": 779
},
{
"epoch": 1.7338149485968324,
"grad_norm": 0.014506768435239792,
"learning_rate": 0.0014034653465346536,
"loss": 0.7833,
"step": 780
},
{
"epoch": 1.7360377882745208,
"grad_norm": 0.016172049567103386,
"learning_rate": 0.001400990099009901,
"loss": 0.6071,
"step": 781
},
{
"epoch": 1.7382606279522088,
"grad_norm": 0.014774010516703129,
"learning_rate": 0.0013985148514851487,
"loss": 0.7103,
"step": 782
},
{
"epoch": 1.7404834676298973,
"grad_norm": 0.019349824637174606,
"learning_rate": 0.0013960396039603962,
"loss": 0.648,
"step": 783
},
{
"epoch": 1.7427063073075855,
"grad_norm": 0.012977352365851402,
"learning_rate": 0.0013935643564356435,
"loss": 0.5717,
"step": 784
},
{
"epoch": 1.7449291469852737,
"grad_norm": 0.01410915981978178,
"learning_rate": 0.0013910891089108912,
"loss": 0.5183,
"step": 785
},
{
"epoch": 1.747151986662962,
"grad_norm": 0.018457330763339996,
"learning_rate": 0.0013886138613861388,
"loss": 0.5707,
"step": 786
},
{
"epoch": 1.7493748263406501,
"grad_norm": 0.020965803414583206,
"learning_rate": 0.001386138613861386,
"loss": 0.6848,
"step": 787
},
{
"epoch": 1.7515976660183386,
"grad_norm": 0.01630076766014099,
"learning_rate": 0.0013836633663366338,
"loss": 0.8393,
"step": 788
},
{
"epoch": 1.7538205056960265,
"grad_norm": 0.013406305573880672,
"learning_rate": 0.0013811881188118813,
"loss": 0.4215,
"step": 789
},
{
"epoch": 1.756043345373715,
"grad_norm": 0.013798919506371021,
"learning_rate": 0.0013787128712871286,
"loss": 0.6094,
"step": 790
},
{
"epoch": 1.7582661850514032,
"grad_norm": 0.028619924560189247,
"learning_rate": 0.0013762376237623764,
"loss": 0.6902,
"step": 791
},
{
"epoch": 1.7604890247290914,
"grad_norm": 0.035553187131881714,
"learning_rate": 0.001373762376237624,
"loss": 0.5251,
"step": 792
},
{
"epoch": 1.7627118644067796,
"grad_norm": 0.013903150334954262,
"learning_rate": 0.0013712871287128712,
"loss": 0.7792,
"step": 793
},
{
"epoch": 1.7649347040844678,
"grad_norm": 0.012611381709575653,
"learning_rate": 0.0013688118811881187,
"loss": 0.592,
"step": 794
},
{
"epoch": 1.7671575437621563,
"grad_norm": 0.014770290814340115,
"learning_rate": 0.0013663366336633665,
"loss": 0.5483,
"step": 795
},
{
"epoch": 1.7693803834398443,
"grad_norm": 0.02011730521917343,
"learning_rate": 0.0013638613861386138,
"loss": 0.7013,
"step": 796
},
{
"epoch": 1.7716032231175327,
"grad_norm": 0.024151911959052086,
"learning_rate": 0.0013613861386138613,
"loss": 0.6834,
"step": 797
},
{
"epoch": 1.773826062795221,
"grad_norm": 0.020802835002541542,
"learning_rate": 0.001358910891089109,
"loss": 0.6703,
"step": 798
},
{
"epoch": 1.7760489024729091,
"grad_norm": 0.027099374681711197,
"learning_rate": 0.0013564356435643563,
"loss": 0.6492,
"step": 799
},
{
"epoch": 1.7782717421505974,
"grad_norm": 0.015476135537028313,
"learning_rate": 0.0013539603960396039,
"loss": 0.7148,
"step": 800
},
{
"epoch": 1.7804945818282856,
"grad_norm": 0.016380343586206436,
"learning_rate": 0.0013514851485148516,
"loss": 0.65,
"step": 801
},
{
"epoch": 1.782717421505974,
"grad_norm": 0.01689559780061245,
"learning_rate": 0.001349009900990099,
"loss": 0.5779,
"step": 802
},
{
"epoch": 1.784940261183662,
"grad_norm": 0.021533837541937828,
"learning_rate": 0.0013465346534653464,
"loss": 0.5734,
"step": 803
},
{
"epoch": 1.7871631008613504,
"grad_norm": 0.025379789993166924,
"learning_rate": 0.0013440594059405942,
"loss": 0.554,
"step": 804
},
{
"epoch": 1.7893859405390387,
"grad_norm": 0.022600479423999786,
"learning_rate": 0.0013415841584158417,
"loss": 0.7596,
"step": 805
},
{
"epoch": 1.7916087802167269,
"grad_norm": 0.029088463634252548,
"learning_rate": 0.001339108910891089,
"loss": 0.6969,
"step": 806
},
{
"epoch": 1.793831619894415,
"grad_norm": 0.02145700715482235,
"learning_rate": 0.0013366336633663367,
"loss": 0.7427,
"step": 807
},
{
"epoch": 1.7960544595721033,
"grad_norm": 0.019825611263513565,
"learning_rate": 0.0013341584158415843,
"loss": 0.5842,
"step": 808
},
{
"epoch": 1.7982772992497917,
"grad_norm": 0.018616992980241776,
"learning_rate": 0.0013316831683168316,
"loss": 0.5571,
"step": 809
},
{
"epoch": 1.8005001389274797,
"grad_norm": 0.014252702705562115,
"learning_rate": 0.0013292079207920793,
"loss": 0.6742,
"step": 810
},
{
"epoch": 1.8027229786051682,
"grad_norm": 0.010596544481813908,
"learning_rate": 0.0013267326732673268,
"loss": 0.4869,
"step": 811
},
{
"epoch": 1.8049458182828564,
"grad_norm": 0.02070692926645279,
"learning_rate": 0.0013242574257425741,
"loss": 0.7206,
"step": 812
},
{
"epoch": 1.8071686579605446,
"grad_norm": 0.017880035564303398,
"learning_rate": 0.0013217821782178219,
"loss": 0.6991,
"step": 813
},
{
"epoch": 1.8093914976382328,
"grad_norm": 0.02070513367652893,
"learning_rate": 0.0013193069306930694,
"loss": 0.8145,
"step": 814
},
{
"epoch": 1.811614337315921,
"grad_norm": 0.016880514100193977,
"learning_rate": 0.0013168316831683167,
"loss": 0.5353,
"step": 815
},
{
"epoch": 1.8138371769936095,
"grad_norm": 0.017728716135025024,
"learning_rate": 0.0013143564356435644,
"loss": 0.6704,
"step": 816
},
{
"epoch": 1.8160600166712975,
"grad_norm": 0.026395466178655624,
"learning_rate": 0.001311881188118812,
"loss": 0.672,
"step": 817
},
{
"epoch": 1.818282856348986,
"grad_norm": 0.019495896995067596,
"learning_rate": 0.0013094059405940593,
"loss": 0.5702,
"step": 818
},
{
"epoch": 1.8205056960266741,
"grad_norm": 0.027116477489471436,
"learning_rate": 0.001306930693069307,
"loss": 0.5935,
"step": 819
},
{
"epoch": 1.8227285357043623,
"grad_norm": 0.014737212099134922,
"learning_rate": 0.0013044554455445545,
"loss": 0.5925,
"step": 820
},
{
"epoch": 1.8249513753820505,
"grad_norm": 0.015115857124328613,
"learning_rate": 0.0013019801980198018,
"loss": 0.614,
"step": 821
},
{
"epoch": 1.8271742150597388,
"grad_norm": 0.011744903400540352,
"learning_rate": 0.0012995049504950496,
"loss": 0.5326,
"step": 822
},
{
"epoch": 1.8293970547374272,
"grad_norm": 0.013531588949263096,
"learning_rate": 0.001297029702970297,
"loss": 0.6255,
"step": 823
},
{
"epoch": 1.8316198944151152,
"grad_norm": 0.014545679092407227,
"learning_rate": 0.0012945544554455444,
"loss": 0.6711,
"step": 824
},
{
"epoch": 1.8338427340928036,
"grad_norm": 0.02696896158158779,
"learning_rate": 0.0012920792079207921,
"loss": 0.7379,
"step": 825
},
{
"epoch": 1.8360655737704918,
"grad_norm": 0.017231903970241547,
"learning_rate": 0.0012896039603960397,
"loss": 0.6318,
"step": 826
},
{
"epoch": 1.83828841344818,
"grad_norm": 0.020397018641233444,
"learning_rate": 0.001287128712871287,
"loss": 0.7356,
"step": 827
},
{
"epoch": 1.8405112531258683,
"grad_norm": 0.014277629554271698,
"learning_rate": 0.0012846534653465347,
"loss": 0.6417,
"step": 828
},
{
"epoch": 1.8427340928035565,
"grad_norm": 0.014533923007547855,
"learning_rate": 0.0012821782178217822,
"loss": 0.558,
"step": 829
},
{
"epoch": 1.844956932481245,
"grad_norm": 0.014783985912799835,
"learning_rate": 0.0012797029702970298,
"loss": 0.7035,
"step": 830
},
{
"epoch": 1.847179772158933,
"grad_norm": 0.017703086137771606,
"learning_rate": 0.0012772277227722773,
"loss": 0.7231,
"step": 831
},
{
"epoch": 1.8494026118366214,
"grad_norm": 0.03160193935036659,
"learning_rate": 0.0012747524752475248,
"loss": 0.6218,
"step": 832
},
{
"epoch": 1.8516254515143096,
"grad_norm": 0.01616073027253151,
"learning_rate": 0.0012722772277227723,
"loss": 0.5358,
"step": 833
},
{
"epoch": 1.8538482911919978,
"grad_norm": 0.015707818791270256,
"learning_rate": 0.0012698019801980198,
"loss": 0.6487,
"step": 834
},
{
"epoch": 1.856071130869686,
"grad_norm": 0.027285683900117874,
"learning_rate": 0.0012673267326732674,
"loss": 0.7571,
"step": 835
},
{
"epoch": 1.8582939705473742,
"grad_norm": 0.013436183333396912,
"learning_rate": 0.0012648514851485149,
"loss": 0.5062,
"step": 836
},
{
"epoch": 1.8605168102250627,
"grad_norm": 0.012100230902433395,
"learning_rate": 0.0012623762376237624,
"loss": 0.6001,
"step": 837
},
{
"epoch": 1.8627396499027506,
"grad_norm": 0.016179142519831657,
"learning_rate": 0.00125990099009901,
"loss": 0.5925,
"step": 838
},
{
"epoch": 1.864962489580439,
"grad_norm": 0.01836666651070118,
"learning_rate": 0.0012574257425742575,
"loss": 0.7492,
"step": 839
},
{
"epoch": 1.8671853292581273,
"grad_norm": 0.013563703745603561,
"learning_rate": 0.001254950495049505,
"loss": 0.5668,
"step": 840
},
{
"epoch": 1.8694081689358155,
"grad_norm": 0.02543150633573532,
"learning_rate": 0.0012524752475247525,
"loss": 0.5982,
"step": 841
},
{
"epoch": 1.8716310086135037,
"grad_norm": 0.017902787774801254,
"learning_rate": 0.00125,
"loss": 0.6887,
"step": 842
},
{
"epoch": 1.873853848291192,
"grad_norm": 0.018437763676047325,
"learning_rate": 0.0012475247524752475,
"loss": 0.6153,
"step": 843
},
{
"epoch": 1.8760766879688804,
"grad_norm": 0.01389164850115776,
"learning_rate": 0.001245049504950495,
"loss": 0.6548,
"step": 844
},
{
"epoch": 1.8782995276465684,
"grad_norm": 0.03561052680015564,
"learning_rate": 0.0012425742574257426,
"loss": 0.5777,
"step": 845
},
{
"epoch": 1.8805223673242568,
"grad_norm": 0.014207260683178902,
"learning_rate": 0.0012400990099009901,
"loss": 0.5582,
"step": 846
},
{
"epoch": 1.882745207001945,
"grad_norm": 0.046381477266550064,
"learning_rate": 0.0012376237623762376,
"loss": 0.605,
"step": 847
},
{
"epoch": 1.8849680466796332,
"grad_norm": 0.015709543600678444,
"learning_rate": 0.0012351485148514852,
"loss": 0.5713,
"step": 848
},
{
"epoch": 1.8871908863573215,
"grad_norm": 0.01592281460762024,
"learning_rate": 0.0012326732673267327,
"loss": 0.618,
"step": 849
},
{
"epoch": 1.8894137260350097,
"grad_norm": 0.01385457906872034,
"learning_rate": 0.0012301980198019802,
"loss": 0.5394,
"step": 850
},
{
"epoch": 1.8916365657126981,
"grad_norm": 0.02153988927602768,
"learning_rate": 0.0012277227722772277,
"loss": 0.5842,
"step": 851
},
{
"epoch": 1.893859405390386,
"grad_norm": 0.02466188557446003,
"learning_rate": 0.0012252475247524753,
"loss": 0.5083,
"step": 852
},
{
"epoch": 1.8960822450680745,
"grad_norm": 0.015681538730859756,
"learning_rate": 0.0012227722772277228,
"loss": 0.7941,
"step": 853
},
{
"epoch": 1.8983050847457628,
"grad_norm": 0.012338684871792793,
"learning_rate": 0.0012202970297029703,
"loss": 0.5342,
"step": 854
},
{
"epoch": 1.900527924423451,
"grad_norm": 0.01564796082675457,
"learning_rate": 0.001217821782178218,
"loss": 0.7584,
"step": 855
},
{
"epoch": 1.9027507641011392,
"grad_norm": 0.02049081400036812,
"learning_rate": 0.0012153465346534653,
"loss": 0.644,
"step": 856
},
{
"epoch": 1.9049736037788274,
"grad_norm": 0.029596436768770218,
"learning_rate": 0.0012128712871287129,
"loss": 0.7099,
"step": 857
},
{
"epoch": 1.9071964434565158,
"grad_norm": 0.013466271571815014,
"learning_rate": 0.0012103960396039606,
"loss": 0.649,
"step": 858
},
{
"epoch": 1.9094192831342038,
"grad_norm": 0.02464134432375431,
"learning_rate": 0.001207920792079208,
"loss": 0.5849,
"step": 859
},
{
"epoch": 1.9116421228118923,
"grad_norm": 0.0143961850553751,
"learning_rate": 0.0012054455445544554,
"loss": 0.6556,
"step": 860
},
{
"epoch": 1.9138649624895805,
"grad_norm": 0.016098588705062866,
"learning_rate": 0.0012029702970297032,
"loss": 0.6991,
"step": 861
},
{
"epoch": 1.9160878021672687,
"grad_norm": 0.013091151602566242,
"learning_rate": 0.0012004950495049505,
"loss": 0.6811,
"step": 862
},
{
"epoch": 1.918310641844957,
"grad_norm": 0.029850434511899948,
"learning_rate": 0.001198019801980198,
"loss": 0.703,
"step": 863
},
{
"epoch": 1.9205334815226451,
"grad_norm": 0.01120046991854906,
"learning_rate": 0.0011955445544554457,
"loss": 0.4938,
"step": 864
},
{
"epoch": 1.9227563212003336,
"grad_norm": 0.014592386782169342,
"learning_rate": 0.001193069306930693,
"loss": 0.6209,
"step": 865
},
{
"epoch": 1.9249791608780216,
"grad_norm": 0.015447413548827171,
"learning_rate": 0.0011905940594059406,
"loss": 0.5492,
"step": 866
},
{
"epoch": 1.92720200055571,
"grad_norm": 0.015509175136685371,
"learning_rate": 0.0011881188118811883,
"loss": 0.54,
"step": 867
},
{
"epoch": 1.9294248402333982,
"grad_norm": 0.01778079755604267,
"learning_rate": 0.0011856435643564356,
"loss": 0.6965,
"step": 868
},
{
"epoch": 1.9316476799110864,
"grad_norm": 0.02252313494682312,
"learning_rate": 0.0011831683168316831,
"loss": 0.5413,
"step": 869
},
{
"epoch": 1.9338705195887747,
"grad_norm": 0.012748087756335735,
"learning_rate": 0.0011806930693069309,
"loss": 0.5079,
"step": 870
},
{
"epoch": 1.9360933592664629,
"grad_norm": 0.016808731481432915,
"learning_rate": 0.0011782178217821782,
"loss": 0.6487,
"step": 871
},
{
"epoch": 1.9383161989441513,
"grad_norm": 0.017723703756928444,
"learning_rate": 0.0011757425742574257,
"loss": 0.7172,
"step": 872
},
{
"epoch": 1.9405390386218393,
"grad_norm": 0.014089664444327354,
"learning_rate": 0.0011732673267326734,
"loss": 0.4578,
"step": 873
},
{
"epoch": 1.9427618782995277,
"grad_norm": 0.019459979608654976,
"learning_rate": 0.0011707920792079207,
"loss": 0.5246,
"step": 874
},
{
"epoch": 1.9449847179772157,
"grad_norm": 0.017233047634363174,
"learning_rate": 0.0011683168316831683,
"loss": 0.6245,
"step": 875
},
{
"epoch": 1.9472075576549042,
"grad_norm": 0.019499676302075386,
"learning_rate": 0.001165841584158416,
"loss": 0.6322,
"step": 876
},
{
"epoch": 1.9494303973325924,
"grad_norm": 0.022460227832198143,
"learning_rate": 0.0011633663366336633,
"loss": 0.6601,
"step": 877
},
{
"epoch": 1.9516532370102806,
"grad_norm": 0.020194463431835175,
"learning_rate": 0.0011608910891089108,
"loss": 0.7348,
"step": 878
},
{
"epoch": 1.953876076687969,
"grad_norm": 0.018907731398940086,
"learning_rate": 0.0011584158415841586,
"loss": 0.6646,
"step": 879
},
{
"epoch": 1.956098916365657,
"grad_norm": 0.015820452943444252,
"learning_rate": 0.001155940594059406,
"loss": 0.5312,
"step": 880
},
{
"epoch": 1.9583217560433455,
"grad_norm": 0.018061013892292976,
"learning_rate": 0.0011534653465346534,
"loss": 0.6744,
"step": 881
},
{
"epoch": 1.9605445957210335,
"grad_norm": 0.02651524357497692,
"learning_rate": 0.0011509900990099011,
"loss": 0.6111,
"step": 882
},
{
"epoch": 1.962767435398722,
"grad_norm": 0.014137241058051586,
"learning_rate": 0.0011485148514851487,
"loss": 0.5668,
"step": 883
},
{
"epoch": 1.96499027507641,
"grad_norm": 0.02786339819431305,
"learning_rate": 0.001146039603960396,
"loss": 0.6426,
"step": 884
},
{
"epoch": 1.9672131147540983,
"grad_norm": 0.012125310488045216,
"learning_rate": 0.0011435643564356437,
"loss": 0.6304,
"step": 885
},
{
"epoch": 1.9694359544317868,
"grad_norm": 0.019771946594119072,
"learning_rate": 0.0011410891089108912,
"loss": 0.6049,
"step": 886
},
{
"epoch": 1.9716587941094748,
"grad_norm": 0.028338497504591942,
"learning_rate": 0.0011386138613861385,
"loss": 0.7123,
"step": 887
},
{
"epoch": 1.9738816337871632,
"grad_norm": 0.01594017632305622,
"learning_rate": 0.0011361386138613863,
"loss": 0.6499,
"step": 888
},
{
"epoch": 1.9761044734648512,
"grad_norm": 0.015689987689256668,
"learning_rate": 0.0011336633663366338,
"loss": 0.6692,
"step": 889
},
{
"epoch": 1.9783273131425396,
"grad_norm": 0.014462132938206196,
"learning_rate": 0.0011311881188118811,
"loss": 0.7722,
"step": 890
},
{
"epoch": 1.9805501528202278,
"grad_norm": 0.01385975256562233,
"learning_rate": 0.0011287128712871288,
"loss": 0.4916,
"step": 891
},
{
"epoch": 1.982772992497916,
"grad_norm": 0.01998106576502323,
"learning_rate": 0.0011262376237623764,
"loss": 0.4741,
"step": 892
},
{
"epoch": 1.9849958321756045,
"grad_norm": 0.0200062096118927,
"learning_rate": 0.0011237623762376237,
"loss": 0.6419,
"step": 893
},
{
"epoch": 1.9872186718532925,
"grad_norm": 0.01526566967368126,
"learning_rate": 0.0011212871287128712,
"loss": 0.7451,
"step": 894
},
{
"epoch": 1.989441511530981,
"grad_norm": 0.022502193227410316,
"learning_rate": 0.001118811881188119,
"loss": 0.7621,
"step": 895
},
{
"epoch": 1.991664351208669,
"grad_norm": 0.01716960035264492,
"learning_rate": 0.0011163366336633662,
"loss": 0.522,
"step": 896
},
{
"epoch": 1.9938871908863574,
"grad_norm": 0.013558654114603996,
"learning_rate": 0.0011138613861386138,
"loss": 0.5354,
"step": 897
},
{
"epoch": 1.9961100305640456,
"grad_norm": 0.012157892808318138,
"learning_rate": 0.0011113861386138615,
"loss": 0.5341,
"step": 898
},
{
"epoch": 1.9983328702417338,
"grad_norm": 0.020689917728304863,
"learning_rate": 0.0011089108910891088,
"loss": 0.7788,
"step": 899
},
{
"epoch": 2.000555709919422,
"grad_norm": 0.013700471259653568,
"learning_rate": 0.0011064356435643563,
"loss": 0.4876,
"step": 900
},
{
"epoch": 2.00277854959711,
"grad_norm": 0.022665875032544136,
"learning_rate": 0.001103960396039604,
"loss": 0.549,
"step": 901
},
{
"epoch": 2.0050013892747987,
"grad_norm": 0.012792624533176422,
"learning_rate": 0.0011014851485148514,
"loss": 0.5934,
"step": 902
},
{
"epoch": 2.0072242289524866,
"grad_norm": 0.014675344340503216,
"learning_rate": 0.001099009900990099,
"loss": 0.6914,
"step": 903
},
{
"epoch": 2.009447068630175,
"grad_norm": 0.02120724506676197,
"learning_rate": 0.0010965346534653466,
"loss": 0.6156,
"step": 904
},
{
"epoch": 2.011669908307863,
"grad_norm": 0.014748603105545044,
"learning_rate": 0.001094059405940594,
"loss": 0.622,
"step": 905
},
{
"epoch": 2.0138927479855515,
"grad_norm": 0.014371293596923351,
"learning_rate": 0.0010915841584158415,
"loss": 0.5474,
"step": 906
},
{
"epoch": 2.01611558766324,
"grad_norm": 0.017763959243893623,
"learning_rate": 0.0010891089108910892,
"loss": 0.5761,
"step": 907
},
{
"epoch": 2.018338427340928,
"grad_norm": 0.01812128722667694,
"learning_rate": 0.0010866336633663367,
"loss": 0.7893,
"step": 908
},
{
"epoch": 2.0205612670186164,
"grad_norm": 0.012955384328961372,
"learning_rate": 0.001084158415841584,
"loss": 0.5467,
"step": 909
},
{
"epoch": 2.0227841066963044,
"grad_norm": 0.01600860245525837,
"learning_rate": 0.0010816831683168318,
"loss": 0.6869,
"step": 910
},
{
"epoch": 2.025006946373993,
"grad_norm": 0.018638163805007935,
"learning_rate": 0.0010792079207920793,
"loss": 0.5909,
"step": 911
},
{
"epoch": 2.027229786051681,
"grad_norm": 0.018940769135951996,
"learning_rate": 0.0010767326732673266,
"loss": 0.7257,
"step": 912
},
{
"epoch": 2.0294526257293692,
"grad_norm": 0.016438838094472885,
"learning_rate": 0.0010742574257425743,
"loss": 0.6541,
"step": 913
},
{
"epoch": 2.0316754654070577,
"grad_norm": 0.015446559526026249,
"learning_rate": 0.0010717821782178219,
"loss": 0.4858,
"step": 914
},
{
"epoch": 2.0338983050847457,
"grad_norm": 0.01849900372326374,
"learning_rate": 0.0010693069306930692,
"loss": 0.642,
"step": 915
},
{
"epoch": 2.036121144762434,
"grad_norm": 0.016022328287363052,
"learning_rate": 0.001066831683168317,
"loss": 0.5797,
"step": 916
},
{
"epoch": 2.038343984440122,
"grad_norm": 0.011567950248718262,
"learning_rate": 0.0010643564356435644,
"loss": 0.5816,
"step": 917
},
{
"epoch": 2.0405668241178105,
"grad_norm": 0.013285503722727299,
"learning_rate": 0.0010618811881188117,
"loss": 0.651,
"step": 918
},
{
"epoch": 2.0427896637954985,
"grad_norm": 0.020052414387464523,
"learning_rate": 0.0010594059405940595,
"loss": 0.6495,
"step": 919
},
{
"epoch": 2.045012503473187,
"grad_norm": 0.02657526172697544,
"learning_rate": 0.001056930693069307,
"loss": 0.6453,
"step": 920
},
{
"epoch": 2.0472353431508754,
"grad_norm": 0.019164010882377625,
"learning_rate": 0.0010544554455445543,
"loss": 0.5374,
"step": 921
},
{
"epoch": 2.0494581828285634,
"grad_norm": 0.016299502924084663,
"learning_rate": 0.001051980198019802,
"loss": 0.6959,
"step": 922
},
{
"epoch": 2.051681022506252,
"grad_norm": 0.01600020006299019,
"learning_rate": 0.0010495049504950496,
"loss": 0.645,
"step": 923
},
{
"epoch": 2.05390386218394,
"grad_norm": 0.018981320783495903,
"learning_rate": 0.0010470297029702969,
"loss": 0.7247,
"step": 924
},
{
"epoch": 2.0561267018616283,
"grad_norm": 0.018881479278206825,
"learning_rate": 0.0010445544554455446,
"loss": 0.668,
"step": 925
},
{
"epoch": 2.0583495415393163,
"grad_norm": 0.030967555940151215,
"learning_rate": 0.0010420792079207921,
"loss": 0.5847,
"step": 926
},
{
"epoch": 2.0605723812170047,
"grad_norm": 0.017560211941599846,
"learning_rate": 0.0010396039603960394,
"loss": 0.6954,
"step": 927
},
{
"epoch": 2.062795220894693,
"grad_norm": 0.01360076479613781,
"learning_rate": 0.0010371287128712872,
"loss": 0.6609,
"step": 928
},
{
"epoch": 2.065018060572381,
"grad_norm": 0.016593527048826218,
"learning_rate": 0.0010346534653465347,
"loss": 0.6066,
"step": 929
},
{
"epoch": 2.0672409002500696,
"grad_norm": 0.014666872099041939,
"learning_rate": 0.001032178217821782,
"loss": 0.5952,
"step": 930
},
{
"epoch": 2.0694637399277576,
"grad_norm": 0.014250427484512329,
"learning_rate": 0.0010297029702970298,
"loss": 0.5504,
"step": 931
},
{
"epoch": 2.071686579605446,
"grad_norm": 0.02060864120721817,
"learning_rate": 0.0010272277227722773,
"loss": 0.5399,
"step": 932
},
{
"epoch": 2.073909419283134,
"grad_norm": 0.020173972472548485,
"learning_rate": 0.0010247524752475248,
"loss": 0.6484,
"step": 933
},
{
"epoch": 2.0761322589608224,
"grad_norm": 0.017474539577960968,
"learning_rate": 0.0010222772277227723,
"loss": 0.5387,
"step": 934
},
{
"epoch": 2.078355098638511,
"grad_norm": 0.014941662549972534,
"learning_rate": 0.0010198019801980198,
"loss": 0.5956,
"step": 935
},
{
"epoch": 2.080577938316199,
"grad_norm": 0.027800098061561584,
"learning_rate": 0.0010173267326732674,
"loss": 0.601,
"step": 936
},
{
"epoch": 2.0828007779938873,
"grad_norm": 0.01730935461819172,
"learning_rate": 0.0010148514851485149,
"loss": 0.7134,
"step": 937
},
{
"epoch": 2.0850236176715753,
"grad_norm": 0.01698504388332367,
"learning_rate": 0.0010123762376237624,
"loss": 0.5066,
"step": 938
},
{
"epoch": 2.0872464573492637,
"grad_norm": 0.015243062749505043,
"learning_rate": 0.00100990099009901,
"loss": 0.657,
"step": 939
},
{
"epoch": 2.0894692970269517,
"grad_norm": 0.025019872933626175,
"learning_rate": 0.0010074257425742575,
"loss": 0.6808,
"step": 940
},
{
"epoch": 2.09169213670464,
"grad_norm": 0.012755641713738441,
"learning_rate": 0.001004950495049505,
"loss": 0.5509,
"step": 941
},
{
"epoch": 2.0939149763823286,
"grad_norm": 0.01922733336687088,
"learning_rate": 0.0010024752475247525,
"loss": 0.8053,
"step": 942
},
{
"epoch": 2.0961378160600166,
"grad_norm": 0.03526071086525917,
"learning_rate": 0.001,
"loss": 0.6377,
"step": 943
},
{
"epoch": 2.098360655737705,
"grad_norm": 0.021753890439867973,
"learning_rate": 0.0009975247524752475,
"loss": 0.727,
"step": 944
},
{
"epoch": 2.100583495415393,
"grad_norm": 0.016603440046310425,
"learning_rate": 0.000995049504950495,
"loss": 0.6584,
"step": 945
},
{
"epoch": 2.1028063350930815,
"grad_norm": 0.021078625693917274,
"learning_rate": 0.0009925742574257426,
"loss": 0.6133,
"step": 946
},
{
"epoch": 2.1050291747707695,
"grad_norm": 0.037239789962768555,
"learning_rate": 0.0009900990099009901,
"loss": 0.6421,
"step": 947
},
{
"epoch": 2.107252014448458,
"grad_norm": 0.012814635410904884,
"learning_rate": 0.0009876237623762376,
"loss": 0.6296,
"step": 948
},
{
"epoch": 2.1094748541261463,
"grad_norm": 0.018249889835715294,
"learning_rate": 0.0009851485148514852,
"loss": 0.6202,
"step": 949
},
{
"epoch": 2.1116976938038343,
"grad_norm": 0.0173304695636034,
"learning_rate": 0.0009826732673267327,
"loss": 0.5801,
"step": 950
},
{
"epoch": 2.1139205334815228,
"grad_norm": 0.020902493968605995,
"learning_rate": 0.0009801980198019802,
"loss": 0.7202,
"step": 951
},
{
"epoch": 2.1161433731592107,
"grad_norm": 0.020156148821115494,
"learning_rate": 0.0009777227722772277,
"loss": 0.6196,
"step": 952
},
{
"epoch": 2.118366212836899,
"grad_norm": 0.020738055929541588,
"learning_rate": 0.0009752475247524752,
"loss": 0.573,
"step": 953
},
{
"epoch": 2.120589052514587,
"grad_norm": 0.015748674049973488,
"learning_rate": 0.0009727722772277229,
"loss": 0.5665,
"step": 954
},
{
"epoch": 2.1228118921922756,
"grad_norm": 0.01676134020090103,
"learning_rate": 0.0009702970297029702,
"loss": 0.6478,
"step": 955
},
{
"epoch": 2.125034731869964,
"grad_norm": 0.014315289445221424,
"learning_rate": 0.0009678217821782178,
"loss": 0.6188,
"step": 956
},
{
"epoch": 2.127257571547652,
"grad_norm": 0.013696223497390747,
"learning_rate": 0.0009653465346534654,
"loss": 0.6517,
"step": 957
},
{
"epoch": 2.1294804112253405,
"grad_norm": 0.01995706558227539,
"learning_rate": 0.000962871287128713,
"loss": 0.6499,
"step": 958
},
{
"epoch": 2.1317032509030285,
"grad_norm": 0.01407367642968893,
"learning_rate": 0.0009603960396039604,
"loss": 0.5505,
"step": 959
},
{
"epoch": 2.133926090580717,
"grad_norm": 0.013528737239539623,
"learning_rate": 0.000957920792079208,
"loss": 0.4975,
"step": 960
},
{
"epoch": 2.136148930258405,
"grad_norm": 0.020059697329998016,
"learning_rate": 0.0009554455445544555,
"loss": 0.7414,
"step": 961
},
{
"epoch": 2.1383717699360933,
"grad_norm": 0.01345049124211073,
"learning_rate": 0.000952970297029703,
"loss": 0.5884,
"step": 962
},
{
"epoch": 2.140594609613782,
"grad_norm": 0.013034477829933167,
"learning_rate": 0.0009504950495049506,
"loss": 0.5133,
"step": 963
},
{
"epoch": 2.14281744929147,
"grad_norm": 0.01752249337732792,
"learning_rate": 0.0009480198019801981,
"loss": 0.6086,
"step": 964
},
{
"epoch": 2.145040288969158,
"grad_norm": 0.02016667276620865,
"learning_rate": 0.0009455445544554455,
"loss": 0.6729,
"step": 965
},
{
"epoch": 2.147263128646846,
"grad_norm": 0.01611597090959549,
"learning_rate": 0.0009430693069306931,
"loss": 0.5042,
"step": 966
},
{
"epoch": 2.1494859683245346,
"grad_norm": 0.015491276048123837,
"learning_rate": 0.0009405940594059407,
"loss": 0.4197,
"step": 967
},
{
"epoch": 2.1517088080022226,
"grad_norm": 0.0135455671697855,
"learning_rate": 0.0009381188118811881,
"loss": 0.6672,
"step": 968
},
{
"epoch": 2.153931647679911,
"grad_norm": 0.018375838175415993,
"learning_rate": 0.0009356435643564356,
"loss": 0.6353,
"step": 969
},
{
"epoch": 2.1561544873575995,
"grad_norm": 0.019288305193185806,
"learning_rate": 0.0009331683168316832,
"loss": 0.5963,
"step": 970
},
{
"epoch": 2.1583773270352875,
"grad_norm": 0.018923213705420494,
"learning_rate": 0.0009306930693069307,
"loss": 0.7183,
"step": 971
},
{
"epoch": 2.160600166712976,
"grad_norm": 0.02921975590288639,
"learning_rate": 0.0009282178217821782,
"loss": 0.5991,
"step": 972
},
{
"epoch": 2.162823006390664,
"grad_norm": 0.020133277401328087,
"learning_rate": 0.0009257425742574258,
"loss": 0.5406,
"step": 973
},
{
"epoch": 2.1650458460683524,
"grad_norm": 0.014706325717270374,
"learning_rate": 0.0009232673267326732,
"loss": 0.5322,
"step": 974
},
{
"epoch": 2.1672686857460404,
"grad_norm": 0.030446428805589676,
"learning_rate": 0.0009207920792079207,
"loss": 0.6796,
"step": 975
},
{
"epoch": 2.169491525423729,
"grad_norm": 0.015138603746891022,
"learning_rate": 0.0009183168316831684,
"loss": 0.5747,
"step": 976
},
{
"epoch": 2.1717143651014172,
"grad_norm": 0.01757650263607502,
"learning_rate": 0.0009158415841584158,
"loss": 0.7159,
"step": 977
},
{
"epoch": 2.1739372047791052,
"grad_norm": 0.017245231196284294,
"learning_rate": 0.0009133663366336633,
"loss": 0.5669,
"step": 978
},
{
"epoch": 2.1761600444567937,
"grad_norm": 0.018706029281020164,
"learning_rate": 0.0009108910891089109,
"loss": 0.5436,
"step": 979
},
{
"epoch": 2.1783828841344817,
"grad_norm": 0.010909450240433216,
"learning_rate": 0.0009084158415841584,
"loss": 0.5101,
"step": 980
},
{
"epoch": 2.18060572381217,
"grad_norm": 0.01844753511250019,
"learning_rate": 0.0009059405940594059,
"loss": 0.5766,
"step": 981
},
{
"epoch": 2.182828563489858,
"grad_norm": 0.01436743326485157,
"learning_rate": 0.0009034653465346535,
"loss": 0.4609,
"step": 982
},
{
"epoch": 2.1850514031675465,
"grad_norm": 0.015225561335682869,
"learning_rate": 0.0009009900990099011,
"loss": 0.5411,
"step": 983
},
{
"epoch": 2.187274242845235,
"grad_norm": 0.020290294662117958,
"learning_rate": 0.0008985148514851484,
"loss": 0.6755,
"step": 984
},
{
"epoch": 2.189497082522923,
"grad_norm": 0.013398604467511177,
"learning_rate": 0.0008960396039603961,
"loss": 0.5362,
"step": 985
},
{
"epoch": 2.1917199222006114,
"grad_norm": 0.01631542108952999,
"learning_rate": 0.0008935643564356437,
"loss": 0.5372,
"step": 986
},
{
"epoch": 2.1939427618782994,
"grad_norm": 0.014349683187901974,
"learning_rate": 0.000891089108910891,
"loss": 0.4823,
"step": 987
},
{
"epoch": 2.196165601555988,
"grad_norm": 0.015834227204322815,
"learning_rate": 0.0008886138613861386,
"loss": 0.4641,
"step": 988
},
{
"epoch": 2.198388441233676,
"grad_norm": 0.022029682993888855,
"learning_rate": 0.0008861386138613863,
"loss": 0.5842,
"step": 989
},
{
"epoch": 2.2006112809113643,
"grad_norm": 0.014018687419593334,
"learning_rate": 0.0008836633663366336,
"loss": 0.5924,
"step": 990
},
{
"epoch": 2.2028341205890527,
"grad_norm": 0.0164606012403965,
"learning_rate": 0.0008811881188118812,
"loss": 0.4709,
"step": 991
},
{
"epoch": 2.2050569602667407,
"grad_norm": 0.014225888065993786,
"learning_rate": 0.0008787128712871288,
"loss": 0.6046,
"step": 992
},
{
"epoch": 2.207279799944429,
"grad_norm": 0.018137982115149498,
"learning_rate": 0.0008762376237623761,
"loss": 0.6395,
"step": 993
},
{
"epoch": 2.209502639622117,
"grad_norm": 0.015276088379323483,
"learning_rate": 0.0008737623762376238,
"loss": 0.6237,
"step": 994
},
{
"epoch": 2.2117254792998056,
"grad_norm": 0.019907942041754723,
"learning_rate": 0.0008712871287128714,
"loss": 0.6448,
"step": 995
},
{
"epoch": 2.2139483189774936,
"grad_norm": 0.024012433364987373,
"learning_rate": 0.0008688118811881187,
"loss": 0.7278,
"step": 996
},
{
"epoch": 2.216171158655182,
"grad_norm": 0.021498383954167366,
"learning_rate": 0.0008663366336633663,
"loss": 0.4841,
"step": 997
},
{
"epoch": 2.2183939983328704,
"grad_norm": 0.017866021022200584,
"learning_rate": 0.000863861386138614,
"loss": 0.6018,
"step": 998
},
{
"epoch": 2.2206168380105584,
"grad_norm": 0.01377987489104271,
"learning_rate": 0.0008613861386138613,
"loss": 0.5827,
"step": 999
},
{
"epoch": 2.222839677688247,
"grad_norm": 0.016971712931990623,
"learning_rate": 0.0008589108910891089,
"loss": 0.5961,
"step": 1000
},
{
"epoch": 2.225062517365935,
"grad_norm": 0.02038002572953701,
"learning_rate": 0.0008564356435643565,
"loss": 0.7805,
"step": 1001
},
{
"epoch": 2.2272853570436233,
"grad_norm": 0.016022466123104095,
"learning_rate": 0.0008539603960396039,
"loss": 0.5474,
"step": 1002
},
{
"epoch": 2.2295081967213113,
"grad_norm": 0.020060956478118896,
"learning_rate": 0.0008514851485148515,
"loss": 0.5361,
"step": 1003
},
{
"epoch": 2.2317310363989997,
"grad_norm": 0.03471532464027405,
"learning_rate": 0.0008490099009900991,
"loss": 0.6665,
"step": 1004
},
{
"epoch": 2.233953876076688,
"grad_norm": 0.013896548189222813,
"learning_rate": 0.0008465346534653464,
"loss": 0.7585,
"step": 1005
},
{
"epoch": 2.236176715754376,
"grad_norm": 0.01731082797050476,
"learning_rate": 0.000844059405940594,
"loss": 0.6095,
"step": 1006
},
{
"epoch": 2.2383995554320646,
"grad_norm": 0.022298304364085197,
"learning_rate": 0.0008415841584158417,
"loss": 0.6763,
"step": 1007
},
{
"epoch": 2.2406223951097526,
"grad_norm": 0.02241886593401432,
"learning_rate": 0.0008391089108910892,
"loss": 0.6923,
"step": 1008
},
{
"epoch": 2.242845234787441,
"grad_norm": 0.01641622744500637,
"learning_rate": 0.0008366336633663366,
"loss": 0.4781,
"step": 1009
},
{
"epoch": 2.245068074465129,
"grad_norm": 0.01847672462463379,
"learning_rate": 0.0008341584158415842,
"loss": 0.7357,
"step": 1010
},
{
"epoch": 2.2472909141428175,
"grad_norm": 0.0181216262280941,
"learning_rate": 0.0008316831683168318,
"loss": 0.6434,
"step": 1011
},
{
"epoch": 2.249513753820506,
"grad_norm": 0.016178471967577934,
"learning_rate": 0.0008292079207920792,
"loss": 0.4554,
"step": 1012
},
{
"epoch": 2.251736593498194,
"grad_norm": 0.017215240746736526,
"learning_rate": 0.0008267326732673268,
"loss": 0.4935,
"step": 1013
},
{
"epoch": 2.2539594331758823,
"grad_norm": 0.014085826463997364,
"learning_rate": 0.0008242574257425743,
"loss": 0.6251,
"step": 1014
},
{
"epoch": 2.2561822728535703,
"grad_norm": 0.02178933471441269,
"learning_rate": 0.0008217821782178218,
"loss": 0.4698,
"step": 1015
},
{
"epoch": 2.2584051125312588,
"grad_norm": 0.019534152001142502,
"learning_rate": 0.0008193069306930694,
"loss": 0.5568,
"step": 1016
},
{
"epoch": 2.2606279522089467,
"grad_norm": 0.012782914564013481,
"learning_rate": 0.0008168316831683169,
"loss": 0.6018,
"step": 1017
},
{
"epoch": 2.262850791886635,
"grad_norm": 0.014194606803357601,
"learning_rate": 0.0008143564356435643,
"loss": 0.5657,
"step": 1018
},
{
"epoch": 2.2650736315643236,
"grad_norm": 0.011684983968734741,
"learning_rate": 0.0008118811881188118,
"loss": 0.572,
"step": 1019
},
{
"epoch": 2.2672964712420116,
"grad_norm": 0.017416108399629593,
"learning_rate": 0.0008094059405940595,
"loss": 0.6647,
"step": 1020
},
{
"epoch": 2.2695193109197,
"grad_norm": 0.021715115755796432,
"learning_rate": 0.0008069306930693069,
"loss": 0.5813,
"step": 1021
},
{
"epoch": 2.271742150597388,
"grad_norm": 0.013240072876214981,
"learning_rate": 0.0008044554455445544,
"loss": 0.5668,
"step": 1022
},
{
"epoch": 2.2739649902750765,
"grad_norm": 0.020175835117697716,
"learning_rate": 0.000801980198019802,
"loss": 0.656,
"step": 1023
},
{
"epoch": 2.2761878299527645,
"grad_norm": 0.017000149935483932,
"learning_rate": 0.0007995049504950495,
"loss": 0.676,
"step": 1024
},
{
"epoch": 2.278410669630453,
"grad_norm": 0.016977574676275253,
"learning_rate": 0.000797029702970297,
"loss": 0.4404,
"step": 1025
},
{
"epoch": 2.2806335093081413,
"grad_norm": 0.01674072816967964,
"learning_rate": 0.0007945544554455446,
"loss": 0.7254,
"step": 1026
},
{
"epoch": 2.2828563489858293,
"grad_norm": 0.01809377409517765,
"learning_rate": 0.000792079207920792,
"loss": 0.6138,
"step": 1027
},
{
"epoch": 2.285079188663518,
"grad_norm": 0.01745619997382164,
"learning_rate": 0.0007896039603960395,
"loss": 0.5348,
"step": 1028
},
{
"epoch": 2.2873020283412058,
"grad_norm": 0.013998817652463913,
"learning_rate": 0.0007871287128712872,
"loss": 0.6114,
"step": 1029
},
{
"epoch": 2.289524868018894,
"grad_norm": 0.017970522865653038,
"learning_rate": 0.0007846534653465346,
"loss": 0.5384,
"step": 1030
},
{
"epoch": 2.291747707696582,
"grad_norm": 0.01691269688308239,
"learning_rate": 0.0007821782178217821,
"loss": 0.6129,
"step": 1031
},
{
"epoch": 2.2939705473742706,
"grad_norm": 0.015712127089500427,
"learning_rate": 0.0007797029702970297,
"loss": 0.5979,
"step": 1032
},
{
"epoch": 2.296193387051959,
"grad_norm": 0.01665470376610756,
"learning_rate": 0.0007772277227722774,
"loss": 0.6595,
"step": 1033
},
{
"epoch": 2.298416226729647,
"grad_norm": 0.018899960443377495,
"learning_rate": 0.0007747524752475247,
"loss": 0.6313,
"step": 1034
},
{
"epoch": 2.3006390664073355,
"grad_norm": 0.01907293312251568,
"learning_rate": 0.0007722772277227723,
"loss": 0.6861,
"step": 1035
},
{
"epoch": 2.3028619060850235,
"grad_norm": 0.01808004081249237,
"learning_rate": 0.0007698019801980199,
"loss": 0.5455,
"step": 1036
},
{
"epoch": 2.305084745762712,
"grad_norm": 0.02773713693022728,
"learning_rate": 0.0007673267326732672,
"loss": 0.6716,
"step": 1037
},
{
"epoch": 2.3073075854404,
"grad_norm": 0.0136567959561944,
"learning_rate": 0.0007648514851485149,
"loss": 0.5891,
"step": 1038
},
{
"epoch": 2.3095304251180884,
"grad_norm": 0.01570400409400463,
"learning_rate": 0.0007623762376237625,
"loss": 0.648,
"step": 1039
},
{
"epoch": 2.311753264795777,
"grad_norm": 0.020001349970698357,
"learning_rate": 0.0007599009900990098,
"loss": 0.8376,
"step": 1040
},
{
"epoch": 2.313976104473465,
"grad_norm": 0.025027025490999222,
"learning_rate": 0.0007574257425742574,
"loss": 0.7757,
"step": 1041
},
{
"epoch": 2.3161989441511532,
"grad_norm": 0.023692984133958817,
"learning_rate": 0.0007549504950495051,
"loss": 0.6278,
"step": 1042
},
{
"epoch": 2.3184217838288412,
"grad_norm": 0.015494839288294315,
"learning_rate": 0.0007524752475247524,
"loss": 0.5303,
"step": 1043
},
{
"epoch": 2.3206446235065297,
"grad_norm": 0.01560200285166502,
"learning_rate": 0.00075,
"loss": 0.5107,
"step": 1044
},
{
"epoch": 2.3228674631842177,
"grad_norm": 0.03477834537625313,
"learning_rate": 0.0007475247524752475,
"loss": 0.6573,
"step": 1045
},
{
"epoch": 2.325090302861906,
"grad_norm": 0.030586643144488335,
"learning_rate": 0.0007450495049504951,
"loss": 0.5893,
"step": 1046
},
{
"epoch": 2.3273131425395945,
"grad_norm": 0.0189833864569664,
"learning_rate": 0.0007425742574257426,
"loss": 0.4884,
"step": 1047
},
{
"epoch": 2.3295359822172825,
"grad_norm": 0.01656891033053398,
"learning_rate": 0.0007400990099009901,
"loss": 0.545,
"step": 1048
},
{
"epoch": 2.331758821894971,
"grad_norm": 0.017621248960494995,
"learning_rate": 0.0007376237623762376,
"loss": 0.5904,
"step": 1049
},
{
"epoch": 2.333981661572659,
"grad_norm": 0.029514534398913383,
"learning_rate": 0.0007351485148514852,
"loss": 0.7345,
"step": 1050
},
{
"epoch": 2.3362045012503474,
"grad_norm": 0.014921414665877819,
"learning_rate": 0.0007326732673267328,
"loss": 0.5596,
"step": 1051
},
{
"epoch": 2.3384273409280354,
"grad_norm": 0.020877940580248833,
"learning_rate": 0.0007301980198019802,
"loss": 0.598,
"step": 1052
},
{
"epoch": 2.340650180605724,
"grad_norm": 0.01902448944747448,
"learning_rate": 0.0007277227722772277,
"loss": 0.5555,
"step": 1053
},
{
"epoch": 2.3428730202834123,
"grad_norm": 0.01561102457344532,
"learning_rate": 0.0007252475247524753,
"loss": 0.547,
"step": 1054
},
{
"epoch": 2.3450958599611003,
"grad_norm": 0.0154028395190835,
"learning_rate": 0.0007227722772277228,
"loss": 0.5978,
"step": 1055
},
{
"epoch": 2.3473186996387887,
"grad_norm": 0.014076720923185349,
"learning_rate": 0.0007202970297029703,
"loss": 0.6162,
"step": 1056
},
{
"epoch": 2.3495415393164767,
"grad_norm": 0.01202862337231636,
"learning_rate": 0.0007178217821782179,
"loss": 0.5687,
"step": 1057
},
{
"epoch": 2.351764378994165,
"grad_norm": 0.015466923825442791,
"learning_rate": 0.0007153465346534653,
"loss": 0.499,
"step": 1058
},
{
"epoch": 2.353987218671853,
"grad_norm": 0.014686975628137589,
"learning_rate": 0.0007128712871287129,
"loss": 0.509,
"step": 1059
},
{
"epoch": 2.3562100583495416,
"grad_norm": 0.019549714401364326,
"learning_rate": 0.0007103960396039605,
"loss": 0.5659,
"step": 1060
},
{
"epoch": 2.35843289802723,
"grad_norm": 0.010959336534142494,
"learning_rate": 0.0007079207920792079,
"loss": 0.4723,
"step": 1061
},
{
"epoch": 2.360655737704918,
"grad_norm": 0.017420019954442978,
"learning_rate": 0.0007054455445544554,
"loss": 0.6579,
"step": 1062
},
{
"epoch": 2.3628785773826064,
"grad_norm": 0.016592038795351982,
"learning_rate": 0.000702970297029703,
"loss": 0.624,
"step": 1063
},
{
"epoch": 2.3651014170602944,
"grad_norm": 0.015422000549733639,
"learning_rate": 0.0007004950495049505,
"loss": 0.6795,
"step": 1064
},
{
"epoch": 2.367324256737983,
"grad_norm": 0.02857932634651661,
"learning_rate": 0.0006980198019801981,
"loss": 0.6188,
"step": 1065
},
{
"epoch": 2.369547096415671,
"grad_norm": 0.020330024883151054,
"learning_rate": 0.0006955445544554456,
"loss": 0.5202,
"step": 1066
},
{
"epoch": 2.3717699360933593,
"grad_norm": 0.04005992412567139,
"learning_rate": 0.000693069306930693,
"loss": 0.5341,
"step": 1067
},
{
"epoch": 2.3739927757710477,
"grad_norm": 0.014854200184345245,
"learning_rate": 0.0006905940594059407,
"loss": 0.55,
"step": 1068
},
{
"epoch": 2.3762156154487357,
"grad_norm": 0.012588760815560818,
"learning_rate": 0.0006881188118811882,
"loss": 0.5753,
"step": 1069
},
{
"epoch": 2.378438455126424,
"grad_norm": 0.034118834882974625,
"learning_rate": 0.0006856435643564356,
"loss": 0.602,
"step": 1070
},
{
"epoch": 2.380661294804112,
"grad_norm": 0.021048780530691147,
"learning_rate": 0.0006831683168316832,
"loss": 0.7728,
"step": 1071
},
{
"epoch": 2.3828841344818006,
"grad_norm": 0.020414873957633972,
"learning_rate": 0.0006806930693069306,
"loss": 0.6985,
"step": 1072
},
{
"epoch": 2.3851069741594886,
"grad_norm": 0.012938344851136208,
"learning_rate": 0.0006782178217821782,
"loss": 0.5337,
"step": 1073
},
{
"epoch": 2.387329813837177,
"grad_norm": 0.01313922181725502,
"learning_rate": 0.0006757425742574258,
"loss": 0.4339,
"step": 1074
},
{
"epoch": 2.3895526535148655,
"grad_norm": 0.01818598248064518,
"learning_rate": 0.0006732673267326732,
"loss": 0.6091,
"step": 1075
},
{
"epoch": 2.3917754931925534,
"grad_norm": 0.017337147146463394,
"learning_rate": 0.0006707920792079208,
"loss": 0.4966,
"step": 1076
},
{
"epoch": 2.393998332870242,
"grad_norm": 0.017781397327780724,
"learning_rate": 0.0006683168316831684,
"loss": 0.6172,
"step": 1077
},
{
"epoch": 2.39622117254793,
"grad_norm": 0.028531668707728386,
"learning_rate": 0.0006658415841584158,
"loss": 0.6679,
"step": 1078
},
{
"epoch": 2.3984440122256183,
"grad_norm": 0.023334413766860962,
"learning_rate": 0.0006633663366336634,
"loss": 0.558,
"step": 1079
},
{
"epoch": 2.4006668519033063,
"grad_norm": 0.018032846972346306,
"learning_rate": 0.0006608910891089109,
"loss": 0.5269,
"step": 1080
},
{
"epoch": 2.4028896915809947,
"grad_norm": 0.015402448363602161,
"learning_rate": 0.0006584158415841584,
"loss": 0.7979,
"step": 1081
},
{
"epoch": 2.405112531258683,
"grad_norm": 0.019905133172869682,
"learning_rate": 0.000655940594059406,
"loss": 0.6255,
"step": 1082
},
{
"epoch": 2.407335370936371,
"grad_norm": 0.022906949743628502,
"learning_rate": 0.0006534653465346535,
"loss": 0.5736,
"step": 1083
},
{
"epoch": 2.4095582106140596,
"grad_norm": 0.014906423166394234,
"learning_rate": 0.0006509900990099009,
"loss": 0.5573,
"step": 1084
},
{
"epoch": 2.4117810502917476,
"grad_norm": 0.014077015221118927,
"learning_rate": 0.0006485148514851485,
"loss": 0.4867,
"step": 1085
},
{
"epoch": 2.414003889969436,
"grad_norm": 0.02250657044351101,
"learning_rate": 0.0006460396039603961,
"loss": 0.6064,
"step": 1086
},
{
"epoch": 2.416226729647124,
"grad_norm": 0.019261633977293968,
"learning_rate": 0.0006435643564356435,
"loss": 0.6381,
"step": 1087
},
{
"epoch": 2.4184495693248125,
"grad_norm": 0.019322820007801056,
"learning_rate": 0.0006410891089108911,
"loss": 0.5413,
"step": 1088
},
{
"epoch": 2.420672409002501,
"grad_norm": 0.021883083507418633,
"learning_rate": 0.0006386138613861386,
"loss": 0.6144,
"step": 1089
},
{
"epoch": 2.422895248680189,
"grad_norm": 0.02103458158671856,
"learning_rate": 0.0006361386138613862,
"loss": 0.5646,
"step": 1090
},
{
"epoch": 2.4251180883578773,
"grad_norm": 0.01773696579039097,
"learning_rate": 0.0006336633663366337,
"loss": 0.5891,
"step": 1091
},
{
"epoch": 2.4273409280355653,
"grad_norm": 0.024175936356186867,
"learning_rate": 0.0006311881188118812,
"loss": 0.6319,
"step": 1092
},
{
"epoch": 2.4295637677132538,
"grad_norm": 0.01638614386320114,
"learning_rate": 0.0006287128712871287,
"loss": 0.6784,
"step": 1093
},
{
"epoch": 2.4317866073909418,
"grad_norm": 0.015058539807796478,
"learning_rate": 0.0006262376237623763,
"loss": 0.5012,
"step": 1094
},
{
"epoch": 2.43400944706863,
"grad_norm": 0.012811798602342606,
"learning_rate": 0.0006237623762376238,
"loss": 0.6107,
"step": 1095
},
{
"epoch": 2.4362322867463186,
"grad_norm": 0.0197319146245718,
"learning_rate": 0.0006212871287128713,
"loss": 0.5236,
"step": 1096
},
{
"epoch": 2.4384551264240066,
"grad_norm": 0.018934501335024834,
"learning_rate": 0.0006188118811881188,
"loss": 0.521,
"step": 1097
},
{
"epoch": 2.440677966101695,
"grad_norm": 0.014251308515667915,
"learning_rate": 0.0006163366336633663,
"loss": 0.7587,
"step": 1098
},
{
"epoch": 2.442900805779383,
"grad_norm": 0.018055707216262817,
"learning_rate": 0.0006138613861386139,
"loss": 0.5067,
"step": 1099
},
{
"epoch": 2.4451236454570715,
"grad_norm": 0.018532631918787956,
"learning_rate": 0.0006113861386138614,
"loss": 0.5683,
"step": 1100
},
{
"epoch": 2.4473464851347595,
"grad_norm": 0.01650720275938511,
"learning_rate": 0.000608910891089109,
"loss": 0.7914,
"step": 1101
},
{
"epoch": 2.449569324812448,
"grad_norm": 0.017233526334166527,
"learning_rate": 0.0006064356435643564,
"loss": 0.5865,
"step": 1102
},
{
"epoch": 2.4517921644901364,
"grad_norm": 0.012895757332444191,
"learning_rate": 0.000603960396039604,
"loss": 0.689,
"step": 1103
},
{
"epoch": 2.4540150041678244,
"grad_norm": 0.01163373701274395,
"learning_rate": 0.0006014851485148516,
"loss": 0.5351,
"step": 1104
},
{
"epoch": 2.456237843845513,
"grad_norm": 0.02952023595571518,
"learning_rate": 0.000599009900990099,
"loss": 0.6504,
"step": 1105
},
{
"epoch": 2.458460683523201,
"grad_norm": 0.017913196235895157,
"learning_rate": 0.0005965346534653465,
"loss": 0.7684,
"step": 1106
},
{
"epoch": 2.4606835232008892,
"grad_norm": 0.015341251157224178,
"learning_rate": 0.0005940594059405942,
"loss": 0.6273,
"step": 1107
},
{
"epoch": 2.4629063628785772,
"grad_norm": 0.02385122701525688,
"learning_rate": 0.0005915841584158416,
"loss": 0.641,
"step": 1108
},
{
"epoch": 2.4651292025562657,
"grad_norm": 0.020036252215504646,
"learning_rate": 0.0005891089108910891,
"loss": 0.5889,
"step": 1109
},
{
"epoch": 2.467352042233954,
"grad_norm": 0.019238587468862534,
"learning_rate": 0.0005866336633663367,
"loss": 0.6194,
"step": 1110
},
{
"epoch": 2.469574881911642,
"grad_norm": 0.01825280860066414,
"learning_rate": 0.0005841584158415841,
"loss": 0.6515,
"step": 1111
},
{
"epoch": 2.4717977215893305,
"grad_norm": 0.019387997686862946,
"learning_rate": 0.0005816831683168317,
"loss": 0.5575,
"step": 1112
},
{
"epoch": 2.4740205612670185,
"grad_norm": 0.019788188859820366,
"learning_rate": 0.0005792079207920793,
"loss": 0.5544,
"step": 1113
},
{
"epoch": 2.476243400944707,
"grad_norm": 0.011799083091318607,
"learning_rate": 0.0005767326732673267,
"loss": 0.5047,
"step": 1114
},
{
"epoch": 2.478466240622395,
"grad_norm": 0.01735115796327591,
"learning_rate": 0.0005742574257425743,
"loss": 0.6194,
"step": 1115
},
{
"epoch": 2.4806890803000834,
"grad_norm": 0.017334984615445137,
"learning_rate": 0.0005717821782178219,
"loss": 0.6659,
"step": 1116
},
{
"epoch": 2.482911919977772,
"grad_norm": 0.013627518899738789,
"learning_rate": 0.0005693069306930693,
"loss": 0.6804,
"step": 1117
},
{
"epoch": 2.48513475965546,
"grad_norm": 0.015926288440823555,
"learning_rate": 0.0005668316831683169,
"loss": 0.552,
"step": 1118
},
{
"epoch": 2.4873575993331483,
"grad_norm": 0.031809452921152115,
"learning_rate": 0.0005643564356435644,
"loss": 0.5977,
"step": 1119
},
{
"epoch": 2.4895804390108363,
"grad_norm": 0.013694511726498604,
"learning_rate": 0.0005618811881188118,
"loss": 0.4524,
"step": 1120
},
{
"epoch": 2.4918032786885247,
"grad_norm": 0.02304377593100071,
"learning_rate": 0.0005594059405940595,
"loss": 0.5375,
"step": 1121
},
{
"epoch": 2.4940261183662127,
"grad_norm": 0.014395623467862606,
"learning_rate": 0.0005569306930693069,
"loss": 0.6207,
"step": 1122
},
{
"epoch": 2.496248958043901,
"grad_norm": 0.019711505621671677,
"learning_rate": 0.0005544554455445544,
"loss": 0.7473,
"step": 1123
},
{
"epoch": 2.4984717977215896,
"grad_norm": 0.019002612680196762,
"learning_rate": 0.000551980198019802,
"loss": 0.5221,
"step": 1124
},
{
"epoch": 2.5006946373992776,
"grad_norm": 0.02028425969183445,
"learning_rate": 0.0005495049504950495,
"loss": 0.6702,
"step": 1125
},
{
"epoch": 2.5029174770769655,
"grad_norm": 0.018800638616085052,
"learning_rate": 0.000547029702970297,
"loss": 0.566,
"step": 1126
},
{
"epoch": 2.505140316754654,
"grad_norm": 0.01781066693365574,
"learning_rate": 0.0005445544554455446,
"loss": 0.5553,
"step": 1127
},
{
"epoch": 2.5073631564323424,
"grad_norm": 0.02512913942337036,
"learning_rate": 0.000542079207920792,
"loss": 0.5782,
"step": 1128
},
{
"epoch": 2.5095859961100304,
"grad_norm": 0.016941504552960396,
"learning_rate": 0.0005396039603960396,
"loss": 0.4379,
"step": 1129
},
{
"epoch": 2.511808835787719,
"grad_norm": 0.018688999116420746,
"learning_rate": 0.0005371287128712872,
"loss": 0.5909,
"step": 1130
},
{
"epoch": 2.5140316754654073,
"grad_norm": 0.014856253750622272,
"learning_rate": 0.0005346534653465346,
"loss": 0.6463,
"step": 1131
},
{
"epoch": 2.5162545151430953,
"grad_norm": 0.01583769917488098,
"learning_rate": 0.0005321782178217822,
"loss": 0.5773,
"step": 1132
},
{
"epoch": 2.5184773548207833,
"grad_norm": 0.017723005264997482,
"learning_rate": 0.0005297029702970297,
"loss": 0.6589,
"step": 1133
},
{
"epoch": 2.5207001944984717,
"grad_norm": 0.01602606289088726,
"learning_rate": 0.0005272277227722772,
"loss": 0.5239,
"step": 1134
},
{
"epoch": 2.52292303417616,
"grad_norm": 0.02923739328980446,
"learning_rate": 0.0005247524752475248,
"loss": 0.6907,
"step": 1135
},
{
"epoch": 2.525145873853848,
"grad_norm": 0.018406685441732407,
"learning_rate": 0.0005222772277227723,
"loss": 0.5979,
"step": 1136
},
{
"epoch": 2.5273687135315366,
"grad_norm": 0.01883380301296711,
"learning_rate": 0.0005198019801980197,
"loss": 0.5477,
"step": 1137
},
{
"epoch": 2.529591553209225,
"grad_norm": 0.021243581548333168,
"learning_rate": 0.0005173267326732674,
"loss": 0.6081,
"step": 1138
},
{
"epoch": 2.531814392886913,
"grad_norm": 0.0210957620292902,
"learning_rate": 0.0005148514851485149,
"loss": 0.6677,
"step": 1139
},
{
"epoch": 2.534037232564601,
"grad_norm": 0.014948558993637562,
"learning_rate": 0.0005123762376237624,
"loss": 0.6664,
"step": 1140
},
{
"epoch": 2.5362600722422894,
"grad_norm": 0.0169658400118351,
"learning_rate": 0.0005099009900990099,
"loss": 0.5975,
"step": 1141
},
{
"epoch": 2.538482911919978,
"grad_norm": 0.01910773292183876,
"learning_rate": 0.0005074257425742574,
"loss": 0.7887,
"step": 1142
},
{
"epoch": 2.540705751597666,
"grad_norm": 0.022958895191550255,
"learning_rate": 0.000504950495049505,
"loss": 0.6042,
"step": 1143
},
{
"epoch": 2.5429285912753543,
"grad_norm": 0.015756452456116676,
"learning_rate": 0.0005024752475247525,
"loss": 0.5066,
"step": 1144
},
{
"epoch": 2.5451514309530427,
"grad_norm": 0.023179860785603523,
"learning_rate": 0.0005,
"loss": 0.7382,
"step": 1145
},
{
"epoch": 2.5473742706307307,
"grad_norm": 0.018427975475788116,
"learning_rate": 0.0004975247524752475,
"loss": 0.7563,
"step": 1146
},
{
"epoch": 2.5495971103084187,
"grad_norm": 0.016231978312134743,
"learning_rate": 0.0004950495049504951,
"loss": 0.5783,
"step": 1147
},
{
"epoch": 2.551819949986107,
"grad_norm": 0.014167056418955326,
"learning_rate": 0.0004925742574257426,
"loss": 0.5216,
"step": 1148
},
{
"epoch": 2.5540427896637956,
"grad_norm": 0.022291019558906555,
"learning_rate": 0.0004900990099009901,
"loss": 0.6444,
"step": 1149
},
{
"epoch": 2.5562656293414836,
"grad_norm": 0.0183990690857172,
"learning_rate": 0.0004876237623762376,
"loss": 0.5068,
"step": 1150
},
{
"epoch": 2.558488469019172,
"grad_norm": 0.0216660313308239,
"learning_rate": 0.0004851485148514851,
"loss": 0.6325,
"step": 1151
},
{
"epoch": 2.5607113086968605,
"grad_norm": 0.018358584493398666,
"learning_rate": 0.0004826732673267327,
"loss": 0.6248,
"step": 1152
},
{
"epoch": 2.5629341483745485,
"grad_norm": 0.018779192119836807,
"learning_rate": 0.0004801980198019802,
"loss": 0.6257,
"step": 1153
},
{
"epoch": 2.5651569880522365,
"grad_norm": 0.02526138722896576,
"learning_rate": 0.00047772277227722777,
"loss": 0.6264,
"step": 1154
},
{
"epoch": 2.567379827729925,
"grad_norm": 0.016142774373292923,
"learning_rate": 0.0004752475247524753,
"loss": 0.4821,
"step": 1155
},
{
"epoch": 2.5696026674076133,
"grad_norm": 0.010809332132339478,
"learning_rate": 0.00047277227722772276,
"loss": 0.6248,
"step": 1156
},
{
"epoch": 2.5718255070853013,
"grad_norm": 0.01412492711097002,
"learning_rate": 0.00047029702970297034,
"loss": 0.5484,
"step": 1157
},
{
"epoch": 2.5740483467629898,
"grad_norm": 0.01630065217614174,
"learning_rate": 0.0004678217821782178,
"loss": 0.5974,
"step": 1158
},
{
"epoch": 2.576271186440678,
"grad_norm": 0.023423103615641594,
"learning_rate": 0.0004653465346534653,
"loss": 0.5882,
"step": 1159
},
{
"epoch": 2.578494026118366,
"grad_norm": 0.019574182108044624,
"learning_rate": 0.0004628712871287129,
"loss": 0.691,
"step": 1160
},
{
"epoch": 2.580716865796054,
"grad_norm": 0.02245352789759636,
"learning_rate": 0.00046039603960396037,
"loss": 0.5872,
"step": 1161
},
{
"epoch": 2.5829397054737426,
"grad_norm": 0.021161986514925957,
"learning_rate": 0.0004579207920792079,
"loss": 0.6218,
"step": 1162
},
{
"epoch": 2.585162545151431,
"grad_norm": 0.015376755967736244,
"learning_rate": 0.00045544554455445547,
"loss": 0.5453,
"step": 1163
},
{
"epoch": 2.587385384829119,
"grad_norm": 0.020387450233101845,
"learning_rate": 0.00045297029702970294,
"loss": 0.6943,
"step": 1164
},
{
"epoch": 2.5896082245068075,
"grad_norm": 0.017321715131402016,
"learning_rate": 0.00045049504950495057,
"loss": 0.5572,
"step": 1165
},
{
"epoch": 2.591831064184496,
"grad_norm": 0.013957494869828224,
"learning_rate": 0.00044801980198019804,
"loss": 0.4917,
"step": 1166
},
{
"epoch": 2.594053903862184,
"grad_norm": 0.024132177233695984,
"learning_rate": 0.0004455445544554455,
"loss": 0.5059,
"step": 1167
},
{
"epoch": 2.596276743539872,
"grad_norm": 0.014031797647476196,
"learning_rate": 0.00044306930693069314,
"loss": 0.5578,
"step": 1168
},
{
"epoch": 2.5984995832175604,
"grad_norm": 0.015123395249247551,
"learning_rate": 0.0004405940594059406,
"loss": 0.6005,
"step": 1169
},
{
"epoch": 2.600722422895249,
"grad_norm": 0.016194505617022514,
"learning_rate": 0.0004381188118811881,
"loss": 0.6959,
"step": 1170
},
{
"epoch": 2.602945262572937,
"grad_norm": 0.015521598979830742,
"learning_rate": 0.0004356435643564357,
"loss": 0.6823,
"step": 1171
},
{
"epoch": 2.6051681022506252,
"grad_norm": 0.015657033771276474,
"learning_rate": 0.0004331683168316832,
"loss": 0.6982,
"step": 1172
},
{
"epoch": 2.6073909419283137,
"grad_norm": 0.017940111458301544,
"learning_rate": 0.00043069306930693064,
"loss": 0.6088,
"step": 1173
},
{
"epoch": 2.6096137816060017,
"grad_norm": 0.023241877555847168,
"learning_rate": 0.0004282178217821783,
"loss": 0.551,
"step": 1174
},
{
"epoch": 2.6118366212836897,
"grad_norm": 0.015215943567454815,
"learning_rate": 0.00042574257425742574,
"loss": 0.6157,
"step": 1175
},
{
"epoch": 2.614059460961378,
"grad_norm": 0.016419822350144386,
"learning_rate": 0.0004232673267326732,
"loss": 0.6533,
"step": 1176
},
{
"epoch": 2.6162823006390665,
"grad_norm": 0.01776900142431259,
"learning_rate": 0.00042079207920792084,
"loss": 0.7526,
"step": 1177
},
{
"epoch": 2.6185051403167545,
"grad_norm": 0.04952996224164963,
"learning_rate": 0.0004183168316831683,
"loss": 0.6754,
"step": 1178
},
{
"epoch": 2.620727979994443,
"grad_norm": 0.013856332749128342,
"learning_rate": 0.0004158415841584159,
"loss": 0.5468,
"step": 1179
},
{
"epoch": 2.6229508196721314,
"grad_norm": 0.018015973269939423,
"learning_rate": 0.0004133663366336634,
"loss": 0.6287,
"step": 1180
},
{
"epoch": 2.6251736593498194,
"grad_norm": 0.017965400591492653,
"learning_rate": 0.0004108910891089109,
"loss": 0.5121,
"step": 1181
},
{
"epoch": 2.6273964990275074,
"grad_norm": 0.033831071108579636,
"learning_rate": 0.00040841584158415845,
"loss": 0.5647,
"step": 1182
},
{
"epoch": 2.629619338705196,
"grad_norm": 0.01705920323729515,
"learning_rate": 0.0004059405940594059,
"loss": 0.6454,
"step": 1183
},
{
"epoch": 2.6318421783828843,
"grad_norm": 0.014178541488945484,
"learning_rate": 0.00040346534653465344,
"loss": 0.5411,
"step": 1184
},
{
"epoch": 2.6340650180605722,
"grad_norm": 0.018709300085902214,
"learning_rate": 0.000400990099009901,
"loss": 0.7137,
"step": 1185
},
{
"epoch": 2.6362878577382607,
"grad_norm": 0.019753027707338333,
"learning_rate": 0.0003985148514851485,
"loss": 0.6662,
"step": 1186
},
{
"epoch": 2.638510697415949,
"grad_norm": 0.021427631378173828,
"learning_rate": 0.000396039603960396,
"loss": 0.557,
"step": 1187
},
{
"epoch": 2.640733537093637,
"grad_norm": 0.014400881715118885,
"learning_rate": 0.0003935643564356436,
"loss": 0.5467,
"step": 1188
},
{
"epoch": 2.642956376771325,
"grad_norm": 0.0270222220569849,
"learning_rate": 0.00039108910891089106,
"loss": 0.5585,
"step": 1189
},
{
"epoch": 2.6451792164490135,
"grad_norm": 0.022014157846570015,
"learning_rate": 0.0003886138613861387,
"loss": 0.433,
"step": 1190
},
{
"epoch": 2.647402056126702,
"grad_norm": 0.015185764990746975,
"learning_rate": 0.00038613861386138616,
"loss": 0.6045,
"step": 1191
},
{
"epoch": 2.64962489580439,
"grad_norm": 0.018142223358154297,
"learning_rate": 0.0003836633663366336,
"loss": 0.5797,
"step": 1192
},
{
"epoch": 2.6518477354820784,
"grad_norm": 0.015990694984793663,
"learning_rate": 0.00038118811881188126,
"loss": 0.6177,
"step": 1193
},
{
"epoch": 2.654070575159767,
"grad_norm": 0.01589980535209179,
"learning_rate": 0.0003787128712871287,
"loss": 0.5231,
"step": 1194
},
{
"epoch": 2.656293414837455,
"grad_norm": 0.020976124331355095,
"learning_rate": 0.0003762376237623762,
"loss": 0.6988,
"step": 1195
},
{
"epoch": 2.658516254515143,
"grad_norm": 0.01627793163061142,
"learning_rate": 0.00037376237623762377,
"loss": 0.6378,
"step": 1196
},
{
"epoch": 2.6607390941928313,
"grad_norm": 0.02804560214281082,
"learning_rate": 0.0003712871287128713,
"loss": 0.62,
"step": 1197
},
{
"epoch": 2.6629619338705197,
"grad_norm": 0.033843785524368286,
"learning_rate": 0.0003688118811881188,
"loss": 0.6269,
"step": 1198
},
{
"epoch": 2.6651847735482077,
"grad_norm": 0.0171663835644722,
"learning_rate": 0.0003663366336633664,
"loss": 0.6803,
"step": 1199
},
{
"epoch": 2.667407613225896,
"grad_norm": 0.01909332536160946,
"learning_rate": 0.00036386138613861386,
"loss": 0.5866,
"step": 1200
},
{
"epoch": 2.6696304529035846,
"grad_norm": 0.014213655143976212,
"learning_rate": 0.0003613861386138614,
"loss": 0.5731,
"step": 1201
},
{
"epoch": 2.6718532925812726,
"grad_norm": 0.024629587307572365,
"learning_rate": 0.00035891089108910896,
"loss": 0.6168,
"step": 1202
},
{
"epoch": 2.6740761322589606,
"grad_norm": 0.018423261120915413,
"learning_rate": 0.0003564356435643564,
"loss": 0.566,
"step": 1203
},
{
"epoch": 2.676298971936649,
"grad_norm": 0.01580965891480446,
"learning_rate": 0.00035396039603960395,
"loss": 0.5403,
"step": 1204
},
{
"epoch": 2.6785218116143374,
"grad_norm": 0.01555953361093998,
"learning_rate": 0.0003514851485148515,
"loss": 0.5308,
"step": 1205
},
{
"epoch": 2.6807446512920254,
"grad_norm": 0.023963620886206627,
"learning_rate": 0.00034900990099009905,
"loss": 0.5788,
"step": 1206
},
{
"epoch": 2.682967490969714,
"grad_norm": 0.015511863864958286,
"learning_rate": 0.0003465346534653465,
"loss": 0.707,
"step": 1207
},
{
"epoch": 2.6851903306474023,
"grad_norm": 0.01718342863023281,
"learning_rate": 0.0003440594059405941,
"loss": 0.5708,
"step": 1208
},
{
"epoch": 2.6874131703250903,
"grad_norm": 0.027808725833892822,
"learning_rate": 0.0003415841584158416,
"loss": 0.7655,
"step": 1209
},
{
"epoch": 2.6896360100027783,
"grad_norm": 0.016733597964048386,
"learning_rate": 0.0003391089108910891,
"loss": 0.5219,
"step": 1210
},
{
"epoch": 2.6918588496804667,
"grad_norm": 0.015038656070828438,
"learning_rate": 0.0003366336633663366,
"loss": 0.6082,
"step": 1211
},
{
"epoch": 2.694081689358155,
"grad_norm": 0.021239133551716805,
"learning_rate": 0.0003341584158415842,
"loss": 0.587,
"step": 1212
},
{
"epoch": 2.696304529035843,
"grad_norm": 0.018834024667739868,
"learning_rate": 0.0003316831683168317,
"loss": 0.5195,
"step": 1213
},
{
"epoch": 2.6985273687135316,
"grad_norm": 0.04993009567260742,
"learning_rate": 0.0003292079207920792,
"loss": 0.6812,
"step": 1214
},
{
"epoch": 2.70075020839122,
"grad_norm": 0.021933620795607567,
"learning_rate": 0.00032673267326732675,
"loss": 0.4748,
"step": 1215
},
{
"epoch": 2.702973048068908,
"grad_norm": 0.014722834341228008,
"learning_rate": 0.0003242574257425743,
"loss": 0.438,
"step": 1216
},
{
"epoch": 2.705195887746596,
"grad_norm": 0.013122837990522385,
"learning_rate": 0.00032178217821782174,
"loss": 0.6231,
"step": 1217
},
{
"epoch": 2.7074187274242845,
"grad_norm": 0.016432546079158783,
"learning_rate": 0.0003193069306930693,
"loss": 0.5908,
"step": 1218
},
{
"epoch": 2.709641567101973,
"grad_norm": 0.016996275633573532,
"learning_rate": 0.00031683168316831684,
"loss": 0.7369,
"step": 1219
},
{
"epoch": 2.711864406779661,
"grad_norm": 0.02186731994152069,
"learning_rate": 0.00031435643564356436,
"loss": 0.6629,
"step": 1220
},
{
"epoch": 2.7140872464573493,
"grad_norm": 0.016827069222927094,
"learning_rate": 0.0003118811881188119,
"loss": 0.5866,
"step": 1221
},
{
"epoch": 2.7163100861350378,
"grad_norm": 0.015328455716371536,
"learning_rate": 0.0003094059405940594,
"loss": 0.6301,
"step": 1222
},
{
"epoch": 2.7185329258127258,
"grad_norm": 0.02612834982573986,
"learning_rate": 0.00030693069306930693,
"loss": 0.4968,
"step": 1223
},
{
"epoch": 2.7207557654904138,
"grad_norm": 0.018258119001984596,
"learning_rate": 0.0003044554455445545,
"loss": 0.5323,
"step": 1224
},
{
"epoch": 2.722978605168102,
"grad_norm": 0.02044299617409706,
"learning_rate": 0.000301980198019802,
"loss": 0.7331,
"step": 1225
},
{
"epoch": 2.7252014448457906,
"grad_norm": 0.015368648804724216,
"learning_rate": 0.0002995049504950495,
"loss": 0.6208,
"step": 1226
},
{
"epoch": 2.7274242845234786,
"grad_norm": 0.018496476113796234,
"learning_rate": 0.0002970297029702971,
"loss": 0.5622,
"step": 1227
},
{
"epoch": 2.729647124201167,
"grad_norm": 0.021723395213484764,
"learning_rate": 0.00029455445544554455,
"loss": 0.6789,
"step": 1228
},
{
"epoch": 2.7318699638788555,
"grad_norm": 0.01531304232776165,
"learning_rate": 0.00029207920792079207,
"loss": 0.6396,
"step": 1229
},
{
"epoch": 2.7340928035565435,
"grad_norm": 0.02245800755918026,
"learning_rate": 0.00028960396039603964,
"loss": 0.6289,
"step": 1230
},
{
"epoch": 2.7363156432342315,
"grad_norm": 0.0517827607691288,
"learning_rate": 0.00028712871287128717,
"loss": 0.7297,
"step": 1231
},
{
"epoch": 2.73853848291192,
"grad_norm": 0.014105240814387798,
"learning_rate": 0.00028465346534653464,
"loss": 0.5948,
"step": 1232
},
{
"epoch": 2.7407613225896084,
"grad_norm": 0.018385961651802063,
"learning_rate": 0.0002821782178217822,
"loss": 0.6336,
"step": 1233
},
{
"epoch": 2.7429841622672964,
"grad_norm": 0.014009015634655952,
"learning_rate": 0.00027970297029702973,
"loss": 0.5458,
"step": 1234
},
{
"epoch": 2.745207001944985,
"grad_norm": 0.014900448732078075,
"learning_rate": 0.0002772277227722772,
"loss": 0.6029,
"step": 1235
},
{
"epoch": 2.7474298416226732,
"grad_norm": 0.01685742661356926,
"learning_rate": 0.0002747524752475247,
"loss": 0.5911,
"step": 1236
},
{
"epoch": 2.7496526813003612,
"grad_norm": 0.01696729101240635,
"learning_rate": 0.0002722772277227723,
"loss": 0.489,
"step": 1237
},
{
"epoch": 2.751875520978049,
"grad_norm": 0.01900607720017433,
"learning_rate": 0.0002698019801980198,
"loss": 0.6149,
"step": 1238
},
{
"epoch": 2.7540983606557377,
"grad_norm": 0.012541387230157852,
"learning_rate": 0.0002673267326732673,
"loss": 0.6309,
"step": 1239
},
{
"epoch": 2.756321200333426,
"grad_norm": 0.018375830724835396,
"learning_rate": 0.00026485148514851487,
"loss": 0.5464,
"step": 1240
},
{
"epoch": 2.758544040011114,
"grad_norm": 0.015132361091673374,
"learning_rate": 0.0002623762376237624,
"loss": 0.6358,
"step": 1241
},
{
"epoch": 2.7607668796888025,
"grad_norm": 0.0193571038544178,
"learning_rate": 0.00025990099009900986,
"loss": 0.5601,
"step": 1242
},
{
"epoch": 2.762989719366491,
"grad_norm": 0.024456117302179337,
"learning_rate": 0.00025742574257425744,
"loss": 0.7201,
"step": 1243
},
{
"epoch": 2.765212559044179,
"grad_norm": 0.014225888065993786,
"learning_rate": 0.00025495049504950496,
"loss": 0.6177,
"step": 1244
},
{
"epoch": 2.767435398721867,
"grad_norm": 0.0177957434207201,
"learning_rate": 0.0002524752475247525,
"loss": 0.6382,
"step": 1245
},
{
"epoch": 2.7696582383995554,
"grad_norm": 0.022806817665696144,
"learning_rate": 0.00025,
"loss": 0.565,
"step": 1246
},
{
"epoch": 2.771881078077244,
"grad_norm": 0.016606708988547325,
"learning_rate": 0.00024752475247524753,
"loss": 0.524,
"step": 1247
},
{
"epoch": 2.774103917754932,
"grad_norm": 0.013806411065161228,
"learning_rate": 0.00024504950495049505,
"loss": 0.4843,
"step": 1248
},
{
"epoch": 2.7763267574326203,
"grad_norm": 0.01461612805724144,
"learning_rate": 0.00024257425742574255,
"loss": 0.5291,
"step": 1249
},
{
"epoch": 2.7785495971103087,
"grad_norm": 0.02402338944375515,
"learning_rate": 0.0002400990099009901,
"loss": 0.5735,
"step": 1250
},
{
"epoch": 2.7807724367879967,
"grad_norm": 0.01479054894298315,
"learning_rate": 0.00023762376237623765,
"loss": 0.574,
"step": 1251
},
{
"epoch": 2.7829952764656847,
"grad_norm": 0.02644219435751438,
"learning_rate": 0.00023514851485148517,
"loss": 0.5969,
"step": 1252
},
{
"epoch": 2.785218116143373,
"grad_norm": 0.05949250981211662,
"learning_rate": 0.00023267326732673266,
"loss": 0.6232,
"step": 1253
},
{
"epoch": 2.7874409558210616,
"grad_norm": 0.015891224145889282,
"learning_rate": 0.00023019801980198019,
"loss": 0.5959,
"step": 1254
},
{
"epoch": 2.7896637954987495,
"grad_norm": 0.012178865261375904,
"learning_rate": 0.00022772277227722774,
"loss": 0.5764,
"step": 1255
},
{
"epoch": 2.791886635176438,
"grad_norm": 0.014039144851267338,
"learning_rate": 0.00022524752475247528,
"loss": 0.6795,
"step": 1256
},
{
"epoch": 2.7941094748541264,
"grad_norm": 0.012444275431334972,
"learning_rate": 0.00022277227722772275,
"loss": 0.538,
"step": 1257
},
{
"epoch": 2.7963323145318144,
"grad_norm": 0.016555974259972572,
"learning_rate": 0.0002202970297029703,
"loss": 0.5226,
"step": 1258
},
{
"epoch": 2.7985551542095024,
"grad_norm": 0.02042076364159584,
"learning_rate": 0.00021782178217821785,
"loss": 0.6182,
"step": 1259
},
{
"epoch": 2.800777993887191,
"grad_norm": 0.018156493082642555,
"learning_rate": 0.00021534653465346532,
"loss": 0.6077,
"step": 1260
},
{
"epoch": 2.8030008335648793,
"grad_norm": 0.015342988073825836,
"learning_rate": 0.00021287128712871287,
"loss": 0.6097,
"step": 1261
},
{
"epoch": 2.8052236732425673,
"grad_norm": 0.017595073208212852,
"learning_rate": 0.00021039603960396042,
"loss": 0.6639,
"step": 1262
},
{
"epoch": 2.8074465129202557,
"grad_norm": 0.015470032580196857,
"learning_rate": 0.00020792079207920794,
"loss": 0.6588,
"step": 1263
},
{
"epoch": 2.809669352597944,
"grad_norm": 0.016241345554590225,
"learning_rate": 0.00020544554455445544,
"loss": 0.5132,
"step": 1264
},
{
"epoch": 2.811892192275632,
"grad_norm": 0.024083595722913742,
"learning_rate": 0.00020297029702970296,
"loss": 0.6011,
"step": 1265
},
{
"epoch": 2.81411503195332,
"grad_norm": 0.013913623988628387,
"learning_rate": 0.0002004950495049505,
"loss": 0.5177,
"step": 1266
},
{
"epoch": 2.8163378716310086,
"grad_norm": 0.020045112818479538,
"learning_rate": 0.000198019801980198,
"loss": 0.6554,
"step": 1267
},
{
"epoch": 2.818560711308697,
"grad_norm": 0.02670402266085148,
"learning_rate": 0.00019554455445544553,
"loss": 0.5696,
"step": 1268
},
{
"epoch": 2.820783550986385,
"grad_norm": 0.015266619622707367,
"learning_rate": 0.00019306930693069308,
"loss": 0.5624,
"step": 1269
},
{
"epoch": 2.8230063906640734,
"grad_norm": 0.019668592140078545,
"learning_rate": 0.00019059405940594063,
"loss": 0.568,
"step": 1270
},
{
"epoch": 2.825229230341762,
"grad_norm": 0.04979453608393669,
"learning_rate": 0.0001881188118811881,
"loss": 0.5098,
"step": 1271
},
{
"epoch": 2.82745207001945,
"grad_norm": 0.028548695147037506,
"learning_rate": 0.00018564356435643565,
"loss": 0.6911,
"step": 1272
},
{
"epoch": 2.829674909697138,
"grad_norm": 0.02356465719640255,
"learning_rate": 0.0001831683168316832,
"loss": 0.5654,
"step": 1273
},
{
"epoch": 2.8318977493748263,
"grad_norm": 0.021715005859732628,
"learning_rate": 0.0001806930693069307,
"loss": 0.4995,
"step": 1274
},
{
"epoch": 2.8341205890525147,
"grad_norm": 0.014024381525814533,
"learning_rate": 0.0001782178217821782,
"loss": 0.6592,
"step": 1275
},
{
"epoch": 2.8363434287302027,
"grad_norm": 0.020154127851128578,
"learning_rate": 0.00017574257425742576,
"loss": 0.6721,
"step": 1276
},
{
"epoch": 2.838566268407891,
"grad_norm": 0.0298357754945755,
"learning_rate": 0.00017326732673267326,
"loss": 0.6803,
"step": 1277
},
{
"epoch": 2.840789108085579,
"grad_norm": 0.026704518124461174,
"learning_rate": 0.0001707920792079208,
"loss": 0.7333,
"step": 1278
},
{
"epoch": 2.8430119477632676,
"grad_norm": 0.017025306820869446,
"learning_rate": 0.0001683168316831683,
"loss": 0.6408,
"step": 1279
},
{
"epoch": 2.8452347874409556,
"grad_norm": 0.014133095741271973,
"learning_rate": 0.00016584158415841585,
"loss": 0.6176,
"step": 1280
},
{
"epoch": 2.847457627118644,
"grad_norm": 0.019554702565073967,
"learning_rate": 0.00016336633663366338,
"loss": 0.4898,
"step": 1281
},
{
"epoch": 2.8496804667963325,
"grad_norm": 0.027809258550405502,
"learning_rate": 0.00016089108910891087,
"loss": 0.5767,
"step": 1282
},
{
"epoch": 2.8519033064740205,
"grad_norm": 0.016597462818026543,
"learning_rate": 0.00015841584158415842,
"loss": 0.4662,
"step": 1283
},
{
"epoch": 2.854126146151709,
"grad_norm": 0.04323901981115341,
"learning_rate": 0.00015594059405940594,
"loss": 0.6596,
"step": 1284
},
{
"epoch": 2.856348985829397,
"grad_norm": 0.020414169877767563,
"learning_rate": 0.00015346534653465347,
"loss": 0.601,
"step": 1285
},
{
"epoch": 2.8585718255070853,
"grad_norm": 0.020976142957806587,
"learning_rate": 0.000150990099009901,
"loss": 0.6277,
"step": 1286
},
{
"epoch": 2.8607946651847733,
"grad_norm": 0.016226671636104584,
"learning_rate": 0.00014851485148514854,
"loss": 0.6328,
"step": 1287
},
{
"epoch": 2.8630175048624618,
"grad_norm": 0.022331437095999718,
"learning_rate": 0.00014603960396039603,
"loss": 0.5317,
"step": 1288
},
{
"epoch": 2.86524034454015,
"grad_norm": 0.028064247220754623,
"learning_rate": 0.00014356435643564358,
"loss": 0.5249,
"step": 1289
},
{
"epoch": 2.867463184217838,
"grad_norm": 0.011397906579077244,
"learning_rate": 0.0001410891089108911,
"loss": 0.5118,
"step": 1290
},
{
"epoch": 2.8696860238955266,
"grad_norm": 0.015670351684093475,
"learning_rate": 0.0001386138613861386,
"loss": 0.5159,
"step": 1291
},
{
"epoch": 2.8719088635732146,
"grad_norm": 0.015906330198049545,
"learning_rate": 0.00013613861386138615,
"loss": 0.5301,
"step": 1292
},
{
"epoch": 2.874131703250903,
"grad_norm": 0.014323754236102104,
"learning_rate": 0.00013366336633663365,
"loss": 0.6323,
"step": 1293
},
{
"epoch": 2.876354542928591,
"grad_norm": 0.017434075474739075,
"learning_rate": 0.0001311881188118812,
"loss": 0.7477,
"step": 1294
},
{
"epoch": 2.8785773826062795,
"grad_norm": 0.013494400307536125,
"learning_rate": 0.00012871287128712872,
"loss": 0.5316,
"step": 1295
},
{
"epoch": 2.880800222283968,
"grad_norm": 0.018436387181282043,
"learning_rate": 0.00012623762376237624,
"loss": 0.6895,
"step": 1296
},
{
"epoch": 2.883023061961656,
"grad_norm": 0.014047752134501934,
"learning_rate": 0.00012376237623762376,
"loss": 0.5798,
"step": 1297
},
{
"epoch": 2.8852459016393444,
"grad_norm": 0.014521469362080097,
"learning_rate": 0.00012128712871287127,
"loss": 0.7352,
"step": 1298
},
{
"epoch": 2.8874687413170324,
"grad_norm": 0.018711743876338005,
"learning_rate": 0.00011881188118811882,
"loss": 0.4314,
"step": 1299
},
{
"epoch": 2.889691580994721,
"grad_norm": 0.017838047817349434,
"learning_rate": 0.00011633663366336633,
"loss": 0.5504,
"step": 1300
},
{
"epoch": 2.891914420672409,
"grad_norm": 0.023302067071199417,
"learning_rate": 0.00011386138613861387,
"loss": 0.5713,
"step": 1301
},
{
"epoch": 2.894137260350097,
"grad_norm": 0.015559905208647251,
"learning_rate": 0.00011138613861386138,
"loss": 0.6113,
"step": 1302
},
{
"epoch": 2.8963601000277857,
"grad_norm": 0.023848090320825577,
"learning_rate": 0.00010891089108910893,
"loss": 0.7445,
"step": 1303
},
{
"epoch": 2.8985829397054736,
"grad_norm": 0.014597102999687195,
"learning_rate": 0.00010643564356435644,
"loss": 0.5805,
"step": 1304
},
{
"epoch": 2.900805779383162,
"grad_norm": 0.01882697455585003,
"learning_rate": 0.00010396039603960397,
"loss": 0.6368,
"step": 1305
},
{
"epoch": 2.90302861906085,
"grad_norm": 0.013598893769085407,
"learning_rate": 0.00010148514851485148,
"loss": 0.5894,
"step": 1306
},
{
"epoch": 2.9052514587385385,
"grad_norm": 0.019652416929602623,
"learning_rate": 9.9009900990099e-05,
"loss": 0.6455,
"step": 1307
},
{
"epoch": 2.9074742984162265,
"grad_norm": 0.014602046459913254,
"learning_rate": 9.653465346534654e-05,
"loss": 0.5992,
"step": 1308
},
{
"epoch": 2.909697138093915,
"grad_norm": 0.012488738633692265,
"learning_rate": 9.405940594059405e-05,
"loss": 0.5696,
"step": 1309
},
{
"epoch": 2.9119199777716034,
"grad_norm": 0.018612246960401535,
"learning_rate": 9.15841584158416e-05,
"loss": 0.5856,
"step": 1310
},
{
"epoch": 2.9141428174492914,
"grad_norm": 0.018312592059373856,
"learning_rate": 8.91089108910891e-05,
"loss": 0.6731,
"step": 1311
},
{
"epoch": 2.91636565712698,
"grad_norm": 0.011301212944090366,
"learning_rate": 8.663366336633663e-05,
"loss": 0.6148,
"step": 1312
},
{
"epoch": 2.918588496804668,
"grad_norm": 0.014970047399401665,
"learning_rate": 8.415841584158415e-05,
"loss": 0.5364,
"step": 1313
},
{
"epoch": 2.9208113364823562,
"grad_norm": 0.017927002161741257,
"learning_rate": 8.168316831683169e-05,
"loss": 0.7138,
"step": 1314
},
{
"epoch": 2.9230341761600442,
"grad_norm": 0.012390673160552979,
"learning_rate": 7.920792079207921e-05,
"loss": 0.5939,
"step": 1315
},
{
"epoch": 2.9252570158377327,
"grad_norm": 0.021033355966210365,
"learning_rate": 7.673267326732673e-05,
"loss": 0.5774,
"step": 1316
},
{
"epoch": 2.927479855515421,
"grad_norm": 0.019287073984742165,
"learning_rate": 7.425742574257427e-05,
"loss": 0.6687,
"step": 1317
},
{
"epoch": 2.929702695193109,
"grad_norm": 0.01410654280334711,
"learning_rate": 7.178217821782179e-05,
"loss": 0.5655,
"step": 1318
},
{
"epoch": 2.9319255348707975,
"grad_norm": 0.015142921358346939,
"learning_rate": 6.93069306930693e-05,
"loss": 0.6458,
"step": 1319
},
{
"epoch": 2.9341483745484855,
"grad_norm": 0.014644642360508442,
"learning_rate": 6.683168316831682e-05,
"loss": 0.5915,
"step": 1320
},
{
"epoch": 2.936371214226174,
"grad_norm": 0.018795736134052277,
"learning_rate": 6.435643564356436e-05,
"loss": 0.5382,
"step": 1321
},
{
"epoch": 2.938594053903862,
"grad_norm": 0.01939059980213642,
"learning_rate": 6.188118811881188e-05,
"loss": 0.5383,
"step": 1322
},
{
"epoch": 2.9408168935815504,
"grad_norm": 0.016617722809314728,
"learning_rate": 5.940594059405941e-05,
"loss": 0.5883,
"step": 1323
},
{
"epoch": 2.943039733259239,
"grad_norm": 0.01931897923350334,
"learning_rate": 5.6930693069306934e-05,
"loss": 0.5775,
"step": 1324
},
{
"epoch": 2.945262572936927,
"grad_norm": 0.018571315333247185,
"learning_rate": 5.445544554455446e-05,
"loss": 0.6164,
"step": 1325
},
{
"epoch": 2.9474854126146153,
"grad_norm": 0.018728012219071388,
"learning_rate": 5.1980198019801986e-05,
"loss": 0.6216,
"step": 1326
},
{
"epoch": 2.9497082522923033,
"grad_norm": 0.01738075353205204,
"learning_rate": 4.95049504950495e-05,
"loss": 0.6433,
"step": 1327
},
{
"epoch": 2.9519310919699917,
"grad_norm": 0.012660011649131775,
"learning_rate": 4.7029702970297024e-05,
"loss": 0.6901,
"step": 1328
},
{
"epoch": 2.9541539316476797,
"grad_norm": 0.01377065945416689,
"learning_rate": 4.455445544554455e-05,
"loss": 0.5237,
"step": 1329
},
{
"epoch": 2.956376771325368,
"grad_norm": 0.030198214575648308,
"learning_rate": 4.2079207920792076e-05,
"loss": 0.6441,
"step": 1330
},
{
"epoch": 2.9585996110030566,
"grad_norm": 0.01597333326935768,
"learning_rate": 3.9603960396039605e-05,
"loss": 0.6057,
"step": 1331
},
{
"epoch": 2.9608224506807446,
"grad_norm": 0.021931879222393036,
"learning_rate": 3.7128712871287135e-05,
"loss": 0.5539,
"step": 1332
},
{
"epoch": 2.963045290358433,
"grad_norm": 0.01602141186594963,
"learning_rate": 3.465346534653465e-05,
"loss": 0.6342,
"step": 1333
},
{
"epoch": 2.965268130036121,
"grad_norm": 0.026012420654296875,
"learning_rate": 3.217821782178218e-05,
"loss": 0.7019,
"step": 1334
},
{
"epoch": 2.9674909697138094,
"grad_norm": 0.016496378928422928,
"learning_rate": 2.9702970297029706e-05,
"loss": 0.586,
"step": 1335
},
{
"epoch": 2.9697138093914974,
"grad_norm": 0.014965620823204517,
"learning_rate": 2.722772277227723e-05,
"loss": 0.5953,
"step": 1336
},
{
"epoch": 2.971936649069186,
"grad_norm": 0.016745861619710922,
"learning_rate": 2.475247524752475e-05,
"loss": 0.5842,
"step": 1337
},
{
"epoch": 2.9741594887468743,
"grad_norm": 0.021782133728265762,
"learning_rate": 2.2277227722772277e-05,
"loss": 0.668,
"step": 1338
},
{
"epoch": 2.9763823284245623,
"grad_norm": 0.017215097323060036,
"learning_rate": 1.9801980198019803e-05,
"loss": 0.6326,
"step": 1339
},
{
"epoch": 2.9786051681022507,
"grad_norm": 0.012050891295075417,
"learning_rate": 1.7326732673267325e-05,
"loss": 0.6311,
"step": 1340
},
{
"epoch": 2.9808280077799387,
"grad_norm": 0.01476576179265976,
"learning_rate": 1.4851485148514853e-05,
"loss": 0.6109,
"step": 1341
},
{
"epoch": 2.983050847457627,
"grad_norm": 0.015637947246432304,
"learning_rate": 1.2376237623762375e-05,
"loss": 0.6767,
"step": 1342
},
{
"epoch": 2.985273687135315,
"grad_norm": 0.01803119294345379,
"learning_rate": 9.900990099009901e-06,
"loss": 0.587,
"step": 1343
},
{
"epoch": 2.9874965268130036,
"grad_norm": 0.021125512197613716,
"learning_rate": 7.425742574257426e-06,
"loss": 0.8802,
"step": 1344
},
{
"epoch": 2.989719366490692,
"grad_norm": 0.024220464751124382,
"learning_rate": 4.950495049504951e-06,
"loss": 0.7215,
"step": 1345
},
{
"epoch": 2.99194220616838,
"grad_norm": 0.022379010915756226,
"learning_rate": 2.4752475247524753e-06,
"loss": 0.6175,
"step": 1346
},
{
"epoch": 2.9941650458460685,
"grad_norm": 0.0142740523442626,
"learning_rate": 0.0,
"loss": 0.5943,
"step": 1347
},
{
"epoch": 2.9941650458460685,
"step": 1347,
"total_flos": 5.0749830055591936e+17,
"train_loss": 0.6960906616253771,
"train_runtime": 3387.1352,
"train_samples_per_second": 6.374,
"train_steps_per_second": 0.398
}
],
"logging_steps": 1.0,
"max_steps": 1347,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.0749830055591936e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}