{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0025706940874035,
"eval_steps": 500,
"global_step": 390,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.002570694087403599,
"grad_norm": 0.08436182141304016,
"learning_rate": 2.6315789473684213e-07,
"loss": 0.9321,
"step": 1
},
{
"epoch": 0.005141388174807198,
"grad_norm": 0.08981559425592422,
"learning_rate": 5.263157894736843e-07,
"loss": 0.9834,
"step": 2
},
{
"epoch": 0.007712082262210797,
"grad_norm": 0.10427704453468323,
"learning_rate": 7.894736842105264e-07,
"loss": 1.0591,
"step": 3
},
{
"epoch": 0.010282776349614395,
"grad_norm": 0.09508734196424484,
"learning_rate": 1.0526315789473685e-06,
"loss": 0.8936,
"step": 4
},
{
"epoch": 0.012853470437017995,
"grad_norm": 0.09615321457386017,
"learning_rate": 1.3157894736842106e-06,
"loss": 1.1475,
"step": 5
},
{
"epoch": 0.015424164524421594,
"grad_norm": 0.08302412927150726,
"learning_rate": 1.5789473684210528e-06,
"loss": 0.9795,
"step": 6
},
{
"epoch": 0.017994858611825194,
"grad_norm": 0.09016848355531693,
"learning_rate": 1.842105263157895e-06,
"loss": 0.8286,
"step": 7
},
{
"epoch": 0.02056555269922879,
"grad_norm": 0.10795813798904419,
"learning_rate": 2.105263157894737e-06,
"loss": 1.0537,
"step": 8
},
{
"epoch": 0.02313624678663239,
"grad_norm": 0.09489724785089493,
"learning_rate": 2.3684210526315793e-06,
"loss": 1.0142,
"step": 9
},
{
"epoch": 0.02570694087403599,
"grad_norm": 0.08715584129095078,
"learning_rate": 2.631578947368421e-06,
"loss": 0.9648,
"step": 10
},
{
"epoch": 0.028277634961439587,
"grad_norm": 0.08880320936441422,
"learning_rate": 2.8947368421052634e-06,
"loss": 0.9702,
"step": 11
},
{
"epoch": 0.030848329048843187,
"grad_norm": 0.13510990142822266,
"learning_rate": 3.1578947368421056e-06,
"loss": 0.9951,
"step": 12
},
{
"epoch": 0.033419023136246784,
"grad_norm": 0.09561849385499954,
"learning_rate": 3.421052631578948e-06,
"loss": 0.9883,
"step": 13
},
{
"epoch": 0.03598971722365039,
"grad_norm": 0.10157331079244614,
"learning_rate": 3.68421052631579e-06,
"loss": 1.0356,
"step": 14
},
{
"epoch": 0.038560411311053984,
"grad_norm": 0.07794145494699478,
"learning_rate": 3.947368421052632e-06,
"loss": 0.957,
"step": 15
},
{
"epoch": 0.04113110539845758,
"grad_norm": 0.07956437766551971,
"learning_rate": 4.210526315789474e-06,
"loss": 1.0615,
"step": 16
},
{
"epoch": 0.043701799485861184,
"grad_norm": 0.07735664397478104,
"learning_rate": 4.473684210526316e-06,
"loss": 0.9443,
"step": 17
},
{
"epoch": 0.04627249357326478,
"grad_norm": 0.07957728952169418,
"learning_rate": 4.736842105263159e-06,
"loss": 1.043,
"step": 18
},
{
"epoch": 0.04884318766066838,
"grad_norm": 0.06874791532754898,
"learning_rate": 5e-06,
"loss": 0.9434,
"step": 19
},
{
"epoch": 0.05141388174807198,
"grad_norm": 0.07967688143253326,
"learning_rate": 5.263157894736842e-06,
"loss": 1.0703,
"step": 20
},
{
"epoch": 0.05398457583547558,
"grad_norm": 0.06457515805959702,
"learning_rate": 5.526315789473684e-06,
"loss": 1.0767,
"step": 21
},
{
"epoch": 0.056555269922879174,
"grad_norm": 0.062354326248168945,
"learning_rate": 5.789473684210527e-06,
"loss": 0.96,
"step": 22
},
{
"epoch": 0.05912596401028278,
"grad_norm": 0.057665012776851654,
"learning_rate": 6.0526315789473685e-06,
"loss": 1.0117,
"step": 23
},
{
"epoch": 0.061696658097686374,
"grad_norm": 0.06438912451267242,
"learning_rate": 6.315789473684211e-06,
"loss": 0.958,
"step": 24
},
{
"epoch": 0.06426735218508997,
"grad_norm": 0.06496331095695496,
"learning_rate": 6.578947368421053e-06,
"loss": 1.0142,
"step": 25
},
{
"epoch": 0.06683804627249357,
"grad_norm": 0.061105743050575256,
"learning_rate": 6.842105263157896e-06,
"loss": 0.9189,
"step": 26
},
{
"epoch": 0.06940874035989718,
"grad_norm": 0.05827053263783455,
"learning_rate": 7.105263157894737e-06,
"loss": 0.9414,
"step": 27
},
{
"epoch": 0.07197943444730077,
"grad_norm": 0.07196812331676483,
"learning_rate": 7.36842105263158e-06,
"loss": 0.9814,
"step": 28
},
{
"epoch": 0.07455012853470437,
"grad_norm": 0.06901415437459946,
"learning_rate": 7.631578947368421e-06,
"loss": 0.9839,
"step": 29
},
{
"epoch": 0.07712082262210797,
"grad_norm": 0.06619318574666977,
"learning_rate": 7.894736842105265e-06,
"loss": 0.9443,
"step": 30
},
{
"epoch": 0.07969151670951156,
"grad_norm": 0.06467492878437042,
"learning_rate": 8.157894736842105e-06,
"loss": 0.9785,
"step": 31
},
{
"epoch": 0.08226221079691516,
"grad_norm": 0.06422286480665207,
"learning_rate": 8.421052631578948e-06,
"loss": 1.043,
"step": 32
},
{
"epoch": 0.08483290488431877,
"grad_norm": 0.05858265608549118,
"learning_rate": 8.68421052631579e-06,
"loss": 0.9551,
"step": 33
},
{
"epoch": 0.08740359897172237,
"grad_norm": 0.05510206148028374,
"learning_rate": 8.947368421052632e-06,
"loss": 0.8755,
"step": 34
},
{
"epoch": 0.08997429305912596,
"grad_norm": 0.060953687876462936,
"learning_rate": 9.210526315789475e-06,
"loss": 0.8745,
"step": 35
},
{
"epoch": 0.09254498714652956,
"grad_norm": 0.06641632318496704,
"learning_rate": 9.473684210526317e-06,
"loss": 0.8706,
"step": 36
},
{
"epoch": 0.09511568123393316,
"grad_norm": 0.058570634573698044,
"learning_rate": 9.736842105263159e-06,
"loss": 0.9771,
"step": 37
},
{
"epoch": 0.09768637532133675,
"grad_norm": 0.06269422173500061,
"learning_rate": 1e-05,
"loss": 0.9067,
"step": 38
},
{
"epoch": 0.10025706940874037,
"grad_norm": 0.11592331528663635,
"learning_rate": 9.998782302800705e-06,
"loss": 0.9648,
"step": 39
},
{
"epoch": 0.10282776349614396,
"grad_norm": 0.05759728327393532,
"learning_rate": 9.997561636412896e-06,
"loss": 0.9253,
"step": 40
},
{
"epoch": 0.10539845758354756,
"grad_norm": 0.05665018409490585,
"learning_rate": 9.996337989963381e-06,
"loss": 0.8413,
"step": 41
},
{
"epoch": 0.10796915167095116,
"grad_norm": 0.06908894330263138,
"learning_rate": 9.995111352525802e-06,
"loss": 0.9404,
"step": 42
},
{
"epoch": 0.11053984575835475,
"grad_norm": 0.05868048593401909,
"learning_rate": 9.993881713120325e-06,
"loss": 0.9863,
"step": 43
},
{
"epoch": 0.11311053984575835,
"grad_norm": 0.054700467735528946,
"learning_rate": 9.992649060713315e-06,
"loss": 0.8315,
"step": 44
},
{
"epoch": 0.11568123393316196,
"grad_norm": 0.05400869995355606,
"learning_rate": 9.991413384216984e-06,
"loss": 0.9678,
"step": 45
},
{
"epoch": 0.11825192802056556,
"grad_norm": 0.097419373691082,
"learning_rate": 9.990174672489083e-06,
"loss": 0.9155,
"step": 46
},
{
"epoch": 0.12082262210796915,
"grad_norm": 0.056131381541490555,
"learning_rate": 9.98893291433256e-06,
"loss": 0.8457,
"step": 47
},
{
"epoch": 0.12339331619537275,
"grad_norm": 0.05909228324890137,
"learning_rate": 9.987688098495213e-06,
"loss": 1.0063,
"step": 48
},
{
"epoch": 0.12596401028277635,
"grad_norm": 0.05639795586466789,
"learning_rate": 9.986440213669361e-06,
"loss": 0.9312,
"step": 49
},
{
"epoch": 0.12853470437017994,
"grad_norm": 0.05743168666958809,
"learning_rate": 9.985189248491498e-06,
"loss": 1.0137,
"step": 50
},
{
"epoch": 0.13110539845758354,
"grad_norm": 0.05487934499979019,
"learning_rate": 9.983935191541947e-06,
"loss": 0.9834,
"step": 51
},
{
"epoch": 0.13367609254498714,
"grad_norm": 0.06367425620555878,
"learning_rate": 9.982678031344515e-06,
"loss": 0.9819,
"step": 52
},
{
"epoch": 0.13624678663239073,
"grad_norm": 0.05434500426054001,
"learning_rate": 9.98141775636614e-06,
"loss": 0.981,
"step": 53
},
{
"epoch": 0.13881748071979436,
"grad_norm": 0.0519779697060585,
"learning_rate": 9.980154355016539e-06,
"loss": 0.853,
"step": 54
},
{
"epoch": 0.14138817480719795,
"grad_norm": 0.053186558187007904,
"learning_rate": 9.978887815647856e-06,
"loss": 1.043,
"step": 55
},
{
"epoch": 0.14395886889460155,
"grad_norm": 0.06038914993405342,
"learning_rate": 9.977618126554297e-06,
"loss": 1.0234,
"step": 56
},
{
"epoch": 0.14652956298200515,
"grad_norm": 0.061486050486564636,
"learning_rate": 9.976345275971782e-06,
"loss": 0.999,
"step": 57
},
{
"epoch": 0.14910025706940874,
"grad_norm": 0.05314328148961067,
"learning_rate": 9.975069252077563e-06,
"loss": 0.8911,
"step": 58
},
{
"epoch": 0.15167095115681234,
"grad_norm": 0.05191374570131302,
"learning_rate": 9.973790042989877e-06,
"loss": 0.9053,
"step": 59
},
{
"epoch": 0.15424164524421594,
"grad_norm": 0.0591641366481781,
"learning_rate": 9.972507636767566e-06,
"loss": 0.8862,
"step": 60
},
{
"epoch": 0.15681233933161953,
"grad_norm": 0.055462323129177094,
"learning_rate": 9.971222021409705e-06,
"loss": 0.8442,
"step": 61
},
{
"epoch": 0.15938303341902313,
"grad_norm": 0.05295965448021889,
"learning_rate": 9.969933184855235e-06,
"loss": 0.8765,
"step": 62
},
{
"epoch": 0.16195372750642673,
"grad_norm": 0.08050302416086197,
"learning_rate": 9.96864111498258e-06,
"loss": 0.8735,
"step": 63
},
{
"epoch": 0.16452442159383032,
"grad_norm": 0.07282618433237076,
"learning_rate": 9.967345799609267e-06,
"loss": 0.9307,
"step": 64
},
{
"epoch": 0.16709511568123395,
"grad_norm": 0.05436393991112709,
"learning_rate": 9.966047226491548e-06,
"loss": 0.8174,
"step": 65
},
{
"epoch": 0.16966580976863754,
"grad_norm": 0.05547136813402176,
"learning_rate": 9.964745383324008e-06,
"loss": 0.8633,
"step": 66
},
{
"epoch": 0.17223650385604114,
"grad_norm": 0.05089852213859558,
"learning_rate": 9.96344025773918e-06,
"loss": 0.8999,
"step": 67
},
{
"epoch": 0.17480719794344474,
"grad_norm": 0.06282460689544678,
"learning_rate": 9.962131837307155e-06,
"loss": 0.8545,
"step": 68
},
{
"epoch": 0.17737789203084833,
"grad_norm": 0.054014649242162704,
"learning_rate": 9.960820109535178e-06,
"loss": 1.0195,
"step": 69
},
{
"epoch": 0.17994858611825193,
"grad_norm": 0.05466762185096741,
"learning_rate": 9.959505061867267e-06,
"loss": 0.9785,
"step": 70
},
{
"epoch": 0.18251928020565553,
"grad_norm": 0.05074343457818031,
"learning_rate": 9.958186681683795e-06,
"loss": 0.853,
"step": 71
},
{
"epoch": 0.18508997429305912,
"grad_norm": 0.055692434310913086,
"learning_rate": 9.9568649563011e-06,
"loss": 0.8608,
"step": 72
},
{
"epoch": 0.18766066838046272,
"grad_norm": 0.053501665592193604,
"learning_rate": 9.955539872971066e-06,
"loss": 1.083,
"step": 73
},
{
"epoch": 0.19023136246786632,
"grad_norm": 0.0508846677839756,
"learning_rate": 9.954211418880725e-06,
"loss": 0.9199,
"step": 74
},
{
"epoch": 0.1928020565552699,
"grad_norm": 0.049147725105285645,
"learning_rate": 9.952879581151834e-06,
"loss": 0.9028,
"step": 75
},
{
"epoch": 0.1953727506426735,
"grad_norm": 0.05541389435529709,
"learning_rate": 9.951544346840466e-06,
"loss": 0.9097,
"step": 76
},
{
"epoch": 0.19794344473007713,
"grad_norm": 0.05445914342999458,
"learning_rate": 9.950205702936587e-06,
"loss": 1.0361,
"step": 77
},
{
"epoch": 0.20051413881748073,
"grad_norm": 0.051683664321899414,
"learning_rate": 9.948863636363638e-06,
"loss": 0.8433,
"step": 78
},
{
"epoch": 0.20308483290488433,
"grad_norm": 0.05170630291104317,
"learning_rate": 9.947518133978098e-06,
"loss": 0.9438,
"step": 79
},
{
"epoch": 0.20565552699228792,
"grad_norm": 0.057460807263851166,
"learning_rate": 9.94616918256907e-06,
"loss": 0.8032,
"step": 80
},
{
"epoch": 0.20822622107969152,
"grad_norm": 0.1279779076576233,
"learning_rate": 9.944816768857836e-06,
"loss": 1.019,
"step": 81
},
{
"epoch": 0.21079691516709512,
"grad_norm": 0.048682596534490585,
"learning_rate": 9.943460879497431e-06,
"loss": 0.9692,
"step": 82
},
{
"epoch": 0.2133676092544987,
"grad_norm": 0.04913777485489845,
"learning_rate": 9.942101501072196e-06,
"loss": 0.8569,
"step": 83
},
{
"epoch": 0.2159383033419023,
"grad_norm": 0.054994840174913406,
"learning_rate": 9.940738620097337e-06,
"loss": 0.876,
"step": 84
},
{
"epoch": 0.2185089974293059,
"grad_norm": 0.05107712373137474,
"learning_rate": 9.939372223018491e-06,
"loss": 0.9136,
"step": 85
},
{
"epoch": 0.2210796915167095,
"grad_norm": 0.05990511178970337,
"learning_rate": 9.938002296211251e-06,
"loss": 0.9683,
"step": 86
},
{
"epoch": 0.2236503856041131,
"grad_norm": 0.05146004632115364,
"learning_rate": 9.936628825980745e-06,
"loss": 0.8701,
"step": 87
},
{
"epoch": 0.2262210796915167,
"grad_norm": 0.051048219203948975,
"learning_rate": 9.93525179856115e-06,
"loss": 0.896,
"step": 88
},
{
"epoch": 0.22879177377892032,
"grad_norm": 0.054575320333242416,
"learning_rate": 9.933871200115258e-06,
"loss": 0.8853,
"step": 89
},
{
"epoch": 0.23136246786632392,
"grad_norm": 0.0543404147028923,
"learning_rate": 9.932487016733988e-06,
"loss": 0.9272,
"step": 90
},
{
"epoch": 0.23393316195372751,
"grad_norm": 0.08317338675260544,
"learning_rate": 9.931099234435939e-06,
"loss": 0.8892,
"step": 91
},
{
"epoch": 0.2365038560411311,
"grad_norm": 0.051799483597278595,
"learning_rate": 9.929707839166908e-06,
"loss": 0.9038,
"step": 92
},
{
"epoch": 0.2390745501285347,
"grad_norm": 0.0485287643969059,
"learning_rate": 9.928312816799421e-06,
"loss": 0.8154,
"step": 93
},
{
"epoch": 0.2416452442159383,
"grad_norm": 0.04724659398198128,
"learning_rate": 9.92691415313225e-06,
"loss": 0.7871,
"step": 94
},
{
"epoch": 0.2442159383033419,
"grad_norm": 0.051631320267915726,
"learning_rate": 9.925511833889939e-06,
"loss": 0.9146,
"step": 95
},
{
"epoch": 0.2467866323907455,
"grad_norm": 0.05385703966021538,
"learning_rate": 9.924105844722303e-06,
"loss": 0.9297,
"step": 96
},
{
"epoch": 0.2493573264781491,
"grad_norm": 0.09320125728845596,
"learning_rate": 9.922696171203961e-06,
"loss": 0.8818,
"step": 97
},
{
"epoch": 0.2519280205655527,
"grad_norm": 0.05302536487579346,
"learning_rate": 9.921282798833818e-06,
"loss": 0.8369,
"step": 98
},
{
"epoch": 0.2544987146529563,
"grad_norm": 0.05064469203352928,
"learning_rate": 9.919865713034594e-06,
"loss": 0.9038,
"step": 99
},
{
"epoch": 0.2570694087403599,
"grad_norm": 0.055192429572343826,
"learning_rate": 9.918444899152296e-06,
"loss": 0.8843,
"step": 100
},
{
"epoch": 0.2596401028277635,
"grad_norm": 0.0517449676990509,
"learning_rate": 9.91702034245573e-06,
"loss": 0.9277,
"step": 101
},
{
"epoch": 0.2622107969151671,
"grad_norm": 0.062362704426050186,
"learning_rate": 9.915592028135992e-06,
"loss": 0.8643,
"step": 102
},
{
"epoch": 0.2647814910025707,
"grad_norm": 0.05347563698887825,
"learning_rate": 9.914159941305944e-06,
"loss": 0.8767,
"step": 103
},
{
"epoch": 0.26735218508997427,
"grad_norm": 0.05177872255444527,
"learning_rate": 9.912724066999706e-06,
"loss": 0.8186,
"step": 104
},
{
"epoch": 0.2699228791773779,
"grad_norm": 0.05178435519337654,
"learning_rate": 9.911284390172136e-06,
"loss": 0.8301,
"step": 105
},
{
"epoch": 0.27249357326478146,
"grad_norm": 0.05058378726243973,
"learning_rate": 9.909840895698292e-06,
"loss": 0.8242,
"step": 106
},
{
"epoch": 0.2750642673521851,
"grad_norm": 0.05525752529501915,
"learning_rate": 9.908393568372917e-06,
"loss": 0.9287,
"step": 107
},
{
"epoch": 0.2776349614395887,
"grad_norm": 0.056954048573970795,
"learning_rate": 9.906942392909897e-06,
"loss": 0.8618,
"step": 108
},
{
"epoch": 0.2802056555269923,
"grad_norm": 0.052674438804388046,
"learning_rate": 9.905487353941726e-06,
"loss": 0.8745,
"step": 109
},
{
"epoch": 0.2827763496143959,
"grad_norm": 0.05273294448852539,
"learning_rate": 9.904028436018958e-06,
"loss": 0.9653,
"step": 110
},
{
"epoch": 0.2853470437017995,
"grad_norm": 0.05354757234454155,
"learning_rate": 9.90256562360967e-06,
"loss": 0.8643,
"step": 111
},
{
"epoch": 0.2879177377892031,
"grad_norm": 0.06856588274240494,
"learning_rate": 9.901098901098903e-06,
"loss": 0.8179,
"step": 112
},
{
"epoch": 0.29048843187660667,
"grad_norm": 0.05660005658864975,
"learning_rate": 9.899628252788105e-06,
"loss": 0.8325,
"step": 113
},
{
"epoch": 0.2930591259640103,
"grad_norm": 0.05592386797070503,
"learning_rate": 9.89815366289458e-06,
"loss": 0.9209,
"step": 114
},
{
"epoch": 0.29562982005141386,
"grad_norm": 0.05370522290468216,
"learning_rate": 9.896675115550918e-06,
"loss": 0.9277,
"step": 115
},
{
"epoch": 0.2982005141388175,
"grad_norm": 0.05046721547842026,
"learning_rate": 9.895192594804421e-06,
"loss": 0.9165,
"step": 116
},
{
"epoch": 0.30077120822622105,
"grad_norm": 0.04982549697160721,
"learning_rate": 9.893706084616536e-06,
"loss": 0.7983,
"step": 117
},
{
"epoch": 0.3033419023136247,
"grad_norm": 0.06228173151612282,
"learning_rate": 9.892215568862277e-06,
"loss": 0.9219,
"step": 118
},
{
"epoch": 0.3059125964010283,
"grad_norm": 0.055755749344825745,
"learning_rate": 9.890721031329637e-06,
"loss": 0.8994,
"step": 119
},
{
"epoch": 0.30848329048843187,
"grad_norm": 0.05125127360224724,
"learning_rate": 9.889222455719005e-06,
"loss": 0.8315,
"step": 120
},
{
"epoch": 0.3110539845758355,
"grad_norm": 0.060036104172468185,
"learning_rate": 9.887719825642569e-06,
"loss": 0.9575,
"step": 121
},
{
"epoch": 0.31362467866323906,
"grad_norm": 0.05109132453799248,
"learning_rate": 9.886213124623721e-06,
"loss": 0.9683,
"step": 122
},
{
"epoch": 0.3161953727506427,
"grad_norm": 0.050311293452978134,
"learning_rate": 9.884702336096457e-06,
"loss": 1.002,
"step": 123
},
{
"epoch": 0.31876606683804626,
"grad_norm": 0.051465220749378204,
"learning_rate": 9.88318744340477e-06,
"loss": 0.959,
"step": 124
},
{
"epoch": 0.3213367609254499,
"grad_norm": 0.055082421749830246,
"learning_rate": 9.881668429802025e-06,
"loss": 0.9424,
"step": 125
},
{
"epoch": 0.32390745501285345,
"grad_norm": 0.05376673862338066,
"learning_rate": 9.880145278450364e-06,
"loss": 0.833,
"step": 126
},
{
"epoch": 0.3264781491002571,
"grad_norm": 0.05093827843666077,
"learning_rate": 9.878617972420063e-06,
"loss": 0.9312,
"step": 127
},
{
"epoch": 0.32904884318766064,
"grad_norm": 0.045088816434144974,
"learning_rate": 9.877086494688923e-06,
"loss": 0.8423,
"step": 128
},
{
"epoch": 0.33161953727506427,
"grad_norm": 0.049116652458906174,
"learning_rate": 9.87555082814162e-06,
"loss": 0.8467,
"step": 129
},
{
"epoch": 0.3341902313624679,
"grad_norm": 0.052838873118162155,
"learning_rate": 9.87401095556908e-06,
"loss": 0.9038,
"step": 130
},
{
"epoch": 0.33676092544987146,
"grad_norm": 0.051667746156454086,
"learning_rate": 9.872466859667836e-06,
"loss": 0.8433,
"step": 131
},
{
"epoch": 0.3393316195372751,
"grad_norm": 0.051519960165023804,
"learning_rate": 9.870918523039367e-06,
"loss": 0.8535,
"step": 132
},
{
"epoch": 0.34190231362467866,
"grad_norm": 0.05033135414123535,
"learning_rate": 9.869365928189458e-06,
"loss": 1.0623,
"step": 133
},
{
"epoch": 0.3444730077120823,
"grad_norm": 0.05721587687730789,
"learning_rate": 9.86780905752754e-06,
"loss": 0.8481,
"step": 134
},
{
"epoch": 0.34704370179948585,
"grad_norm": 0.05643352121114731,
"learning_rate": 9.86624789336602e-06,
"loss": 0.9263,
"step": 135
},
{
"epoch": 0.3496143958868895,
"grad_norm": 0.051524702459573746,
"learning_rate": 9.864682417919608e-06,
"loss": 0.8252,
"step": 136
},
{
"epoch": 0.35218508997429304,
"grad_norm": 0.050419341772794724,
"learning_rate": 9.863112613304656e-06,
"loss": 0.8091,
"step": 137
},
{
"epoch": 0.35475578406169667,
"grad_norm": 0.04712434485554695,
"learning_rate": 9.861538461538462e-06,
"loss": 0.9023,
"step": 138
},
{
"epoch": 0.35732647814910024,
"grad_norm": 0.05018655210733414,
"learning_rate": 9.859959944538593e-06,
"loss": 0.8362,
"step": 139
},
{
"epoch": 0.35989717223650386,
"grad_norm": 0.0635514110326767,
"learning_rate": 9.858377044122186e-06,
"loss": 0.8818,
"step": 140
},
{
"epoch": 0.36246786632390743,
"grad_norm": 0.06259825825691223,
"learning_rate": 9.856789742005253e-06,
"loss": 0.8423,
"step": 141
},
{
"epoch": 0.36503856041131105,
"grad_norm": 0.05616484954953194,
"learning_rate": 9.85519801980198e-06,
"loss": 0.8696,
"step": 142
},
{
"epoch": 0.3676092544987147,
"grad_norm": 0.05006776750087738,
"learning_rate": 9.853601859024013e-06,
"loss": 0.9873,
"step": 143
},
{
"epoch": 0.37017994858611825,
"grad_norm": 0.04944387823343277,
"learning_rate": 9.85200124107974e-06,
"loss": 0.9453,
"step": 144
},
{
"epoch": 0.37275064267352187,
"grad_norm": 0.048190075904130936,
"learning_rate": 9.850396147273575e-06,
"loss": 0.8677,
"step": 145
},
{
"epoch": 0.37532133676092544,
"grad_norm": 0.1091238260269165,
"learning_rate": 9.848786558805228e-06,
"loss": 0.8442,
"step": 146
},
{
"epoch": 0.37789203084832906,
"grad_norm": 0.04897330328822136,
"learning_rate": 9.847172456768967e-06,
"loss": 0.9849,
"step": 147
},
{
"epoch": 0.38046272493573263,
"grad_norm": 0.05173799395561218,
"learning_rate": 9.845553822152887e-06,
"loss": 0.8701,
"step": 148
},
{
"epoch": 0.38303341902313626,
"grad_norm": 0.058186061680316925,
"learning_rate": 9.843930635838151e-06,
"loss": 0.9551,
"step": 149
},
{
"epoch": 0.3856041131105398,
"grad_norm": 0.06420359015464783,
"learning_rate": 9.84230287859825e-06,
"loss": 0.9351,
"step": 150
},
{
"epoch": 0.38817480719794345,
"grad_norm": 0.05216103047132492,
"learning_rate": 9.840670531098232e-06,
"loss": 0.9629,
"step": 151
},
{
"epoch": 0.390745501285347,
"grad_norm": 0.06294068694114685,
"learning_rate": 9.839033573893945e-06,
"loss": 0.8647,
"step": 152
},
{
"epoch": 0.39331619537275064,
"grad_norm": 0.05219503119587898,
"learning_rate": 9.837391987431266e-06,
"loss": 0.8042,
"step": 153
},
{
"epoch": 0.39588688946015427,
"grad_norm": 0.052275657653808594,
"learning_rate": 9.835745752045313e-06,
"loss": 0.9263,
"step": 154
},
{
"epoch": 0.39845758354755784,
"grad_norm": 0.04824310168623924,
"learning_rate": 9.834094847959667e-06,
"loss": 0.9258,
"step": 155
},
{
"epoch": 0.40102827763496146,
"grad_norm": 0.05650007724761963,
"learning_rate": 9.83243925528558e-06,
"loss": 0.957,
"step": 156
},
{
"epoch": 0.40359897172236503,
"grad_norm": 0.09102229028940201,
"learning_rate": 9.830778954021173e-06,
"loss": 0.9434,
"step": 157
},
{
"epoch": 0.40616966580976865,
"grad_norm": 0.05498167499899864,
"learning_rate": 9.829113924050634e-06,
"loss": 0.9219,
"step": 158
},
{
"epoch": 0.4087403598971722,
"grad_norm": 0.053989022970199585,
"learning_rate": 9.8274441451434e-06,
"loss": 0.9297,
"step": 159
},
{
"epoch": 0.41131105398457585,
"grad_norm": 0.055620986968278885,
"learning_rate": 9.82576959695335e-06,
"loss": 0.9141,
"step": 160
},
{
"epoch": 0.4138817480719794,
"grad_norm": 0.05333478003740311,
"learning_rate": 9.824090259017958e-06,
"loss": 0.9053,
"step": 161
},
{
"epoch": 0.41645244215938304,
"grad_norm": 0.048174161463975906,
"learning_rate": 9.82240611075748e-06,
"loss": 0.7944,
"step": 162
},
{
"epoch": 0.4190231362467866,
"grad_norm": 0.05324098467826843,
"learning_rate": 9.820717131474105e-06,
"loss": 0.8779,
"step": 163
},
{
"epoch": 0.42159383033419023,
"grad_norm": 0.05260465666651726,
"learning_rate": 9.819023300351102e-06,
"loss": 1.0474,
"step": 164
},
{
"epoch": 0.4241645244215938,
"grad_norm": 0.04889111965894699,
"learning_rate": 9.817324596451974e-06,
"loss": 0.8452,
"step": 165
},
{
"epoch": 0.4267352185089974,
"grad_norm": 0.052814241498708725,
"learning_rate": 9.81562099871959e-06,
"loss": 0.9253,
"step": 166
},
{
"epoch": 0.42930591259640105,
"grad_norm": 0.05090152099728584,
"learning_rate": 9.813912485975318e-06,
"loss": 0.8994,
"step": 167
},
{
"epoch": 0.4318766066838046,
"grad_norm": 0.05313080549240112,
"learning_rate": 9.812199036918139e-06,
"loss": 0.9429,
"step": 168
},
{
"epoch": 0.43444730077120824,
"grad_norm": 0.059309348464012146,
"learning_rate": 9.810480630123775e-06,
"loss": 0.9697,
"step": 169
},
{
"epoch": 0.4370179948586118,
"grad_norm": 0.05176108330488205,
"learning_rate": 9.808757244043786e-06,
"loss": 0.9204,
"step": 170
},
{
"epoch": 0.43958868894601544,
"grad_norm": 0.05403340235352516,
"learning_rate": 9.807028857004676e-06,
"loss": 0.9077,
"step": 171
},
{
"epoch": 0.442159383033419,
"grad_norm": 0.05564318597316742,
"learning_rate": 9.805295447206974e-06,
"loss": 0.9058,
"step": 172
},
{
"epoch": 0.44473007712082263,
"grad_norm": 0.049466319382190704,
"learning_rate": 9.803556992724334e-06,
"loss": 0.8521,
"step": 173
},
{
"epoch": 0.4473007712082262,
"grad_norm": 0.05796355754137039,
"learning_rate": 9.80181347150259e-06,
"loss": 0.9028,
"step": 174
},
{
"epoch": 0.4498714652956298,
"grad_norm": 0.052372533828020096,
"learning_rate": 9.800064861358847e-06,
"loss": 0.9624,
"step": 175
},
{
"epoch": 0.4524421593830334,
"grad_norm": 0.04954168573021889,
"learning_rate": 9.798311139980513e-06,
"loss": 0.8271,
"step": 176
},
{
"epoch": 0.455012853470437,
"grad_norm": 0.051455263048410416,
"learning_rate": 9.79655228492438e-06,
"loss": 0.814,
"step": 177
},
{
"epoch": 0.45758354755784064,
"grad_norm": 0.0496753565967083,
"learning_rate": 9.794788273615637e-06,
"loss": 0.7856,
"step": 178
},
{
"epoch": 0.4601542416452442,
"grad_norm": 0.05122137442231178,
"learning_rate": 9.793019083346927e-06,
"loss": 0.7886,
"step": 179
},
{
"epoch": 0.46272493573264784,
"grad_norm": 0.05189337953925133,
"learning_rate": 9.79124469127736e-06,
"loss": 0.8931,
"step": 180
},
{
"epoch": 0.4652956298200514,
"grad_norm": 0.052611690014600754,
"learning_rate": 9.78946507443154e-06,
"loss": 0.8657,
"step": 181
},
{
"epoch": 0.46786632390745503,
"grad_norm": 0.05381154641509056,
"learning_rate": 9.787680209698559e-06,
"loss": 0.9116,
"step": 182
},
{
"epoch": 0.4704370179948586,
"grad_norm": 0.05444910377264023,
"learning_rate": 9.78589007383101e-06,
"loss": 0.8716,
"step": 183
},
{
"epoch": 0.4730077120822622,
"grad_norm": 0.05494491383433342,
"learning_rate": 9.78409464344397e-06,
"loss": 0.8936,
"step": 184
},
{
"epoch": 0.4755784061696658,
"grad_norm": 0.052697766572237015,
"learning_rate": 9.78229389501399e-06,
"loss": 0.9004,
"step": 185
},
{
"epoch": 0.4781491002570694,
"grad_norm": 0.05403268709778786,
"learning_rate": 9.780487804878049e-06,
"loss": 0.9561,
"step": 186
},
{
"epoch": 0.480719794344473,
"grad_norm": 0.05628068372607231,
"learning_rate": 9.778676349232548e-06,
"loss": 0.8818,
"step": 187
},
{
"epoch": 0.4832904884318766,
"grad_norm": 0.04996819794178009,
"learning_rate": 9.776859504132232e-06,
"loss": 0.8843,
"step": 188
},
{
"epoch": 0.48586118251928023,
"grad_norm": 0.06892653554677963,
"learning_rate": 9.775037245489159e-06,
"loss": 0.9004,
"step": 189
},
{
"epoch": 0.4884318766066838,
"grad_norm": 0.12632861733436584,
"learning_rate": 9.773209549071618e-06,
"loss": 1.0562,
"step": 190
},
{
"epoch": 0.4910025706940874,
"grad_norm": 0.05255817621946335,
"learning_rate": 9.771376390503072e-06,
"loss": 0.9155,
"step": 191
},
{
"epoch": 0.493573264781491,
"grad_norm": 0.07260231673717499,
"learning_rate": 9.76953774526106e-06,
"loss": 0.8818,
"step": 192
},
{
"epoch": 0.4961439588688946,
"grad_norm": 0.053487952798604965,
"learning_rate": 9.767693588676105e-06,
"loss": 0.938,
"step": 193
},
{
"epoch": 0.4987146529562982,
"grad_norm": 0.05333535373210907,
"learning_rate": 9.76584389593062e-06,
"loss": 0.8901,
"step": 194
},
{
"epoch": 0.5012853470437018,
"grad_norm": 0.052906159311532974,
"learning_rate": 9.763988642057791e-06,
"loss": 0.9014,
"step": 195
},
{
"epoch": 0.5038560411311054,
"grad_norm": 0.06862504035234451,
"learning_rate": 9.762127801940448e-06,
"loss": 0.7451,
"step": 196
},
{
"epoch": 0.506426735218509,
"grad_norm": 0.05256706848740578,
"learning_rate": 9.760261350309935e-06,
"loss": 0.8257,
"step": 197
},
{
"epoch": 0.5089974293059126,
"grad_norm": 0.05243777483701706,
"learning_rate": 9.758389261744968e-06,
"loss": 0.9609,
"step": 198
},
{
"epoch": 0.5115681233933161,
"grad_norm": 0.04881735518574715,
"learning_rate": 9.756511510670476e-06,
"loss": 0.8149,
"step": 199
},
{
"epoch": 0.5141388174807198,
"grad_norm": 0.051057204604148865,
"learning_rate": 9.754628071356445e-06,
"loss": 0.8906,
"step": 200
},
{
"epoch": 0.5167095115681234,
"grad_norm": 0.05244697630405426,
"learning_rate": 9.752738917916738e-06,
"loss": 0.9429,
"step": 201
},
{
"epoch": 0.519280205655527,
"grad_norm": 0.04986201226711273,
"learning_rate": 9.750844024307901e-06,
"loss": 0.8267,
"step": 202
},
{
"epoch": 0.5218508997429306,
"grad_norm": 0.059997860342264175,
"learning_rate": 9.74894336432798e-06,
"loss": 0.9497,
"step": 203
},
{
"epoch": 0.5244215938303342,
"grad_norm": 0.05549291893839836,
"learning_rate": 9.747036911615308e-06,
"loss": 0.9771,
"step": 204
},
{
"epoch": 0.5269922879177378,
"grad_norm": 0.052215415984392166,
"learning_rate": 9.745124639647277e-06,
"loss": 0.8462,
"step": 205
},
{
"epoch": 0.5295629820051414,
"grad_norm": 0.06002132222056389,
"learning_rate": 9.74320652173913e-06,
"loss": 0.8823,
"step": 206
},
{
"epoch": 0.532133676092545,
"grad_norm": 0.055552612990140915,
"learning_rate": 9.741282531042695e-06,
"loss": 0.8638,
"step": 207
},
{
"epoch": 0.5347043701799485,
"grad_norm": 0.058621279895305634,
"learning_rate": 9.739352640545145e-06,
"loss": 0.9531,
"step": 208
},
{
"epoch": 0.5372750642673522,
"grad_norm": 0.05218435078859329,
"learning_rate": 9.737416823067735e-06,
"loss": 0.8994,
"step": 209
},
{
"epoch": 0.5398457583547558,
"grad_norm": 0.05027312785387039,
"learning_rate": 9.735475051264526e-06,
"loss": 0.8926,
"step": 210
},
{
"epoch": 0.5424164524421594,
"grad_norm": 0.05271938070654869,
"learning_rate": 9.733527297621086e-06,
"loss": 0.96,
"step": 211
},
{
"epoch": 0.5449871465295629,
"grad_norm": 0.061470016837120056,
"learning_rate": 9.731573534453205e-06,
"loss": 0.9233,
"step": 212
},
{
"epoch": 0.5475578406169666,
"grad_norm": 0.05070161819458008,
"learning_rate": 9.72961373390558e-06,
"loss": 0.8794,
"step": 213
},
{
"epoch": 0.5501285347043702,
"grad_norm": 0.05650542303919792,
"learning_rate": 9.727647867950482e-06,
"loss": 0.8892,
"step": 214
},
{
"epoch": 0.5526992287917738,
"grad_norm": 0.05301105976104736,
"learning_rate": 9.725675908386431e-06,
"loss": 0.8643,
"step": 215
},
{
"epoch": 0.5552699228791774,
"grad_norm": 0.055119194090366364,
"learning_rate": 9.723697826836842e-06,
"loss": 1.1055,
"step": 216
},
{
"epoch": 0.5578406169665809,
"grad_norm": 0.052684854716062546,
"learning_rate": 9.721713594748662e-06,
"loss": 0.8765,
"step": 217
},
{
"epoch": 0.5604113110539846,
"grad_norm": 0.060191500931978226,
"learning_rate": 9.719723183391004e-06,
"loss": 0.9316,
"step": 218
},
{
"epoch": 0.5629820051413882,
"grad_norm": 0.0570666678249836,
"learning_rate": 9.71772656385375e-06,
"loss": 0.8291,
"step": 219
},
{
"epoch": 0.5655526992287918,
"grad_norm": 0.05079682916402817,
"learning_rate": 9.715723707046166e-06,
"loss": 0.9233,
"step": 220
},
{
"epoch": 0.5681233933161953,
"grad_norm": 0.05589374899864197,
"learning_rate": 9.713714583695464e-06,
"loss": 0.9663,
"step": 221
},
{
"epoch": 0.570694087403599,
"grad_norm": 0.049071066081523895,
"learning_rate": 9.711699164345404e-06,
"loss": 0.9448,
"step": 222
},
{
"epoch": 0.5732647814910026,
"grad_norm": 0.048681292682886124,
"learning_rate": 9.709677419354838e-06,
"loss": 0.7961,
"step": 223
},
{
"epoch": 0.5758354755784062,
"grad_norm": 0.05470262095332146,
"learning_rate": 9.707649318896264e-06,
"loss": 0.9468,
"step": 224
},
{
"epoch": 0.5784061696658098,
"grad_norm": 0.05211283266544342,
"learning_rate": 9.705614832954347e-06,
"loss": 0.8276,
"step": 225
},
{
"epoch": 0.5809768637532133,
"grad_norm": 0.04872957989573479,
"learning_rate": 9.703573931324458e-06,
"loss": 0.7153,
"step": 226
},
{
"epoch": 0.583547557840617,
"grad_norm": 0.05287624150514603,
"learning_rate": 9.70152658361116e-06,
"loss": 0.8726,
"step": 227
},
{
"epoch": 0.5861182519280206,
"grad_norm": 0.05211886763572693,
"learning_rate": 9.699472759226714e-06,
"loss": 0.9302,
"step": 228
},
{
"epoch": 0.5886889460154242,
"grad_norm": 0.05334436520934105,
"learning_rate": 9.697412427389544e-06,
"loss": 1.0332,
"step": 229
},
{
"epoch": 0.5912596401028277,
"grad_norm": 0.053637877106666565,
"learning_rate": 9.695345557122709e-06,
"loss": 0.8999,
"step": 230
},
{
"epoch": 0.5938303341902313,
"grad_norm": 0.04681619256734848,
"learning_rate": 9.69327211725234e-06,
"loss": 0.8076,
"step": 231
},
{
"epoch": 0.596401028277635,
"grad_norm": 0.052718836814165115,
"learning_rate": 9.691192076406085e-06,
"loss": 0.8906,
"step": 232
},
{
"epoch": 0.5989717223650386,
"grad_norm": 0.05302907153964043,
"learning_rate": 9.689105403011516e-06,
"loss": 0.937,
"step": 233
},
{
"epoch": 0.6015424164524421,
"grad_norm": 0.05149109661579132,
"learning_rate": 9.687012065294536e-06,
"loss": 0.8506,
"step": 234
},
{
"epoch": 0.6041131105398457,
"grad_norm": 0.05238654837012291,
"learning_rate": 9.68491203127777e-06,
"loss": 0.8628,
"step": 235
},
{
"epoch": 0.6066838046272494,
"grad_norm": 0.06081896275281906,
"learning_rate": 9.682805268778926e-06,
"loss": 0.9048,
"step": 236
},
{
"epoch": 0.609254498714653,
"grad_norm": 0.05550535395741463,
"learning_rate": 9.680691745409164e-06,
"loss": 0.9307,
"step": 237
},
{
"epoch": 0.6118251928020566,
"grad_norm": 0.05248388275504112,
"learning_rate": 9.678571428571429e-06,
"loss": 0.9956,
"step": 238
},
{
"epoch": 0.6143958868894601,
"grad_norm": 0.05890145152807236,
"learning_rate": 9.676444285458773e-06,
"loss": 0.832,
"step": 239
},
{
"epoch": 0.6169665809768637,
"grad_norm": 0.051260773092508316,
"learning_rate": 9.67431028305267e-06,
"loss": 0.8467,
"step": 240
},
{
"epoch": 0.6195372750642674,
"grad_norm": 0.05052540451288223,
"learning_rate": 9.6721693881213e-06,
"loss": 0.8301,
"step": 241
},
{
"epoch": 0.622107969151671,
"grad_norm": 0.0522305965423584,
"learning_rate": 9.67002156721783e-06,
"loss": 0.8354,
"step": 242
},
{
"epoch": 0.6246786632390745,
"grad_norm": 0.0485188327729702,
"learning_rate": 9.66786678667867e-06,
"loss": 0.8994,
"step": 243
},
{
"epoch": 0.6272493573264781,
"grad_norm": 0.0574263334274292,
"learning_rate": 9.66570501262171e-06,
"loss": 1.042,
"step": 244
},
{
"epoch": 0.6298200514138818,
"grad_norm": 0.05562338978052139,
"learning_rate": 9.663536210944555e-06,
"loss": 0.9741,
"step": 245
},
{
"epoch": 0.6323907455012854,
"grad_norm": 0.05564079061150551,
"learning_rate": 9.661360347322722e-06,
"loss": 0.9346,
"step": 246
},
{
"epoch": 0.6349614395886889,
"grad_norm": 0.053419217467308044,
"learning_rate": 9.659177387207828e-06,
"loss": 0.9951,
"step": 247
},
{
"epoch": 0.6375321336760925,
"grad_norm": 0.057053521275520325,
"learning_rate": 9.656987295825771e-06,
"loss": 0.9004,
"step": 248
},
{
"epoch": 0.6401028277634961,
"grad_norm": 0.05263104662299156,
"learning_rate": 9.654790038174878e-06,
"loss": 0.8872,
"step": 249
},
{
"epoch": 0.6426735218508998,
"grad_norm": 0.049765683710575104,
"learning_rate": 9.652585579024035e-06,
"loss": 0.8008,
"step": 250
},
{
"epoch": 0.6452442159383034,
"grad_norm": 0.05088181048631668,
"learning_rate": 9.650373882910816e-06,
"loss": 0.939,
"step": 251
},
{
"epoch": 0.6478149100257069,
"grad_norm": 0.05318666622042656,
"learning_rate": 9.648154914139569e-06,
"loss": 0.9712,
"step": 252
},
{
"epoch": 0.6503856041131105,
"grad_norm": 0.055674273520708084,
"learning_rate": 9.645928636779507e-06,
"loss": 0.9609,
"step": 253
},
{
"epoch": 0.6529562982005142,
"grad_norm": 0.05109839141368866,
"learning_rate": 9.643695014662755e-06,
"loss": 0.8394,
"step": 254
},
{
"epoch": 0.6555269922879178,
"grad_norm": 0.051385875791311264,
"learning_rate": 9.641454011382413e-06,
"loss": 0.853,
"step": 255
},
{
"epoch": 0.6580976863753213,
"grad_norm": 0.05000017583370209,
"learning_rate": 9.639205590290549e-06,
"loss": 0.8999,
"step": 256
},
{
"epoch": 0.6606683804627249,
"grad_norm": 0.0533922053873539,
"learning_rate": 9.636949714496225e-06,
"loss": 0.9468,
"step": 257
},
{
"epoch": 0.6632390745501285,
"grad_norm": 0.057750582695007324,
"learning_rate": 9.63468634686347e-06,
"loss": 0.8018,
"step": 258
},
{
"epoch": 0.6658097686375322,
"grad_norm": 0.05343909561634064,
"learning_rate": 9.632415450009242e-06,
"loss": 0.9253,
"step": 259
},
{
"epoch": 0.6683804627249358,
"grad_norm": 0.15355075895786285,
"learning_rate": 9.63013698630137e-06,
"loss": 0.8589,
"step": 260
},
{
"epoch": 0.6709511568123393,
"grad_norm": 0.05503278225660324,
"learning_rate": 9.627850917856482e-06,
"loss": 0.9248,
"step": 261
},
{
"epoch": 0.6735218508997429,
"grad_norm": 0.055749524384737015,
"learning_rate": 9.62555720653789e-06,
"loss": 1.0229,
"step": 262
},
{
"epoch": 0.6760925449871465,
"grad_norm": 0.05361119285225868,
"learning_rate": 9.62325581395349e-06,
"loss": 0.876,
"step": 263
},
{
"epoch": 0.6786632390745502,
"grad_norm": 0.0522209070622921,
"learning_rate": 9.620946701453597e-06,
"loss": 0.7866,
"step": 264
},
{
"epoch": 0.6812339331619537,
"grad_norm": 0.05726565048098564,
"learning_rate": 9.618629830128805e-06,
"loss": 0.9082,
"step": 265
},
{
"epoch": 0.6838046272493573,
"grad_norm": 0.056867945939302444,
"learning_rate": 9.616305160807779e-06,
"loss": 1.0034,
"step": 266
},
{
"epoch": 0.6863753213367609,
"grad_norm": 0.0533023402094841,
"learning_rate": 9.613972654055067e-06,
"loss": 0.8677,
"step": 267
},
{
"epoch": 0.6889460154241646,
"grad_norm": 0.04977701976895332,
"learning_rate": 9.611632270168855e-06,
"loss": 0.8765,
"step": 268
},
{
"epoch": 0.6915167095115681,
"grad_norm": 0.0571221187710762,
"learning_rate": 9.609283969178727e-06,
"loss": 0.8521,
"step": 269
},
{
"epoch": 0.6940874035989717,
"grad_norm": 0.0479142926633358,
"learning_rate": 9.606927710843375e-06,
"loss": 1.0391,
"step": 270
},
{
"epoch": 0.6966580976863753,
"grad_norm": 0.049262482672929764,
"learning_rate": 9.604563454648314e-06,
"loss": 0.8076,
"step": 271
},
{
"epoch": 0.699228791773779,
"grad_norm": 0.048339374363422394,
"learning_rate": 9.602191159803553e-06,
"loss": 0.9209,
"step": 272
},
{
"epoch": 0.7017994858611826,
"grad_norm": 0.05326506868004799,
"learning_rate": 9.59981078524125e-06,
"loss": 0.8999,
"step": 273
},
{
"epoch": 0.7043701799485861,
"grad_norm": 0.05416352301836014,
"learning_rate": 9.597422289613344e-06,
"loss": 0.8525,
"step": 274
},
{
"epoch": 0.7069408740359897,
"grad_norm": 0.054313864558935165,
"learning_rate": 9.595025631289162e-06,
"loss": 0.8623,
"step": 275
},
{
"epoch": 0.7095115681233933,
"grad_norm": 0.05430547520518303,
"learning_rate": 9.592620768352987e-06,
"loss": 0.9419,
"step": 276
},
{
"epoch": 0.712082262210797,
"grad_norm": 0.05323995649814606,
"learning_rate": 9.59020765860164e-06,
"loss": 0.7759,
"step": 277
},
{
"epoch": 0.7146529562982005,
"grad_norm": 0.05427195131778717,
"learning_rate": 9.587786259541986e-06,
"loss": 0.7837,
"step": 278
},
{
"epoch": 0.7172236503856041,
"grad_norm": 0.05332670360803604,
"learning_rate": 9.585356528388456e-06,
"loss": 0.7959,
"step": 279
},
{
"epoch": 0.7197943444730077,
"grad_norm": 0.05898366868495941,
"learning_rate": 9.582918422060514e-06,
"loss": 0.9204,
"step": 280
},
{
"epoch": 0.7223650385604113,
"grad_norm": 0.05881262570619583,
"learning_rate": 9.580471897180128e-06,
"loss": 0.8223,
"step": 281
},
{
"epoch": 0.7249357326478149,
"grad_norm": 0.05124802514910698,
"learning_rate": 9.578016910069179e-06,
"loss": 0.9062,
"step": 282
},
{
"epoch": 0.7275064267352185,
"grad_norm": 0.052421871572732925,
"learning_rate": 9.575553416746873e-06,
"loss": 0.8809,
"step": 283
},
{
"epoch": 0.7300771208226221,
"grad_norm": 0.05334588140249252,
"learning_rate": 9.573081372927112e-06,
"loss": 0.915,
"step": 284
},
{
"epoch": 0.7326478149100257,
"grad_norm": 0.05507427453994751,
"learning_rate": 9.57060073401584e-06,
"loss": 0.8315,
"step": 285
},
{
"epoch": 0.7352185089974294,
"grad_norm": 0.060719288885593414,
"learning_rate": 9.56811145510836e-06,
"loss": 0.812,
"step": 286
},
{
"epoch": 0.7377892030848329,
"grad_norm": 0.05697711929678917,
"learning_rate": 9.565613490986625e-06,
"loss": 0.9199,
"step": 287
},
{
"epoch": 0.7403598971722365,
"grad_norm": 0.05150134861469269,
"learning_rate": 9.563106796116506e-06,
"loss": 0.8496,
"step": 288
},
{
"epoch": 0.7429305912596401,
"grad_norm": 0.05363132804632187,
"learning_rate": 9.56059132464501e-06,
"loss": 0.7959,
"step": 289
},
{
"epoch": 0.7455012853470437,
"grad_norm": 0.05483046919107437,
"learning_rate": 9.558067030397506e-06,
"loss": 0.8789,
"step": 290
},
{
"epoch": 0.7480719794344473,
"grad_norm": 0.061016347259283066,
"learning_rate": 9.555533866874878e-06,
"loss": 1.0029,
"step": 291
},
{
"epoch": 0.7506426735218509,
"grad_norm": 0.058808423578739166,
"learning_rate": 9.552991787250685e-06,
"loss": 0.978,
"step": 292
},
{
"epoch": 0.7532133676092545,
"grad_norm": 0.05964820086956024,
"learning_rate": 9.550440744368267e-06,
"loss": 0.9038,
"step": 293
},
{
"epoch": 0.7557840616966581,
"grad_norm": 0.053899601101875305,
"learning_rate": 9.547880690737834e-06,
"loss": 0.8784,
"step": 294
},
{
"epoch": 0.7583547557840618,
"grad_norm": 0.05554712936282158,
"learning_rate": 9.545311578533517e-06,
"loss": 0.895,
"step": 295
},
{
"epoch": 0.7609254498714653,
"grad_norm": 0.05656404420733452,
"learning_rate": 9.54273335959039e-06,
"loss": 0.9814,
"step": 296
},
{
"epoch": 0.7634961439588689,
"grad_norm": 0.061445970088243484,
"learning_rate": 9.54014598540146e-06,
"loss": 0.8188,
"step": 297
},
{
"epoch": 0.7660668380462725,
"grad_norm": 0.05222717672586441,
"learning_rate": 9.537549407114626e-06,
"loss": 0.8276,
"step": 298
},
{
"epoch": 0.7686375321336761,
"grad_norm": 0.05029478669166565,
"learning_rate": 9.534943575529599e-06,
"loss": 0.8457,
"step": 299
},
{
"epoch": 0.7712082262210797,
"grad_norm": 0.055182084441185,
"learning_rate": 9.532328441094805e-06,
"loss": 1.0103,
"step": 300
},
{
"epoch": 0.7737789203084833,
"grad_norm": 0.051256079226732254,
"learning_rate": 9.529703953904232e-06,
"loss": 0.9023,
"step": 301
},
{
"epoch": 0.7763496143958869,
"grad_norm": 0.05435854196548462,
"learning_rate": 9.527070063694269e-06,
"loss": 0.895,
"step": 302
},
{
"epoch": 0.7789203084832905,
"grad_norm": 0.05296122655272484,
"learning_rate": 9.524426719840478e-06,
"loss": 0.9106,
"step": 303
},
{
"epoch": 0.781491002570694,
"grad_norm": 0.05457408353686333,
"learning_rate": 9.521773871354376e-06,
"loss": 0.9546,
"step": 304
},
{
"epoch": 0.7840616966580977,
"grad_norm": 0.0539555624127388,
"learning_rate": 9.519111466880127e-06,
"loss": 0.8506,
"step": 305
},
{
"epoch": 0.7866323907455013,
"grad_norm": 0.05563870817422867,
"learning_rate": 9.51643945469126e-06,
"loss": 0.9604,
"step": 306
},
{
"epoch": 0.7892030848329049,
"grad_norm": 0.05861511453986168,
"learning_rate": 9.513757782687287e-06,
"loss": 0.7812,
"step": 307
},
{
"epoch": 0.7917737789203085,
"grad_norm": 0.05230727419257164,
"learning_rate": 9.511066398390344e-06,
"loss": 0.8594,
"step": 308
},
{
"epoch": 0.794344473007712,
"grad_norm": 0.05595692992210388,
"learning_rate": 9.508365248941746e-06,
"loss": 0.9385,
"step": 309
},
{
"epoch": 0.7969151670951157,
"grad_norm": 0.05891738831996918,
"learning_rate": 9.505654281098548e-06,
"loss": 0.8284,
"step": 310
},
{
"epoch": 0.7994858611825193,
"grad_norm": 0.05573743209242821,
"learning_rate": 9.502933441230022e-06,
"loss": 0.9092,
"step": 311
},
{
"epoch": 0.8020565552699229,
"grad_norm": 0.05030527338385582,
"learning_rate": 9.500202675314149e-06,
"loss": 0.8823,
"step": 312
},
{
"epoch": 0.8046272493573264,
"grad_norm": 0.08709961920976639,
"learning_rate": 9.497461928934011e-06,
"loss": 0.8071,
"step": 313
},
{
"epoch": 0.8071979434447301,
"grad_norm": 0.10105069726705551,
"learning_rate": 9.494711147274209e-06,
"loss": 0.8877,
"step": 314
},
{
"epoch": 0.8097686375321337,
"grad_norm": 0.054532554000616074,
"learning_rate": 9.49195027511718e-06,
"loss": 0.8984,
"step": 315
},
{
"epoch": 0.8123393316195373,
"grad_norm": 0.05196183919906616,
"learning_rate": 9.489179256839527e-06,
"loss": 0.9268,
"step": 316
},
{
"epoch": 0.8149100257069408,
"grad_norm": 0.054404161870479584,
"learning_rate": 9.486398036408264e-06,
"loss": 0.9834,
"step": 317
},
{
"epoch": 0.8174807197943444,
"grad_norm": 0.051278892904520035,
"learning_rate": 9.483606557377051e-06,
"loss": 0.8032,
"step": 318
},
{
"epoch": 0.8200514138817481,
"grad_norm": 0.05271079018712044,
"learning_rate": 9.480804762882367e-06,
"loss": 0.8359,
"step": 319
},
{
"epoch": 0.8226221079691517,
"grad_norm": 0.05727388709783554,
"learning_rate": 9.477992595639656e-06,
"loss": 0.8398,
"step": 320
},
{
"epoch": 0.8251928020565553,
"grad_norm": 0.056882213801145554,
"learning_rate": 9.47516999793942e-06,
"loss": 0.8169,
"step": 321
},
{
"epoch": 0.8277634961439588,
"grad_norm": 0.05790659040212631,
"learning_rate": 9.472336911643271e-06,
"loss": 0.874,
"step": 322
},
{
"epoch": 0.8303341902313625,
"grad_norm": 0.052441854029893875,
"learning_rate": 9.469493278179939e-06,
"loss": 0.8394,
"step": 323
},
{
"epoch": 0.8329048843187661,
"grad_norm": 0.05015621706843376,
"learning_rate": 9.466639038541236e-06,
"loss": 0.8447,
"step": 324
},
{
"epoch": 0.8354755784061697,
"grad_norm": 0.06067253649234772,
"learning_rate": 9.463774133277976e-06,
"loss": 0.8623,
"step": 325
},
{
"epoch": 0.8380462724935732,
"grad_norm": 0.05550958961248398,
"learning_rate": 9.46089850249584e-06,
"loss": 0.9023,
"step": 326
},
{
"epoch": 0.8406169665809768,
"grad_norm": 0.05439194291830063,
"learning_rate": 9.45801208585122e-06,
"loss": 0.8516,
"step": 327
},
{
"epoch": 0.8431876606683805,
"grad_norm": 0.051754411309957504,
"learning_rate": 9.455114822546972e-06,
"loss": 0.8628,
"step": 328
},
{
"epoch": 0.8457583547557841,
"grad_norm": 0.05375438183546066,
"learning_rate": 9.452206651328176e-06,
"loss": 0.8701,
"step": 329
},
{
"epoch": 0.8483290488431876,
"grad_norm": 0.05142388865351677,
"learning_rate": 9.449287510477788e-06,
"loss": 0.9287,
"step": 330
},
{
"epoch": 0.8508997429305912,
"grad_norm": 0.054648611694574356,
"learning_rate": 9.446357337812303e-06,
"loss": 0.8638,
"step": 331
},
{
"epoch": 0.8534704370179949,
"grad_norm": 0.053628887981176376,
"learning_rate": 9.443416070677324e-06,
"loss": 0.8325,
"step": 332
},
{
"epoch": 0.8560411311053985,
"grad_norm": 0.05632541701197624,
"learning_rate": 9.440463645943098e-06,
"loss": 0.8271,
"step": 333
},
{
"epoch": 0.8586118251928021,
"grad_norm": 0.05134792625904083,
"learning_rate": 9.4375e-06,
"loss": 0.8179,
"step": 334
},
{
"epoch": 0.8611825192802056,
"grad_norm": 0.06385616213083267,
"learning_rate": 9.434525068753969e-06,
"loss": 0.7827,
"step": 335
},
{
"epoch": 0.8637532133676092,
"grad_norm": 0.054239194840192795,
"learning_rate": 9.431538787621873e-06,
"loss": 0.9009,
"step": 336
},
{
"epoch": 0.8663239074550129,
"grad_norm": 0.05689682811498642,
"learning_rate": 9.428541091526865e-06,
"loss": 0.8677,
"step": 337
},
{
"epoch": 0.8688946015424165,
"grad_norm": 0.05371352657675743,
"learning_rate": 9.425531914893617e-06,
"loss": 0.9834,
"step": 338
},
{
"epoch": 0.87146529562982,
"grad_norm": 0.054534927010536194,
"learning_rate": 9.422511191643574e-06,
"loss": 0.8618,
"step": 339
},
{
"epoch": 0.8740359897172236,
"grad_norm": 0.0579465813934803,
"learning_rate": 9.41947885519009e-06,
"loss": 0.8364,
"step": 340
},
{
"epoch": 0.8766066838046273,
"grad_norm": 0.04870595410466194,
"learning_rate": 9.416434838433555e-06,
"loss": 0.8594,
"step": 341
},
{
"epoch": 0.8791773778920309,
"grad_norm": 0.057385556399822235,
"learning_rate": 9.413379073756432e-06,
"loss": 0.8442,
"step": 342
},
{
"epoch": 0.8817480719794345,
"grad_norm": 0.07567571103572845,
"learning_rate": 9.410311493018262e-06,
"loss": 0.9521,
"step": 343
},
{
"epoch": 0.884318766066838,
"grad_norm": 0.0651511698961258,
"learning_rate": 9.407232027550581e-06,
"loss": 0.9077,
"step": 344
},
{
"epoch": 0.8868894601542416,
"grad_norm": 0.05570966750383377,
"learning_rate": 9.404140608151824e-06,
"loss": 0.9414,
"step": 345
},
{
"epoch": 0.8894601542416453,
"grad_norm": 0.05844790115952492,
"learning_rate": 9.40103716508211e-06,
"loss": 0.8198,
"step": 346
},
{
"epoch": 0.8920308483290489,
"grad_norm": 0.05628007650375366,
"learning_rate": 9.397921628058021e-06,
"loss": 0.9131,
"step": 347
},
{
"epoch": 0.8946015424164524,
"grad_norm": 0.05682618170976639,
"learning_rate": 9.394793926247288e-06,
"loss": 0.9604,
"step": 348
},
{
"epoch": 0.897172236503856,
"grad_norm": 0.05488917976617813,
"learning_rate": 9.391653988263423e-06,
"loss": 0.9189,
"step": 349
},
{
"epoch": 0.8997429305912596,
"grad_norm": 0.06217147782444954,
"learning_rate": 9.38850174216028e-06,
"loss": 0.782,
"step": 350
},
{
"epoch": 0.9023136246786633,
"grad_norm": 0.04932795837521553,
"learning_rate": 9.385337115426577e-06,
"loss": 0.7764,
"step": 351
},
{
"epoch": 0.9048843187660668,
"grad_norm": 0.056303396821022034,
"learning_rate": 9.382160034980325e-06,
"loss": 0.8872,
"step": 352
},
{
"epoch": 0.9074550128534704,
"grad_norm": 0.05455457419157028,
"learning_rate": 9.3789704271632e-06,
"loss": 0.7891,
"step": 353
},
{
"epoch": 0.910025706940874,
"grad_norm": 0.05396741256117821,
"learning_rate": 9.375768217734856e-06,
"loss": 0.9321,
"step": 354
},
{
"epoch": 0.9125964010282777,
"grad_norm": 0.0583309531211853,
"learning_rate": 9.372553331867167e-06,
"loss": 0.8823,
"step": 355
},
{
"epoch": 0.9151670951156813,
"grad_norm": 0.057046517729759216,
"learning_rate": 9.369325694138386e-06,
"loss": 0.9805,
"step": 356
},
{
"epoch": 0.9177377892030848,
"grad_norm": 0.05479217320680618,
"learning_rate": 9.36608522852727e-06,
"loss": 0.8901,
"step": 357
},
{
"epoch": 0.9203084832904884,
"grad_norm": 0.0561579167842865,
"learning_rate": 9.36283185840708e-06,
"loss": 0.8428,
"step": 358
},
{
"epoch": 0.922879177377892,
"grad_norm": 0.05958212539553642,
"learning_rate": 9.35956550653957e-06,
"loss": 1.0117,
"step": 359
},
{
"epoch": 0.9254498714652957,
"grad_norm": 0.05765485391020775,
"learning_rate": 9.356286095068858e-06,
"loss": 0.8096,
"step": 360
},
{
"epoch": 0.9280205655526992,
"grad_norm": 0.05282951146364212,
"learning_rate": 9.352993545515247e-06,
"loss": 0.813,
"step": 361
},
{
"epoch": 0.9305912596401028,
"grad_norm": 0.05436195805668831,
"learning_rate": 9.349687778768957e-06,
"loss": 0.835,
"step": 362
},
{
"epoch": 0.9331619537275064,
"grad_norm": 0.05730157718062401,
"learning_rate": 9.346368715083801e-06,
"loss": 0.7751,
"step": 363
},
{
"epoch": 0.9357326478149101,
"grad_norm": 0.059381358325481415,
"learning_rate": 9.343036274070756e-06,
"loss": 0.8716,
"step": 364
},
{
"epoch": 0.9383033419023136,
"grad_norm": 0.057895347476005554,
"learning_rate": 9.339690374691496e-06,
"loss": 0.8506,
"step": 365
},
{
"epoch": 0.9408740359897172,
"grad_norm": 0.052358921617269516,
"learning_rate": 9.336330935251799e-06,
"loss": 0.9102,
"step": 366
},
{
"epoch": 0.9434447300771208,
"grad_norm": 0.060520391911268234,
"learning_rate": 9.332957873394909e-06,
"loss": 0.9258,
"step": 367
},
{
"epoch": 0.9460154241645244,
"grad_norm": 0.055070772767066956,
"learning_rate": 9.329571106094809e-06,
"loss": 0.9722,
"step": 368
},
{
"epoch": 0.9485861182519281,
"grad_norm": 0.053908202797174454,
"learning_rate": 9.3261705496494e-06,
"loss": 0.8896,
"step": 369
},
{
"epoch": 0.9511568123393316,
"grad_norm": 0.057451847940683365,
"learning_rate": 9.322756119673619e-06,
"loss": 0.8467,
"step": 370
},
{
"epoch": 0.9537275064267352,
"grad_norm": 0.05375353991985321,
"learning_rate": 9.319327731092437e-06,
"loss": 0.8037,
"step": 371
},
{
"epoch": 0.9562982005141388,
"grad_norm": 0.05543424189090729,
"learning_rate": 9.31588529813382e-06,
"loss": 0.7646,
"step": 372
},
{
"epoch": 0.9588688946015425,
"grad_norm": 0.05233384296298027,
"learning_rate": 9.31242873432155e-06,
"loss": 0.8687,
"step": 373
},
{
"epoch": 0.961439588688946,
"grad_norm": 0.050029926002025604,
"learning_rate": 9.308957952468008e-06,
"loss": 0.8315,
"step": 374
},
{
"epoch": 0.9640102827763496,
"grad_norm": 0.07100562006235123,
"learning_rate": 9.30547286466682e-06,
"loss": 0.8276,
"step": 375
},
{
"epoch": 0.9665809768637532,
"grad_norm": 0.05098907649517059,
"learning_rate": 9.301973382285454e-06,
"loss": 0.9155,
"step": 376
},
{
"epoch": 0.9691516709511568,
"grad_norm": 0.04964357987046242,
"learning_rate": 9.298459415957691e-06,
"loss": 0.7583,
"step": 377
},
{
"epoch": 0.9717223650385605,
"grad_norm": 0.05745205283164978,
"learning_rate": 9.294930875576038e-06,
"loss": 0.9585,
"step": 378
},
{
"epoch": 0.974293059125964,
"grad_norm": 0.07048569619655609,
"learning_rate": 9.291387670284e-06,
"loss": 0.8726,
"step": 379
},
{
"epoch": 0.9768637532133676,
"grad_norm": 0.06114361807703972,
"learning_rate": 9.287829708468302e-06,
"loss": 0.8765,
"step": 380
},
{
"epoch": 0.9794344473007712,
"grad_norm": 0.052654024213552475,
"learning_rate": 9.284256897750985e-06,
"loss": 0.8281,
"step": 381
},
{
"epoch": 0.9820051413881749,
"grad_norm": 0.05415409430861473,
"learning_rate": 9.280669144981413e-06,
"loss": 0.8589,
"step": 382
},
{
"epoch": 0.9845758354755784,
"grad_norm": 0.05175313353538513,
"learning_rate": 9.277066356228173e-06,
"loss": 0.9443,
"step": 383
},
{
"epoch": 0.987146529562982,
"grad_norm": 0.05429228022694588,
"learning_rate": 9.273448436770884e-06,
"loss": 0.873,
"step": 384
},
{
"epoch": 0.9897172236503856,
"grad_norm": 0.05616312101483345,
"learning_rate": 9.269815291091886e-06,
"loss": 0.8818,
"step": 385
},
{
"epoch": 0.9922879177377892,
"grad_norm": 0.057027895003557205,
"learning_rate": 9.266166822867854e-06,
"loss": 0.8325,
"step": 386
},
{
"epoch": 0.9948586118251928,
"grad_norm": 0.0524408258497715,
"learning_rate": 9.26250293496126e-06,
"loss": 0.7954,
"step": 387
},
{
"epoch": 0.9974293059125964,
"grad_norm": 0.058645591139793396,
"learning_rate": 9.258823529411765e-06,
"loss": 0.8589,
"step": 388
},
{
"epoch": 1.0,
"grad_norm": 0.057895876467227936,
"learning_rate": 9.255128507427495e-06,
"loss": 0.8862,
"step": 389
},
{
"epoch": 1.0025706940874035,
"grad_norm": 0.04984109103679657,
"learning_rate": 9.251417769376181e-06,
"loss": 0.8076,
"step": 390
}
],
"logging_steps": 1,
"max_steps": 778,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 195,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.1343631119821046e+19,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}