{
"best_metric": 0.615675151348114,
"best_model_checkpoint": "miner_id_24/checkpoint-400",
"epoch": 0.05393743257820928,
"eval_steps": 50,
"global_step": 400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00013484358144552318,
"grad_norm": 0.46096381545066833,
"learning_rate": 1e-05,
"loss": 0.7976,
"step": 1
},
{
"epoch": 0.00013484358144552318,
"eval_loss": 1.1117002964019775,
"eval_runtime": 803.9898,
"eval_samples_per_second": 15.535,
"eval_steps_per_second": 3.884,
"step": 1
},
{
"epoch": 0.00026968716289104636,
"grad_norm": 0.4742763638496399,
"learning_rate": 2e-05,
"loss": 0.8452,
"step": 2
},
{
"epoch": 0.0004045307443365696,
"grad_norm": 0.5058476328849792,
"learning_rate": 3e-05,
"loss": 0.9725,
"step": 3
},
{
"epoch": 0.0005393743257820927,
"grad_norm": 0.5678961277008057,
"learning_rate": 4e-05,
"loss": 1.0304,
"step": 4
},
{
"epoch": 0.000674217907227616,
"grad_norm": 0.5502512454986572,
"learning_rate": 5e-05,
"loss": 1.1092,
"step": 5
},
{
"epoch": 0.0008090614886731392,
"grad_norm": 0.6693419218063354,
"learning_rate": 6e-05,
"loss": 1.0478,
"step": 6
},
{
"epoch": 0.0009439050701186624,
"grad_norm": 0.6592850089073181,
"learning_rate": 7e-05,
"loss": 0.7008,
"step": 7
},
{
"epoch": 0.0010787486515641855,
"grad_norm": 0.7339627146720886,
"learning_rate": 8e-05,
"loss": 0.9296,
"step": 8
},
{
"epoch": 0.0012135922330097086,
"grad_norm": 0.7497692704200745,
"learning_rate": 9e-05,
"loss": 0.7438,
"step": 9
},
{
"epoch": 0.001348435814455232,
"grad_norm": 1.0232096910476685,
"learning_rate": 0.0001,
"loss": 1.0474,
"step": 10
},
{
"epoch": 0.0014832793959007552,
"grad_norm": 0.631252646446228,
"learning_rate": 9.99983777858264e-05,
"loss": 0.7932,
"step": 11
},
{
"epoch": 0.0016181229773462784,
"grad_norm": 0.868119478225708,
"learning_rate": 9.999351124856874e-05,
"loss": 0.9099,
"step": 12
},
{
"epoch": 0.0017529665587918016,
"grad_norm": 0.6615088582038879,
"learning_rate": 9.998540070400966e-05,
"loss": 0.7893,
"step": 13
},
{
"epoch": 0.0018878101402373248,
"grad_norm": 0.5393303036689758,
"learning_rate": 9.997404667843075e-05,
"loss": 0.726,
"step": 14
},
{
"epoch": 0.0020226537216828477,
"grad_norm": 0.5994777679443359,
"learning_rate": 9.995944990857849e-05,
"loss": 0.7615,
"step": 15
},
{
"epoch": 0.002157497303128371,
"grad_norm": 0.4877713918685913,
"learning_rate": 9.994161134161634e-05,
"loss": 0.7348,
"step": 16
},
{
"epoch": 0.002292340884573894,
"grad_norm": 0.5485960245132446,
"learning_rate": 9.992053213506334e-05,
"loss": 0.7739,
"step": 17
},
{
"epoch": 0.0024271844660194173,
"grad_norm": 0.5762727856636047,
"learning_rate": 9.989621365671902e-05,
"loss": 0.8168,
"step": 18
},
{
"epoch": 0.002562028047464941,
"grad_norm": 0.5295676589012146,
"learning_rate": 9.986865748457457e-05,
"loss": 0.6737,
"step": 19
},
{
"epoch": 0.002696871628910464,
"grad_norm": 0.5712666511535645,
"learning_rate": 9.983786540671051e-05,
"loss": 0.7949,
"step": 20
},
{
"epoch": 0.0028317152103559872,
"grad_norm": 0.542948842048645,
"learning_rate": 9.980383942118066e-05,
"loss": 0.5611,
"step": 21
},
{
"epoch": 0.0029665587918015104,
"grad_norm": 0.5364521145820618,
"learning_rate": 9.976658173588244e-05,
"loss": 0.6554,
"step": 22
},
{
"epoch": 0.0031014023732470336,
"grad_norm": 0.5057823061943054,
"learning_rate": 9.972609476841367e-05,
"loss": 0.7143,
"step": 23
},
{
"epoch": 0.003236245954692557,
"grad_norm": 0.5525686144828796,
"learning_rate": 9.968238114591566e-05,
"loss": 0.6624,
"step": 24
},
{
"epoch": 0.00337108953613808,
"grad_norm": 0.6571288704872131,
"learning_rate": 9.96354437049027e-05,
"loss": 0.5435,
"step": 25
},
{
"epoch": 0.003505933117583603,
"grad_norm": 0.5711979269981384,
"learning_rate": 9.95852854910781e-05,
"loss": 0.7177,
"step": 26
},
{
"epoch": 0.0036407766990291263,
"grad_norm": 0.5688961148262024,
"learning_rate": 9.953190975913647e-05,
"loss": 0.6087,
"step": 27
},
{
"epoch": 0.0037756202804746495,
"grad_norm": 0.6181890964508057,
"learning_rate": 9.947531997255256e-05,
"loss": 0.5233,
"step": 28
},
{
"epoch": 0.003910463861920173,
"grad_norm": 0.6405938863754272,
"learning_rate": 9.941551980335652e-05,
"loss": 0.6902,
"step": 29
},
{
"epoch": 0.0040453074433656954,
"grad_norm": 0.6901422142982483,
"learning_rate": 9.935251313189564e-05,
"loss": 0.7528,
"step": 30
},
{
"epoch": 0.004180151024811219,
"grad_norm": 0.6896430253982544,
"learning_rate": 9.928630404658255e-05,
"loss": 0.6692,
"step": 31
},
{
"epoch": 0.004314994606256742,
"grad_norm": 0.6770252585411072,
"learning_rate": 9.921689684362989e-05,
"loss": 0.5303,
"step": 32
},
{
"epoch": 0.004449838187702265,
"grad_norm": 0.6999014019966125,
"learning_rate": 9.914429602677162e-05,
"loss": 0.6234,
"step": 33
},
{
"epoch": 0.004584681769147788,
"grad_norm": 0.6385924220085144,
"learning_rate": 9.906850630697068e-05,
"loss": 0.8011,
"step": 34
},
{
"epoch": 0.004719525350593312,
"grad_norm": 0.641307532787323,
"learning_rate": 9.898953260211338e-05,
"loss": 0.5336,
"step": 35
},
{
"epoch": 0.0048543689320388345,
"grad_norm": 0.6961727738380432,
"learning_rate": 9.890738003669029e-05,
"loss": 0.6346,
"step": 36
},
{
"epoch": 0.004989212513484358,
"grad_norm": 0.7356176376342773,
"learning_rate": 9.882205394146361e-05,
"loss": 0.6464,
"step": 37
},
{
"epoch": 0.005124056094929882,
"grad_norm": 0.7595506906509399,
"learning_rate": 9.87335598531214e-05,
"loss": 0.6172,
"step": 38
},
{
"epoch": 0.0052588996763754045,
"grad_norm": 0.7338278889656067,
"learning_rate": 9.864190351391822e-05,
"loss": 0.6536,
"step": 39
},
{
"epoch": 0.005393743257820928,
"grad_norm": 0.7370137572288513,
"learning_rate": 9.85470908713026e-05,
"loss": 0.6816,
"step": 40
},
{
"epoch": 0.005528586839266451,
"grad_norm": 0.9426000118255615,
"learning_rate": 9.844912807753104e-05,
"loss": 0.5608,
"step": 41
},
{
"epoch": 0.0056634304207119745,
"grad_norm": 0.7755903601646423,
"learning_rate": 9.834802148926882e-05,
"loss": 0.6795,
"step": 42
},
{
"epoch": 0.005798274002157497,
"grad_norm": 0.8503265976905823,
"learning_rate": 9.824377766717759e-05,
"loss": 0.716,
"step": 43
},
{
"epoch": 0.005933117583603021,
"grad_norm": 0.9363091588020325,
"learning_rate": 9.813640337548954e-05,
"loss": 0.6649,
"step": 44
},
{
"epoch": 0.006067961165048544,
"grad_norm": 0.8452575206756592,
"learning_rate": 9.802590558156862e-05,
"loss": 0.6178,
"step": 45
},
{
"epoch": 0.006202804746494067,
"grad_norm": 1.1449545621871948,
"learning_rate": 9.791229145545831e-05,
"loss": 0.7926,
"step": 46
},
{
"epoch": 0.00633764832793959,
"grad_norm": 1.28661048412323,
"learning_rate": 9.779556836941645e-05,
"loss": 1.107,
"step": 47
},
{
"epoch": 0.006472491909385114,
"grad_norm": 1.3061388731002808,
"learning_rate": 9.767574389743682e-05,
"loss": 0.7023,
"step": 48
},
{
"epoch": 0.006607335490830636,
"grad_norm": 1.5271786451339722,
"learning_rate": 9.755282581475769e-05,
"loss": 0.7403,
"step": 49
},
{
"epoch": 0.00674217907227616,
"grad_norm": 1.7419681549072266,
"learning_rate": 9.742682209735727e-05,
"loss": 1.0364,
"step": 50
},
{
"epoch": 0.00674217907227616,
"eval_loss": 0.7143002152442932,
"eval_runtime": 808.5398,
"eval_samples_per_second": 15.448,
"eval_steps_per_second": 3.863,
"step": 50
},
{
"epoch": 0.006877022653721683,
"grad_norm": 0.38730719685554504,
"learning_rate": 9.729774092143627e-05,
"loss": 0.6539,
"step": 51
},
{
"epoch": 0.007011866235167206,
"grad_norm": 0.41461649537086487,
"learning_rate": 9.716559066288715e-05,
"loss": 0.711,
"step": 52
},
{
"epoch": 0.007146709816612729,
"grad_norm": 0.5456146001815796,
"learning_rate": 9.703037989675087e-05,
"loss": 0.8807,
"step": 53
},
{
"epoch": 0.007281553398058253,
"grad_norm": 0.46824124455451965,
"learning_rate": 9.689211739666023e-05,
"loss": 0.8765,
"step": 54
},
{
"epoch": 0.007416396979503775,
"grad_norm": 0.45686405897140503,
"learning_rate": 9.675081213427076e-05,
"loss": 0.8085,
"step": 55
},
{
"epoch": 0.007551240560949299,
"grad_norm": 0.44312959909439087,
"learning_rate": 9.66064732786784e-05,
"loss": 0.8294,
"step": 56
},
{
"epoch": 0.007686084142394822,
"grad_norm": 0.49523454904556274,
"learning_rate": 9.645911019582467e-05,
"loss": 0.9952,
"step": 57
},
{
"epoch": 0.007820927723840345,
"grad_norm": 0.7094590663909912,
"learning_rate": 9.630873244788883e-05,
"loss": 0.6038,
"step": 58
},
{
"epoch": 0.007955771305285868,
"grad_norm": 0.57602459192276,
"learning_rate": 9.615534979266745e-05,
"loss": 0.7637,
"step": 59
},
{
"epoch": 0.008090614886731391,
"grad_norm": 0.46149370074272156,
"learning_rate": 9.599897218294122e-05,
"loss": 0.7613,
"step": 60
},
{
"epoch": 0.008225458468176915,
"grad_norm": 0.4930979907512665,
"learning_rate": 9.583960976582913e-05,
"loss": 0.6701,
"step": 61
},
{
"epoch": 0.008360302049622438,
"grad_norm": 0.63169264793396,
"learning_rate": 9.567727288213005e-05,
"loss": 0.7589,
"step": 62
},
{
"epoch": 0.00849514563106796,
"grad_norm": 0.4967760741710663,
"learning_rate": 9.551197206565173e-05,
"loss": 0.7446,
"step": 63
},
{
"epoch": 0.008629989212513484,
"grad_norm": 0.4685230851173401,
"learning_rate": 9.534371804252728e-05,
"loss": 0.9313,
"step": 64
},
{
"epoch": 0.008764832793959008,
"grad_norm": 0.5008507370948792,
"learning_rate": 9.517252173051911e-05,
"loss": 0.7179,
"step": 65
},
{
"epoch": 0.00889967637540453,
"grad_norm": 0.47081446647644043,
"learning_rate": 9.49983942383106e-05,
"loss": 0.5974,
"step": 66
},
{
"epoch": 0.009034519956850054,
"grad_norm": 0.4435614347457886,
"learning_rate": 9.482134686478519e-05,
"loss": 0.7662,
"step": 67
},
{
"epoch": 0.009169363538295576,
"grad_norm": 0.43143922090530396,
"learning_rate": 9.464139109829321e-05,
"loss": 0.654,
"step": 68
},
{
"epoch": 0.0093042071197411,
"grad_norm": 0.4331086575984955,
"learning_rate": 9.445853861590647e-05,
"loss": 0.8423,
"step": 69
},
{
"epoch": 0.009439050701186624,
"grad_norm": 0.4335334599018097,
"learning_rate": 9.42728012826605e-05,
"loss": 0.6996,
"step": 70
},
{
"epoch": 0.009573894282632146,
"grad_norm": 0.5013962388038635,
"learning_rate": 9.408419115078471e-05,
"loss": 0.7172,
"step": 71
},
{
"epoch": 0.009708737864077669,
"grad_norm": 0.4722919166088104,
"learning_rate": 9.389272045892024e-05,
"loss": 0.6182,
"step": 72
},
{
"epoch": 0.009843581445523194,
"grad_norm": 0.43432602286338806,
"learning_rate": 9.36984016313259e-05,
"loss": 0.6019,
"step": 73
},
{
"epoch": 0.009978425026968716,
"grad_norm": 0.46934643387794495,
"learning_rate": 9.350124727707197e-05,
"loss": 0.6449,
"step": 74
},
{
"epoch": 0.010113268608414239,
"grad_norm": 0.41017603874206543,
"learning_rate": 9.330127018922194e-05,
"loss": 0.6382,
"step": 75
},
{
"epoch": 0.010248112189859764,
"grad_norm": 0.4711523652076721,
"learning_rate": 9.309848334400246e-05,
"loss": 0.5444,
"step": 76
},
{
"epoch": 0.010382955771305286,
"grad_norm": 0.49385398626327515,
"learning_rate": 9.289289989996133e-05,
"loss": 0.6478,
"step": 77
},
{
"epoch": 0.010517799352750809,
"grad_norm": 0.5090382099151611,
"learning_rate": 9.268453319711363e-05,
"loss": 0.7604,
"step": 78
},
{
"epoch": 0.010652642934196332,
"grad_norm": 0.5241708159446716,
"learning_rate": 9.247339675607605e-05,
"loss": 0.6198,
"step": 79
},
{
"epoch": 0.010787486515641856,
"grad_norm": 0.5437560081481934,
"learning_rate": 9.225950427718975e-05,
"loss": 0.55,
"step": 80
},
{
"epoch": 0.010922330097087379,
"grad_norm": 0.5280182361602783,
"learning_rate": 9.204286963963111e-05,
"loss": 0.5952,
"step": 81
},
{
"epoch": 0.011057173678532902,
"grad_norm": 0.6034506559371948,
"learning_rate": 9.182350690051133e-05,
"loss": 0.5583,
"step": 82
},
{
"epoch": 0.011192017259978424,
"grad_norm": 0.5680975317955017,
"learning_rate": 9.160143029396422e-05,
"loss": 0.5799,
"step": 83
},
{
"epoch": 0.011326860841423949,
"grad_norm": 0.5396384000778198,
"learning_rate": 9.13766542302225e-05,
"loss": 0.5709,
"step": 84
},
{
"epoch": 0.011461704422869472,
"grad_norm": 0.5622548460960388,
"learning_rate": 9.114919329468282e-05,
"loss": 0.6419,
"step": 85
},
{
"epoch": 0.011596548004314994,
"grad_norm": 0.5228815674781799,
"learning_rate": 9.091906224695935e-05,
"loss": 0.5859,
"step": 86
},
{
"epoch": 0.011731391585760517,
"grad_norm": 0.5865054130554199,
"learning_rate": 9.068627601992598e-05,
"loss": 0.6394,
"step": 87
},
{
"epoch": 0.011866235167206042,
"grad_norm": 0.6381607055664062,
"learning_rate": 9.045084971874738e-05,
"loss": 0.6746,
"step": 88
},
{
"epoch": 0.012001078748651564,
"grad_norm": 0.5563323497772217,
"learning_rate": 9.021279861989885e-05,
"loss": 0.512,
"step": 89
},
{
"epoch": 0.012135922330097087,
"grad_norm": 0.6685919165611267,
"learning_rate": 8.997213817017507e-05,
"loss": 0.625,
"step": 90
},
{
"epoch": 0.01227076591154261,
"grad_norm": 0.6518629789352417,
"learning_rate": 8.972888398568772e-05,
"loss": 0.5988,
"step": 91
},
{
"epoch": 0.012405609492988134,
"grad_norm": 0.6607906818389893,
"learning_rate": 8.948305185085225e-05,
"loss": 0.617,
"step": 92
},
{
"epoch": 0.012540453074433657,
"grad_norm": 0.8087469339370728,
"learning_rate": 8.92346577173636e-05,
"loss": 0.581,
"step": 93
},
{
"epoch": 0.01267529665587918,
"grad_norm": 0.6895301342010498,
"learning_rate": 8.898371770316111e-05,
"loss": 0.42,
"step": 94
},
{
"epoch": 0.012810140237324703,
"grad_norm": 0.7558472752571106,
"learning_rate": 8.873024809138272e-05,
"loss": 0.5648,
"step": 95
},
{
"epoch": 0.012944983818770227,
"grad_norm": 1.1604480743408203,
"learning_rate": 8.847426532930831e-05,
"loss": 0.6935,
"step": 96
},
{
"epoch": 0.01307982740021575,
"grad_norm": 1.0954399108886719,
"learning_rate": 8.821578602729242e-05,
"loss": 0.7195,
"step": 97
},
{
"epoch": 0.013214670981661273,
"grad_norm": 1.1974776983261108,
"learning_rate": 8.795482695768658e-05,
"loss": 0.8272,
"step": 98
},
{
"epoch": 0.013349514563106795,
"grad_norm": 1.1786168813705444,
"learning_rate": 8.769140505375085e-05,
"loss": 0.6922,
"step": 99
},
{
"epoch": 0.01348435814455232,
"grad_norm": 1.5181611776351929,
"learning_rate": 8.742553740855506e-05,
"loss": 0.6546,
"step": 100
},
{
"epoch": 0.01348435814455232,
"eval_loss": 0.6889885067939758,
"eval_runtime": 809.3771,
"eval_samples_per_second": 15.432,
"eval_steps_per_second": 3.859,
"step": 100
},
{
"epoch": 0.013619201725997843,
"grad_norm": 0.3629307746887207,
"learning_rate": 8.715724127386972e-05,
"loss": 0.6545,
"step": 101
},
{
"epoch": 0.013754045307443365,
"grad_norm": 0.4986977279186249,
"learning_rate": 8.688653405904652e-05,
"loss": 0.7939,
"step": 102
},
{
"epoch": 0.013888888888888888,
"grad_norm": 0.4081737995147705,
"learning_rate": 8.661343332988869e-05,
"loss": 0.6722,
"step": 103
},
{
"epoch": 0.014023732470334413,
"grad_norm": 0.4756498336791992,
"learning_rate": 8.633795680751116e-05,
"loss": 0.9048,
"step": 104
},
{
"epoch": 0.014158576051779935,
"grad_norm": 0.48325270414352417,
"learning_rate": 8.606012236719073e-05,
"loss": 0.7899,
"step": 105
},
{
"epoch": 0.014293419633225458,
"grad_norm": 0.43557488918304443,
"learning_rate": 8.577994803720606e-05,
"loss": 0.7734,
"step": 106
},
{
"epoch": 0.01442826321467098,
"grad_norm": 0.40467625856399536,
"learning_rate": 8.549745199766792e-05,
"loss": 0.802,
"step": 107
},
{
"epoch": 0.014563106796116505,
"grad_norm": 0.4233931601047516,
"learning_rate": 8.521265257933948e-05,
"loss": 0.6263,
"step": 108
},
{
"epoch": 0.014697950377562028,
"grad_norm": 0.4349559247493744,
"learning_rate": 8.492556826244687e-05,
"loss": 0.755,
"step": 109
},
{
"epoch": 0.01483279395900755,
"grad_norm": 0.5495539307594299,
"learning_rate": 8.463621767547998e-05,
"loss": 0.7725,
"step": 110
},
{
"epoch": 0.014967637540453074,
"grad_norm": 0.5753733515739441,
"learning_rate": 8.434461959398376e-05,
"loss": 0.8626,
"step": 111
},
{
"epoch": 0.015102481121898598,
"grad_norm": 0.503030002117157,
"learning_rate": 8.405079293933986e-05,
"loss": 0.7574,
"step": 112
},
{
"epoch": 0.01523732470334412,
"grad_norm": 0.4911031126976013,
"learning_rate": 8.375475677753881e-05,
"loss": 0.6502,
"step": 113
},
{
"epoch": 0.015372168284789644,
"grad_norm": 0.4379326105117798,
"learning_rate": 8.345653031794292e-05,
"loss": 0.6927,
"step": 114
},
{
"epoch": 0.015507011866235168,
"grad_norm": 0.564357340335846,
"learning_rate": 8.315613291203976e-05,
"loss": 0.7794,
"step": 115
},
{
"epoch": 0.01564185544768069,
"grad_norm": 0.4725935757160187,
"learning_rate": 8.285358405218655e-05,
"loss": 0.8151,
"step": 116
},
{
"epoch": 0.015776699029126214,
"grad_norm": 0.3898284137248993,
"learning_rate": 8.25489033703452e-05,
"loss": 0.6533,
"step": 117
},
{
"epoch": 0.015911542610571736,
"grad_norm": 0.408464640378952,
"learning_rate": 8.224211063680853e-05,
"loss": 0.741,
"step": 118
},
{
"epoch": 0.01604638619201726,
"grad_norm": 0.42747777700424194,
"learning_rate": 8.19332257589174e-05,
"loss": 0.7429,
"step": 119
},
{
"epoch": 0.016181229773462782,
"grad_norm": 0.4827050566673279,
"learning_rate": 8.162226877976887e-05,
"loss": 0.6342,
"step": 120
},
{
"epoch": 0.016316073354908308,
"grad_norm": 0.44718173146247864,
"learning_rate": 8.130925987691569e-05,
"loss": 0.6742,
"step": 121
},
{
"epoch": 0.01645091693635383,
"grad_norm": 0.43637025356292725,
"learning_rate": 8.099421936105702e-05,
"loss": 0.6341,
"step": 122
},
{
"epoch": 0.016585760517799353,
"grad_norm": 0.41816022992134094,
"learning_rate": 8.067716767472045e-05,
"loss": 0.6587,
"step": 123
},
{
"epoch": 0.016720604099244876,
"grad_norm": 0.3830829858779907,
"learning_rate": 8.035812539093557e-05,
"loss": 0.4814,
"step": 124
},
{
"epoch": 0.0168554476806904,
"grad_norm": 0.473495215177536,
"learning_rate": 8.003711321189895e-05,
"loss": 0.5149,
"step": 125
},
{
"epoch": 0.01699029126213592,
"grad_norm": 0.41442325711250305,
"learning_rate": 7.971415196763088e-05,
"loss": 0.534,
"step": 126
},
{
"epoch": 0.017125134843581444,
"grad_norm": 0.40307527780532837,
"learning_rate": 7.938926261462366e-05,
"loss": 0.5376,
"step": 127
},
{
"epoch": 0.017259978425026967,
"grad_norm": 0.49703535437583923,
"learning_rate": 7.906246623448183e-05,
"loss": 0.6204,
"step": 128
},
{
"epoch": 0.017394822006472493,
"grad_norm": 0.4826800227165222,
"learning_rate": 7.873378403255419e-05,
"loss": 0.8429,
"step": 129
},
{
"epoch": 0.017529665587918016,
"grad_norm": 0.44567549228668213,
"learning_rate": 7.840323733655778e-05,
"loss": 0.4935,
"step": 130
},
{
"epoch": 0.01766450916936354,
"grad_norm": 0.5593176484107971,
"learning_rate": 7.807084759519405e-05,
"loss": 0.6138,
"step": 131
},
{
"epoch": 0.01779935275080906,
"grad_norm": 0.5233660340309143,
"learning_rate": 7.773663637675694e-05,
"loss": 0.579,
"step": 132
},
{
"epoch": 0.017934196332254584,
"grad_norm": 0.608767569065094,
"learning_rate": 7.740062536773352e-05,
"loss": 0.713,
"step": 133
},
{
"epoch": 0.018069039913700107,
"grad_norm": 0.5015419721603394,
"learning_rate": 7.706283637139658e-05,
"loss": 0.4805,
"step": 134
},
{
"epoch": 0.01820388349514563,
"grad_norm": 0.5143709182739258,
"learning_rate": 7.672329130639005e-05,
"loss": 0.5889,
"step": 135
},
{
"epoch": 0.018338727076591153,
"grad_norm": 0.6353588700294495,
"learning_rate": 7.638201220530665e-05,
"loss": 0.6094,
"step": 136
},
{
"epoch": 0.01847357065803668,
"grad_norm": 0.6180605292320251,
"learning_rate": 7.603902121325813e-05,
"loss": 0.564,
"step": 137
},
{
"epoch": 0.0186084142394822,
"grad_norm": 0.5658387541770935,
"learning_rate": 7.569434058643844e-05,
"loss": 0.6134,
"step": 138
},
{
"epoch": 0.018743257820927724,
"grad_norm": 0.5483933091163635,
"learning_rate": 7.534799269067953e-05,
"loss": 0.5484,
"step": 139
},
{
"epoch": 0.018878101402373247,
"grad_norm": 0.596729576587677,
"learning_rate": 7.500000000000001e-05,
"loss": 0.6012,
"step": 140
},
{
"epoch": 0.01901294498381877,
"grad_norm": 0.7123824954032898,
"learning_rate": 7.465038509514688e-05,
"loss": 0.5948,
"step": 141
},
{
"epoch": 0.019147788565264293,
"grad_norm": 0.6722177267074585,
"learning_rate": 7.42991706621303e-05,
"loss": 0.5897,
"step": 142
},
{
"epoch": 0.019282632146709815,
"grad_norm": 0.7012574076652527,
"learning_rate": 7.394637949075154e-05,
"loss": 0.5071,
"step": 143
},
{
"epoch": 0.019417475728155338,
"grad_norm": 0.8224066495895386,
"learning_rate": 7.35920344731241e-05,
"loss": 0.6788,
"step": 144
},
{
"epoch": 0.019552319309600864,
"grad_norm": 0.9073060750961304,
"learning_rate": 7.323615860218843e-05,
"loss": 0.7585,
"step": 145
},
{
"epoch": 0.019687162891046387,
"grad_norm": 0.9260389804840088,
"learning_rate": 7.287877497021978e-05,
"loss": 0.7458,
"step": 146
},
{
"epoch": 0.01982200647249191,
"grad_norm": 1.1151235103607178,
"learning_rate": 7.251990676732984e-05,
"loss": 0.6481,
"step": 147
},
{
"epoch": 0.019956850053937433,
"grad_norm": 1.102993130683899,
"learning_rate": 7.215957727996207e-05,
"loss": 0.7474,
"step": 148
},
{
"epoch": 0.020091693635382955,
"grad_norm": 1.4685449600219727,
"learning_rate": 7.179780988938051e-05,
"loss": 0.7184,
"step": 149
},
{
"epoch": 0.020226537216828478,
"grad_norm": 2.0621840953826904,
"learning_rate": 7.143462807015271e-05,
"loss": 0.9898,
"step": 150
},
{
"epoch": 0.020226537216828478,
"eval_loss": 0.6706996560096741,
"eval_runtime": 808.253,
"eval_samples_per_second": 15.453,
"eval_steps_per_second": 3.864,
"step": 150
},
{
"epoch": 0.020361380798274,
"grad_norm": 0.28820934891700745,
"learning_rate": 7.107005538862646e-05,
"loss": 0.4856,
"step": 151
},
{
"epoch": 0.020496224379719527,
"grad_norm": 0.4104454219341278,
"learning_rate": 7.07041155014006e-05,
"loss": 0.7801,
"step": 152
},
{
"epoch": 0.02063106796116505,
"grad_norm": 0.4290568232536316,
"learning_rate": 7.033683215379002e-05,
"loss": 0.9827,
"step": 153
},
{
"epoch": 0.020765911542610573,
"grad_norm": 0.42343682050704956,
"learning_rate": 6.996822917828477e-05,
"loss": 0.8051,
"step": 154
},
{
"epoch": 0.020900755124056095,
"grad_norm": 0.4831183850765228,
"learning_rate": 6.959833049300377e-05,
"loss": 0.7042,
"step": 155
},
{
"epoch": 0.021035598705501618,
"grad_norm": 0.5211657285690308,
"learning_rate": 6.922716010014255e-05,
"loss": 0.7197,
"step": 156
},
{
"epoch": 0.02117044228694714,
"grad_norm": 0.4082058370113373,
"learning_rate": 6.885474208441603e-05,
"loss": 0.94,
"step": 157
},
{
"epoch": 0.021305285868392664,
"grad_norm": 0.6103420853614807,
"learning_rate": 6.848110061149556e-05,
"loss": 0.7768,
"step": 158
},
{
"epoch": 0.021440129449838186,
"grad_norm": 0.47742289304733276,
"learning_rate": 6.810625992644085e-05,
"loss": 0.6773,
"step": 159
},
{
"epoch": 0.021574973031283712,
"grad_norm": 0.515769362449646,
"learning_rate": 6.773024435212678e-05,
"loss": 0.901,
"step": 160
},
{
"epoch": 0.021709816612729235,
"grad_norm": 0.44651755690574646,
"learning_rate": 6.735307828766515e-05,
"loss": 0.6748,
"step": 161
},
{
"epoch": 0.021844660194174758,
"grad_norm": 0.5577541589736938,
"learning_rate": 6.697478620682137e-05,
"loss": 0.782,
"step": 162
},
{
"epoch": 0.02197950377562028,
"grad_norm": 0.5241621136665344,
"learning_rate": 6.659539265642643e-05,
"loss": 0.8142,
"step": 163
},
{
"epoch": 0.022114347357065803,
"grad_norm": 0.42343997955322266,
"learning_rate": 6.621492225478414e-05,
"loss": 0.7662,
"step": 164
},
{
"epoch": 0.022249190938511326,
"grad_norm": 0.4410001039505005,
"learning_rate": 6.583339969007363e-05,
"loss": 0.6076,
"step": 165
},
{
"epoch": 0.02238403451995685,
"grad_norm": 0.5331948399543762,
"learning_rate": 6.545084971874738e-05,
"loss": 0.7594,
"step": 166
},
{
"epoch": 0.02251887810140237,
"grad_norm": 0.40507200360298157,
"learning_rate": 6.506729716392481e-05,
"loss": 0.5797,
"step": 167
},
{
"epoch": 0.022653721682847898,
"grad_norm": 0.44845443964004517,
"learning_rate": 6.468276691378155e-05,
"loss": 0.8773,
"step": 168
},
{
"epoch": 0.02278856526429342,
"grad_norm": 0.4197690486907959,
"learning_rate": 6.429728391993446e-05,
"loss": 0.7263,
"step": 169
},
{
"epoch": 0.022923408845738943,
"grad_norm": 0.4503156244754791,
"learning_rate": 6.391087319582264e-05,
"loss": 0.7674,
"step": 170
},
{
"epoch": 0.023058252427184466,
"grad_norm": 0.5146174430847168,
"learning_rate": 6.35235598150842e-05,
"loss": 0.6296,
"step": 171
},
{
"epoch": 0.02319309600862999,
"grad_norm": 0.42583590745925903,
"learning_rate": 6.313536890992935e-05,
"loss": 0.6544,
"step": 172
},
{
"epoch": 0.02332793959007551,
"grad_norm": 0.39939722418785095,
"learning_rate": 6.274632566950967e-05,
"loss": 0.605,
"step": 173
},
{
"epoch": 0.023462783171521034,
"grad_norm": 0.4075009226799011,
"learning_rate": 6.235645533828349e-05,
"loss": 0.6444,
"step": 174
},
{
"epoch": 0.023597626752966557,
"grad_norm": 0.44113990664482117,
"learning_rate": 6.19657832143779e-05,
"loss": 0.6811,
"step": 175
},
{
"epoch": 0.023732470334412083,
"grad_norm": 0.41997572779655457,
"learning_rate": 6.157433464794716e-05,
"loss": 0.5942,
"step": 176
},
{
"epoch": 0.023867313915857606,
"grad_norm": 0.4573177695274353,
"learning_rate": 6.118213503952779e-05,
"loss": 0.6136,
"step": 177
},
{
"epoch": 0.02400215749730313,
"grad_norm": 0.4656127393245697,
"learning_rate": 6.078920983839031e-05,
"loss": 0.7256,
"step": 178
},
{
"epoch": 0.02413700107874865,
"grad_norm": 0.48081159591674805,
"learning_rate": 6.0395584540887963e-05,
"loss": 0.6941,
"step": 179
},
{
"epoch": 0.024271844660194174,
"grad_norm": 0.5295723676681519,
"learning_rate": 6.0001284688802226e-05,
"loss": 0.6675,
"step": 180
},
{
"epoch": 0.024406688241639697,
"grad_norm": 0.48933303356170654,
"learning_rate": 5.960633586768543e-05,
"loss": 0.6173,
"step": 181
},
{
"epoch": 0.02454153182308522,
"grad_norm": 0.5500467419624329,
"learning_rate": 5.921076370520058e-05,
"loss": 0.5046,
"step": 182
},
{
"epoch": 0.024676375404530743,
"grad_norm": 0.4575514793395996,
"learning_rate": 5.8814593869458455e-05,
"loss": 0.5618,
"step": 183
},
{
"epoch": 0.02481121898597627,
"grad_norm": 0.5372133851051331,
"learning_rate": 5.841785206735192e-05,
"loss": 0.5611,
"step": 184
},
{
"epoch": 0.02494606256742179,
"grad_norm": 0.5721400380134583,
"learning_rate": 5.8020564042888015e-05,
"loss": 0.5575,
"step": 185
},
{
"epoch": 0.025080906148867314,
"grad_norm": 0.5290271043777466,
"learning_rate": 5.762275557551727e-05,
"loss": 0.5946,
"step": 186
},
{
"epoch": 0.025215749730312837,
"grad_norm": 0.5718610286712646,
"learning_rate": 5.7224452478461064e-05,
"loss": 0.5754,
"step": 187
},
{
"epoch": 0.02535059331175836,
"grad_norm": 0.5636497735977173,
"learning_rate": 5.682568059703659e-05,
"loss": 0.5843,
"step": 188
},
{
"epoch": 0.025485436893203883,
"grad_norm": 0.6710667014122009,
"learning_rate": 5.642646580697973e-05,
"loss": 0.6845,
"step": 189
},
{
"epoch": 0.025620280474649405,
"grad_norm": 0.6817132830619812,
"learning_rate": 5.602683401276615e-05,
"loss": 0.6424,
"step": 190
},
{
"epoch": 0.02575512405609493,
"grad_norm": 0.6103585362434387,
"learning_rate": 5.562681114593028e-05,
"loss": 0.6427,
"step": 191
},
{
"epoch": 0.025889967637540454,
"grad_norm": 0.6302605271339417,
"learning_rate": 5.522642316338268e-05,
"loss": 0.5822,
"step": 192
},
{
"epoch": 0.026024811218985977,
"grad_norm": 0.7038308382034302,
"learning_rate": 5.482569604572576e-05,
"loss": 0.6185,
"step": 193
},
{
"epoch": 0.0261596548004315,
"grad_norm": 0.8374555110931396,
"learning_rate": 5.442465579556793e-05,
"loss": 0.597,
"step": 194
},
{
"epoch": 0.026294498381877023,
"grad_norm": 1.0388689041137695,
"learning_rate": 5.402332843583631e-05,
"loss": 0.7494,
"step": 195
},
{
"epoch": 0.026429341963322545,
"grad_norm": 0.9474101066589355,
"learning_rate": 5.3621740008088126e-05,
"loss": 0.7096,
"step": 196
},
{
"epoch": 0.026564185544768068,
"grad_norm": 1.096104383468628,
"learning_rate": 5.321991657082097e-05,
"loss": 0.7066,
"step": 197
},
{
"epoch": 0.02669902912621359,
"grad_norm": 1.1308999061584473,
"learning_rate": 5.281788419778187e-05,
"loss": 0.826,
"step": 198
},
{
"epoch": 0.026833872707659117,
"grad_norm": 1.0808179378509521,
"learning_rate": 5.2415668976275355e-05,
"loss": 0.6728,
"step": 199
},
{
"epoch": 0.02696871628910464,
"grad_norm": 1.3771480321884155,
"learning_rate": 5.201329700547076e-05,
"loss": 0.5769,
"step": 200
},
{
"epoch": 0.02696871628910464,
"eval_loss": 0.6482462286949158,
"eval_runtime": 807.9693,
"eval_samples_per_second": 15.459,
"eval_steps_per_second": 3.865,
"step": 200
},
{
"epoch": 0.027103559870550162,
"grad_norm": 0.2528764307498932,
"learning_rate": 5.161079439470866e-05,
"loss": 0.5304,
"step": 201
},
{
"epoch": 0.027238403451995685,
"grad_norm": 0.30551937222480774,
"learning_rate": 5.1208187261806615e-05,
"loss": 0.7367,
"step": 202
},
{
"epoch": 0.027373247033441208,
"grad_norm": 0.3601093292236328,
"learning_rate": 5.080550173136457e-05,
"loss": 0.9904,
"step": 203
},
{
"epoch": 0.02750809061488673,
"grad_norm": 0.39706265926361084,
"learning_rate": 5.0402763933069496e-05,
"loss": 0.8471,
"step": 204
},
{
"epoch": 0.027642934196332253,
"grad_norm": 0.3666420876979828,
"learning_rate": 5e-05,
"loss": 0.786,
"step": 205
},
{
"epoch": 0.027777777777777776,
"grad_norm": 0.46205174922943115,
"learning_rate": 4.9597236066930516e-05,
"loss": 0.8983,
"step": 206
},
{
"epoch": 0.027912621359223302,
"grad_norm": 0.49537229537963867,
"learning_rate": 4.919449826863544e-05,
"loss": 0.6641,
"step": 207
},
{
"epoch": 0.028047464940668825,
"grad_norm": 0.41916921734809875,
"learning_rate": 4.87918127381934e-05,
"loss": 0.8595,
"step": 208
},
{
"epoch": 0.028182308522114348,
"grad_norm": 0.5146481394767761,
"learning_rate": 4.8389205605291365e-05,
"loss": 0.542,
"step": 209
},
{
"epoch": 0.02831715210355987,
"grad_norm": 0.48146557807922363,
"learning_rate": 4.798670299452926e-05,
"loss": 0.7741,
"step": 210
},
{
"epoch": 0.028451995685005393,
"grad_norm": 0.4752905070781708,
"learning_rate": 4.758433102372466e-05,
"loss": 0.7346,
"step": 211
},
{
"epoch": 0.028586839266450916,
"grad_norm": 0.4842675030231476,
"learning_rate": 4.7182115802218126e-05,
"loss": 0.6964,
"step": 212
},
{
"epoch": 0.02872168284789644,
"grad_norm": 0.5016252994537354,
"learning_rate": 4.678008342917903e-05,
"loss": 0.6597,
"step": 213
},
{
"epoch": 0.02885652642934196,
"grad_norm": 0.448872834444046,
"learning_rate": 4.6378259991911886e-05,
"loss": 0.8068,
"step": 214
},
{
"epoch": 0.028991370010787488,
"grad_norm": 0.5060707926750183,
"learning_rate": 4.597667156416371e-05,
"loss": 0.6479,
"step": 215
},
{
"epoch": 0.02912621359223301,
"grad_norm": 0.4413240849971771,
"learning_rate": 4.5575344204432084e-05,
"loss": 0.624,
"step": 216
},
{
"epoch": 0.029261057173678533,
"grad_norm": 0.35683268308639526,
"learning_rate": 4.5174303954274244e-05,
"loss": 0.5424,
"step": 217
},
{
"epoch": 0.029395900755124056,
"grad_norm": 0.4228988587856293,
"learning_rate": 4.477357683661734e-05,
"loss": 0.5449,
"step": 218
},
{
"epoch": 0.02953074433656958,
"grad_norm": 0.47619080543518066,
"learning_rate": 4.437318885406973e-05,
"loss": 0.6661,
"step": 219
},
{
"epoch": 0.0296655879180151,
"grad_norm": 0.4964873194694519,
"learning_rate": 4.397316598723385e-05,
"loss": 0.5335,
"step": 220
},
{
"epoch": 0.029800431499460624,
"grad_norm": 0.4040234386920929,
"learning_rate": 4.3573534193020274e-05,
"loss": 0.6066,
"step": 221
},
{
"epoch": 0.029935275080906147,
"grad_norm": 0.404412180185318,
"learning_rate": 4.317431940296343e-05,
"loss": 0.6218,
"step": 222
},
{
"epoch": 0.030070118662351673,
"grad_norm": 0.4424015283584595,
"learning_rate": 4.277554752153895e-05,
"loss": 0.6119,
"step": 223
},
{
"epoch": 0.030204962243797196,
"grad_norm": 0.461986780166626,
"learning_rate": 4.237724442448273e-05,
"loss": 0.6169,
"step": 224
},
{
"epoch": 0.03033980582524272,
"grad_norm": 0.6273308992385864,
"learning_rate": 4.197943595711198e-05,
"loss": 0.7169,
"step": 225
},
{
"epoch": 0.03047464940668824,
"grad_norm": 0.48244282603263855,
"learning_rate": 4.1582147932648074e-05,
"loss": 0.7226,
"step": 226
},
{
"epoch": 0.030609492988133764,
"grad_norm": 0.5722710490226746,
"learning_rate": 4.118540613054156e-05,
"loss": 0.6891,
"step": 227
},
{
"epoch": 0.030744336569579287,
"grad_norm": 0.5136760473251343,
"learning_rate": 4.078923629479943e-05,
"loss": 0.4545,
"step": 228
},
{
"epoch": 0.03087918015102481,
"grad_norm": 0.4787845015525818,
"learning_rate": 4.039366413231458e-05,
"loss": 0.6042,
"step": 229
},
{
"epoch": 0.031014023732470336,
"grad_norm": 0.43043750524520874,
"learning_rate": 3.9998715311197785e-05,
"loss": 0.5761,
"step": 230
},
{
"epoch": 0.03114886731391586,
"grad_norm": 0.4996248483657837,
"learning_rate": 3.960441545911204e-05,
"loss": 0.4823,
"step": 231
},
{
"epoch": 0.03128371089536138,
"grad_norm": 0.5408189296722412,
"learning_rate": 3.92107901616097e-05,
"loss": 0.5458,
"step": 232
},
{
"epoch": 0.0314185544768069,
"grad_norm": 0.5118104219436646,
"learning_rate": 3.8817864960472236e-05,
"loss": 0.5905,
"step": 233
},
{
"epoch": 0.03155339805825243,
"grad_norm": 0.5481888651847839,
"learning_rate": 3.842566535205286e-05,
"loss": 0.5795,
"step": 234
},
{
"epoch": 0.03168824163969795,
"grad_norm": 0.5185476541519165,
"learning_rate": 3.803421678562213e-05,
"loss": 0.4887,
"step": 235
},
{
"epoch": 0.03182308522114347,
"grad_norm": 0.5122816562652588,
"learning_rate": 3.764354466171652e-05,
"loss": 0.5843,
"step": 236
},
{
"epoch": 0.031957928802589,
"grad_norm": 0.5696781277656555,
"learning_rate": 3.725367433049033e-05,
"loss": 0.6483,
"step": 237
},
{
"epoch": 0.03209277238403452,
"grad_norm": 0.5032668113708496,
"learning_rate": 3.6864631090070655e-05,
"loss": 0.4443,
"step": 238
},
{
"epoch": 0.032227615965480044,
"grad_norm": 0.5390294194221497,
"learning_rate": 3.6476440184915815e-05,
"loss": 0.6608,
"step": 239
},
{
"epoch": 0.032362459546925564,
"grad_norm": 0.5902894735336304,
"learning_rate": 3.608912680417737e-05,
"loss": 0.5865,
"step": 240
},
{
"epoch": 0.03249730312837109,
"grad_norm": 0.6053788065910339,
"learning_rate": 3.570271608006555e-05,
"loss": 0.5234,
"step": 241
},
{
"epoch": 0.032632146709816616,
"grad_norm": 0.5734366178512573,
"learning_rate": 3.531723308621847e-05,
"loss": 0.4917,
"step": 242
},
{
"epoch": 0.032766990291262135,
"grad_norm": 0.6243178248405457,
"learning_rate": 3.493270283607522e-05,
"loss": 0.5238,
"step": 243
},
{
"epoch": 0.03290183387270766,
"grad_norm": 0.7085187435150146,
"learning_rate": 3.4549150281252636e-05,
"loss": 0.6328,
"step": 244
},
{
"epoch": 0.03303667745415318,
"grad_norm": 0.7865402102470398,
"learning_rate": 3.4166600309926387e-05,
"loss": 0.6454,
"step": 245
},
{
"epoch": 0.03317152103559871,
"grad_norm": 1.091198444366455,
"learning_rate": 3.3785077745215873e-05,
"loss": 0.7051,
"step": 246
},
{
"epoch": 0.033306364617044226,
"grad_norm": 0.9540070295333862,
"learning_rate": 3.340460734357359e-05,
"loss": 0.6448,
"step": 247
},
{
"epoch": 0.03344120819848975,
"grad_norm": 0.8281885385513306,
"learning_rate": 3.3025213793178646e-05,
"loss": 0.5543,
"step": 248
},
{
"epoch": 0.03357605177993527,
"grad_norm": 1.2517954111099243,
"learning_rate": 3.264692171233485e-05,
"loss": 0.8302,
"step": 249
},
{
"epoch": 0.0337108953613808,
"grad_norm": 1.3050953149795532,
"learning_rate": 3.226975564787322e-05,
"loss": 0.775,
"step": 250
},
{
"epoch": 0.0337108953613808,
"eval_loss": 0.6331894993782043,
"eval_runtime": 807.9179,
"eval_samples_per_second": 15.459,
"eval_steps_per_second": 3.865,
"step": 250
},
{
"epoch": 0.033845738942826324,
"grad_norm": 0.35031041502952576,
"learning_rate": 3.189374007355917e-05,
"loss": 0.8079,
"step": 251
},
{
"epoch": 0.03398058252427184,
"grad_norm": 0.3351590037345886,
"learning_rate": 3.151889938850445e-05,
"loss": 0.844,
"step": 252
},
{
"epoch": 0.03411542610571737,
"grad_norm": 0.37182357907295227,
"learning_rate": 3.114525791558398e-05,
"loss": 0.8581,
"step": 253
},
{
"epoch": 0.03425026968716289,
"grad_norm": 0.408346027135849,
"learning_rate": 3.0772839899857464e-05,
"loss": 0.9285,
"step": 254
},
{
"epoch": 0.034385113268608415,
"grad_norm": 0.40782642364501953,
"learning_rate": 3.0401669506996256e-05,
"loss": 0.8439,
"step": 255
},
{
"epoch": 0.034519956850053934,
"grad_norm": 0.43858733773231506,
"learning_rate": 3.003177082171523e-05,
"loss": 0.7533,
"step": 256
},
{
"epoch": 0.03465480043149946,
"grad_norm": 0.46821948885917664,
"learning_rate": 2.9663167846209998e-05,
"loss": 0.8943,
"step": 257
},
{
"epoch": 0.03478964401294499,
"grad_norm": 0.4441760182380676,
"learning_rate": 2.9295884498599414e-05,
"loss": 0.5959,
"step": 258
},
{
"epoch": 0.034924487594390506,
"grad_norm": 0.6823344230651855,
"learning_rate": 2.8929944611373554e-05,
"loss": 0.639,
"step": 259
},
{
"epoch": 0.03505933117583603,
"grad_norm": 0.47621798515319824,
"learning_rate": 2.8565371929847284e-05,
"loss": 0.8371,
"step": 260
},
{
"epoch": 0.03519417475728155,
"grad_norm": 0.4548802077770233,
"learning_rate": 2.8202190110619493e-05,
"loss": 0.6628,
"step": 261
},
{
"epoch": 0.03532901833872708,
"grad_norm": 0.41249215602874756,
"learning_rate": 2.784042272003794e-05,
"loss": 0.6864,
"step": 262
},
{
"epoch": 0.0354638619201726,
"grad_norm": 0.45347633957862854,
"learning_rate": 2.7480093232670158e-05,
"loss": 0.5434,
"step": 263
},
{
"epoch": 0.03559870550161812,
"grad_norm": 0.4524693489074707,
"learning_rate": 2.712122502978024e-05,
"loss": 0.7259,
"step": 264
},
{
"epoch": 0.03573354908306365,
"grad_norm": 0.47238361835479736,
"learning_rate": 2.6763841397811573e-05,
"loss": 0.5781,
"step": 265
},
{
"epoch": 0.03586839266450917,
"grad_norm": 0.4329605996608734,
"learning_rate": 2.64079655268759e-05,
"loss": 0.6225,
"step": 266
},
{
"epoch": 0.036003236245954695,
"grad_norm": 0.3808312714099884,
"learning_rate": 2.605362050924848e-05,
"loss": 0.6212,
"step": 267
},
{
"epoch": 0.036138079827400214,
"grad_norm": 0.4135022461414337,
"learning_rate": 2.57008293378697e-05,
"loss": 0.5453,
"step": 268
},
{
"epoch": 0.03627292340884574,
"grad_norm": 0.43883487582206726,
"learning_rate": 2.534961490485313e-05,
"loss": 0.7092,
"step": 269
},
{
"epoch": 0.03640776699029126,
"grad_norm": 0.4413522183895111,
"learning_rate": 2.500000000000001e-05,
"loss": 0.5233,
"step": 270
},
{
"epoch": 0.036542610571736786,
"grad_norm": 0.44812479615211487,
"learning_rate": 2.4652007309320498e-05,
"loss": 0.5898,
"step": 271
},
{
"epoch": 0.036677454153182305,
"grad_norm": 0.44522127509117126,
"learning_rate": 2.430565941356157e-05,
"loss": 0.7672,
"step": 272
},
{
"epoch": 0.03681229773462783,
"grad_norm": 0.4575137197971344,
"learning_rate": 2.3960978786741877e-05,
"loss": 0.603,
"step": 273
},
{
"epoch": 0.03694714131607336,
"grad_norm": 0.4211556017398834,
"learning_rate": 2.361798779469336e-05,
"loss": 0.5092,
"step": 274
},
{
"epoch": 0.03708198489751888,
"grad_norm": 0.44828882813453674,
"learning_rate": 2.3276708693609943e-05,
"loss": 0.6189,
"step": 275
},
{
"epoch": 0.0372168284789644,
"grad_norm": 0.4255499541759491,
"learning_rate": 2.2937163628603435e-05,
"loss": 0.4853,
"step": 276
},
{
"epoch": 0.03735167206040992,
"grad_norm": 0.4299464225769043,
"learning_rate": 2.259937463226651e-05,
"loss": 0.5207,
"step": 277
},
{
"epoch": 0.03748651564185545,
"grad_norm": 0.480823814868927,
"learning_rate": 2.2263363623243054e-05,
"loss": 0.7003,
"step": 278
},
{
"epoch": 0.03762135922330097,
"grad_norm": 0.4868784248828888,
"learning_rate": 2.192915240480596e-05,
"loss": 0.5079,
"step": 279
},
{
"epoch": 0.037756202804746494,
"grad_norm": 0.47069790959358215,
"learning_rate": 2.1596762663442218e-05,
"loss": 0.4925,
"step": 280
},
{
"epoch": 0.03789104638619202,
"grad_norm": 0.5218607783317566,
"learning_rate": 2.1266215967445824e-05,
"loss": 0.7331,
"step": 281
},
{
"epoch": 0.03802588996763754,
"grad_norm": 0.4956890344619751,
"learning_rate": 2.0937533765518187e-05,
"loss": 0.6735,
"step": 282
},
{
"epoch": 0.038160733549083066,
"grad_norm": 0.5067938566207886,
"learning_rate": 2.061073738537635e-05,
"loss": 0.5038,
"step": 283
},
{
"epoch": 0.038295577130528585,
"grad_norm": 0.5209298133850098,
"learning_rate": 2.0285848032369137e-05,
"loss": 0.5229,
"step": 284
},
{
"epoch": 0.03843042071197411,
"grad_norm": 0.47033262252807617,
"learning_rate": 1.996288678810105e-05,
"loss": 0.5264,
"step": 285
},
{
"epoch": 0.03856526429341963,
"grad_norm": 0.526556134223938,
"learning_rate": 1.9641874609064443e-05,
"loss": 0.6005,
"step": 286
},
{
"epoch": 0.03870010787486516,
"grad_norm": 0.5509507060050964,
"learning_rate": 1.932283232527956e-05,
"loss": 0.509,
"step": 287
},
{
"epoch": 0.038834951456310676,
"grad_norm": 0.5332918763160706,
"learning_rate": 1.9005780638942982e-05,
"loss": 0.4613,
"step": 288
},
{
"epoch": 0.0389697950377562,
"grad_norm": 0.5600677728652954,
"learning_rate": 1.8690740123084316e-05,
"loss": 0.5382,
"step": 289
},
{
"epoch": 0.03910463861920173,
"grad_norm": 0.5525097250938416,
"learning_rate": 1.837773122023114e-05,
"loss": 0.5053,
"step": 290
},
{
"epoch": 0.03923948220064725,
"grad_norm": 0.6623300313949585,
"learning_rate": 1.8066774241082612e-05,
"loss": 0.5598,
"step": 291
},
{
"epoch": 0.039374325782092774,
"grad_norm": 0.5509719252586365,
"learning_rate": 1.7757889363191483e-05,
"loss": 0.4917,
"step": 292
},
{
"epoch": 0.03950916936353829,
"grad_norm": 0.7896840572357178,
"learning_rate": 1.745109662965481e-05,
"loss": 0.6469,
"step": 293
},
{
"epoch": 0.03964401294498382,
"grad_norm": 0.6374439001083374,
"learning_rate": 1.714641594781347e-05,
"loss": 0.6472,
"step": 294
},
{
"epoch": 0.03977885652642934,
"grad_norm": 0.6648433804512024,
"learning_rate": 1.684386708796025e-05,
"loss": 0.4498,
"step": 295
},
{
"epoch": 0.039913700107874865,
"grad_norm": 0.9021759033203125,
"learning_rate": 1.6543469682057106e-05,
"loss": 0.655,
"step": 296
},
{
"epoch": 0.04004854368932039,
"grad_norm": 0.8277525305747986,
"learning_rate": 1.62452432224612e-05,
"loss": 0.672,
"step": 297
},
{
"epoch": 0.04018338727076591,
"grad_norm": 1.0603160858154297,
"learning_rate": 1.5949207060660138e-05,
"loss": 0.7682,
"step": 298
},
{
"epoch": 0.04031823085221144,
"grad_norm": 1.1702197790145874,
"learning_rate": 1.5655380406016235e-05,
"loss": 0.9829,
"step": 299
},
{
"epoch": 0.040453074433656956,
"grad_norm": 1.2975033521652222,
"learning_rate": 1.536378232452003e-05,
"loss": 0.7308,
"step": 300
},
{
"epoch": 0.040453074433656956,
"eval_loss": 0.6252959370613098,
"eval_runtime": 808.5673,
"eval_samples_per_second": 15.447,
"eval_steps_per_second": 3.862,
"step": 300
},
{
"epoch": 0.04058791801510248,
"grad_norm": 0.27319738268852234,
"learning_rate": 1.5074431737553157e-05,
"loss": 0.5275,
"step": 301
},
{
"epoch": 0.040722761596548,
"grad_norm": 0.27846696972846985,
"learning_rate": 1.4787347420660541e-05,
"loss": 0.875,
"step": 302
},
{
"epoch": 0.04085760517799353,
"grad_norm": 0.31747734546661377,
"learning_rate": 1.4502548002332088e-05,
"loss": 0.8743,
"step": 303
},
{
"epoch": 0.040992448759439054,
"grad_norm": 0.4086015820503235,
"learning_rate": 1.422005196279395e-05,
"loss": 0.8437,
"step": 304
},
{
"epoch": 0.04112729234088457,
"grad_norm": 0.42003804445266724,
"learning_rate": 1.3939877632809278e-05,
"loss": 0.8198,
"step": 305
},
{
"epoch": 0.0412621359223301,
"grad_norm": 0.41218090057373047,
"learning_rate": 1.3662043192488849e-05,
"loss": 0.7197,
"step": 306
},
{
"epoch": 0.04139697950377562,
"grad_norm": 0.42850056290626526,
"learning_rate": 1.338656667011134e-05,
"loss": 0.6244,
"step": 307
},
{
"epoch": 0.041531823085221145,
"grad_norm": 0.40615183115005493,
"learning_rate": 1.3113465940953495e-05,
"loss": 0.7325,
"step": 308
},
{
"epoch": 0.041666666666666664,
"grad_norm": 0.39038151502609253,
"learning_rate": 1.2842758726130283e-05,
"loss": 0.6878,
"step": 309
},
{
"epoch": 0.04180151024811219,
"grad_norm": 0.5736265778541565,
"learning_rate": 1.257446259144494e-05,
"loss": 0.6428,
"step": 310
},
{
"epoch": 0.04193635382955771,
"grad_norm": 0.6234601140022278,
"learning_rate": 1.2308594946249163e-05,
"loss": 0.6945,
"step": 311
},
{
"epoch": 0.042071197411003236,
"grad_norm": 0.5283477306365967,
"learning_rate": 1.204517304231343e-05,
"loss": 0.7922,
"step": 312
},
{
"epoch": 0.04220604099244876,
"grad_norm": 0.3996746838092804,
"learning_rate": 1.178421397270758e-05,
"loss": 0.7166,
"step": 313
},
{
"epoch": 0.04234088457389428,
"grad_norm": 0.4354736804962158,
"learning_rate": 1.1525734670691701e-05,
"loss": 0.6033,
"step": 314
},
{
"epoch": 0.04247572815533981,
"grad_norm": 0.3996833562850952,
"learning_rate": 1.1269751908617277e-05,
"loss": 0.5985,
"step": 315
},
{
"epoch": 0.04261057173678533,
"grad_norm": 0.4767645001411438,
"learning_rate": 1.1016282296838887e-05,
"loss": 0.6651,
"step": 316
},
{
"epoch": 0.04274541531823085,
"grad_norm": 0.3924247920513153,
"learning_rate": 1.0765342282636416e-05,
"loss": 0.525,
"step": 317
},
{
"epoch": 0.04288025889967637,
"grad_norm": 0.42607831954956055,
"learning_rate": 1.0516948149147754e-05,
"loss": 0.6906,
"step": 318
},
{
"epoch": 0.0430151024811219,
"grad_norm": 0.4339483082294464,
"learning_rate": 1.0271116014312293e-05,
"loss": 0.639,
"step": 319
},
{
"epoch": 0.043149946062567425,
"grad_norm": 0.43249940872192383,
"learning_rate": 1.0027861829824952e-05,
"loss": 0.5653,
"step": 320
},
{
"epoch": 0.043284789644012944,
"grad_norm": 0.4711824953556061,
"learning_rate": 9.787201380101157e-06,
"loss": 0.5899,
"step": 321
},
{
"epoch": 0.04341963322545847,
"grad_norm": 0.44571396708488464,
"learning_rate": 9.549150281252633e-06,
"loss": 0.5738,
"step": 322
},
{
"epoch": 0.04355447680690399,
"grad_norm": 0.5030795335769653,
"learning_rate": 9.313723980074018e-06,
"loss": 0.6172,
"step": 323
},
{
"epoch": 0.043689320388349516,
"grad_norm": 0.45189574360847473,
"learning_rate": 9.080937753040646e-06,
"loss": 0.6094,
"step": 324
},
{
"epoch": 0.043824163969795035,
"grad_norm": 0.427013635635376,
"learning_rate": 8.850806705317183e-06,
"loss": 0.625,
"step": 325
},
{
"epoch": 0.04395900755124056,
"grad_norm": 0.44683054089546204,
"learning_rate": 8.623345769777514e-06,
"loss": 0.5149,
"step": 326
},
{
"epoch": 0.04409385113268608,
"grad_norm": 0.455631285905838,
"learning_rate": 8.398569706035792e-06,
"loss": 0.596,
"step": 327
},
{
"epoch": 0.04422869471413161,
"grad_norm": 0.4964303970336914,
"learning_rate": 8.176493099488663e-06,
"loss": 0.6825,
"step": 328
},
{
"epoch": 0.04436353829557713,
"grad_norm": 0.45703086256980896,
"learning_rate": 7.957130360368898e-06,
"loss": 0.4597,
"step": 329
},
{
"epoch": 0.04449838187702265,
"grad_norm": 0.47953537106513977,
"learning_rate": 7.740495722810271e-06,
"loss": 0.5882,
"step": 330
},
{
"epoch": 0.04463322545846818,
"grad_norm": 0.4635942578315735,
"learning_rate": 7.526603243923957e-06,
"loss": 0.4344,
"step": 331
},
{
"epoch": 0.0447680690399137,
"grad_norm": 0.5266132354736328,
"learning_rate": 7.315466802886401e-06,
"loss": 0.4848,
"step": 332
},
{
"epoch": 0.044902912621359224,
"grad_norm": 0.4961995482444763,
"learning_rate": 7.107100100038671e-06,
"loss": 0.613,
"step": 333
},
{
"epoch": 0.04503775620280474,
"grad_norm": 0.4727863073348999,
"learning_rate": 6.901516655997536e-06,
"loss": 0.4806,
"step": 334
},
{
"epoch": 0.04517259978425027,
"grad_norm": 0.556632399559021,
"learning_rate": 6.698729810778065e-06,
"loss": 0.5254,
"step": 335
},
{
"epoch": 0.045307443365695796,
"grad_norm": 0.5376824736595154,
"learning_rate": 6.498752722928042e-06,
"loss": 0.6246,
"step": 336
},
{
"epoch": 0.045442286947141315,
"grad_norm": 0.5227162837982178,
"learning_rate": 6.301598368674105e-06,
"loss": 0.5084,
"step": 337
},
{
"epoch": 0.04557713052858684,
"grad_norm": 0.5096950531005859,
"learning_rate": 6.107279541079769e-06,
"loss": 0.5204,
"step": 338
},
{
"epoch": 0.04571197411003236,
"grad_norm": 0.6380806565284729,
"learning_rate": 5.915808849215304e-06,
"loss": 0.6499,
"step": 339
},
{
"epoch": 0.04584681769147789,
"grad_norm": 0.5523012280464172,
"learning_rate": 5.727198717339511e-06,
"loss": 0.4168,
"step": 340
},
{
"epoch": 0.045981661272923406,
"grad_norm": 0.5703758597373962,
"learning_rate": 5.54146138409355e-06,
"loss": 0.5355,
"step": 341
},
{
"epoch": 0.04611650485436893,
"grad_norm": 0.6606732606887817,
"learning_rate": 5.358608901706802e-06,
"loss": 0.6347,
"step": 342
},
{
"epoch": 0.04625134843581446,
"grad_norm": 0.6200899481773376,
"learning_rate": 5.178653135214812e-06,
"loss": 0.5202,
"step": 343
},
{
"epoch": 0.04638619201725998,
"grad_norm": 0.7857818603515625,
"learning_rate": 5.001605761689398e-06,
"loss": 0.5535,
"step": 344
},
{
"epoch": 0.046521035598705504,
"grad_norm": 0.6917232275009155,
"learning_rate": 4.827478269480895e-06,
"loss": 0.5836,
"step": 345
},
{
"epoch": 0.04665587918015102,
"grad_norm": 0.8985839486122131,
"learning_rate": 4.65628195747273e-06,
"loss": 0.6814,
"step": 346
},
{
"epoch": 0.04679072276159655,
"grad_norm": 0.9178153872489929,
"learning_rate": 4.488027934348271e-06,
"loss": 0.58,
"step": 347
},
{
"epoch": 0.04692556634304207,
"grad_norm": 1.1714760065078735,
"learning_rate": 4.322727117869951e-06,
"loss": 0.745,
"step": 348
},
{
"epoch": 0.047060409924487595,
"grad_norm": 1.2585656642913818,
"learning_rate": 4.16039023417088e-06,
"loss": 0.713,
"step": 349
},
{
"epoch": 0.047195253505933114,
"grad_norm": 1.3940469026565552,
"learning_rate": 4.001027817058789e-06,
"loss": 0.7642,
"step": 350
},
{
"epoch": 0.047195253505933114,
"eval_loss": 0.6220620274543762,
"eval_runtime": 808.8682,
"eval_samples_per_second": 15.441,
"eval_steps_per_second": 3.861,
"step": 350
},
{
"epoch": 0.04733009708737864,
"grad_norm": 0.23296354711055756,
"learning_rate": 3.844650207332562e-06,
"loss": 0.5557,
"step": 351
},
{
"epoch": 0.04746494066882417,
"grad_norm": 0.2704331576824188,
"learning_rate": 3.691267552111183e-06,
"loss": 0.8556,
"step": 352
},
{
"epoch": 0.047599784250269686,
"grad_norm": 0.32684770226478577,
"learning_rate": 3.54088980417534e-06,
"loss": 0.8739,
"step": 353
},
{
"epoch": 0.04773462783171521,
"grad_norm": 0.3810671269893646,
"learning_rate": 3.393526721321616e-06,
"loss": 0.6595,
"step": 354
},
{
"epoch": 0.04786947141316073,
"grad_norm": 0.3469758927822113,
"learning_rate": 3.249187865729264e-06,
"loss": 0.617,
"step": 355
},
{
"epoch": 0.04800431499460626,
"grad_norm": 0.3857620358467102,
"learning_rate": 3.1078826033397843e-06,
"loss": 0.8274,
"step": 356
},
{
"epoch": 0.04813915857605178,
"grad_norm": 0.45720577239990234,
"learning_rate": 2.9696201032491434e-06,
"loss": 0.6145,
"step": 357
},
{
"epoch": 0.0482740021574973,
"grad_norm": 0.4756413996219635,
"learning_rate": 2.8344093371128424e-06,
"loss": 0.8195,
"step": 358
},
{
"epoch": 0.04840884573894283,
"grad_norm": 0.5415727496147156,
"learning_rate": 2.70225907856374e-06,
"loss": 0.7706,
"step": 359
},
{
"epoch": 0.04854368932038835,
"grad_norm": 0.5260385274887085,
"learning_rate": 2.573177902642726e-06,
"loss": 0.7806,
"step": 360
},
{
"epoch": 0.048678532901833875,
"grad_norm": 0.5152238011360168,
"learning_rate": 2.4471741852423237e-06,
"loss": 0.6455,
"step": 361
},
{
"epoch": 0.048813376483279394,
"grad_norm": 0.4815971255302429,
"learning_rate": 2.324256102563188e-06,
"loss": 0.8413,
"step": 362
},
{
"epoch": 0.04894822006472492,
"grad_norm": 0.5217490792274475,
"learning_rate": 2.204431630583548e-06,
"loss": 0.7371,
"step": 363
},
{
"epoch": 0.04908306364617044,
"grad_norm": 0.40579986572265625,
"learning_rate": 2.087708544541689e-06,
"loss": 0.7718,
"step": 364
},
{
"epoch": 0.049217907227615966,
"grad_norm": 0.441269189119339,
"learning_rate": 1.974094418431388e-06,
"loss": 0.638,
"step": 365
},
{
"epoch": 0.049352750809061485,
"grad_norm": 0.43731170892715454,
"learning_rate": 1.8635966245104664e-06,
"loss": 0.7073,
"step": 366
},
{
"epoch": 0.04948759439050701,
"grad_norm": 0.42750218510627747,
"learning_rate": 1.7562223328224325e-06,
"loss": 0.7956,
"step": 367
},
{
"epoch": 0.04962243797195254,
"grad_norm": 0.4149448871612549,
"learning_rate": 1.6519785107311891e-06,
"loss": 0.5633,
"step": 368
},
{
"epoch": 0.04975728155339806,
"grad_norm": 0.40429598093032837,
"learning_rate": 1.5508719224689717e-06,
"loss": 0.717,
"step": 369
},
{
"epoch": 0.04989212513484358,
"grad_norm": 0.43707722425460815,
"learning_rate": 1.4529091286973995e-06,
"loss": 0.6377,
"step": 370
},
{
"epoch": 0.0500269687162891,
"grad_norm": 0.4193248748779297,
"learning_rate": 1.358096486081778e-06,
"loss": 0.6427,
"step": 371
},
{
"epoch": 0.05016181229773463,
"grad_norm": 0.41846606135368347,
"learning_rate": 1.2664401468786114e-06,
"loss": 0.6396,
"step": 372
},
{
"epoch": 0.05029665587918015,
"grad_norm": 0.4295959770679474,
"learning_rate": 1.1779460585363944e-06,
"loss": 0.6024,
"step": 373
},
{
"epoch": 0.050431499460625674,
"grad_norm": 0.5195227265357971,
"learning_rate": 1.0926199633097157e-06,
"loss": 0.5379,
"step": 374
},
{
"epoch": 0.0505663430420712,
"grad_norm": 0.3819423317909241,
"learning_rate": 1.0104673978866164e-06,
"loss": 0.5306,
"step": 375
},
{
"epoch": 0.05070118662351672,
"grad_norm": 0.4182847738265991,
"learning_rate": 9.314936930293283e-07,
"loss": 0.4756,
"step": 376
},
{
"epoch": 0.050836030204962246,
"grad_norm": 0.4350426495075226,
"learning_rate": 8.557039732283944e-07,
"loss": 0.514,
"step": 377
},
{
"epoch": 0.050970873786407765,
"grad_norm": 0.45626920461654663,
"learning_rate": 7.83103156370113e-07,
"loss": 0.5751,
"step": 378
},
{
"epoch": 0.05110571736785329,
"grad_norm": 0.44910773634910583,
"learning_rate": 7.136959534174592e-07,
"loss": 0.4992,
"step": 379
},
{
"epoch": 0.05124056094929881,
"grad_norm": 0.4697156846523285,
"learning_rate": 6.474868681043578e-07,
"loss": 0.5868,
"step": 380
},
{
"epoch": 0.05137540453074434,
"grad_norm": 0.5265816450119019,
"learning_rate": 5.844801966434832e-07,
"loss": 0.7519,
"step": 381
},
{
"epoch": 0.05151024811218986,
"grad_norm": 0.5154181122779846,
"learning_rate": 5.246800274474439e-07,
"loss": 0.6139,
"step": 382
},
{
"epoch": 0.05164509169363538,
"grad_norm": 0.48455241322517395,
"learning_rate": 4.680902408635335e-07,
"loss": 0.4926,
"step": 383
},
{
"epoch": 0.05177993527508091,
"grad_norm": 0.5290249586105347,
"learning_rate": 4.1471450892189846e-07,
"loss": 0.6908,
"step": 384
},
{
"epoch": 0.05191477885652643,
"grad_norm": 0.5313962697982788,
"learning_rate": 3.6455629509730136e-07,
"loss": 0.6272,
"step": 385
},
{
"epoch": 0.052049622437971954,
"grad_norm": 0.511638879776001,
"learning_rate": 3.1761885408435054e-07,
"loss": 0.4756,
"step": 386
},
{
"epoch": 0.05218446601941747,
"grad_norm": 0.5756959915161133,
"learning_rate": 2.7390523158633554e-07,
"loss": 0.5613,
"step": 387
},
{
"epoch": 0.052319309600863,
"grad_norm": 0.5868178606033325,
"learning_rate": 2.334182641175686e-07,
"loss": 0.5253,
"step": 388
},
{
"epoch": 0.05245415318230852,
"grad_norm": 0.520788848400116,
"learning_rate": 1.9616057881935436e-07,
"loss": 0.4795,
"step": 389
},
{
"epoch": 0.052588996763754045,
"grad_norm": 0.5664001703262329,
"learning_rate": 1.6213459328950352e-07,
"loss": 0.5453,
"step": 390
},
{
"epoch": 0.05272384034519957,
"grad_norm": 0.5474855899810791,
"learning_rate": 1.3134251542544774e-07,
"loss": 0.5063,
"step": 391
},
{
"epoch": 0.05285868392664509,
"grad_norm": 0.6837330460548401,
"learning_rate": 1.0378634328099269e-07,
"loss": 0.6015,
"step": 392
},
{
"epoch": 0.05299352750809062,
"grad_norm": 0.6691194772720337,
"learning_rate": 7.946786493666647e-08,
"loss": 0.5717,
"step": 393
},
{
"epoch": 0.053128371089536136,
"grad_norm": 0.6826778054237366,
"learning_rate": 5.838865838366792e-08,
"loss": 0.5854,
"step": 394
},
{
"epoch": 0.05326321467098166,
"grad_norm": 0.9006599187850952,
"learning_rate": 4.055009142152067e-08,
"loss": 0.5394,
"step": 395
},
{
"epoch": 0.05339805825242718,
"grad_norm": 0.8636307716369629,
"learning_rate": 2.595332156925534e-08,
"loss": 0.6416,
"step": 396
},
{
"epoch": 0.05353290183387271,
"grad_norm": 0.8960134983062744,
"learning_rate": 1.4599295990352924e-08,
"loss": 0.7612,
"step": 397
},
{
"epoch": 0.053667745415318234,
"grad_norm": 0.9180140495300293,
"learning_rate": 6.488751431266149e-09,
"loss": 0.5951,
"step": 398
},
{
"epoch": 0.05380258899676375,
"grad_norm": 1.1765445470809937,
"learning_rate": 1.622214173602199e-09,
"loss": 0.7061,
"step": 399
},
{
"epoch": 0.05393743257820928,
"grad_norm": 1.1231061220169067,
"learning_rate": 0.0,
"loss": 0.7403,
"step": 400
},
{
"epoch": 0.05393743257820928,
"eval_loss": 0.615675151348114,
"eval_runtime": 808.2953,
"eval_samples_per_second": 15.452,
"eval_steps_per_second": 3.864,
"step": 400
}
],
"logging_steps": 1,
"max_steps": 400,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.3047041162254746e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}