{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 5.89908256880734,
"eval_steps": 500,
"global_step": 324,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01834862385321101,
"grad_norm": 0.04378490149974823,
"learning_rate": 4.999989423013716e-05,
"loss": 0.6713,
"num_input_tokens_seen": 44136,
"step": 1
},
{
"epoch": 0.03669724770642202,
"grad_norm": 0.040646992623806,
"learning_rate": 4.999957692144361e-05,
"loss": 0.533,
"num_input_tokens_seen": 83096,
"step": 2
},
{
"epoch": 0.05504587155963303,
"grad_norm": 0.04658753052353859,
"learning_rate": 4.999904807660428e-05,
"loss": 0.6048,
"num_input_tokens_seen": 122112,
"step": 3
},
{
"epoch": 0.07339449541284404,
"grad_norm": 0.04322144016623497,
"learning_rate": 4.999830770009406e-05,
"loss": 0.4948,
"num_input_tokens_seen": 163064,
"step": 4
},
{
"epoch": 0.09174311926605505,
"grad_norm": 0.06536195427179337,
"learning_rate": 4.999735579817769e-05,
"loss": 0.6607,
"num_input_tokens_seen": 203808,
"step": 5
},
{
"epoch": 0.11009174311926606,
"grad_norm": 0.059904925525188446,
"learning_rate": 4.9996192378909786e-05,
"loss": 0.5802,
"num_input_tokens_seen": 241824,
"step": 6
},
{
"epoch": 0.12844036697247707,
"grad_norm": 0.19818365573883057,
"learning_rate": 4.999481745213471e-05,
"loss": 0.5148,
"num_input_tokens_seen": 287608,
"step": 7
},
{
"epoch": 0.14678899082568808,
"grad_norm": 0.05985472351312637,
"learning_rate": 4.9993231029486544e-05,
"loss": 0.5714,
"num_input_tokens_seen": 325320,
"step": 8
},
{
"epoch": 0.1651376146788991,
"grad_norm": 0.061375778168439865,
"learning_rate": 4.999143312438893e-05,
"loss": 0.6812,
"num_input_tokens_seen": 369848,
"step": 9
},
{
"epoch": 0.1834862385321101,
"grad_norm": 0.06196414306759834,
"learning_rate": 4.998942375205502e-05,
"loss": 0.5358,
"num_input_tokens_seen": 415104,
"step": 10
},
{
"epoch": 0.2018348623853211,
"grad_norm": 0.07861393690109253,
"learning_rate": 4.9987202929487275e-05,
"loss": 0.6527,
"num_input_tokens_seen": 467224,
"step": 11
},
{
"epoch": 0.22018348623853212,
"grad_norm": 0.05596446990966797,
"learning_rate": 4.99847706754774e-05,
"loss": 0.5354,
"num_input_tokens_seen": 502824,
"step": 12
},
{
"epoch": 0.23853211009174313,
"grad_norm": 0.05289844051003456,
"learning_rate": 4.998212701060612e-05,
"loss": 0.5263,
"num_input_tokens_seen": 544744,
"step": 13
},
{
"epoch": 0.25688073394495414,
"grad_norm": 0.04996591433882713,
"learning_rate": 4.997927195724303e-05,
"loss": 0.5536,
"num_input_tokens_seen": 591136,
"step": 14
},
{
"epoch": 0.27522935779816515,
"grad_norm": 0.05822828412055969,
"learning_rate": 4.997620553954645e-05,
"loss": 0.6106,
"num_input_tokens_seen": 629664,
"step": 15
},
{
"epoch": 0.29357798165137616,
"grad_norm": 0.06353770196437836,
"learning_rate": 4.997292778346312e-05,
"loss": 0.5129,
"num_input_tokens_seen": 663392,
"step": 16
},
{
"epoch": 0.3119266055045872,
"grad_norm": 0.07256966829299927,
"learning_rate": 4.996943871672807e-05,
"loss": 0.6377,
"num_input_tokens_seen": 698360,
"step": 17
},
{
"epoch": 0.3302752293577982,
"grad_norm": 0.055458713322877884,
"learning_rate": 4.996573836886435e-05,
"loss": 0.4083,
"num_input_tokens_seen": 737520,
"step": 18
},
{
"epoch": 0.3486238532110092,
"grad_norm": 0.07792335003614426,
"learning_rate": 4.9961826771182784e-05,
"loss": 0.6086,
"num_input_tokens_seen": 768056,
"step": 19
},
{
"epoch": 0.3669724770642202,
"grad_norm": 0.06627275049686432,
"learning_rate": 4.995770395678171e-05,
"loss": 0.4591,
"num_input_tokens_seen": 806256,
"step": 20
},
{
"epoch": 0.3853211009174312,
"grad_norm": 0.05830290913581848,
"learning_rate": 4.9953369960546676e-05,
"loss": 0.3731,
"num_input_tokens_seen": 842336,
"step": 21
},
{
"epoch": 0.4036697247706422,
"grad_norm": 0.07277437299489975,
"learning_rate": 4.9948824819150185e-05,
"loss": 0.6243,
"num_input_tokens_seen": 876672,
"step": 22
},
{
"epoch": 0.42201834862385323,
"grad_norm": 0.07477546483278275,
"learning_rate": 4.994406857105136e-05,
"loss": 0.5788,
"num_input_tokens_seen": 915192,
"step": 23
},
{
"epoch": 0.44036697247706424,
"grad_norm": 0.06912907212972641,
"learning_rate": 4.993910125649561e-05,
"loss": 0.4753,
"num_input_tokens_seen": 951904,
"step": 24
},
{
"epoch": 0.45871559633027525,
"grad_norm": 0.0655476376414299,
"learning_rate": 4.993392291751431e-05,
"loss": 0.4518,
"num_input_tokens_seen": 1001816,
"step": 25
},
{
"epoch": 0.47706422018348627,
"grad_norm": 0.06466512382030487,
"learning_rate": 4.992853359792444e-05,
"loss": 0.5638,
"num_input_tokens_seen": 1053064,
"step": 26
},
{
"epoch": 0.4954128440366973,
"grad_norm": 0.0645688995718956,
"learning_rate": 4.99229333433282e-05,
"loss": 0.4644,
"num_input_tokens_seen": 1086688,
"step": 27
},
{
"epoch": 0.5137614678899083,
"grad_norm": 0.07181251049041748,
"learning_rate": 4.9917122201112656e-05,
"loss": 0.6191,
"num_input_tokens_seen": 1134824,
"step": 28
},
{
"epoch": 0.5321100917431193,
"grad_norm": 0.07322589308023453,
"learning_rate": 4.9911100220449293e-05,
"loss": 0.6752,
"num_input_tokens_seen": 1172072,
"step": 29
},
{
"epoch": 0.5504587155963303,
"grad_norm": 0.06396070122718811,
"learning_rate": 4.990486745229364e-05,
"loss": 0.3587,
"num_input_tokens_seen": 1211096,
"step": 30
},
{
"epoch": 0.5688073394495413,
"grad_norm": 0.07803395390510559,
"learning_rate": 4.989842394938482e-05,
"loss": 0.459,
"num_input_tokens_seen": 1259456,
"step": 31
},
{
"epoch": 0.5871559633027523,
"grad_norm": 0.05974648892879486,
"learning_rate": 4.989176976624511e-05,
"loss": 0.4148,
"num_input_tokens_seen": 1306944,
"step": 32
},
{
"epoch": 0.6055045871559633,
"grad_norm": 0.09784268587827682,
"learning_rate": 4.988490495917947e-05,
"loss": 0.539,
"num_input_tokens_seen": 1353744,
"step": 33
},
{
"epoch": 0.6238532110091743,
"grad_norm": 0.09906516224145889,
"learning_rate": 4.987782958627508e-05,
"loss": 0.5453,
"num_input_tokens_seen": 1394736,
"step": 34
},
{
"epoch": 0.6422018348623854,
"grad_norm": 0.08984062820672989,
"learning_rate": 4.987054370740083e-05,
"loss": 0.468,
"num_input_tokens_seen": 1442048,
"step": 35
},
{
"epoch": 0.6605504587155964,
"grad_norm": 0.08672655373811722,
"learning_rate": 4.9863047384206835e-05,
"loss": 0.4078,
"num_input_tokens_seen": 1478440,
"step": 36
},
{
"epoch": 0.6788990825688074,
"grad_norm": 0.1327345073223114,
"learning_rate": 4.9855340680123905e-05,
"loss": 0.5299,
"num_input_tokens_seen": 1525992,
"step": 37
},
{
"epoch": 0.6972477064220184,
"grad_norm": 0.09178602695465088,
"learning_rate": 4.9847423660363e-05,
"loss": 0.439,
"num_input_tokens_seen": 1555608,
"step": 38
},
{
"epoch": 0.7155963302752294,
"grad_norm": 0.09418320655822754,
"learning_rate": 4.983929639191469e-05,
"loss": 0.5337,
"num_input_tokens_seen": 1597392,
"step": 39
},
{
"epoch": 0.7339449541284404,
"grad_norm": 0.08294719457626343,
"learning_rate": 4.983095894354858e-05,
"loss": 0.4536,
"num_input_tokens_seen": 1649656,
"step": 40
},
{
"epoch": 0.7522935779816514,
"grad_norm": 0.09774205833673477,
"learning_rate": 4.982241138581273e-05,
"loss": 0.5221,
"num_input_tokens_seen": 1695952,
"step": 41
},
{
"epoch": 0.7706422018348624,
"grad_norm": 0.09319107979536057,
"learning_rate": 4.9813653791033057e-05,
"loss": 0.4279,
"num_input_tokens_seen": 1737224,
"step": 42
},
{
"epoch": 0.7889908256880734,
"grad_norm": 0.09561405330896378,
"learning_rate": 4.980468623331273e-05,
"loss": 0.5121,
"num_input_tokens_seen": 1772320,
"step": 43
},
{
"epoch": 0.8073394495412844,
"grad_norm": 0.08274025470018387,
"learning_rate": 4.979550878853154e-05,
"loss": 0.54,
"num_input_tokens_seen": 1823888,
"step": 44
},
{
"epoch": 0.8256880733944955,
"grad_norm": 0.08728913217782974,
"learning_rate": 4.9786121534345265e-05,
"loss": 0.4488,
"num_input_tokens_seen": 1872488,
"step": 45
},
{
"epoch": 0.8440366972477065,
"grad_norm": 0.0787016749382019,
"learning_rate": 4.9776524550184965e-05,
"loss": 0.4353,
"num_input_tokens_seen": 1924744,
"step": 46
},
{
"epoch": 0.8623853211009175,
"grad_norm": 0.10952188074588776,
"learning_rate": 4.97667179172564e-05,
"loss": 0.4784,
"num_input_tokens_seen": 1959936,
"step": 47
},
{
"epoch": 0.8807339449541285,
"grad_norm": 0.08525826781988144,
"learning_rate": 4.975670171853926e-05,
"loss": 0.3586,
"num_input_tokens_seen": 2003896,
"step": 48
},
{
"epoch": 0.8990825688073395,
"grad_norm": 0.10409987717866898,
"learning_rate": 4.9746476038786496e-05,
"loss": 0.4451,
"num_input_tokens_seen": 2047632,
"step": 49
},
{
"epoch": 0.9174311926605505,
"grad_norm": 0.0782993957400322,
"learning_rate": 4.973604096452361e-05,
"loss": 0.3591,
"num_input_tokens_seen": 2096928,
"step": 50
},
{
"epoch": 0.9357798165137615,
"grad_norm": 0.09829951077699661,
"learning_rate": 4.9725396584047925e-05,
"loss": 0.3415,
"num_input_tokens_seen": 2129536,
"step": 51
},
{
"epoch": 0.9541284403669725,
"grad_norm": 0.10606162995100021,
"learning_rate": 4.971454298742779e-05,
"loss": 0.3758,
"num_input_tokens_seen": 2169144,
"step": 52
},
{
"epoch": 0.9724770642201835,
"grad_norm": 0.09280356764793396,
"learning_rate": 4.97034802665019e-05,
"loss": 0.485,
"num_input_tokens_seen": 2207720,
"step": 53
},
{
"epoch": 0.9908256880733946,
"grad_norm": 0.11888203024864197,
"learning_rate": 4.9692208514878444e-05,
"loss": 0.3469,
"num_input_tokens_seen": 2236392,
"step": 54
},
{
"epoch": 1.0,
"grad_norm": 0.13222463428974152,
"learning_rate": 4.9680727827934354e-05,
"loss": 0.4284,
"num_input_tokens_seen": 2259088,
"step": 55
},
{
"epoch": 1.018348623853211,
"grad_norm": 0.10572745651006699,
"learning_rate": 4.966903830281449e-05,
"loss": 0.4186,
"num_input_tokens_seen": 2298496,
"step": 56
},
{
"epoch": 1.036697247706422,
"grad_norm": 0.11462350189685822,
"learning_rate": 4.965714003843079e-05,
"loss": 0.4696,
"num_input_tokens_seen": 2333016,
"step": 57
},
{
"epoch": 1.0550458715596331,
"grad_norm": 0.11215240508317947,
"learning_rate": 4.9645033135461494e-05,
"loss": 0.3905,
"num_input_tokens_seen": 2367992,
"step": 58
},
{
"epoch": 1.073394495412844,
"grad_norm": 0.0973561555147171,
"learning_rate": 4.963271769635024e-05,
"loss": 0.3588,
"num_input_tokens_seen": 2415328,
"step": 59
},
{
"epoch": 1.091743119266055,
"grad_norm": 0.10240709036588669,
"learning_rate": 4.962019382530521e-05,
"loss": 0.5532,
"num_input_tokens_seen": 2454792,
"step": 60
},
{
"epoch": 1.110091743119266,
"grad_norm": 0.0959337130188942,
"learning_rate": 4.9607461628298244e-05,
"loss": 0.331,
"num_input_tokens_seen": 2503072,
"step": 61
},
{
"epoch": 1.1284403669724772,
"grad_norm": 0.10228750854730606,
"learning_rate": 4.9594521213063974e-05,
"loss": 0.3728,
"num_input_tokens_seen": 2546960,
"step": 62
},
{
"epoch": 1.146788990825688,
"grad_norm": 0.09403488785028458,
"learning_rate": 4.958137268909887e-05,
"loss": 0.4695,
"num_input_tokens_seen": 2595432,
"step": 63
},
{
"epoch": 1.165137614678899,
"grad_norm": 0.11396344751119614,
"learning_rate": 4.9568016167660334e-05,
"loss": 0.3653,
"num_input_tokens_seen": 2633912,
"step": 64
},
{
"epoch": 1.18348623853211,
"grad_norm": 0.09487481415271759,
"learning_rate": 4.9554451761765766e-05,
"loss": 0.3498,
"num_input_tokens_seen": 2680792,
"step": 65
},
{
"epoch": 1.2018348623853212,
"grad_norm": 0.1249895691871643,
"learning_rate": 4.9540679586191605e-05,
"loss": 0.4053,
"num_input_tokens_seen": 2716584,
"step": 66
},
{
"epoch": 1.2201834862385321,
"grad_norm": 0.12268221378326416,
"learning_rate": 4.952669975747232e-05,
"loss": 0.4189,
"num_input_tokens_seen": 2757088,
"step": 67
},
{
"epoch": 1.238532110091743,
"grad_norm": 0.12126032263040543,
"learning_rate": 4.951251239389948e-05,
"loss": 0.4994,
"num_input_tokens_seen": 2795664,
"step": 68
},
{
"epoch": 1.2568807339449541,
"grad_norm": 0.1069057360291481,
"learning_rate": 4.949811761552074e-05,
"loss": 0.3275,
"num_input_tokens_seen": 2840936,
"step": 69
},
{
"epoch": 1.2752293577981653,
"grad_norm": 0.10893313586711884,
"learning_rate": 4.948351554413879e-05,
"loss": 0.4366,
"num_input_tokens_seen": 2886768,
"step": 70
},
{
"epoch": 1.2935779816513762,
"grad_norm": 0.12898756563663483,
"learning_rate": 4.9468706303310355e-05,
"loss": 0.3916,
"num_input_tokens_seen": 2919328,
"step": 71
},
{
"epoch": 1.311926605504587,
"grad_norm": 0.12405356019735336,
"learning_rate": 4.9453690018345144e-05,
"loss": 0.3249,
"num_input_tokens_seen": 2966744,
"step": 72
},
{
"epoch": 1.3302752293577982,
"grad_norm": 0.13137595355510712,
"learning_rate": 4.943846681630479e-05,
"loss": 0.3956,
"num_input_tokens_seen": 3007248,
"step": 73
},
{
"epoch": 1.3486238532110093,
"grad_norm": 0.13920250535011292,
"learning_rate": 4.942303682600178e-05,
"loss": 0.3956,
"num_input_tokens_seen": 3050960,
"step": 74
},
{
"epoch": 1.3669724770642202,
"grad_norm": 0.1255589872598648,
"learning_rate": 4.940740017799833e-05,
"loss": 0.3773,
"num_input_tokens_seen": 3088592,
"step": 75
},
{
"epoch": 1.385321100917431,
"grad_norm": 0.10222747176885605,
"learning_rate": 4.939155700460536e-05,
"loss": 0.4,
"num_input_tokens_seen": 3153520,
"step": 76
},
{
"epoch": 1.4036697247706422,
"grad_norm": 0.13205283880233765,
"learning_rate": 4.9375507439881266e-05,
"loss": 0.4343,
"num_input_tokens_seen": 3199272,
"step": 77
},
{
"epoch": 1.4220183486238533,
"grad_norm": 0.11005694419145584,
"learning_rate": 4.9359251619630886e-05,
"loss": 0.3881,
"num_input_tokens_seen": 3247128,
"step": 78
},
{
"epoch": 1.4403669724770642,
"grad_norm": 0.14799247682094574,
"learning_rate": 4.9342789681404275e-05,
"loss": 0.3972,
"num_input_tokens_seen": 3294192,
"step": 79
},
{
"epoch": 1.4587155963302751,
"grad_norm": 0.1279418021440506,
"learning_rate": 4.9326121764495596e-05,
"loss": 0.3438,
"num_input_tokens_seen": 3329736,
"step": 80
},
{
"epoch": 1.4770642201834863,
"grad_norm": 0.11036807298660278,
"learning_rate": 4.9309248009941914e-05,
"loss": 0.3189,
"num_input_tokens_seen": 3371376,
"step": 81
},
{
"epoch": 1.4954128440366974,
"grad_norm": 0.11855707317590714,
"learning_rate": 4.9292168560522014e-05,
"loss": 0.401,
"num_input_tokens_seen": 3412368,
"step": 82
},
{
"epoch": 1.5137614678899083,
"grad_norm": 0.13195356726646423,
"learning_rate": 4.9274883560755156e-05,
"loss": 0.4973,
"num_input_tokens_seen": 3455000,
"step": 83
},
{
"epoch": 1.5321100917431192,
"grad_norm": 0.1462787538766861,
"learning_rate": 4.925739315689991e-05,
"loss": 0.3768,
"num_input_tokens_seen": 3488960,
"step": 84
},
{
"epoch": 1.5504587155963303,
"grad_norm": 0.13765974342823029,
"learning_rate": 4.92396974969529e-05,
"loss": 0.2999,
"num_input_tokens_seen": 3521320,
"step": 85
},
{
"epoch": 1.5688073394495414,
"grad_norm": 0.15276113152503967,
"learning_rate": 4.9221796730647516e-05,
"loss": 0.3638,
"num_input_tokens_seen": 3559464,
"step": 86
},
{
"epoch": 1.5871559633027523,
"grad_norm": 0.1441674381494522,
"learning_rate": 4.92036910094527e-05,
"loss": 0.3919,
"num_input_tokens_seen": 3598080,
"step": 87
},
{
"epoch": 1.6055045871559632,
"grad_norm": 0.1780252456665039,
"learning_rate": 4.9185380486571595e-05,
"loss": 0.3626,
"num_input_tokens_seen": 3630064,
"step": 88
},
{
"epoch": 1.6238532110091743,
"grad_norm": 0.16947726905345917,
"learning_rate": 4.916686531694035e-05,
"loss": 0.3439,
"num_input_tokens_seen": 3661408,
"step": 89
},
{
"epoch": 1.6422018348623855,
"grad_norm": 0.1552971601486206,
"learning_rate": 4.914814565722671e-05,
"loss": 0.3236,
"num_input_tokens_seen": 3695480,
"step": 90
},
{
"epoch": 1.6605504587155964,
"grad_norm": 0.14925938844680786,
"learning_rate": 4.912922166582874e-05,
"loss": 0.4255,
"num_input_tokens_seen": 3734560,
"step": 91
},
{
"epoch": 1.6788990825688073,
"grad_norm": 0.1332874596118927,
"learning_rate": 4.9110093502873476e-05,
"loss": 0.3061,
"num_input_tokens_seen": 3773112,
"step": 92
},
{
"epoch": 1.6972477064220184,
"grad_norm": 0.15471243858337402,
"learning_rate": 4.909076133021557e-05,
"loss": 0.3275,
"num_input_tokens_seen": 3813392,
"step": 93
},
{
"epoch": 1.7155963302752295,
"grad_norm": 0.16010524332523346,
"learning_rate": 4.907122531143594e-05,
"loss": 0.4179,
"num_input_tokens_seen": 3856416,
"step": 94
},
{
"epoch": 1.7339449541284404,
"grad_norm": 0.13423003256320953,
"learning_rate": 4.905148561184033e-05,
"loss": 0.3593,
"num_input_tokens_seen": 3899472,
"step": 95
},
{
"epoch": 1.7522935779816513,
"grad_norm": 0.14900773763656616,
"learning_rate": 4.9031542398457974e-05,
"loss": 0.5007,
"num_input_tokens_seen": 3962976,
"step": 96
},
{
"epoch": 1.7706422018348624,
"grad_norm": 0.15728624165058136,
"learning_rate": 4.9011395840040144e-05,
"loss": 0.3484,
"num_input_tokens_seen": 4000696,
"step": 97
},
{
"epoch": 1.7889908256880735,
"grad_norm": 0.11092367768287659,
"learning_rate": 4.8991046107058735e-05,
"loss": 0.2889,
"num_input_tokens_seen": 4045256,
"step": 98
},
{
"epoch": 1.8073394495412844,
"grad_norm": 0.1289113610982895,
"learning_rate": 4.8970493371704826e-05,
"loss": 0.2203,
"num_input_tokens_seen": 4076800,
"step": 99
},
{
"epoch": 1.8256880733944953,
"grad_norm": 0.18886639177799225,
"learning_rate": 4.894973780788722e-05,
"loss": 0.3966,
"num_input_tokens_seen": 4119840,
"step": 100
},
{
"epoch": 1.8440366972477065,
"grad_norm": 0.1563039869070053,
"learning_rate": 4.892877959123097e-05,
"loss": 0.4417,
"num_input_tokens_seen": 4165848,
"step": 101
},
{
"epoch": 1.8623853211009176,
"grad_norm": 0.16883380711078644,
"learning_rate": 4.890761889907589e-05,
"loss": 0.4258,
"num_input_tokens_seen": 4202824,
"step": 102
},
{
"epoch": 1.8807339449541285,
"grad_norm": 0.18241995573043823,
"learning_rate": 4.8886255910475054e-05,
"loss": 0.3952,
"num_input_tokens_seen": 4233888,
"step": 103
},
{
"epoch": 1.8990825688073394,
"grad_norm": 0.19913265109062195,
"learning_rate": 4.88646908061933e-05,
"loss": 0.3241,
"num_input_tokens_seen": 4267064,
"step": 104
},
{
"epoch": 1.9174311926605505,
"grad_norm": 0.18295545876026154,
"learning_rate": 4.884292376870567e-05,
"loss": 0.4239,
"num_input_tokens_seen": 4312536,
"step": 105
},
{
"epoch": 1.9357798165137616,
"grad_norm": 0.16657495498657227,
"learning_rate": 4.8820954982195905e-05,
"loss": 0.2579,
"num_input_tokens_seen": 4356656,
"step": 106
},
{
"epoch": 1.9541284403669725,
"grad_norm": 0.18504932522773743,
"learning_rate": 4.879878463255483e-05,
"loss": 0.44,
"num_input_tokens_seen": 4400216,
"step": 107
},
{
"epoch": 1.9724770642201834,
"grad_norm": 0.1923118382692337,
"learning_rate": 4.877641290737884e-05,
"loss": 0.2662,
"num_input_tokens_seen": 4436968,
"step": 108
},
{
"epoch": 1.9908256880733946,
"grad_norm": 0.19636788964271545,
"learning_rate": 4.875383999596828e-05,
"loss": 0.4211,
"num_input_tokens_seen": 4488232,
"step": 109
},
{
"epoch": 2.0,
"grad_norm": 0.3168099820613861,
"learning_rate": 4.873106608932585e-05,
"loss": 0.2499,
"num_input_tokens_seen": 4518176,
"step": 110
},
{
"epoch": 2.018348623853211,
"grad_norm": 0.14410308003425598,
"learning_rate": 4.8708091380154984e-05,
"loss": 0.2722,
"num_input_tokens_seen": 4570896,
"step": 111
},
{
"epoch": 2.036697247706422,
"grad_norm": 0.17840909957885742,
"learning_rate": 4.868491606285823e-05,
"loss": 0.2758,
"num_input_tokens_seen": 4613576,
"step": 112
},
{
"epoch": 2.055045871559633,
"grad_norm": 0.178523987531662,
"learning_rate": 4.866154033353561e-05,
"loss": 0.3361,
"num_input_tokens_seen": 4652896,
"step": 113
},
{
"epoch": 2.073394495412844,
"grad_norm": 0.17396725714206696,
"learning_rate": 4.8637964389982926e-05,
"loss": 0.2667,
"num_input_tokens_seen": 4694256,
"step": 114
},
{
"epoch": 2.091743119266055,
"grad_norm": 0.19471587240695953,
"learning_rate": 4.8614188431690125e-05,
"loss": 0.3628,
"num_input_tokens_seen": 4747552,
"step": 115
},
{
"epoch": 2.1100917431192663,
"grad_norm": 0.1722450852394104,
"learning_rate": 4.859021265983959e-05,
"loss": 0.3599,
"num_input_tokens_seen": 4794080,
"step": 116
},
{
"epoch": 2.128440366972477,
"grad_norm": 0.20137006044387817,
"learning_rate": 4.856603727730447e-05,
"loss": 0.4262,
"num_input_tokens_seen": 4847912,
"step": 117
},
{
"epoch": 2.146788990825688,
"grad_norm": 0.19395771622657776,
"learning_rate": 4.854166248864689e-05,
"loss": 0.3118,
"num_input_tokens_seen": 4885480,
"step": 118
},
{
"epoch": 2.165137614678899,
"grad_norm": 0.209548681974411,
"learning_rate": 4.85170885001163e-05,
"loss": 0.3725,
"num_input_tokens_seen": 4921240,
"step": 119
},
{
"epoch": 2.18348623853211,
"grad_norm": 0.18228279054164886,
"learning_rate": 4.849231551964771e-05,
"loss": 0.3816,
"num_input_tokens_seen": 4960224,
"step": 120
},
{
"epoch": 2.2018348623853212,
"grad_norm": 0.24349354207515717,
"learning_rate": 4.846734375685989e-05,
"loss": 0.3383,
"num_input_tokens_seen": 4990536,
"step": 121
},
{
"epoch": 2.220183486238532,
"grad_norm": 0.17600344121456146,
"learning_rate": 4.844217342305363e-05,
"loss": 0.3011,
"num_input_tokens_seen": 5044296,
"step": 122
},
{
"epoch": 2.238532110091743,
"grad_norm": 0.18766675889492035,
"learning_rate": 4.8416804731209945e-05,
"loss": 0.4458,
"num_input_tokens_seen": 5088368,
"step": 123
},
{
"epoch": 2.2568807339449544,
"grad_norm": 0.17657820880413055,
"learning_rate": 4.839123789598829e-05,
"loss": 0.2564,
"num_input_tokens_seen": 5133472,
"step": 124
},
{
"epoch": 2.2752293577981653,
"grad_norm": 0.20606014132499695,
"learning_rate": 4.836547313372471e-05,
"loss": 0.313,
"num_input_tokens_seen": 5167768,
"step": 125
},
{
"epoch": 2.293577981651376,
"grad_norm": 0.23511061072349548,
"learning_rate": 4.8339510662430046e-05,
"loss": 0.2963,
"num_input_tokens_seen": 5209400,
"step": 126
},
{
"epoch": 2.311926605504587,
"grad_norm": 0.18234293162822723,
"learning_rate": 4.8313350701788054e-05,
"loss": 0.2566,
"num_input_tokens_seen": 5249360,
"step": 127
},
{
"epoch": 2.330275229357798,
"grad_norm": 0.2223992496728897,
"learning_rate": 4.828699347315356e-05,
"loss": 0.2833,
"num_input_tokens_seen": 5300808,
"step": 128
},
{
"epoch": 2.3486238532110093,
"grad_norm": 0.23101739585399628,
"learning_rate": 4.826043919955062e-05,
"loss": 0.3099,
"num_input_tokens_seen": 5332960,
"step": 129
},
{
"epoch": 2.36697247706422,
"grad_norm": 0.26640889048576355,
"learning_rate": 4.823368810567056e-05,
"loss": 0.3238,
"num_input_tokens_seen": 5365008,
"step": 130
},
{
"epoch": 2.385321100917431,
"grad_norm": 0.2374572902917862,
"learning_rate": 4.820674041787017e-05,
"loss": 0.3153,
"num_input_tokens_seen": 5400184,
"step": 131
},
{
"epoch": 2.4036697247706424,
"grad_norm": 0.22812288999557495,
"learning_rate": 4.817959636416969e-05,
"loss": 0.2997,
"num_input_tokens_seen": 5440320,
"step": 132
},
{
"epoch": 2.4220183486238533,
"grad_norm": 0.20079149305820465,
"learning_rate": 4.815225617425095e-05,
"loss": 0.2373,
"num_input_tokens_seen": 5480832,
"step": 133
},
{
"epoch": 2.4403669724770642,
"grad_norm": 0.196709543466568,
"learning_rate": 4.81247200794554e-05,
"loss": 0.2456,
"num_input_tokens_seen": 5526936,
"step": 134
},
{
"epoch": 2.458715596330275,
"grad_norm": 0.17305873334407806,
"learning_rate": 4.8096988312782174e-05,
"loss": 0.2099,
"num_input_tokens_seen": 5566384,
"step": 135
},
{
"epoch": 2.477064220183486,
"grad_norm": 3.584635019302368,
"learning_rate": 4.806906110888606e-05,
"loss": 0.3485,
"num_input_tokens_seen": 5629896,
"step": 136
},
{
"epoch": 2.4954128440366974,
"grad_norm": 0.23481500148773193,
"learning_rate": 4.80409387040756e-05,
"loss": 0.2231,
"num_input_tokens_seen": 5674504,
"step": 137
},
{
"epoch": 2.5137614678899083,
"grad_norm": 0.27899855375289917,
"learning_rate": 4.8012621336311016e-05,
"loss": 0.4285,
"num_input_tokens_seen": 5714000,
"step": 138
},
{
"epoch": 2.532110091743119,
"grad_norm": 0.24404938519001007,
"learning_rate": 4.798410924520223e-05,
"loss": 0.3343,
"num_input_tokens_seen": 5756856,
"step": 139
},
{
"epoch": 2.5504587155963305,
"grad_norm": 0.26869162917137146,
"learning_rate": 4.7955402672006854e-05,
"loss": 0.2497,
"num_input_tokens_seen": 5781192,
"step": 140
},
{
"epoch": 2.5688073394495414,
"grad_norm": 0.2057972550392151,
"learning_rate": 4.79265018596281e-05,
"loss": 0.2991,
"num_input_tokens_seen": 5824024,
"step": 141
},
{
"epoch": 2.5871559633027523,
"grad_norm": 0.2184937596321106,
"learning_rate": 4.789740705261278e-05,
"loss": 0.2406,
"num_input_tokens_seen": 5862584,
"step": 142
},
{
"epoch": 2.6055045871559632,
"grad_norm": 0.23603741824626923,
"learning_rate": 4.786811849714918e-05,
"loss": 0.2722,
"num_input_tokens_seen": 5897344,
"step": 143
},
{
"epoch": 2.623853211009174,
"grad_norm": 0.22983981668949127,
"learning_rate": 4.783863644106502e-05,
"loss": 0.374,
"num_input_tokens_seen": 5931736,
"step": 144
},
{
"epoch": 2.6422018348623855,
"grad_norm": 0.2825419306755066,
"learning_rate": 4.780896113382536e-05,
"loss": 0.3386,
"num_input_tokens_seen": 5972784,
"step": 145
},
{
"epoch": 2.6605504587155964,
"grad_norm": 0.4502134621143341,
"learning_rate": 4.777909282653042e-05,
"loss": 0.2289,
"num_input_tokens_seen": 6018968,
"step": 146
},
{
"epoch": 2.6788990825688073,
"grad_norm": 0.2428288459777832,
"learning_rate": 4.7749031771913584e-05,
"loss": 0.4061,
"num_input_tokens_seen": 6062520,
"step": 147
},
{
"epoch": 2.6972477064220186,
"grad_norm": 0.2685629725456238,
"learning_rate": 4.771877822433911e-05,
"loss": 0.2198,
"num_input_tokens_seen": 6087928,
"step": 148
},
{
"epoch": 2.7155963302752295,
"grad_norm": 0.24021446704864502,
"learning_rate": 4.7688332439800096e-05,
"loss": 0.34,
"num_input_tokens_seen": 6134792,
"step": 149
},
{
"epoch": 2.7339449541284404,
"grad_norm": 0.2568534314632416,
"learning_rate": 4.765769467591625e-05,
"loss": 0.3292,
"num_input_tokens_seen": 6183296,
"step": 150
},
{
"epoch": 2.7522935779816513,
"grad_norm": 0.20823974907398224,
"learning_rate": 4.762686519193175e-05,
"loss": 0.2539,
"num_input_tokens_seen": 6225840,
"step": 151
},
{
"epoch": 2.770642201834862,
"grad_norm": 0.23333317041397095,
"learning_rate": 4.759584424871302e-05,
"loss": 0.3571,
"num_input_tokens_seen": 6274760,
"step": 152
},
{
"epoch": 2.7889908256880735,
"grad_norm": 0.20232398808002472,
"learning_rate": 4.756463210874652e-05,
"loss": 0.2783,
"num_input_tokens_seen": 6326168,
"step": 153
},
{
"epoch": 2.8073394495412844,
"grad_norm": 0.3479433059692383,
"learning_rate": 4.7533229036136553e-05,
"loss": 0.2925,
"num_input_tokens_seen": 6360312,
"step": 154
},
{
"epoch": 2.8256880733944953,
"grad_norm": 0.2659524083137512,
"learning_rate": 4.750163529660303e-05,
"loss": 0.2606,
"num_input_tokens_seen": 6395496,
"step": 155
},
{
"epoch": 2.8440366972477067,
"grad_norm": 0.24823158979415894,
"learning_rate": 4.7469851157479177e-05,
"loss": 0.3721,
"num_input_tokens_seen": 6437064,
"step": 156
},
{
"epoch": 2.8623853211009176,
"grad_norm": 0.32034072279930115,
"learning_rate": 4.743787688770932e-05,
"loss": 0.3931,
"num_input_tokens_seen": 6477616,
"step": 157
},
{
"epoch": 2.8807339449541285,
"grad_norm": 0.23295725882053375,
"learning_rate": 4.740571275784659e-05,
"loss": 0.2201,
"num_input_tokens_seen": 6518680,
"step": 158
},
{
"epoch": 2.8990825688073394,
"grad_norm": 0.2758423984050751,
"learning_rate": 4.737335904005063e-05,
"loss": 0.2579,
"num_input_tokens_seen": 6549768,
"step": 159
},
{
"epoch": 2.9174311926605503,
"grad_norm": 0.26690953969955444,
"learning_rate": 4.734081600808531e-05,
"loss": 0.2575,
"num_input_tokens_seen": 6581000,
"step": 160
},
{
"epoch": 2.9357798165137616,
"grad_norm": 0.26657482981681824,
"learning_rate": 4.730808393731639e-05,
"loss": 0.2597,
"num_input_tokens_seen": 6612632,
"step": 161
},
{
"epoch": 2.9541284403669725,
"grad_norm": 0.22647295892238617,
"learning_rate": 4.72751631047092e-05,
"loss": 0.3335,
"num_input_tokens_seen": 6654288,
"step": 162
},
{
"epoch": 2.9724770642201834,
"grad_norm": 0.2863366901874542,
"learning_rate": 4.72420537888263e-05,
"loss": 0.374,
"num_input_tokens_seen": 6707208,
"step": 163
},
{
"epoch": 2.9908256880733948,
"grad_norm": 0.2606408894062042,
"learning_rate": 4.7208756269825104e-05,
"loss": 0.3477,
"num_input_tokens_seen": 6748448,
"step": 164
},
{
"epoch": 3.0,
"grad_norm": 0.440924733877182,
"learning_rate": 4.717527082945554e-05,
"loss": 0.3214,
"num_input_tokens_seen": 6777264,
"step": 165
},
{
"epoch": 3.018348623853211,
"grad_norm": 0.27583903074264526,
"learning_rate": 4.714159775105765e-05,
"loss": 0.2681,
"num_input_tokens_seen": 6809456,
"step": 166
},
{
"epoch": 3.036697247706422,
"grad_norm": 0.2995987832546234,
"learning_rate": 4.7107737319559176e-05,
"loss": 0.2633,
"num_input_tokens_seen": 6845768,
"step": 167
},
{
"epoch": 3.055045871559633,
"grad_norm": 0.23999951779842377,
"learning_rate": 4.707368982147318e-05,
"loss": 0.1961,
"num_input_tokens_seen": 6893056,
"step": 168
},
{
"epoch": 3.073394495412844,
"grad_norm": 0.23356525599956512,
"learning_rate": 4.703945554489558e-05,
"loss": 0.2836,
"num_input_tokens_seen": 6932480,
"step": 169
},
{
"epoch": 3.091743119266055,
"grad_norm": 0.29919493198394775,
"learning_rate": 4.700503477950278e-05,
"loss": 0.2838,
"num_input_tokens_seen": 6975992,
"step": 170
},
{
"epoch": 3.1100917431192663,
"grad_norm": 0.3350690007209778,
"learning_rate": 4.697042781654913e-05,
"loss": 0.3489,
"num_input_tokens_seen": 7021840,
"step": 171
},
{
"epoch": 3.128440366972477,
"grad_norm": 0.2837466895580292,
"learning_rate": 4.693563494886455e-05,
"loss": 0.3797,
"num_input_tokens_seen": 7065192,
"step": 172
},
{
"epoch": 3.146788990825688,
"grad_norm": 0.24601787328720093,
"learning_rate": 4.6900656470851964e-05,
"loss": 0.2046,
"num_input_tokens_seen": 7114544,
"step": 173
},
{
"epoch": 3.165137614678899,
"grad_norm": 0.32290250062942505,
"learning_rate": 4.6865492678484895e-05,
"loss": 0.2596,
"num_input_tokens_seen": 7152736,
"step": 174
},
{
"epoch": 3.18348623853211,
"grad_norm": 0.33591920137405396,
"learning_rate": 4.68301438693049e-05,
"loss": 0.3045,
"num_input_tokens_seen": 7207464,
"step": 175
},
{
"epoch": 3.2018348623853212,
"grad_norm": 0.25471043586730957,
"learning_rate": 4.679461034241906e-05,
"loss": 0.2096,
"num_input_tokens_seen": 7238640,
"step": 176
},
{
"epoch": 3.220183486238532,
"grad_norm": 0.31238994002342224,
"learning_rate": 4.6758892398497494e-05,
"loss": 0.2226,
"num_input_tokens_seen": 7279112,
"step": 177
},
{
"epoch": 3.238532110091743,
"grad_norm": 0.35679712891578674,
"learning_rate": 4.672299033977076e-05,
"loss": 0.2403,
"num_input_tokens_seen": 7311632,
"step": 178
},
{
"epoch": 3.2568807339449544,
"grad_norm": 0.326914519071579,
"learning_rate": 4.6686904470027316e-05,
"loss": 0.2156,
"num_input_tokens_seen": 7344864,
"step": 179
},
{
"epoch": 3.2752293577981653,
"grad_norm": 0.3293381929397583,
"learning_rate": 4.665063509461097e-05,
"loss": 0.238,
"num_input_tokens_seen": 7389944,
"step": 180
},
{
"epoch": 3.293577981651376,
"grad_norm": 0.3313307762145996,
"learning_rate": 4.661418252041827e-05,
"loss": 0.2251,
"num_input_tokens_seen": 7423672,
"step": 181
},
{
"epoch": 3.311926605504587,
"grad_norm": 0.3328595459461212,
"learning_rate": 4.657754705589591e-05,
"loss": 0.2922,
"num_input_tokens_seen": 7459576,
"step": 182
},
{
"epoch": 3.330275229357798,
"grad_norm": 0.2721710801124573,
"learning_rate": 4.6540729011038146e-05,
"loss": 0.2698,
"num_input_tokens_seen": 7511736,
"step": 183
},
{
"epoch": 3.3486238532110093,
"grad_norm": 0.2488890290260315,
"learning_rate": 4.650372869738414e-05,
"loss": 0.173,
"num_input_tokens_seen": 7558552,
"step": 184
},
{
"epoch": 3.36697247706422,
"grad_norm": 0.3800615668296814,
"learning_rate": 4.6466546428015336e-05,
"loss": 0.32,
"num_input_tokens_seen": 7599040,
"step": 185
},
{
"epoch": 3.385321100917431,
"grad_norm": 0.3377014100551605,
"learning_rate": 4.642918251755281e-05,
"loss": 0.3058,
"num_input_tokens_seen": 7653264,
"step": 186
},
{
"epoch": 3.4036697247706424,
"grad_norm": 0.25239789485931396,
"learning_rate": 4.639163728215463e-05,
"loss": 0.1896,
"num_input_tokens_seen": 7694272,
"step": 187
},
{
"epoch": 3.4220183486238533,
"grad_norm": 0.34607502818107605,
"learning_rate": 4.6353911039513145e-05,
"loss": 0.2933,
"num_input_tokens_seen": 7730848,
"step": 188
},
{
"epoch": 3.4403669724770642,
"grad_norm": 0.30653324723243713,
"learning_rate": 4.6316004108852305e-05,
"loss": 0.2625,
"num_input_tokens_seen": 7781200,
"step": 189
},
{
"epoch": 3.458715596330275,
"grad_norm": 0.2943236231803894,
"learning_rate": 4.627791681092499e-05,
"loss": 0.3372,
"num_input_tokens_seen": 7825032,
"step": 190
},
{
"epoch": 3.477064220183486,
"grad_norm": 0.30080685019493103,
"learning_rate": 4.623964946801027e-05,
"loss": 0.2229,
"num_input_tokens_seen": 7855840,
"step": 191
},
{
"epoch": 3.4954128440366974,
"grad_norm": 0.3511403799057007,
"learning_rate": 4.620120240391065e-05,
"loss": 0.3967,
"num_input_tokens_seen": 7905928,
"step": 192
},
{
"epoch": 3.5137614678899083,
"grad_norm": 0.273583322763443,
"learning_rate": 4.61625759439494e-05,
"loss": 0.2254,
"num_input_tokens_seen": 7955992,
"step": 193
},
{
"epoch": 3.532110091743119,
"grad_norm": 0.3457902669906616,
"learning_rate": 4.612377041496776e-05,
"loss": 0.2553,
"num_input_tokens_seen": 7998024,
"step": 194
},
{
"epoch": 3.5504587155963305,
"grad_norm": 0.31968954205513,
"learning_rate": 4.608478614532215e-05,
"loss": 0.2197,
"num_input_tokens_seen": 8055672,
"step": 195
},
{
"epoch": 3.5688073394495414,
"grad_norm": 0.34753403067588806,
"learning_rate": 4.604562346488144e-05,
"loss": 0.2507,
"num_input_tokens_seen": 8090848,
"step": 196
},
{
"epoch": 3.5871559633027523,
"grad_norm": 0.3808669149875641,
"learning_rate": 4.6006282705024144e-05,
"loss": 0.2422,
"num_input_tokens_seen": 8136680,
"step": 197
},
{
"epoch": 3.6055045871559632,
"grad_norm": 0.3004499673843384,
"learning_rate": 4.5966764198635606e-05,
"loss": 0.2107,
"num_input_tokens_seen": 8187472,
"step": 198
},
{
"epoch": 3.623853211009174,
"grad_norm": 0.30718186497688293,
"learning_rate": 4.592706828010518e-05,
"loss": 0.1854,
"num_input_tokens_seen": 8225216,
"step": 199
},
{
"epoch": 3.6422018348623855,
"grad_norm": 0.23112858831882477,
"learning_rate": 4.588719528532342e-05,
"loss": 0.1687,
"num_input_tokens_seen": 8274456,
"step": 200
},
{
"epoch": 3.6605504587155964,
"grad_norm": 0.25966888666152954,
"learning_rate": 4.5847145551679206e-05,
"loss": 0.2549,
"num_input_tokens_seen": 8317016,
"step": 201
},
{
"epoch": 3.6788990825688073,
"grad_norm": 0.25600987672805786,
"learning_rate": 4.580691941805695e-05,
"loss": 0.1602,
"num_input_tokens_seen": 8361856,
"step": 202
},
{
"epoch": 3.6972477064220186,
"grad_norm": 0.33986184000968933,
"learning_rate": 4.5766517224833637e-05,
"loss": 0.2495,
"num_input_tokens_seen": 8410696,
"step": 203
},
{
"epoch": 3.7155963302752295,
"grad_norm": 0.36899781227111816,
"learning_rate": 4.572593931387604e-05,
"loss": 0.2012,
"num_input_tokens_seen": 8441872,
"step": 204
},
{
"epoch": 3.7339449541284404,
"grad_norm": 0.42072632908821106,
"learning_rate": 4.568518602853776e-05,
"loss": 0.2373,
"num_input_tokens_seen": 8482544,
"step": 205
},
{
"epoch": 3.7522935779816513,
"grad_norm": 0.40593233704566956,
"learning_rate": 4.5644257713656356e-05,
"loss": 0.233,
"num_input_tokens_seen": 8519856,
"step": 206
},
{
"epoch": 3.770642201834862,
"grad_norm": 0.38003161549568176,
"learning_rate": 4.5603154715550386e-05,
"loss": 0.225,
"num_input_tokens_seen": 8551392,
"step": 207
},
{
"epoch": 3.7889908256880735,
"grad_norm": 0.2564244568347931,
"learning_rate": 4.556187738201656e-05,
"loss": 0.2975,
"num_input_tokens_seen": 8599472,
"step": 208
},
{
"epoch": 3.8073394495412844,
"grad_norm": 0.29023391008377075,
"learning_rate": 4.552042606232668e-05,
"loss": 0.2033,
"num_input_tokens_seen": 8631880,
"step": 209
},
{
"epoch": 3.8256880733944953,
"grad_norm": 0.32886001467704773,
"learning_rate": 4.54788011072248e-05,
"loss": 0.2024,
"num_input_tokens_seen": 8675016,
"step": 210
},
{
"epoch": 3.8440366972477067,
"grad_norm": 0.36749884486198425,
"learning_rate": 4.5437002868924166e-05,
"loss": 0.2304,
"num_input_tokens_seen": 8713248,
"step": 211
},
{
"epoch": 3.8623853211009176,
"grad_norm": 0.3055097758769989,
"learning_rate": 4.539503170110431e-05,
"loss": 0.2928,
"num_input_tokens_seen": 8748800,
"step": 212
},
{
"epoch": 3.8807339449541285,
"grad_norm": 0.38436686992645264,
"learning_rate": 4.535288795890798e-05,
"loss": 0.2214,
"num_input_tokens_seen": 8787832,
"step": 213
},
{
"epoch": 3.8990825688073394,
"grad_norm": 0.44330883026123047,
"learning_rate": 4.531057199893824e-05,
"loss": 0.2168,
"num_input_tokens_seen": 8819616,
"step": 214
},
{
"epoch": 3.9174311926605503,
"grad_norm": 0.28318527340888977,
"learning_rate": 4.526808417925531e-05,
"loss": 0.279,
"num_input_tokens_seen": 8860744,
"step": 215
},
{
"epoch": 3.9357798165137616,
"grad_norm": 0.3287319839000702,
"learning_rate": 4.522542485937369e-05,
"loss": 0.2597,
"num_input_tokens_seen": 8906432,
"step": 216
},
{
"epoch": 3.9541284403669725,
"grad_norm": 0.35815751552581787,
"learning_rate": 4.5182594400259e-05,
"loss": 0.241,
"num_input_tokens_seen": 8955104,
"step": 217
},
{
"epoch": 3.9724770642201834,
"grad_norm": 0.3299608528614044,
"learning_rate": 4.5139593164324986e-05,
"loss": 0.2157,
"num_input_tokens_seen": 8990200,
"step": 218
},
{
"epoch": 3.9908256880733948,
"grad_norm": 0.2916093170642853,
"learning_rate": 4.509642151543043e-05,
"loss": 0.2046,
"num_input_tokens_seen": 9020760,
"step": 219
},
{
"epoch": 4.0,
"grad_norm": 0.40076637268066406,
"learning_rate": 4.50530798188761e-05,
"loss": 0.1714,
"num_input_tokens_seen": 9036352,
"step": 220
},
{
"epoch": 4.018348623853211,
"grad_norm": 0.40515249967575073,
"learning_rate": 4.50095684414016e-05,
"loss": 0.1811,
"num_input_tokens_seen": 9091776,
"step": 221
},
{
"epoch": 4.036697247706422,
"grad_norm": 0.32984718680381775,
"learning_rate": 4.496588775118232e-05,
"loss": 0.2101,
"num_input_tokens_seen": 9134080,
"step": 222
},
{
"epoch": 4.055045871559633,
"grad_norm": 0.27288541197776794,
"learning_rate": 4.4922038117826334e-05,
"loss": 0.1444,
"num_input_tokens_seen": 9172720,
"step": 223
},
{
"epoch": 4.073394495412844,
"grad_norm": 0.5168021321296692,
"learning_rate": 4.48780199123712e-05,
"loss": 0.2342,
"num_input_tokens_seen": 9213664,
"step": 224
},
{
"epoch": 4.091743119266055,
"grad_norm": 0.3986498713493347,
"learning_rate": 4.4833833507280884e-05,
"loss": 0.1676,
"num_input_tokens_seen": 9261768,
"step": 225
},
{
"epoch": 4.110091743119266,
"grad_norm": 0.4472793936729431,
"learning_rate": 4.478947927644258e-05,
"loss": 0.295,
"num_input_tokens_seen": 9300928,
"step": 226
},
{
"epoch": 4.128440366972477,
"grad_norm": 0.39240705966949463,
"learning_rate": 4.474495759516358e-05,
"loss": 0.17,
"num_input_tokens_seen": 9329472,
"step": 227
},
{
"epoch": 4.146788990825688,
"grad_norm": 0.354526549577713,
"learning_rate": 4.4700268840168045e-05,
"loss": 0.1759,
"num_input_tokens_seen": 9365640,
"step": 228
},
{
"epoch": 4.165137614678899,
"grad_norm": 0.3216766119003296,
"learning_rate": 4.4655413389593856e-05,
"loss": 0.1878,
"num_input_tokens_seen": 9410552,
"step": 229
},
{
"epoch": 4.18348623853211,
"grad_norm": 0.30976617336273193,
"learning_rate": 4.4610391622989396e-05,
"loss": 0.1637,
"num_input_tokens_seen": 9452416,
"step": 230
},
{
"epoch": 4.201834862385321,
"grad_norm": 0.385437935590744,
"learning_rate": 4.456520392131035e-05,
"loss": 0.2748,
"num_input_tokens_seen": 9503528,
"step": 231
},
{
"epoch": 4.220183486238533,
"grad_norm": 0.37948480248451233,
"learning_rate": 4.4519850666916484e-05,
"loss": 0.2635,
"num_input_tokens_seen": 9541592,
"step": 232
},
{
"epoch": 4.238532110091743,
"grad_norm": 0.36141568422317505,
"learning_rate": 4.447433224356839e-05,
"loss": 0.2027,
"num_input_tokens_seen": 9586064,
"step": 233
},
{
"epoch": 4.256880733944954,
"grad_norm": 0.4549350440502167,
"learning_rate": 4.442864903642428e-05,
"loss": 0.2107,
"num_input_tokens_seen": 9641688,
"step": 234
},
{
"epoch": 4.275229357798165,
"grad_norm": 0.3979765474796295,
"learning_rate": 4.438280143203665e-05,
"loss": 0.2879,
"num_input_tokens_seen": 9686240,
"step": 235
},
{
"epoch": 4.293577981651376,
"grad_norm": 0.35011065006256104,
"learning_rate": 4.43367898183491e-05,
"loss": 0.2594,
"num_input_tokens_seen": 9735632,
"step": 236
},
{
"epoch": 4.3119266055045875,
"grad_norm": 0.3999224007129669,
"learning_rate": 4.4290614584693004e-05,
"loss": 0.1907,
"num_input_tokens_seen": 9766536,
"step": 237
},
{
"epoch": 4.330275229357798,
"grad_norm": 0.39629611372947693,
"learning_rate": 4.4244276121784195e-05,
"loss": 0.1805,
"num_input_tokens_seen": 9796400,
"step": 238
},
{
"epoch": 4.348623853211009,
"grad_norm": 0.36784592270851135,
"learning_rate": 4.4197774821719714e-05,
"loss": 0.1824,
"num_input_tokens_seen": 9831992,
"step": 239
},
{
"epoch": 4.36697247706422,
"grad_norm": 0.3408430516719818,
"learning_rate": 4.415111107797445e-05,
"loss": 0.1721,
"num_input_tokens_seen": 9875640,
"step": 240
},
{
"epoch": 4.385321100917431,
"grad_norm": 0.3232553005218506,
"learning_rate": 4.410428528539783e-05,
"loss": 0.275,
"num_input_tokens_seen": 9916816,
"step": 241
},
{
"epoch": 4.4036697247706424,
"grad_norm": 0.38150206208229065,
"learning_rate": 4.405729784021046e-05,
"loss": 0.1963,
"num_input_tokens_seen": 9962928,
"step": 242
},
{
"epoch": 4.422018348623853,
"grad_norm": 0.4176963269710541,
"learning_rate": 4.401014914000078e-05,
"loss": 0.1626,
"num_input_tokens_seen": 9997224,
"step": 243
},
{
"epoch": 4.440366972477064,
"grad_norm": 0.38855600357055664,
"learning_rate": 4.396283958372173e-05,
"loss": 0.1733,
"num_input_tokens_seen": 10036248,
"step": 244
},
{
"epoch": 4.458715596330276,
"grad_norm": 0.3860638737678528,
"learning_rate": 4.391536957168733e-05,
"loss": 0.1936,
"num_input_tokens_seen": 10070312,
"step": 245
},
{
"epoch": 4.477064220183486,
"grad_norm": 0.31510865688323975,
"learning_rate": 4.386773950556931e-05,
"loss": 0.1847,
"num_input_tokens_seen": 10114568,
"step": 246
},
{
"epoch": 4.495412844036697,
"grad_norm": 0.3280925154685974,
"learning_rate": 4.381994978839371e-05,
"loss": 0.1981,
"num_input_tokens_seen": 10150280,
"step": 247
},
{
"epoch": 4.513761467889909,
"grad_norm": 0.33136090636253357,
"learning_rate": 4.377200082453749e-05,
"loss": 0.1681,
"num_input_tokens_seen": 10194000,
"step": 248
},
{
"epoch": 4.532110091743119,
"grad_norm": 0.43955501914024353,
"learning_rate": 4.372389301972506e-05,
"loss": 0.2111,
"num_input_tokens_seen": 10232264,
"step": 249
},
{
"epoch": 4.5504587155963305,
"grad_norm": 0.28938886523246765,
"learning_rate": 4.36756267810249e-05,
"loss": 0.2307,
"num_input_tokens_seen": 10271880,
"step": 250
},
{
"epoch": 4.568807339449541,
"grad_norm": 0.37232765555381775,
"learning_rate": 4.36272025168461e-05,
"loss": 0.1609,
"num_input_tokens_seen": 10317720,
"step": 251
},
{
"epoch": 4.587155963302752,
"grad_norm": 0.6248548030853271,
"learning_rate": 4.357862063693486e-05,
"loss": 0.2456,
"num_input_tokens_seen": 10362168,
"step": 252
},
{
"epoch": 4.605504587155964,
"grad_norm": 0.32828131318092346,
"learning_rate": 4.3529881552371096e-05,
"loss": 0.3159,
"num_input_tokens_seen": 10414312,
"step": 253
},
{
"epoch": 4.623853211009174,
"grad_norm": 0.4158332049846649,
"learning_rate": 4.34809856755649e-05,
"loss": 0.2194,
"num_input_tokens_seen": 10451800,
"step": 254
},
{
"epoch": 4.6422018348623855,
"grad_norm": 0.3648194670677185,
"learning_rate": 4.34319334202531e-05,
"loss": 0.1846,
"num_input_tokens_seen": 10496224,
"step": 255
},
{
"epoch": 4.660550458715596,
"grad_norm": 0.36835575103759766,
"learning_rate": 4.3382725201495723e-05,
"loss": 0.1906,
"num_input_tokens_seen": 10536392,
"step": 256
},
{
"epoch": 4.678899082568807,
"grad_norm": 0.3501613140106201,
"learning_rate": 4.333336143567247e-05,
"loss": 0.1793,
"num_input_tokens_seen": 10577640,
"step": 257
},
{
"epoch": 4.697247706422019,
"grad_norm": 0.3431616425514221,
"learning_rate": 4.3283842540479264e-05,
"loss": 0.1576,
"num_input_tokens_seen": 10613376,
"step": 258
},
{
"epoch": 4.715596330275229,
"grad_norm": 0.3290237784385681,
"learning_rate": 4.3234168934924636e-05,
"loss": 0.1447,
"num_input_tokens_seen": 10647232,
"step": 259
},
{
"epoch": 4.73394495412844,
"grad_norm": 0.40264153480529785,
"learning_rate": 4.318434103932622e-05,
"loss": 0.1488,
"num_input_tokens_seen": 10696024,
"step": 260
},
{
"epoch": 4.752293577981652,
"grad_norm": 0.3453703820705414,
"learning_rate": 4.313435927530719e-05,
"loss": 0.1597,
"num_input_tokens_seen": 10730408,
"step": 261
},
{
"epoch": 4.770642201834862,
"grad_norm": 0.3993653655052185,
"learning_rate": 4.30842240657927e-05,
"loss": 0.2266,
"num_input_tokens_seen": 10764776,
"step": 262
},
{
"epoch": 4.7889908256880735,
"grad_norm": 0.4080798923969269,
"learning_rate": 4.303393583500628e-05,
"loss": 0.1562,
"num_input_tokens_seen": 10792272,
"step": 263
},
{
"epoch": 4.807339449541285,
"grad_norm": 0.3028413951396942,
"learning_rate": 4.2983495008466276e-05,
"loss": 0.1504,
"num_input_tokens_seen": 10825240,
"step": 264
},
{
"epoch": 4.825688073394495,
"grad_norm": 0.3980019688606262,
"learning_rate": 4.293290201298223e-05,
"loss": 0.1648,
"num_input_tokens_seen": 10883592,
"step": 265
},
{
"epoch": 4.844036697247707,
"grad_norm": 0.42035797238349915,
"learning_rate": 4.288215727665129e-05,
"loss": 0.1652,
"num_input_tokens_seen": 10922640,
"step": 266
},
{
"epoch": 4.862385321100917,
"grad_norm": 0.37766003608703613,
"learning_rate": 4.2831261228854544e-05,
"loss": 0.1817,
"num_input_tokens_seen": 10967288,
"step": 267
},
{
"epoch": 4.8807339449541285,
"grad_norm": 0.3495088815689087,
"learning_rate": 4.278021430025343e-05,
"loss": 0.2066,
"num_input_tokens_seen": 11011152,
"step": 268
},
{
"epoch": 4.89908256880734,
"grad_norm": 0.34071093797683716,
"learning_rate": 4.272901692278609e-05,
"loss": 0.1522,
"num_input_tokens_seen": 11055608,
"step": 269
},
{
"epoch": 4.91743119266055,
"grad_norm": 0.33437398076057434,
"learning_rate": 4.267766952966369e-05,
"loss": 0.2201,
"num_input_tokens_seen": 11101992,
"step": 270
},
{
"epoch": 4.935779816513762,
"grad_norm": 0.3340584337711334,
"learning_rate": 4.262617255536676e-05,
"loss": 0.2777,
"num_input_tokens_seen": 11141408,
"step": 271
},
{
"epoch": 4.954128440366972,
"grad_norm": 0.348679780960083,
"learning_rate": 4.257452643564155e-05,
"loss": 0.1857,
"num_input_tokens_seen": 11185344,
"step": 272
},
{
"epoch": 4.972477064220183,
"grad_norm": 0.3691309094429016,
"learning_rate": 4.2522731607496275e-05,
"loss": 0.1653,
"num_input_tokens_seen": 11216160,
"step": 273
},
{
"epoch": 4.990825688073395,
"grad_norm": 0.4692390561103821,
"learning_rate": 4.24707885091975e-05,
"loss": 0.1761,
"num_input_tokens_seen": 11276200,
"step": 274
},
{
"epoch": 5.0,
"grad_norm": 0.4963766932487488,
"learning_rate": 4.241869758026638e-05,
"loss": 0.1582,
"num_input_tokens_seen": 11295440,
"step": 275
},
{
"epoch": 5.018348623853211,
"grad_norm": 0.37284544110298157,
"learning_rate": 4.2366459261474933e-05,
"loss": 0.1538,
"num_input_tokens_seen": 11336400,
"step": 276
},
{
"epoch": 5.036697247706422,
"grad_norm": 0.2949577867984772,
"learning_rate": 4.231407399484236e-05,
"loss": 0.1319,
"num_input_tokens_seen": 11374056,
"step": 277
},
{
"epoch": 5.055045871559633,
"grad_norm": 0.32850027084350586,
"learning_rate": 4.226154222363124e-05,
"loss": 0.174,
"num_input_tokens_seen": 11414776,
"step": 278
},
{
"epoch": 5.073394495412844,
"grad_norm": 0.3812579810619354,
"learning_rate": 4.220886439234385e-05,
"loss": 0.1831,
"num_input_tokens_seen": 11465456,
"step": 279
},
{
"epoch": 5.091743119266055,
"grad_norm": 0.3396337032318115,
"learning_rate": 4.215604094671835e-05,
"loss": 0.1515,
"num_input_tokens_seen": 11500184,
"step": 280
},
{
"epoch": 5.110091743119266,
"grad_norm": 0.35079169273376465,
"learning_rate": 4.2103072333725e-05,
"loss": 0.1295,
"num_input_tokens_seen": 11537112,
"step": 281
},
{
"epoch": 5.128440366972477,
"grad_norm": 0.3811327815055847,
"learning_rate": 4.2049959001562464e-05,
"loss": 0.1339,
"num_input_tokens_seen": 11579440,
"step": 282
},
{
"epoch": 5.146788990825688,
"grad_norm": 0.3935602009296417,
"learning_rate": 4.199670139965393e-05,
"loss": 0.1909,
"num_input_tokens_seen": 11643272,
"step": 283
},
{
"epoch": 5.165137614678899,
"grad_norm": 0.3952607810497284,
"learning_rate": 4.194329997864331e-05,
"loss": 0.2334,
"num_input_tokens_seen": 11677528,
"step": 284
},
{
"epoch": 5.18348623853211,
"grad_norm": 0.3687472939491272,
"learning_rate": 4.188975519039151e-05,
"loss": 0.1406,
"num_input_tokens_seen": 11727944,
"step": 285
},
{
"epoch": 5.201834862385321,
"grad_norm": 0.3518407344818115,
"learning_rate": 4.183606748797251e-05,
"loss": 0.138,
"num_input_tokens_seen": 11779568,
"step": 286
},
{
"epoch": 5.220183486238533,
"grad_norm": 0.4072832763195038,
"learning_rate": 4.1782237325669595e-05,
"loss": 0.159,
"num_input_tokens_seen": 11824600,
"step": 287
},
{
"epoch": 5.238532110091743,
"grad_norm": 0.386280357837677,
"learning_rate": 4.172826515897146e-05,
"loss": 0.2517,
"num_input_tokens_seen": 11873736,
"step": 288
},
{
"epoch": 5.256880733944954,
"grad_norm": 0.3576860725879669,
"learning_rate": 4.167415144456841e-05,
"loss": 0.1349,
"num_input_tokens_seen": 11909608,
"step": 289
},
{
"epoch": 5.275229357798165,
"grad_norm": 0.3952435851097107,
"learning_rate": 4.1619896640348445e-05,
"loss": 0.1348,
"num_input_tokens_seen": 11945440,
"step": 290
},
{
"epoch": 5.293577981651376,
"grad_norm": 0.3565181493759155,
"learning_rate": 4.1565501205393445e-05,
"loss": 0.1331,
"num_input_tokens_seen": 11985568,
"step": 291
},
{
"epoch": 5.3119266055045875,
"grad_norm": 0.40558820962905884,
"learning_rate": 4.1510965599975196e-05,
"loss": 0.2337,
"num_input_tokens_seen": 12034320,
"step": 292
},
{
"epoch": 5.330275229357798,
"grad_norm": 0.36426106095314026,
"learning_rate": 4.1456290285551596e-05,
"loss": 0.1299,
"num_input_tokens_seen": 12070184,
"step": 293
},
{
"epoch": 5.348623853211009,
"grad_norm": 0.33881404995918274,
"learning_rate": 4.140147572476268e-05,
"loss": 0.1239,
"num_input_tokens_seen": 12111512,
"step": 294
},
{
"epoch": 5.36697247706422,
"grad_norm": 0.38019630312919617,
"learning_rate": 4.1346522381426744e-05,
"loss": 0.133,
"num_input_tokens_seen": 12156792,
"step": 295
},
{
"epoch": 5.385321100917431,
"grad_norm": 0.38179296255111694,
"learning_rate": 4.129143072053638e-05,
"loss": 0.1301,
"num_input_tokens_seen": 12185168,
"step": 296
},
{
"epoch": 5.4036697247706424,
"grad_norm": 0.35054150223731995,
"learning_rate": 4.123620120825459e-05,
"loss": 0.1298,
"num_input_tokens_seen": 12222256,
"step": 297
},
{
"epoch": 5.422018348623853,
"grad_norm": 0.3463946580886841,
"learning_rate": 4.118083431191081e-05,
"loss": 0.2088,
"num_input_tokens_seen": 12257536,
"step": 298
},
{
"epoch": 5.440366972477064,
"grad_norm": 0.42584434151649475,
"learning_rate": 4.112533049999696e-05,
"loss": 0.1062,
"num_input_tokens_seen": 12290576,
"step": 299
},
{
"epoch": 5.458715596330276,
"grad_norm": 0.4341515600681305,
"learning_rate": 4.1069690242163484e-05,
"loss": 0.1989,
"num_input_tokens_seen": 12323416,
"step": 300
},
{
"epoch": 5.477064220183486,
"grad_norm": 0.4299938380718231,
"learning_rate": 4.101391400921538e-05,
"loss": 0.1243,
"num_input_tokens_seen": 12370264,
"step": 301
},
{
"epoch": 5.495412844036697,
"grad_norm": 0.4070415198802948,
"learning_rate": 4.095800227310821e-05,
"loss": 0.2281,
"num_input_tokens_seen": 12410568,
"step": 302
},
{
"epoch": 5.513761467889909,
"grad_norm": 0.4446506202220917,
"learning_rate": 4.09019555069441e-05,
"loss": 0.1462,
"num_input_tokens_seen": 12442880,
"step": 303
},
{
"epoch": 5.532110091743119,
"grad_norm": 0.36110538244247437,
"learning_rate": 4.0845774184967754e-05,
"loss": 0.1497,
"num_input_tokens_seen": 12487016,
"step": 304
},
{
"epoch": 5.5504587155963305,
"grad_norm": 0.42756903171539307,
"learning_rate": 4.078945878256244e-05,
"loss": 0.2082,
"num_input_tokens_seen": 12525072,
"step": 305
},
{
"epoch": 5.568807339449541,
"grad_norm": 0.360649049282074,
"learning_rate": 4.073300977624594e-05,
"loss": 0.1214,
"num_input_tokens_seen": 12555792,
"step": 306
},
{
"epoch": 5.587155963302752,
"grad_norm": 0.3768391013145447,
"learning_rate": 4.067642764366654e-05,
"loss": 0.1278,
"num_input_tokens_seen": 12601616,
"step": 307
},
{
"epoch": 5.605504587155964,
"grad_norm": 0.3882285952568054,
"learning_rate": 4.0619712863599e-05,
"loss": 0.1485,
"num_input_tokens_seen": 12634360,
"step": 308
},
{
"epoch": 5.623853211009174,
"grad_norm": 0.38816162943840027,
"learning_rate": 4.0562865915940496e-05,
"loss": 0.1221,
"num_input_tokens_seen": 12674808,
"step": 309
},
{
"epoch": 5.6422018348623855,
"grad_norm": 0.41803887486457825,
"learning_rate": 4.05058872817065e-05,
"loss": 0.1388,
"num_input_tokens_seen": 12710864,
"step": 310
},
{
"epoch": 5.660550458715596,
"grad_norm": 0.3802741467952728,
"learning_rate": 4.044877744302683e-05,
"loss": 0.1349,
"num_input_tokens_seen": 12750920,
"step": 311
},
{
"epoch": 5.678899082568807,
"grad_norm": 0.48202404379844666,
"learning_rate": 4.039153688314145e-05,
"loss": 0.1555,
"num_input_tokens_seen": 12789488,
"step": 312
},
{
"epoch": 5.697247706422019,
"grad_norm": 0.3168826103210449,
"learning_rate": 4.0334166086396484e-05,
"loss": 0.1063,
"num_input_tokens_seen": 12831408,
"step": 313
},
{
"epoch": 5.715596330275229,
"grad_norm": 0.43414828181266785,
"learning_rate": 4.0276665538239996e-05,
"loss": 0.127,
"num_input_tokens_seen": 12872584,
"step": 314
},
{
"epoch": 5.73394495412844,
"grad_norm": 0.47761547565460205,
"learning_rate": 4.021903572521802e-05,
"loss": 0.1428,
"num_input_tokens_seen": 12910528,
"step": 315
},
{
"epoch": 5.752293577981652,
"grad_norm": 0.3542017936706543,
"learning_rate": 4.0161277134970345e-05,
"loss": 0.1279,
"num_input_tokens_seen": 12942800,
"step": 316
},
{
"epoch": 5.770642201834862,
"grad_norm": 0.31866851449012756,
"learning_rate": 4.010339025622641e-05,
"loss": 0.1459,
"num_input_tokens_seen": 12989136,
"step": 317
},
{
"epoch": 5.7889908256880735,
"grad_norm": 0.44918256998062134,
"learning_rate": 4.0045375578801214e-05,
"loss": 0.1429,
"num_input_tokens_seen": 13035472,
"step": 318
},
{
"epoch": 5.807339449541285,
"grad_norm": 0.32426726818084717,
"learning_rate": 3.99872335935911e-05,
"loss": 0.1257,
"num_input_tokens_seen": 13074952,
"step": 319
},
{
"epoch": 5.825688073394495,
"grad_norm": 0.6903991103172302,
"learning_rate": 3.9928964792569655e-05,
"loss": 0.1807,
"num_input_tokens_seen": 13124624,
"step": 320
},
{
"epoch": 5.844036697247707,
"grad_norm": 0.3665274679660797,
"learning_rate": 3.9870569668783536e-05,
"loss": 0.1853,
"num_input_tokens_seen": 13171464,
"step": 321
},
{
"epoch": 5.862385321100917,
"grad_norm": 0.41457998752593994,
"learning_rate": 3.981204871634827e-05,
"loss": 0.214,
"num_input_tokens_seen": 13225240,
"step": 322
},
{
"epoch": 5.8807339449541285,
"grad_norm": 0.4047159254550934,
"learning_rate": 3.9753402430444116e-05,
"loss": 0.1907,
"num_input_tokens_seen": 13275848,
"step": 323
},
{
"epoch": 5.89908256880734,
"grad_norm": 0.4578211307525635,
"learning_rate": 3.969463130731183e-05,
"loss": 0.27,
"num_input_tokens_seen": 13311096,
"step": 324
}
],
"logging_steps": 1.0,
"max_steps": 1080,
"num_input_tokens_seen": 13311096,
"num_train_epochs": 20,
"save_steps": 54,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.1229463256131174e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}