{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.9357798165137616,
  "eval_steps": 500,
  "global_step": 216,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01834862385321101,
      "grad_norm": 0.04378490149974823,
      "learning_rate": 4.999989423013716e-05,
      "loss": 0.6713,
      "num_input_tokens_seen": 44136,
      "step": 1
    },
    {
      "epoch": 0.03669724770642202,
      "grad_norm": 0.040646992623806,
      "learning_rate": 4.999957692144361e-05,
      "loss": 0.533,
      "num_input_tokens_seen": 83096,
      "step": 2
    },
    {
      "epoch": 0.05504587155963303,
      "grad_norm": 0.04658753052353859,
      "learning_rate": 4.999904807660428e-05,
      "loss": 0.6048,
      "num_input_tokens_seen": 122112,
      "step": 3
    },
    {
      "epoch": 0.07339449541284404,
      "grad_norm": 0.04322144016623497,
      "learning_rate": 4.999830770009406e-05,
      "loss": 0.4948,
      "num_input_tokens_seen": 163064,
      "step": 4
    },
    {
      "epoch": 0.09174311926605505,
      "grad_norm": 0.06536195427179337,
      "learning_rate": 4.999735579817769e-05,
      "loss": 0.6607,
      "num_input_tokens_seen": 203808,
      "step": 5
    },
    {
      "epoch": 0.11009174311926606,
      "grad_norm": 0.059904925525188446,
      "learning_rate": 4.9996192378909786e-05,
      "loss": 0.5802,
      "num_input_tokens_seen": 241824,
      "step": 6
    },
    {
      "epoch": 0.12844036697247707,
      "grad_norm": 0.19818365573883057,
      "learning_rate": 4.999481745213471e-05,
      "loss": 0.5148,
      "num_input_tokens_seen": 287608,
      "step": 7
    },
    {
      "epoch": 0.14678899082568808,
      "grad_norm": 0.05985472351312637,
      "learning_rate": 4.9993231029486544e-05,
      "loss": 0.5714,
      "num_input_tokens_seen": 325320,
      "step": 8
    },
    {
      "epoch": 0.1651376146788991,
      "grad_norm": 0.061375778168439865,
      "learning_rate": 4.999143312438893e-05,
      "loss": 0.6812,
      "num_input_tokens_seen": 369848,
      "step": 9
    },
    {
      "epoch": 0.1834862385321101,
      "grad_norm": 0.06196414306759834,
      "learning_rate": 4.998942375205502e-05,
      "loss": 0.5358,
      "num_input_tokens_seen": 415104,
      "step": 10
    },
    {
      "epoch": 0.2018348623853211,
      "grad_norm": 0.07861393690109253,
      "learning_rate": 4.9987202929487275e-05,
      "loss": 0.6527,
      "num_input_tokens_seen": 467224,
      "step": 11
    },
    {
      "epoch": 0.22018348623853212,
      "grad_norm": 0.05596446990966797,
      "learning_rate": 4.99847706754774e-05,
      "loss": 0.5354,
      "num_input_tokens_seen": 502824,
      "step": 12
    },
    {
      "epoch": 0.23853211009174313,
      "grad_norm": 0.05289844051003456,
      "learning_rate": 4.998212701060612e-05,
      "loss": 0.5263,
      "num_input_tokens_seen": 544744,
      "step": 13
    },
    {
      "epoch": 0.25688073394495414,
      "grad_norm": 0.04996591433882713,
      "learning_rate": 4.997927195724303e-05,
      "loss": 0.5536,
      "num_input_tokens_seen": 591136,
      "step": 14
    },
    {
      "epoch": 0.27522935779816515,
      "grad_norm": 0.05822828412055969,
      "learning_rate": 4.997620553954645e-05,
      "loss": 0.6106,
      "num_input_tokens_seen": 629664,
      "step": 15
    },
    {
      "epoch": 0.29357798165137616,
      "grad_norm": 0.06353770196437836,
      "learning_rate": 4.997292778346312e-05,
      "loss": 0.5129,
      "num_input_tokens_seen": 663392,
      "step": 16
    },
    {
      "epoch": 0.3119266055045872,
      "grad_norm": 0.07256966829299927,
      "learning_rate": 4.996943871672807e-05,
      "loss": 0.6377,
      "num_input_tokens_seen": 698360,
      "step": 17
    },
    {
      "epoch": 0.3302752293577982,
      "grad_norm": 0.055458713322877884,
      "learning_rate": 4.996573836886435e-05,
      "loss": 0.4083,
      "num_input_tokens_seen": 737520,
      "step": 18
    },
    {
      "epoch": 0.3486238532110092,
      "grad_norm": 0.07792335003614426,
      "learning_rate": 4.9961826771182784e-05,
      "loss": 0.6086,
      "num_input_tokens_seen": 768056,
      "step": 19
    },
    {
      "epoch": 0.3669724770642202,
      "grad_norm": 0.06627275049686432,
      "learning_rate": 4.995770395678171e-05,
      "loss": 0.4591,
      "num_input_tokens_seen": 806256,
      "step": 20
    },
    {
      "epoch": 0.3853211009174312,
      "grad_norm": 0.05830290913581848,
      "learning_rate": 4.9953369960546676e-05,
      "loss": 0.3731,
      "num_input_tokens_seen": 842336,
      "step": 21
    },
    {
      "epoch": 0.4036697247706422,
      "grad_norm": 0.07277437299489975,
      "learning_rate": 4.9948824819150185e-05,
      "loss": 0.6243,
      "num_input_tokens_seen": 876672,
      "step": 22
    },
    {
      "epoch": 0.42201834862385323,
      "grad_norm": 0.07477546483278275,
      "learning_rate": 4.994406857105136e-05,
      "loss": 0.5788,
      "num_input_tokens_seen": 915192,
      "step": 23
    },
    {
      "epoch": 0.44036697247706424,
      "grad_norm": 0.06912907212972641,
      "learning_rate": 4.993910125649561e-05,
      "loss": 0.4753,
      "num_input_tokens_seen": 951904,
      "step": 24
    },
    {
      "epoch": 0.45871559633027525,
      "grad_norm": 0.0655476376414299,
      "learning_rate": 4.993392291751431e-05,
      "loss": 0.4518,
      "num_input_tokens_seen": 1001816,
      "step": 25
    },
    {
      "epoch": 0.47706422018348627,
      "grad_norm": 0.06466512382030487,
      "learning_rate": 4.992853359792444e-05,
      "loss": 0.5638,
      "num_input_tokens_seen": 1053064,
      "step": 26
    },
    {
      "epoch": 0.4954128440366973,
      "grad_norm": 0.0645688995718956,
      "learning_rate": 4.99229333433282e-05,
      "loss": 0.4644,
      "num_input_tokens_seen": 1086688,
      "step": 27
    },
    {
      "epoch": 0.5137614678899083,
      "grad_norm": 0.07181251049041748,
      "learning_rate": 4.9917122201112656e-05,
      "loss": 0.6191,
      "num_input_tokens_seen": 1134824,
      "step": 28
    },
    {
      "epoch": 0.5321100917431193,
      "grad_norm": 0.07322589308023453,
      "learning_rate": 4.9911100220449293e-05,
      "loss": 0.6752,
      "num_input_tokens_seen": 1172072,
      "step": 29
    },
    {
      "epoch": 0.5504587155963303,
      "grad_norm": 0.06396070122718811,
      "learning_rate": 4.990486745229364e-05,
      "loss": 0.3587,
      "num_input_tokens_seen": 1211096,
      "step": 30
    },
    {
      "epoch": 0.5688073394495413,
      "grad_norm": 0.07803395390510559,
      "learning_rate": 4.989842394938482e-05,
      "loss": 0.459,
      "num_input_tokens_seen": 1259456,
      "step": 31
    },
    {
      "epoch": 0.5871559633027523,
      "grad_norm": 0.05974648892879486,
      "learning_rate": 4.989176976624511e-05,
      "loss": 0.4148,
      "num_input_tokens_seen": 1306944,
      "step": 32
    },
    {
      "epoch": 0.6055045871559633,
      "grad_norm": 0.09784268587827682,
      "learning_rate": 4.988490495917947e-05,
      "loss": 0.539,
      "num_input_tokens_seen": 1353744,
      "step": 33
    },
    {
      "epoch": 0.6238532110091743,
      "grad_norm": 0.09906516224145889,
      "learning_rate": 4.987782958627508e-05,
      "loss": 0.5453,
      "num_input_tokens_seen": 1394736,
      "step": 34
    },
    {
      "epoch": 0.6422018348623854,
      "grad_norm": 0.08984062820672989,
      "learning_rate": 4.987054370740083e-05,
      "loss": 0.468,
      "num_input_tokens_seen": 1442048,
      "step": 35
    },
    {
      "epoch": 0.6605504587155964,
      "grad_norm": 0.08672655373811722,
      "learning_rate": 4.9863047384206835e-05,
      "loss": 0.4078,
      "num_input_tokens_seen": 1478440,
      "step": 36
    },
    {
      "epoch": 0.6788990825688074,
      "grad_norm": 0.1327345073223114,
      "learning_rate": 4.9855340680123905e-05,
      "loss": 0.5299,
      "num_input_tokens_seen": 1525992,
      "step": 37
    },
    {
      "epoch": 0.6972477064220184,
      "grad_norm": 0.09178602695465088,
      "learning_rate": 4.9847423660363e-05,
      "loss": 0.439,
      "num_input_tokens_seen": 1555608,
      "step": 38
    },
    {
      "epoch": 0.7155963302752294,
      "grad_norm": 0.09418320655822754,
      "learning_rate": 4.983929639191469e-05,
      "loss": 0.5337,
      "num_input_tokens_seen": 1597392,
      "step": 39
    },
    {
      "epoch": 0.7339449541284404,
      "grad_norm": 0.08294719457626343,
      "learning_rate": 4.983095894354858e-05,
      "loss": 0.4536,
      "num_input_tokens_seen": 1649656,
      "step": 40
    },
    {
      "epoch": 0.7522935779816514,
      "grad_norm": 0.09774205833673477,
      "learning_rate": 4.982241138581273e-05,
      "loss": 0.5221,
      "num_input_tokens_seen": 1695952,
      "step": 41
    },
    {
      "epoch": 0.7706422018348624,
      "grad_norm": 0.09319107979536057,
      "learning_rate": 4.9813653791033057e-05,
      "loss": 0.4279,
      "num_input_tokens_seen": 1737224,
      "step": 42
    },
    {
      "epoch": 0.7889908256880734,
      "grad_norm": 0.09561405330896378,
      "learning_rate": 4.980468623331273e-05,
      "loss": 0.5121,
      "num_input_tokens_seen": 1772320,
      "step": 43
    },
    {
      "epoch": 0.8073394495412844,
      "grad_norm": 0.08274025470018387,
      "learning_rate": 4.979550878853154e-05,
      "loss": 0.54,
      "num_input_tokens_seen": 1823888,
      "step": 44
    },
    {
      "epoch": 0.8256880733944955,
      "grad_norm": 0.08728913217782974,
      "learning_rate": 4.9786121534345265e-05,
      "loss": 0.4488,
      "num_input_tokens_seen": 1872488,
      "step": 45
    },
    {
      "epoch": 0.8440366972477065,
      "grad_norm": 0.0787016749382019,
      "learning_rate": 4.9776524550184965e-05,
      "loss": 0.4353,
      "num_input_tokens_seen": 1924744,
      "step": 46
    },
    {
      "epoch": 0.8623853211009175,
      "grad_norm": 0.10952188074588776,
      "learning_rate": 4.97667179172564e-05,
      "loss": 0.4784,
      "num_input_tokens_seen": 1959936,
      "step": 47
    },
    {
      "epoch": 0.8807339449541285,
      "grad_norm": 0.08525826781988144,
      "learning_rate": 4.975670171853926e-05,
      "loss": 0.3586,
      "num_input_tokens_seen": 2003896,
      "step": 48
    },
    {
      "epoch": 0.8990825688073395,
      "grad_norm": 0.10409987717866898,
      "learning_rate": 4.9746476038786496e-05,
      "loss": 0.4451,
      "num_input_tokens_seen": 2047632,
      "step": 49
    },
    {
      "epoch": 0.9174311926605505,
      "grad_norm": 0.0782993957400322,
      "learning_rate": 4.973604096452361e-05,
      "loss": 0.3591,
      "num_input_tokens_seen": 2096928,
      "step": 50
    },
    {
      "epoch": 0.9357798165137615,
      "grad_norm": 0.09829951077699661,
      "learning_rate": 4.9725396584047925e-05,
      "loss": 0.3415,
      "num_input_tokens_seen": 2129536,
      "step": 51
    },
    {
      "epoch": 0.9541284403669725,
      "grad_norm": 0.10606162995100021,
      "learning_rate": 4.971454298742779e-05,
      "loss": 0.3758,
      "num_input_tokens_seen": 2169144,
      "step": 52
    },
    {
      "epoch": 0.9724770642201835,
      "grad_norm": 0.09280356764793396,
      "learning_rate": 4.97034802665019e-05,
      "loss": 0.485,
      "num_input_tokens_seen": 2207720,
      "step": 53
    },
    {
      "epoch": 0.9908256880733946,
      "grad_norm": 0.11888203024864197,
      "learning_rate": 4.9692208514878444e-05,
      "loss": 0.3469,
      "num_input_tokens_seen": 2236392,
      "step": 54
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.13222463428974152,
      "learning_rate": 4.9680727827934354e-05,
      "loss": 0.4284,
      "num_input_tokens_seen": 2259088,
      "step": 55
    },
    {
      "epoch": 1.018348623853211,
      "grad_norm": 0.10572745651006699,
      "learning_rate": 4.966903830281449e-05,
      "loss": 0.4186,
      "num_input_tokens_seen": 2298496,
      "step": 56
    },
    {
      "epoch": 1.036697247706422,
      "grad_norm": 0.11462350189685822,
      "learning_rate": 4.965714003843079e-05,
      "loss": 0.4696,
      "num_input_tokens_seen": 2333016,
      "step": 57
    },
    {
      "epoch": 1.0550458715596331,
      "grad_norm": 0.11215240508317947,
      "learning_rate": 4.9645033135461494e-05,
      "loss": 0.3905,
      "num_input_tokens_seen": 2367992,
      "step": 58
    },
    {
      "epoch": 1.073394495412844,
      "grad_norm": 0.0973561555147171,
      "learning_rate": 4.963271769635024e-05,
      "loss": 0.3588,
      "num_input_tokens_seen": 2415328,
      "step": 59
    },
    {
      "epoch": 1.091743119266055,
      "grad_norm": 0.10240709036588669,
      "learning_rate": 4.962019382530521e-05,
      "loss": 0.5532,
      "num_input_tokens_seen": 2454792,
      "step": 60
    },
    {
      "epoch": 1.110091743119266,
      "grad_norm": 0.0959337130188942,
      "learning_rate": 4.9607461628298244e-05,
      "loss": 0.331,
      "num_input_tokens_seen": 2503072,
      "step": 61
    },
    {
      "epoch": 1.1284403669724772,
      "grad_norm": 0.10228750854730606,
      "learning_rate": 4.9594521213063974e-05,
      "loss": 0.3728,
      "num_input_tokens_seen": 2546960,
      "step": 62
    },
    {
      "epoch": 1.146788990825688,
      "grad_norm": 0.09403488785028458,
      "learning_rate": 4.958137268909887e-05,
      "loss": 0.4695,
      "num_input_tokens_seen": 2595432,
      "step": 63
    },
    {
      "epoch": 1.165137614678899,
      "grad_norm": 0.11396344751119614,
      "learning_rate": 4.9568016167660334e-05,
      "loss": 0.3653,
      "num_input_tokens_seen": 2633912,
      "step": 64
    },
    {
      "epoch": 1.18348623853211,
      "grad_norm": 0.09487481415271759,
      "learning_rate": 4.9554451761765766e-05,
      "loss": 0.3498,
      "num_input_tokens_seen": 2680792,
      "step": 65
    },
    {
      "epoch": 1.2018348623853212,
      "grad_norm": 0.1249895691871643,
      "learning_rate": 4.9540679586191605e-05,
      "loss": 0.4053,
      "num_input_tokens_seen": 2716584,
      "step": 66
    },
    {
      "epoch": 1.2201834862385321,
      "grad_norm": 0.12268221378326416,
      "learning_rate": 4.952669975747232e-05,
      "loss": 0.4189,
      "num_input_tokens_seen": 2757088,
      "step": 67
    },
    {
      "epoch": 1.238532110091743,
      "grad_norm": 0.12126032263040543,
      "learning_rate": 4.951251239389948e-05,
      "loss": 0.4994,
      "num_input_tokens_seen": 2795664,
      "step": 68
    },
    {
      "epoch": 1.2568807339449541,
      "grad_norm": 0.1069057360291481,
      "learning_rate": 4.949811761552074e-05,
      "loss": 0.3275,
      "num_input_tokens_seen": 2840936,
      "step": 69
    },
    {
      "epoch": 1.2752293577981653,
      "grad_norm": 0.10893313586711884,
      "learning_rate": 4.948351554413879e-05,
      "loss": 0.4366,
      "num_input_tokens_seen": 2886768,
      "step": 70
    },
    {
      "epoch": 1.2935779816513762,
      "grad_norm": 0.12898756563663483,
      "learning_rate": 4.9468706303310355e-05,
      "loss": 0.3916,
      "num_input_tokens_seen": 2919328,
      "step": 71
    },
    {
      "epoch": 1.311926605504587,
      "grad_norm": 0.12405356019735336,
      "learning_rate": 4.9453690018345144e-05,
      "loss": 0.3249,
      "num_input_tokens_seen": 2966744,
      "step": 72
    },
    {
      "epoch": 1.3302752293577982,
      "grad_norm": 0.13137595355510712,
      "learning_rate": 4.943846681630479e-05,
      "loss": 0.3956,
      "num_input_tokens_seen": 3007248,
      "step": 73
    },
    {
      "epoch": 1.3486238532110093,
      "grad_norm": 0.13920250535011292,
      "learning_rate": 4.942303682600178e-05,
      "loss": 0.3956,
      "num_input_tokens_seen": 3050960,
      "step": 74
    },
    {
      "epoch": 1.3669724770642202,
      "grad_norm": 0.1255589872598648,
      "learning_rate": 4.940740017799833e-05,
      "loss": 0.3773,
      "num_input_tokens_seen": 3088592,
      "step": 75
    },
    {
      "epoch": 1.385321100917431,
      "grad_norm": 0.10222747176885605,
      "learning_rate": 4.939155700460536e-05,
      "loss": 0.4,
      "num_input_tokens_seen": 3153520,
      "step": 76
    },
    {
      "epoch": 1.4036697247706422,
      "grad_norm": 0.13205283880233765,
      "learning_rate": 4.9375507439881266e-05,
      "loss": 0.4343,
      "num_input_tokens_seen": 3199272,
      "step": 77
    },
    {
      "epoch": 1.4220183486238533,
      "grad_norm": 0.11005694419145584,
      "learning_rate": 4.9359251619630886e-05,
      "loss": 0.3881,
      "num_input_tokens_seen": 3247128,
      "step": 78
    },
    {
      "epoch": 1.4403669724770642,
      "grad_norm": 0.14799247682094574,
      "learning_rate": 4.9342789681404275e-05,
      "loss": 0.3972,
      "num_input_tokens_seen": 3294192,
      "step": 79
    },
    {
      "epoch": 1.4587155963302751,
      "grad_norm": 0.1279418021440506,
      "learning_rate": 4.9326121764495596e-05,
      "loss": 0.3438,
      "num_input_tokens_seen": 3329736,
      "step": 80
    },
    {
      "epoch": 1.4770642201834863,
      "grad_norm": 0.11036807298660278,
      "learning_rate": 4.9309248009941914e-05,
      "loss": 0.3189,
      "num_input_tokens_seen": 3371376,
      "step": 81
    },
    {
      "epoch": 1.4954128440366974,
      "grad_norm": 0.11855707317590714,
      "learning_rate": 4.9292168560522014e-05,
      "loss": 0.401,
      "num_input_tokens_seen": 3412368,
      "step": 82
    },
    {
      "epoch": 1.5137614678899083,
      "grad_norm": 0.13195356726646423,
      "learning_rate": 4.9274883560755156e-05,
      "loss": 0.4973,
      "num_input_tokens_seen": 3455000,
      "step": 83
    },
    {
      "epoch": 1.5321100917431192,
      "grad_norm": 0.1462787538766861,
      "learning_rate": 4.925739315689991e-05,
      "loss": 0.3768,
      "num_input_tokens_seen": 3488960,
      "step": 84
    },
    {
      "epoch": 1.5504587155963303,
      "grad_norm": 0.13765974342823029,
      "learning_rate": 4.92396974969529e-05,
      "loss": 0.2999,
      "num_input_tokens_seen": 3521320,
      "step": 85
    },
    {
      "epoch": 1.5688073394495414,
      "grad_norm": 0.15276113152503967,
      "learning_rate": 4.9221796730647516e-05,
      "loss": 0.3638,
      "num_input_tokens_seen": 3559464,
      "step": 86
    },
    {
      "epoch": 1.5871559633027523,
      "grad_norm": 0.1441674381494522,
      "learning_rate": 4.92036910094527e-05,
      "loss": 0.3919,
      "num_input_tokens_seen": 3598080,
      "step": 87
    },
    {
      "epoch": 1.6055045871559632,
      "grad_norm": 0.1780252456665039,
      "learning_rate": 4.9185380486571595e-05,
      "loss": 0.3626,
      "num_input_tokens_seen": 3630064,
      "step": 88
    },
    {
      "epoch": 1.6238532110091743,
      "grad_norm": 0.16947726905345917,
      "learning_rate": 4.916686531694035e-05,
      "loss": 0.3439,
      "num_input_tokens_seen": 3661408,
      "step": 89
    },
    {
      "epoch": 1.6422018348623855,
      "grad_norm": 0.1552971601486206,
      "learning_rate": 4.914814565722671e-05,
      "loss": 0.3236,
      "num_input_tokens_seen": 3695480,
      "step": 90
    },
    {
      "epoch": 1.6605504587155964,
      "grad_norm": 0.14925938844680786,
      "learning_rate": 4.912922166582874e-05,
      "loss": 0.4255,
      "num_input_tokens_seen": 3734560,
      "step": 91
    },
    {
      "epoch": 1.6788990825688073,
      "grad_norm": 0.1332874596118927,
      "learning_rate": 4.9110093502873476e-05,
      "loss": 0.3061,
      "num_input_tokens_seen": 3773112,
      "step": 92
    },
    {
      "epoch": 1.6972477064220184,
      "grad_norm": 0.15471243858337402,
      "learning_rate": 4.909076133021557e-05,
      "loss": 0.3275,
      "num_input_tokens_seen": 3813392,
      "step": 93
    },
    {
      "epoch": 1.7155963302752295,
      "grad_norm": 0.16010524332523346,
      "learning_rate": 4.907122531143594e-05,
      "loss": 0.4179,
      "num_input_tokens_seen": 3856416,
      "step": 94
    },
    {
      "epoch": 1.7339449541284404,
      "grad_norm": 0.13423003256320953,
      "learning_rate": 4.905148561184033e-05,
      "loss": 0.3593,
      "num_input_tokens_seen": 3899472,
      "step": 95
    },
    {
      "epoch": 1.7522935779816513,
      "grad_norm": 0.14900773763656616,
      "learning_rate": 4.9031542398457974e-05,
      "loss": 0.5007,
      "num_input_tokens_seen": 3962976,
      "step": 96
    },
    {
      "epoch": 1.7706422018348624,
      "grad_norm": 0.15728624165058136,
      "learning_rate": 4.9011395840040144e-05,
      "loss": 0.3484,
      "num_input_tokens_seen": 4000696,
      "step": 97
    },
    {
      "epoch": 1.7889908256880735,
      "grad_norm": 0.11092367768287659,
      "learning_rate": 4.8991046107058735e-05,
      "loss": 0.2889,
      "num_input_tokens_seen": 4045256,
      "step": 98
    },
    {
      "epoch": 1.8073394495412844,
      "grad_norm": 0.1289113610982895,
      "learning_rate": 4.8970493371704826e-05,
      "loss": 0.2203,
      "num_input_tokens_seen": 4076800,
      "step": 99
    },
    {
      "epoch": 1.8256880733944953,
      "grad_norm": 0.18886639177799225,
      "learning_rate": 4.894973780788722e-05,
      "loss": 0.3966,
      "num_input_tokens_seen": 4119840,
      "step": 100
    },
    {
      "epoch": 1.8440366972477065,
      "grad_norm": 0.1563039869070053,
      "learning_rate": 4.892877959123097e-05,
      "loss": 0.4417,
      "num_input_tokens_seen": 4165848,
      "step": 101
    },
    {
      "epoch": 1.8623853211009176,
      "grad_norm": 0.16883380711078644,
      "learning_rate": 4.890761889907589e-05,
      "loss": 0.4258,
      "num_input_tokens_seen": 4202824,
      "step": 102
    },
    {
      "epoch": 1.8807339449541285,
      "grad_norm": 0.18241995573043823,
      "learning_rate": 4.8886255910475054e-05,
      "loss": 0.3952,
      "num_input_tokens_seen": 4233888,
      "step": 103
    },
    {
      "epoch": 1.8990825688073394,
      "grad_norm": 0.19913265109062195,
      "learning_rate": 4.88646908061933e-05,
      "loss": 0.3241,
      "num_input_tokens_seen": 4267064,
      "step": 104
    },
    {
      "epoch": 1.9174311926605505,
      "grad_norm": 0.18295545876026154,
      "learning_rate": 4.884292376870567e-05,
      "loss": 0.4239,
      "num_input_tokens_seen": 4312536,
      "step": 105
    },
    {
      "epoch": 1.9357798165137616,
      "grad_norm": 0.16657495498657227,
      "learning_rate": 4.8820954982195905e-05,
      "loss": 0.2579,
      "num_input_tokens_seen": 4356656,
      "step": 106
    },
    {
      "epoch": 1.9541284403669725,
      "grad_norm": 0.18504932522773743,
      "learning_rate": 4.879878463255483e-05,
      "loss": 0.44,
      "num_input_tokens_seen": 4400216,
      "step": 107
    },
    {
      "epoch": 1.9724770642201834,
      "grad_norm": 0.1923118382692337,
      "learning_rate": 4.877641290737884e-05,
      "loss": 0.2662,
      "num_input_tokens_seen": 4436968,
      "step": 108
    },
    {
      "epoch": 1.9908256880733946,
      "grad_norm": 0.19636788964271545,
      "learning_rate": 4.875383999596828e-05,
      "loss": 0.4211,
      "num_input_tokens_seen": 4488232,
      "step": 109
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.3168099820613861,
      "learning_rate": 4.873106608932585e-05,
      "loss": 0.2499,
      "num_input_tokens_seen": 4518176,
      "step": 110
    },
    {
      "epoch": 2.018348623853211,
      "grad_norm": 0.14410308003425598,
      "learning_rate": 4.8708091380154984e-05,
      "loss": 0.2722,
      "num_input_tokens_seen": 4570896,
      "step": 111
    },
    {
      "epoch": 2.036697247706422,
      "grad_norm": 0.17840909957885742,
      "learning_rate": 4.868491606285823e-05,
      "loss": 0.2758,
      "num_input_tokens_seen": 4613576,
      "step": 112
    },
    {
      "epoch": 2.055045871559633,
      "grad_norm": 0.178523987531662,
      "learning_rate": 4.866154033353561e-05,
      "loss": 0.3361,
      "num_input_tokens_seen": 4652896,
      "step": 113
    },
    {
      "epoch": 2.073394495412844,
      "grad_norm": 0.17396725714206696,
      "learning_rate": 4.8637964389982926e-05,
      "loss": 0.2667,
      "num_input_tokens_seen": 4694256,
      "step": 114
    },
    {
      "epoch": 2.091743119266055,
      "grad_norm": 0.19471587240695953,
      "learning_rate": 4.8614188431690125e-05,
      "loss": 0.3628,
      "num_input_tokens_seen": 4747552,
      "step": 115
    },
    {
      "epoch": 2.1100917431192663,
      "grad_norm": 0.1722450852394104,
      "learning_rate": 4.859021265983959e-05,
      "loss": 0.3599,
      "num_input_tokens_seen": 4794080,
      "step": 116
    },
    {
      "epoch": 2.128440366972477,
      "grad_norm": 0.20137006044387817,
      "learning_rate": 4.856603727730447e-05,
      "loss": 0.4262,
      "num_input_tokens_seen": 4847912,
      "step": 117
    },
    {
      "epoch": 2.146788990825688,
      "grad_norm": 0.19395771622657776,
      "learning_rate": 4.854166248864689e-05,
      "loss": 0.3118,
      "num_input_tokens_seen": 4885480,
      "step": 118
    },
    {
      "epoch": 2.165137614678899,
      "grad_norm": 0.209548681974411,
      "learning_rate": 4.85170885001163e-05,
      "loss": 0.3725,
      "num_input_tokens_seen": 4921240,
      "step": 119
    },
    {
      "epoch": 2.18348623853211,
      "grad_norm": 0.18228279054164886,
      "learning_rate": 4.849231551964771e-05,
      "loss": 0.3816,
      "num_input_tokens_seen": 4960224,
      "step": 120
    },
    {
      "epoch": 2.2018348623853212,
      "grad_norm": 0.24349354207515717,
      "learning_rate": 4.846734375685989e-05,
      "loss": 0.3383,
      "num_input_tokens_seen": 4990536,
      "step": 121
    },
    {
      "epoch": 2.220183486238532,
      "grad_norm": 0.17600344121456146,
      "learning_rate": 4.844217342305363e-05,
      "loss": 0.3011,
      "num_input_tokens_seen": 5044296,
      "step": 122
    },
    {
      "epoch": 2.238532110091743,
      "grad_norm": 0.18766675889492035,
      "learning_rate": 4.8416804731209945e-05,
      "loss": 0.4458,
      "num_input_tokens_seen": 5088368,
      "step": 123
    },
    {
      "epoch": 2.2568807339449544,
      "grad_norm": 0.17657820880413055,
      "learning_rate": 4.839123789598829e-05,
      "loss": 0.2564,
      "num_input_tokens_seen": 5133472,
      "step": 124
    },
    {
      "epoch": 2.2752293577981653,
      "grad_norm": 0.20606014132499695,
      "learning_rate": 4.836547313372471e-05,
      "loss": 0.313,
      "num_input_tokens_seen": 5167768,
      "step": 125
    },
    {
      "epoch": 2.293577981651376,
      "grad_norm": 0.23511061072349548,
      "learning_rate": 4.8339510662430046e-05,
      "loss": 0.2963,
      "num_input_tokens_seen": 5209400,
      "step": 126
    },
    {
      "epoch": 2.311926605504587,
      "grad_norm": 0.18234293162822723,
      "learning_rate": 4.8313350701788054e-05,
      "loss": 0.2566,
      "num_input_tokens_seen": 5249360,
      "step": 127
    },
    {
      "epoch": 2.330275229357798,
      "grad_norm": 0.2223992496728897,
      "learning_rate": 4.828699347315356e-05,
      "loss": 0.2833,
      "num_input_tokens_seen": 5300808,
      "step": 128
    },
    {
      "epoch": 2.3486238532110093,
      "grad_norm": 0.23101739585399628,
      "learning_rate": 4.826043919955062e-05,
      "loss": 0.3099,
      "num_input_tokens_seen": 5332960,
      "step": 129
    },
    {
      "epoch": 2.36697247706422,
      "grad_norm": 0.26640889048576355,
      "learning_rate": 4.823368810567056e-05,
      "loss": 0.3238,
      "num_input_tokens_seen": 5365008,
      "step": 130
    },
    {
      "epoch": 2.385321100917431,
      "grad_norm": 0.2374572902917862,
      "learning_rate": 4.820674041787017e-05,
      "loss": 0.3153,
      "num_input_tokens_seen": 5400184,
      "step": 131
    },
    {
      "epoch": 2.4036697247706424,
      "grad_norm": 0.22812288999557495,
      "learning_rate": 4.817959636416969e-05,
      "loss": 0.2997,
      "num_input_tokens_seen": 5440320,
      "step": 132
    },
    {
      "epoch": 2.4220183486238533,
      "grad_norm": 0.20079149305820465,
      "learning_rate": 4.815225617425095e-05,
      "loss": 0.2373,
      "num_input_tokens_seen": 5480832,
      "step": 133
    },
    {
      "epoch": 2.4403669724770642,
      "grad_norm": 0.196709543466568,
      "learning_rate": 4.81247200794554e-05,
      "loss": 0.2456,
      "num_input_tokens_seen": 5526936,
      "step": 134
    },
    {
      "epoch": 2.458715596330275,
      "grad_norm": 0.17305873334407806,
      "learning_rate": 4.8096988312782174e-05,
      "loss": 0.2099,
      "num_input_tokens_seen": 5566384,
      "step": 135
    },
    {
      "epoch": 2.477064220183486,
      "grad_norm": 3.584635019302368,
      "learning_rate": 4.806906110888606e-05,
      "loss": 0.3485,
      "num_input_tokens_seen": 5629896,
      "step": 136
    },
    {
      "epoch": 2.4954128440366974,
      "grad_norm": 0.23481500148773193,
      "learning_rate": 4.80409387040756e-05,
      "loss": 0.2231,
      "num_input_tokens_seen": 5674504,
      "step": 137
    },
    {
      "epoch": 2.5137614678899083,
      "grad_norm": 0.27899855375289917,
      "learning_rate": 4.8012621336311016e-05,
      "loss": 0.4285,
      "num_input_tokens_seen": 5714000,
      "step": 138
    },
    {
      "epoch": 2.532110091743119,
      "grad_norm": 0.24404938519001007,
      "learning_rate": 4.798410924520223e-05,
      "loss": 0.3343,
      "num_input_tokens_seen": 5756856,
      "step": 139
    },
    {
      "epoch": 2.5504587155963305,
      "grad_norm": 0.26869162917137146,
      "learning_rate": 4.7955402672006854e-05,
      "loss": 0.2497,
      "num_input_tokens_seen": 5781192,
      "step": 140
    },
    {
      "epoch": 2.5688073394495414,
      "grad_norm": 0.2057972550392151,
      "learning_rate": 4.79265018596281e-05,
      "loss": 0.2991,
      "num_input_tokens_seen": 5824024,
      "step": 141
    },
    {
      "epoch": 2.5871559633027523,
      "grad_norm": 0.2184937596321106,
      "learning_rate": 4.789740705261278e-05,
      "loss": 0.2406,
      "num_input_tokens_seen": 5862584,
      "step": 142
    },
    {
      "epoch": 2.6055045871559632,
      "grad_norm": 0.23603741824626923,
      "learning_rate": 4.786811849714918e-05,
      "loss": 0.2722,
      "num_input_tokens_seen": 5897344,
      "step": 143
    },
    {
      "epoch": 2.623853211009174,
      "grad_norm": 0.22983981668949127,
      "learning_rate": 4.783863644106502e-05,
      "loss": 0.374,
      "num_input_tokens_seen": 5931736,
      "step": 144
    },
    {
      "epoch": 2.6422018348623855,
      "grad_norm": 0.2825419306755066,
      "learning_rate": 4.780896113382536e-05,
      "loss": 0.3386,
      "num_input_tokens_seen": 5972784,
      "step": 145
    },
    {
      "epoch": 2.6605504587155964,
      "grad_norm": 0.4502134621143341,
      "learning_rate": 4.777909282653042e-05,
      "loss": 0.2289,
      "num_input_tokens_seen": 6018968,
      "step": 146
    },
    {
      "epoch": 2.6788990825688073,
      "grad_norm": 0.2428288459777832,
      "learning_rate": 4.7749031771913584e-05,
      "loss": 0.4061,
      "num_input_tokens_seen": 6062520,
      "step": 147
    },
    {
      "epoch": 2.6972477064220186,
      "grad_norm": 0.2685629725456238,
      "learning_rate": 4.771877822433911e-05,
      "loss": 0.2198,
      "num_input_tokens_seen": 6087928,
      "step": 148
    },
    {
      "epoch": 2.7155963302752295,
      "grad_norm": 0.24021446704864502,
      "learning_rate": 4.7688332439800096e-05,
      "loss": 0.34,
      "num_input_tokens_seen": 6134792,
      "step": 149
    },
    {
      "epoch": 2.7339449541284404,
      "grad_norm": 0.2568534314632416,
      "learning_rate": 4.765769467591625e-05,
      "loss": 0.3292,
      "num_input_tokens_seen": 6183296,
      "step": 150
    },
    {
      "epoch": 2.7522935779816513,
      "grad_norm": 0.20823974907398224,
      "learning_rate": 4.762686519193175e-05,
      "loss": 0.2539,
      "num_input_tokens_seen": 6225840,
      "step": 151
    },
    {
      "epoch": 2.770642201834862,
      "grad_norm": 0.23333317041397095,
      "learning_rate": 4.759584424871302e-05,
      "loss": 0.3571,
      "num_input_tokens_seen": 6274760,
      "step": 152
    },
    {
      "epoch": 2.7889908256880735,
      "grad_norm": 0.20232398808002472,
      "learning_rate": 4.756463210874652e-05,
      "loss": 0.2783,
      "num_input_tokens_seen": 6326168,
      "step": 153
    },
    {
      "epoch": 2.8073394495412844,
      "grad_norm": 0.3479433059692383,
      "learning_rate": 4.7533229036136553e-05,
      "loss": 0.2925,
      "num_input_tokens_seen": 6360312,
      "step": 154
    },
    {
      "epoch": 2.8256880733944953,
      "grad_norm": 0.2659524083137512,
      "learning_rate": 4.750163529660303e-05,
      "loss": 0.2606,
      "num_input_tokens_seen": 6395496,
      "step": 155
    },
    {
      "epoch": 2.8440366972477067,
      "grad_norm": 0.24823158979415894,
      "learning_rate": 4.7469851157479177e-05,
      "loss": 0.3721,
      "num_input_tokens_seen": 6437064,
      "step": 156
    },
    {
      "epoch": 2.8623853211009176,
      "grad_norm": 0.32034072279930115,
      "learning_rate": 4.743787688770932e-05,
      "loss": 0.3931,
      "num_input_tokens_seen": 6477616,
      "step": 157
    },
    {
      "epoch": 2.8807339449541285,
      "grad_norm": 0.23295725882053375,
      "learning_rate": 4.740571275784659e-05,
      "loss": 0.2201,
      "num_input_tokens_seen": 6518680,
      "step": 158
    },
    {
      "epoch": 2.8990825688073394,
      "grad_norm": 0.2758423984050751,
      "learning_rate": 4.737335904005063e-05,
      "loss": 0.2579,
      "num_input_tokens_seen": 6549768,
      "step": 159
    },
    {
      "epoch": 2.9174311926605503,
      "grad_norm": 0.26690953969955444,
      "learning_rate": 4.734081600808531e-05,
      "loss": 0.2575,
      "num_input_tokens_seen": 6581000,
      "step": 160
    },
    {
      "epoch": 2.9357798165137616,
      "grad_norm": 0.26657482981681824,
      "learning_rate": 4.730808393731639e-05,
      "loss": 0.2597,
      "num_input_tokens_seen": 6612632,
      "step": 161
    },
    {
      "epoch": 2.9541284403669725,
      "grad_norm": 0.22647295892238617,
      "learning_rate": 4.72751631047092e-05,
      "loss": 0.3335,
      "num_input_tokens_seen": 6654288,
      "step": 162
    },
    {
      "epoch": 2.9724770642201834,
      "grad_norm": 0.2863366901874542,
      "learning_rate": 4.72420537888263e-05,
      "loss": 0.374,
      "num_input_tokens_seen": 6707208,
      "step": 163
    },
    {
      "epoch": 2.9908256880733948,
      "grad_norm": 0.2606408894062042,
      "learning_rate": 4.7208756269825104e-05,
      "loss": 0.3477,
      "num_input_tokens_seen": 6748448,
      "step": 164
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.440924733877182,
      "learning_rate": 4.717527082945554e-05,
      "loss": 0.3214,
      "num_input_tokens_seen": 6777264,
      "step": 165
    },
    {
      "epoch": 3.018348623853211,
      "grad_norm": 0.27583903074264526,
      "learning_rate": 4.714159775105765e-05,
      "loss": 0.2681,
      "num_input_tokens_seen": 6809456,
      "step": 166
    },
    {
      "epoch": 3.036697247706422,
      "grad_norm": 0.2995987832546234,
      "learning_rate": 4.7107737319559176e-05,
      "loss": 0.2633,
      "num_input_tokens_seen": 6845768,
      "step": 167
    },
    {
      "epoch": 3.055045871559633,
      "grad_norm": 0.23999951779842377,
      "learning_rate": 4.707368982147318e-05,
      "loss": 0.1961,
      "num_input_tokens_seen": 6893056,
      "step": 168
    },
    {
      "epoch": 3.073394495412844,
      "grad_norm": 0.23356525599956512,
      "learning_rate": 4.703945554489558e-05,
      "loss": 0.2836,
      "num_input_tokens_seen": 6932480,
      "step": 169
    },
    {
      "epoch": 3.091743119266055,
      "grad_norm": 0.29919493198394775,
      "learning_rate": 4.700503477950278e-05,
      "loss": 0.2838,
      "num_input_tokens_seen": 6975992,
      "step": 170
    },
    {
      "epoch": 3.1100917431192663,
      "grad_norm": 0.3350690007209778,
      "learning_rate": 4.697042781654913e-05,
      "loss": 0.3489,
      "num_input_tokens_seen": 7021840,
      "step": 171
    },
    {
      "epoch": 3.128440366972477,
      "grad_norm": 0.2837466895580292,
      "learning_rate": 4.693563494886455e-05,
      "loss": 0.3797,
      "num_input_tokens_seen": 7065192,
      "step": 172
    },
    {
      "epoch": 3.146788990825688,
      "grad_norm": 0.24601787328720093,
      "learning_rate": 4.6900656470851964e-05,
      "loss": 0.2046,
      "num_input_tokens_seen": 7114544,
      "step": 173
    },
    {
      "epoch": 3.165137614678899,
      "grad_norm": 0.32290250062942505,
      "learning_rate": 4.6865492678484895e-05,
      "loss": 0.2596,
      "num_input_tokens_seen": 7152736,
      "step": 174
    },
    {
      "epoch": 3.18348623853211,
      "grad_norm": 0.33591920137405396,
      "learning_rate": 4.68301438693049e-05,
      "loss": 0.3045,
      "num_input_tokens_seen": 7207464,
      "step": 175
    },
    {
      "epoch": 3.2018348623853212,
      "grad_norm": 0.25471043586730957,
      "learning_rate": 4.679461034241906e-05,
      "loss": 0.2096,
      "num_input_tokens_seen": 7238640,
      "step": 176
    },
    {
      "epoch": 3.220183486238532,
      "grad_norm": 0.31238994002342224,
      "learning_rate": 4.6758892398497494e-05,
      "loss": 0.2226,
      "num_input_tokens_seen": 7279112,
      "step": 177
    },
    {
      "epoch": 3.238532110091743,
      "grad_norm": 0.35679712891578674,
      "learning_rate": 4.672299033977076e-05,
      "loss": 0.2403,
      "num_input_tokens_seen": 7311632,
      "step": 178
    },
    {
      "epoch": 3.2568807339449544,
      "grad_norm": 0.326914519071579,
      "learning_rate": 4.6686904470027316e-05,
      "loss": 0.2156,
      "num_input_tokens_seen": 7344864,
      "step": 179
    },
    {
      "epoch": 3.2752293577981653,
      "grad_norm": 0.3293381929397583,
      "learning_rate": 4.665063509461097e-05,
      "loss": 0.238,
      "num_input_tokens_seen": 7389944,
      "step": 180
    },
    {
      "epoch": 3.293577981651376,
      "grad_norm": 0.3313307762145996,
      "learning_rate": 4.661418252041827e-05,
      "loss": 0.2251,
      "num_input_tokens_seen": 7423672,
      "step": 181
    },
    {
      "epoch": 3.311926605504587,
      "grad_norm": 0.3328595459461212,
      "learning_rate": 4.657754705589591e-05,
      "loss": 0.2922,
      "num_input_tokens_seen": 7459576,
      "step": 182
    },
    {
      "epoch": 3.330275229357798,
      "grad_norm": 0.2721710801124573,
      "learning_rate": 4.6540729011038146e-05,
      "loss": 0.2698,
      "num_input_tokens_seen": 7511736,
      "step": 183
    },
    {
      "epoch": 3.3486238532110093,
      "grad_norm": 0.2488890290260315,
      "learning_rate": 4.650372869738414e-05,
      "loss": 0.173,
      "num_input_tokens_seen": 7558552,
      "step": 184
    },
    {
      "epoch": 3.36697247706422,
      "grad_norm": 0.3800615668296814,
      "learning_rate": 4.6466546428015336e-05,
      "loss": 0.32,
      "num_input_tokens_seen": 7599040,
      "step": 185
    },
    {
      "epoch": 3.385321100917431,
      "grad_norm": 0.3377014100551605,
      "learning_rate": 4.642918251755281e-05,
      "loss": 0.3058,
      "num_input_tokens_seen": 7653264,
      "step": 186
    },
    {
      "epoch": 3.4036697247706424,
      "grad_norm": 0.25239789485931396,
      "learning_rate": 4.639163728215463e-05,
      "loss": 0.1896,
      "num_input_tokens_seen": 7694272,
      "step": 187
    },
    {
      "epoch": 3.4220183486238533,
      "grad_norm": 0.34607502818107605,
      "learning_rate": 4.6353911039513145e-05,
      "loss": 0.2933,
      "num_input_tokens_seen": 7730848,
      "step": 188
    },
    {
      "epoch": 3.4403669724770642,
      "grad_norm": 0.30653324723243713,
      "learning_rate": 4.6316004108852305e-05,
      "loss": 0.2625,
      "num_input_tokens_seen": 7781200,
      "step": 189
    },
    {
      "epoch": 3.458715596330275,
      "grad_norm": 0.2943236231803894,
      "learning_rate": 4.627791681092499e-05,
      "loss": 0.3372,
      "num_input_tokens_seen": 7825032,
      "step": 190
    },
    {
      "epoch": 3.477064220183486,
      "grad_norm": 0.30080685019493103,
      "learning_rate": 4.623964946801027e-05,
      "loss": 0.2229,
      "num_input_tokens_seen": 7855840,
      "step": 191
    },
    {
      "epoch": 3.4954128440366974,
      "grad_norm": 0.3511403799057007,
      "learning_rate": 4.620120240391065e-05,
      "loss": 0.3967,
      "num_input_tokens_seen": 7905928,
      "step": 192
    },
    {
      "epoch": 3.5137614678899083,
      "grad_norm": 0.273583322763443,
      "learning_rate": 4.61625759439494e-05,
      "loss": 0.2254,
      "num_input_tokens_seen": 7955992,
      "step": 193
    },
    {
      "epoch": 3.532110091743119,
      "grad_norm": 0.3457902669906616,
      "learning_rate": 4.612377041496776e-05,
      "loss": 0.2553,
      "num_input_tokens_seen": 7998024,
      "step": 194
    },
    {
      "epoch": 3.5504587155963305,
      "grad_norm": 0.31968954205513,
      "learning_rate": 4.608478614532215e-05,
      "loss": 0.2197,
      "num_input_tokens_seen": 8055672,
      "step": 195
    },
    {
      "epoch": 3.5688073394495414,
      "grad_norm": 0.34753403067588806,
      "learning_rate": 4.604562346488144e-05,
      "loss": 0.2507,
      "num_input_tokens_seen": 8090848,
      "step": 196
    },
    {
      "epoch": 3.5871559633027523,
      "grad_norm": 0.3808669149875641,
      "learning_rate": 4.6006282705024144e-05,
      "loss": 0.2422,
      "num_input_tokens_seen": 8136680,
      "step": 197
    },
    {
      "epoch": 3.6055045871559632,
      "grad_norm": 0.3004499673843384,
      "learning_rate": 4.5966764198635606e-05,
      "loss": 0.2107,
      "num_input_tokens_seen": 8187472,
      "step": 198
    },
    {
      "epoch": 3.623853211009174,
      "grad_norm": 0.30718186497688293,
      "learning_rate": 4.592706828010518e-05,
      "loss": 0.1854,
      "num_input_tokens_seen": 8225216,
      "step": 199
    },
    {
      "epoch": 3.6422018348623855,
      "grad_norm": 0.23112858831882477,
      "learning_rate": 4.588719528532342e-05,
      "loss": 0.1687,
      "num_input_tokens_seen": 8274456,
      "step": 200
    },
    {
      "epoch": 3.6605504587155964,
      "grad_norm": 0.25966888666152954,
      "learning_rate": 4.5847145551679206e-05,
      "loss": 0.2549,
      "num_input_tokens_seen": 8317016,
      "step": 201
    },
    {
      "epoch": 3.6788990825688073,
      "grad_norm": 0.25600987672805786,
      "learning_rate": 4.580691941805695e-05,
      "loss": 0.1602,
      "num_input_tokens_seen": 8361856,
      "step": 202
    },
    {
      "epoch": 3.6972477064220186,
      "grad_norm": 0.33986184000968933,
      "learning_rate": 4.5766517224833637e-05,
      "loss": 0.2495,
      "num_input_tokens_seen": 8410696,
      "step": 203
    },
    {
      "epoch": 3.7155963302752295,
      "grad_norm": 0.36899781227111816,
      "learning_rate": 4.572593931387604e-05,
      "loss": 0.2012,
      "num_input_tokens_seen": 8441872,
      "step": 204
    },
    {
      "epoch": 3.7339449541284404,
      "grad_norm": 0.42072632908821106,
      "learning_rate": 4.568518602853776e-05,
      "loss": 0.2373,
      "num_input_tokens_seen": 8482544,
      "step": 205
    },
    {
      "epoch": 3.7522935779816513,
      "grad_norm": 0.40593233704566956,
      "learning_rate": 4.5644257713656356e-05,
      "loss": 0.233,
      "num_input_tokens_seen": 8519856,
      "step": 206
    },
    {
      "epoch": 3.770642201834862,
      "grad_norm": 0.38003161549568176,
      "learning_rate": 4.5603154715550386e-05,
      "loss": 0.225,
      "num_input_tokens_seen": 8551392,
      "step": 207
    },
    {
      "epoch": 3.7889908256880735,
      "grad_norm": 0.2564244568347931,
      "learning_rate": 4.556187738201656e-05,
      "loss": 0.2975,
      "num_input_tokens_seen": 8599472,
      "step": 208
    },
    {
      "epoch": 3.8073394495412844,
      "grad_norm": 0.29023391008377075,
      "learning_rate": 4.552042606232668e-05,
      "loss": 0.2033,
      "num_input_tokens_seen": 8631880,
      "step": 209
    },
    {
      "epoch": 3.8256880733944953,
      "grad_norm": 0.32886001467704773,
      "learning_rate": 4.54788011072248e-05,
      "loss": 0.2024,
      "num_input_tokens_seen": 8675016,
      "step": 210
    },
    {
      "epoch": 3.8440366972477067,
      "grad_norm": 0.36749884486198425,
      "learning_rate": 4.5437002868924166e-05,
      "loss": 0.2304,
      "num_input_tokens_seen": 8713248,
      "step": 211
    },
    {
      "epoch": 3.8623853211009176,
      "grad_norm": 0.3055097758769989,
      "learning_rate": 4.539503170110431e-05,
      "loss": 0.2928,
      "num_input_tokens_seen": 8748800,
      "step": 212
    },
    {
      "epoch": 3.8807339449541285,
      "grad_norm": 0.38436686992645264,
      "learning_rate": 4.535288795890798e-05,
      "loss": 0.2214,
      "num_input_tokens_seen": 8787832,
      "step": 213
    },
    {
      "epoch": 3.8990825688073394,
      "grad_norm": 0.44330883026123047,
      "learning_rate": 4.531057199893824e-05,
      "loss": 0.2168,
      "num_input_tokens_seen": 8819616,
      "step": 214
    },
    {
      "epoch": 3.9174311926605503,
      "grad_norm": 0.28318527340888977,
      "learning_rate": 4.526808417925531e-05,
      "loss": 0.279,
      "num_input_tokens_seen": 8860744,
      "step": 215
    },
    {
      "epoch": 3.9357798165137616,
      "grad_norm": 0.3287319839000702,
      "learning_rate": 4.522542485937369e-05,
      "loss": 0.2597,
      "num_input_tokens_seen": 8906432,
      "step": 216
    }
  ],
  "logging_steps": 1.0,
  "max_steps": 1080,
  "num_input_tokens_seen": 8906432,
  "num_train_epochs": 20,
  "save_steps": 54,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 7.513615025265705e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}