{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.9636552440290758,
  "eval_steps": 500,
  "global_step": 180,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.016614745586708203,
      "grad_norm": 0.061364494264125824,
      "learning_rate": 4.999991432639962e-05,
      "loss": 0.5857,
      "num_input_tokens_seen": 70408,
      "step": 1
    },
    {
      "epoch": 0.033229491173416406,
      "grad_norm": 0.0613991804420948,
      "learning_rate": 4.999965730618567e-05,
      "loss": 0.5331,
      "num_input_tokens_seen": 139640,
      "step": 2
    },
    {
      "epoch": 0.04984423676012461,
      "grad_norm": 0.06351307034492493,
      "learning_rate": 4.9999228941119745e-05,
      "loss": 0.5852,
      "num_input_tokens_seen": 223656,
      "step": 3
    },
    {
      "epoch": 0.06645898234683281,
      "grad_norm": 0.05762802064418793,
      "learning_rate": 4.999862923413781e-05,
      "loss": 0.5384,
      "num_input_tokens_seen": 300688,
      "step": 4
    },
    {
      "epoch": 0.08307372793354102,
      "grad_norm": 0.0632179006934166,
      "learning_rate": 4.999785818935018e-05,
      "loss": 0.5273,
      "num_input_tokens_seen": 366368,
      "step": 5
    },
    {
      "epoch": 0.09968847352024922,
      "grad_norm": 0.056689903140068054,
      "learning_rate": 4.999691581204152e-05,
      "loss": 0.5145,
      "num_input_tokens_seen": 445808,
      "step": 6
    },
    {
      "epoch": 0.11630321910695743,
      "grad_norm": 0.06574171781539917,
      "learning_rate": 4.9995802108670775e-05,
      "loss": 0.5301,
      "num_input_tokens_seen": 522800,
      "step": 7
    },
    {
      "epoch": 0.13291796469366562,
      "grad_norm": 0.06367070972919464,
      "learning_rate": 4.999451708687114e-05,
      "loss": 0.5552,
      "num_input_tokens_seen": 599608,
      "step": 8
    },
    {
      "epoch": 0.14953271028037382,
      "grad_norm": 0.0585966520011425,
      "learning_rate": 4.9993060755450015e-05,
      "loss": 0.5999,
      "num_input_tokens_seen": 681424,
      "step": 9
    },
    {
      "epoch": 0.16614745586708204,
      "grad_norm": 0.05650574713945389,
      "learning_rate": 4.999143312438893e-05,
      "loss": 0.4535,
      "num_input_tokens_seen": 756744,
      "step": 10
    },
    {
      "epoch": 0.18276220145379024,
      "grad_norm": 0.05954223498702049,
      "learning_rate": 4.998963420484349e-05,
      "loss": 0.4674,
      "num_input_tokens_seen": 842576,
      "step": 11
    },
    {
      "epoch": 0.19937694704049844,
      "grad_norm": 0.0663776770234108,
      "learning_rate": 4.998766400914329e-05,
      "loss": 0.4703,
      "num_input_tokens_seen": 917232,
      "step": 12
    },
    {
      "epoch": 0.21599169262720663,
      "grad_norm": 0.056374579668045044,
      "learning_rate": 4.9985522550791825e-05,
      "loss": 0.3725,
      "num_input_tokens_seen": 1006800,
      "step": 13
    },
    {
      "epoch": 0.23260643821391486,
      "grad_norm": 0.06437493115663528,
      "learning_rate": 4.998320984446641e-05,
      "loss": 0.4653,
      "num_input_tokens_seen": 1085824,
      "step": 14
    },
    {
      "epoch": 0.24922118380062305,
      "grad_norm": 0.06560757756233215,
      "learning_rate": 4.9980725906018074e-05,
      "loss": 0.5026,
      "num_input_tokens_seen": 1164160,
      "step": 15
    },
    {
      "epoch": 0.26583592938733125,
      "grad_norm": 0.06942517310380936,
      "learning_rate": 4.997807075247146e-05,
      "loss": 0.5401,
      "num_input_tokens_seen": 1242264,
      "step": 16
    },
    {
      "epoch": 0.2824506749740395,
      "grad_norm": 0.06349828094244003,
      "learning_rate": 4.997524440202469e-05,
      "loss": 0.4713,
      "num_input_tokens_seen": 1325904,
      "step": 17
    },
    {
      "epoch": 0.29906542056074764,
      "grad_norm": 0.08846385776996613,
      "learning_rate": 4.9972246874049254e-05,
      "loss": 0.5834,
      "num_input_tokens_seen": 1385632,
      "step": 18
    },
    {
      "epoch": 0.31568016614745587,
      "grad_norm": 0.062130190432071686,
      "learning_rate": 4.996907818908987e-05,
      "loss": 0.4045,
      "num_input_tokens_seen": 1470632,
      "step": 19
    },
    {
      "epoch": 0.3322949117341641,
      "grad_norm": 0.07743565738201141,
      "learning_rate": 4.996573836886435e-05,
      "loss": 0.5283,
      "num_input_tokens_seen": 1547536,
      "step": 20
    },
    {
      "epoch": 0.34890965732087226,
      "grad_norm": 0.06756695359945297,
      "learning_rate": 4.9962227436263453e-05,
      "loss": 0.4199,
      "num_input_tokens_seen": 1615528,
      "step": 21
    },
    {
      "epoch": 0.3655244029075805,
      "grad_norm": 0.08662309497594833,
      "learning_rate": 4.995854541535071e-05,
      "loss": 0.4775,
      "num_input_tokens_seen": 1694352,
      "step": 22
    },
    {
      "epoch": 0.3821391484942887,
      "grad_norm": 0.08380820602178574,
      "learning_rate": 4.9954692331362294e-05,
      "loss": 0.4871,
      "num_input_tokens_seen": 1753776,
      "step": 23
    },
    {
      "epoch": 0.3987538940809969,
      "grad_norm": 0.09967435896396637,
      "learning_rate": 4.995066821070679e-05,
      "loss": 0.4871,
      "num_input_tokens_seen": 1809048,
      "step": 24
    },
    {
      "epoch": 0.4153686396677051,
      "grad_norm": 0.0871267095208168,
      "learning_rate": 4.994647308096509e-05,
      "loss": 0.5461,
      "num_input_tokens_seen": 1884264,
      "step": 25
    },
    {
      "epoch": 0.43198338525441327,
      "grad_norm": 0.065020851790905,
      "learning_rate": 4.994210697089014e-05,
      "loss": 0.405,
      "num_input_tokens_seen": 1981704,
      "step": 26
    },
    {
      "epoch": 0.4485981308411215,
      "grad_norm": 0.09853450953960419,
      "learning_rate": 4.9937569910406756e-05,
      "loss": 0.4487,
      "num_input_tokens_seen": 2044144,
      "step": 27
    },
    {
      "epoch": 0.4652128764278297,
      "grad_norm": 0.08763110637664795,
      "learning_rate": 4.9932861930611454e-05,
      "loss": 0.3946,
      "num_input_tokens_seen": 2107584,
      "step": 28
    },
    {
      "epoch": 0.4818276220145379,
      "grad_norm": 0.08950547873973846,
      "learning_rate": 4.9927983063772196e-05,
      "loss": 0.4257,
      "num_input_tokens_seen": 2169248,
      "step": 29
    },
    {
      "epoch": 0.4984423676012461,
      "grad_norm": 0.09980211406946182,
      "learning_rate": 4.99229333433282e-05,
      "loss": 0.3911,
      "num_input_tokens_seen": 2230344,
      "step": 30
    },
    {
      "epoch": 0.5150571131879543,
      "grad_norm": 0.092055544257164,
      "learning_rate": 4.9917712803889674e-05,
      "loss": 0.3749,
      "num_input_tokens_seen": 2302368,
      "step": 31
    },
    {
      "epoch": 0.5316718587746625,
      "grad_norm": 0.10067818313837051,
      "learning_rate": 4.991232148123761e-05,
      "loss": 0.4761,
      "num_input_tokens_seen": 2369984,
      "step": 32
    },
    {
      "epoch": 0.5482866043613707,
      "grad_norm": 0.0717971920967102,
      "learning_rate": 4.990675941232353e-05,
      "loss": 0.4328,
      "num_input_tokens_seen": 2453032,
      "step": 33
    },
    {
      "epoch": 0.564901349948079,
      "grad_norm": 0.07436250895261765,
      "learning_rate": 4.990102663526924e-05,
      "loss": 0.417,
      "num_input_tokens_seen": 2527464,
      "step": 34
    },
    {
      "epoch": 0.5815160955347871,
      "grad_norm": 0.09256689995527267,
      "learning_rate": 4.989512318936655e-05,
      "loss": 0.4097,
      "num_input_tokens_seen": 2597032,
      "step": 35
    },
    {
      "epoch": 0.5981308411214953,
      "grad_norm": 0.09964177012443542,
      "learning_rate": 4.9889049115077005e-05,
      "loss": 0.4065,
      "num_input_tokens_seen": 2671704,
      "step": 36
    },
    {
      "epoch": 0.6147455867082036,
      "grad_norm": 0.06627887487411499,
      "learning_rate": 4.988280445403164e-05,
      "loss": 0.4136,
      "num_input_tokens_seen": 2767640,
      "step": 37
    },
    {
      "epoch": 0.6313603322949117,
      "grad_norm": 0.0746045857667923,
      "learning_rate": 4.987638924903067e-05,
      "loss": 0.4125,
      "num_input_tokens_seen": 2843720,
      "step": 38
    },
    {
      "epoch": 0.6479750778816199,
      "grad_norm": 0.0795741006731987,
      "learning_rate": 4.9869803544043166e-05,
      "loss": 0.3135,
      "num_input_tokens_seen": 2921472,
      "step": 39
    },
    {
      "epoch": 0.6645898234683282,
      "grad_norm": 0.08914181590080261,
      "learning_rate": 4.9863047384206835e-05,
      "loss": 0.4549,
      "num_input_tokens_seen": 2998400,
      "step": 40
    },
    {
      "epoch": 0.6812045690550363,
      "grad_norm": 0.11220043897628784,
      "learning_rate": 4.985612081582764e-05,
      "loss": 0.4135,
      "num_input_tokens_seen": 3059648,
      "step": 41
    },
    {
      "epoch": 0.6978193146417445,
      "grad_norm": 0.08390027284622192,
      "learning_rate": 4.98490238863795e-05,
      "loss": 0.3538,
      "num_input_tokens_seen": 3140184,
      "step": 42
    },
    {
      "epoch": 0.7144340602284528,
      "grad_norm": 0.08858532458543777,
      "learning_rate": 4.984175664450397e-05,
      "loss": 0.3644,
      "num_input_tokens_seen": 3207184,
      "step": 43
    },
    {
      "epoch": 0.731048805815161,
      "grad_norm": 0.07439564168453217,
      "learning_rate": 4.983431914000991e-05,
      "loss": 0.4019,
      "num_input_tokens_seen": 3292344,
      "step": 44
    },
    {
      "epoch": 0.7476635514018691,
      "grad_norm": 0.08694300055503845,
      "learning_rate": 4.982671142387316e-05,
      "loss": 0.4238,
      "num_input_tokens_seen": 3365384,
      "step": 45
    },
    {
      "epoch": 0.7642782969885774,
      "grad_norm": 0.0867784395813942,
      "learning_rate": 4.981893354823614e-05,
      "loss": 0.3702,
      "num_input_tokens_seen": 3440720,
      "step": 46
    },
    {
      "epoch": 0.7808930425752856,
      "grad_norm": 0.06278439611196518,
      "learning_rate": 4.9810985566407544e-05,
      "loss": 0.3354,
      "num_input_tokens_seen": 3533576,
      "step": 47
    },
    {
      "epoch": 0.7975077881619937,
      "grad_norm": 0.08999717980623245,
      "learning_rate": 4.980286753286195e-05,
      "loss": 0.4981,
      "num_input_tokens_seen": 3599744,
      "step": 48
    },
    {
      "epoch": 0.814122533748702,
      "grad_norm": 0.07938859611749649,
      "learning_rate": 4.979457950323945e-05,
      "loss": 0.4016,
      "num_input_tokens_seen": 3689520,
      "step": 49
    },
    {
      "epoch": 0.8307372793354102,
      "grad_norm": 0.1045590192079544,
      "learning_rate": 4.9786121534345265e-05,
      "loss": 0.388,
      "num_input_tokens_seen": 3751808,
      "step": 50
    },
    {
      "epoch": 0.8473520249221184,
      "grad_norm": 0.07890618592500687,
      "learning_rate": 4.9777493684149375e-05,
      "loss": 0.3674,
      "num_input_tokens_seen": 3839096,
      "step": 51
    },
    {
      "epoch": 0.8639667705088265,
      "grad_norm": 0.07802557945251465,
      "learning_rate": 4.976869601178609e-05,
      "loss": 0.4147,
      "num_input_tokens_seen": 3919824,
      "step": 52
    },
    {
      "epoch": 0.8805815160955348,
      "grad_norm": 0.0913538783788681,
      "learning_rate": 4.975972857755369e-05,
      "loss": 0.2978,
      "num_input_tokens_seen": 3989312,
      "step": 53
    },
    {
      "epoch": 0.897196261682243,
      "grad_norm": 0.08525951951742172,
      "learning_rate": 4.975059144291394e-05,
      "loss": 0.3923,
      "num_input_tokens_seen": 4060528,
      "step": 54
    },
    {
      "epoch": 0.9138110072689511,
      "grad_norm": 0.08649709820747375,
      "learning_rate": 4.974128467049176e-05,
      "loss": 0.3282,
      "num_input_tokens_seen": 4129368,
      "step": 55
    },
    {
      "epoch": 0.9304257528556594,
      "grad_norm": 0.11635593324899673,
      "learning_rate": 4.9731808324074717e-05,
      "loss": 0.3403,
      "num_input_tokens_seen": 4175208,
      "step": 56
    },
    {
      "epoch": 0.9470404984423676,
      "grad_norm": 0.1115177720785141,
      "learning_rate": 4.972216246861262e-05,
      "loss": 0.3191,
      "num_input_tokens_seen": 4218096,
      "step": 57
    },
    {
      "epoch": 0.9636552440290758,
      "grad_norm": 0.0986371859908104,
      "learning_rate": 4.971234717021709e-05,
      "loss": 0.3745,
      "num_input_tokens_seen": 4275968,
      "step": 58
    },
    {
      "epoch": 0.980269989615784,
      "grad_norm": 0.07860780507326126,
      "learning_rate": 4.9702362496161085e-05,
      "loss": 0.3129,
      "num_input_tokens_seen": 4346616,
      "step": 59
    },
    {
      "epoch": 0.9968847352024922,
      "grad_norm": 0.08581527322530746,
      "learning_rate": 4.9692208514878444e-05,
      "loss": 0.3324,
      "num_input_tokens_seen": 4425064,
      "step": 60
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.16779834032058716,
      "learning_rate": 4.968188529596342e-05,
      "loss": 0.2814,
      "num_input_tokens_seen": 4435328,
      "step": 61
    },
    {
      "epoch": 1.0166147455867083,
      "grad_norm": 0.08948636800050735,
      "learning_rate": 4.9671392910170185e-05,
      "loss": 0.3467,
      "num_input_tokens_seen": 4500104,
      "step": 62
    },
    {
      "epoch": 1.0332294911734163,
      "grad_norm": 0.07826830446720123,
      "learning_rate": 4.966073142941239e-05,
      "loss": 0.3892,
      "num_input_tokens_seen": 4581976,
      "step": 63
    },
    {
      "epoch": 1.0498442367601246,
      "grad_norm": 0.08562575280666351,
      "learning_rate": 4.964990092676263e-05,
      "loss": 0.3354,
      "num_input_tokens_seen": 4652160,
      "step": 64
    },
    {
      "epoch": 1.066458982346833,
      "grad_norm": 0.1057090312242508,
      "learning_rate": 4.9638901476451946e-05,
      "loss": 0.3457,
      "num_input_tokens_seen": 4709368,
      "step": 65
    },
    {
      "epoch": 1.083073727933541,
      "grad_norm": 0.08131146430969238,
      "learning_rate": 4.962773315386935e-05,
      "loss": 0.3672,
      "num_input_tokens_seen": 4798256,
      "step": 66
    },
    {
      "epoch": 1.0996884735202492,
      "grad_norm": 0.09464936703443527,
      "learning_rate": 4.961639603556127e-05,
      "loss": 0.3157,
      "num_input_tokens_seen": 4859200,
      "step": 67
    },
    {
      "epoch": 1.1163032191069575,
      "grad_norm": 0.0999661460518837,
      "learning_rate": 4.960489019923105e-05,
      "loss": 0.3968,
      "num_input_tokens_seen": 4925992,
      "step": 68
    },
    {
      "epoch": 1.1329179646936656,
      "grad_norm": 0.09851639717817307,
      "learning_rate": 4.9593215723738404e-05,
      "loss": 0.329,
      "num_input_tokens_seen": 4998808,
      "step": 69
    },
    {
      "epoch": 1.1495327102803738,
      "grad_norm": 0.08382592350244522,
      "learning_rate": 4.958137268909887e-05,
      "loss": 0.2856,
      "num_input_tokens_seen": 5089672,
      "step": 70
    },
    {
      "epoch": 1.1661474558670821,
      "grad_norm": 0.09073847532272339,
      "learning_rate": 4.9569361176483286e-05,
      "loss": 0.3512,
      "num_input_tokens_seen": 5166744,
      "step": 71
    },
    {
      "epoch": 1.1827622014537902,
      "grad_norm": 0.10290185362100601,
      "learning_rate": 4.9557181268217227e-05,
      "loss": 0.4263,
      "num_input_tokens_seen": 5228264,
      "step": 72
    },
    {
      "epoch": 1.1993769470404985,
      "grad_norm": 0.07421435415744781,
      "learning_rate": 4.9544833047780394e-05,
      "loss": 0.3126,
      "num_input_tokens_seen": 5338224,
      "step": 73
    },
    {
      "epoch": 1.2159916926272065,
      "grad_norm": 0.10284842550754547,
      "learning_rate": 4.9532316599806124e-05,
      "loss": 0.3473,
      "num_input_tokens_seen": 5399848,
      "step": 74
    },
    {
      "epoch": 1.2326064382139148,
      "grad_norm": 0.10817047953605652,
      "learning_rate": 4.951963201008076e-05,
      "loss": 0.3275,
      "num_input_tokens_seen": 5468624,
      "step": 75
    },
    {
      "epoch": 1.249221183800623,
      "grad_norm": 0.09662210941314697,
      "learning_rate": 4.9506779365543046e-05,
      "loss": 0.3296,
      "num_input_tokens_seen": 5536776,
      "step": 76
    },
    {
      "epoch": 1.2658359293873311,
      "grad_norm": 0.11193853616714478,
      "learning_rate": 4.949375875428357e-05,
      "loss": 0.3605,
      "num_input_tokens_seen": 5609296,
      "step": 77
    },
    {
      "epoch": 1.2824506749740394,
      "grad_norm": 0.11866679787635803,
      "learning_rate": 4.9480570265544144e-05,
      "loss": 0.3133,
      "num_input_tokens_seen": 5663824,
      "step": 78
    },
    {
      "epoch": 1.2990654205607477,
      "grad_norm": 0.09865846484899521,
      "learning_rate": 4.94672139897172e-05,
      "loss": 0.3464,
      "num_input_tokens_seen": 5742032,
      "step": 79
    },
    {
      "epoch": 1.3156801661474558,
      "grad_norm": 0.09930054098367691,
      "learning_rate": 4.9453690018345144e-05,
      "loss": 0.3346,
      "num_input_tokens_seen": 5816864,
      "step": 80
    },
    {
      "epoch": 1.332294911734164,
      "grad_norm": 0.1085321381688118,
      "learning_rate": 4.943999844411977e-05,
      "loss": 0.3102,
      "num_input_tokens_seen": 5881624,
      "step": 81
    },
    {
      "epoch": 1.3489096573208723,
      "grad_norm": 0.08012478053569794,
      "learning_rate": 4.94261393608816e-05,
      "loss": 0.2853,
      "num_input_tokens_seen": 5970272,
      "step": 82
    },
    {
      "epoch": 1.3655244029075804,
      "grad_norm": 0.10291877388954163,
      "learning_rate": 4.941211286361922e-05,
      "loss": 0.3038,
      "num_input_tokens_seen": 6058752,
      "step": 83
    },
    {
      "epoch": 1.3821391484942886,
      "grad_norm": 0.11999356001615524,
      "learning_rate": 4.939791904846869e-05,
      "loss": 0.3283,
      "num_input_tokens_seen": 6120064,
      "step": 84
    },
    {
      "epoch": 1.398753894080997,
      "grad_norm": 0.10502559691667557,
      "learning_rate": 4.938355801271282e-05,
      "loss": 0.321,
      "num_input_tokens_seen": 6182072,
      "step": 85
    },
    {
      "epoch": 1.415368639667705,
      "grad_norm": 0.12620873749256134,
      "learning_rate": 4.936902985478055e-05,
      "loss": 0.3296,
      "num_input_tokens_seen": 6269680,
      "step": 86
    },
    {
      "epoch": 1.4319833852544133,
      "grad_norm": 0.13212910294532776,
      "learning_rate": 4.935433467424624e-05,
      "loss": 0.3225,
      "num_input_tokens_seen": 6347424,
      "step": 87
    },
    {
      "epoch": 1.4485981308411215,
      "grad_norm": 0.11600925773382187,
      "learning_rate": 4.933947257182901e-05,
      "loss": 0.3479,
      "num_input_tokens_seen": 6412584,
      "step": 88
    },
    {
      "epoch": 1.4652128764278296,
      "grad_norm": 0.11683235317468643,
      "learning_rate": 4.932444364939205e-05,
      "loss": 0.3322,
      "num_input_tokens_seen": 6482728,
      "step": 89
    },
    {
      "epoch": 1.4818276220145379,
      "grad_norm": 0.11446017026901245,
      "learning_rate": 4.9309248009941914e-05,
      "loss": 0.3802,
      "num_input_tokens_seen": 6562104,
      "step": 90
    },
    {
      "epoch": 1.4984423676012462,
      "grad_norm": 0.10500892251729965,
      "learning_rate": 4.929388575762782e-05,
      "loss": 0.3371,
      "num_input_tokens_seen": 6656552,
      "step": 91
    },
    {
      "epoch": 1.5150571131879542,
      "grad_norm": 0.13279151916503906,
      "learning_rate": 4.9278356997740904e-05,
      "loss": 0.293,
      "num_input_tokens_seen": 6714184,
      "step": 92
    },
    {
      "epoch": 1.5316718587746625,
      "grad_norm": 0.107506163418293,
      "learning_rate": 4.9262661836713564e-05,
      "loss": 0.3127,
      "num_input_tokens_seen": 6793552,
      "step": 93
    },
    {
      "epoch": 1.5482866043613708,
      "grad_norm": 0.124021977186203,
      "learning_rate": 4.924680038211867e-05,
      "loss": 0.3263,
      "num_input_tokens_seen": 6865256,
      "step": 94
    },
    {
      "epoch": 1.5649013499480788,
      "grad_norm": 0.14172782003879547,
      "learning_rate": 4.9230772742668866e-05,
      "loss": 0.3204,
      "num_input_tokens_seen": 6931152,
      "step": 95
    },
    {
      "epoch": 1.5815160955347871,
      "grad_norm": 0.12229758501052856,
      "learning_rate": 4.9214579028215776e-05,
      "loss": 0.326,
      "num_input_tokens_seen": 6998408,
      "step": 96
    },
    {
      "epoch": 1.5981308411214954,
      "grad_norm": 0.1242135688662529,
      "learning_rate": 4.919821934974933e-05,
      "loss": 0.2814,
      "num_input_tokens_seen": 7053008,
      "step": 97
    },
    {
      "epoch": 1.6147455867082035,
      "grad_norm": 0.12830108404159546,
      "learning_rate": 4.918169381939692e-05,
      "loss": 0.3254,
      "num_input_tokens_seen": 7106440,
      "step": 98
    },
    {
      "epoch": 1.6313603322949117,
      "grad_norm": 0.12180659174919128,
      "learning_rate": 4.916500255042268e-05,
      "loss": 0.3228,
      "num_input_tokens_seen": 7167032,
      "step": 99
    },
    {
      "epoch": 1.64797507788162,
      "grad_norm": 0.10792312026023865,
      "learning_rate": 4.914814565722671e-05,
      "loss": 0.2729,
      "num_input_tokens_seen": 7245720,
      "step": 100
    },
    {
      "epoch": 1.664589823468328,
      "grad_norm": 0.18523500859737396,
      "learning_rate": 4.913112325534426e-05,
      "loss": 0.3462,
      "num_input_tokens_seen": 7326320,
      "step": 101
    },
    {
      "epoch": 1.6812045690550363,
      "grad_norm": 0.09529964625835419,
      "learning_rate": 4.9113935461444955e-05,
      "loss": 0.3096,
      "num_input_tokens_seen": 7442232,
      "step": 102
    },
    {
      "epoch": 1.6978193146417446,
      "grad_norm": 0.14481183886528015,
      "learning_rate": 4.9096582393332025e-05,
      "loss": 0.3014,
      "num_input_tokens_seen": 7502496,
      "step": 103
    },
    {
      "epoch": 1.7144340602284527,
      "grad_norm": 0.14645016193389893,
      "learning_rate": 4.907906416994146e-05,
      "loss": 0.3336,
      "num_input_tokens_seen": 7566496,
      "step": 104
    },
    {
      "epoch": 1.731048805815161,
      "grad_norm": 0.1306885927915573,
      "learning_rate": 4.906138091134118e-05,
      "loss": 0.3911,
      "num_input_tokens_seen": 7629056,
      "step": 105
    },
    {
      "epoch": 1.7476635514018692,
      "grad_norm": 0.10863160341978073,
      "learning_rate": 4.9043532738730284e-05,
      "loss": 0.3201,
      "num_input_tokens_seen": 7706096,
      "step": 106
    },
    {
      "epoch": 1.7642782969885773,
      "grad_norm": 0.11725673079490662,
      "learning_rate": 4.9025519774438136e-05,
      "loss": 0.2783,
      "num_input_tokens_seen": 7780072,
      "step": 107
    },
    {
      "epoch": 1.7808930425752856,
      "grad_norm": 0.1243867501616478,
      "learning_rate": 4.900734214192358e-05,
      "loss": 0.3044,
      "num_input_tokens_seen": 7857712,
      "step": 108
    },
    {
      "epoch": 1.7975077881619939,
      "grad_norm": 0.13539955019950867,
      "learning_rate": 4.898899996577407e-05,
      "loss": 0.3009,
      "num_input_tokens_seen": 7916832,
      "step": 109
    },
    {
      "epoch": 1.814122533748702,
      "grad_norm": 0.11198178678750992,
      "learning_rate": 4.8970493371704826e-05,
      "loss": 0.3229,
      "num_input_tokens_seen": 7993056,
      "step": 110
    },
    {
      "epoch": 1.8307372793354102,
      "grad_norm": 0.11881165206432343,
      "learning_rate": 4.8951822486557986e-05,
      "loss": 0.3414,
      "num_input_tokens_seen": 8090056,
      "step": 111
    },
    {
      "epoch": 1.8473520249221185,
      "grad_norm": 0.12841404974460602,
      "learning_rate": 4.893298743830168e-05,
      "loss": 0.2907,
      "num_input_tokens_seen": 8164808,
      "step": 112
    },
    {
      "epoch": 1.8639667705088265,
      "grad_norm": 0.14767521619796753,
      "learning_rate": 4.891398835602925e-05,
      "loss": 0.2901,
      "num_input_tokens_seen": 8223568,
      "step": 113
    },
    {
      "epoch": 1.8805815160955348,
      "grad_norm": 0.15326914191246033,
      "learning_rate": 4.8894825369958255e-05,
      "loss": 0.2918,
      "num_input_tokens_seen": 8276160,
      "step": 114
    },
    {
      "epoch": 1.897196261682243,
      "grad_norm": 0.1210051029920578,
      "learning_rate": 4.8875498611429674e-05,
      "loss": 0.3074,
      "num_input_tokens_seen": 8354904,
      "step": 115
    },
    {
      "epoch": 1.9138110072689511,
      "grad_norm": 0.13544373214244843,
      "learning_rate": 4.8856008212906925e-05,
      "loss": 0.3461,
      "num_input_tokens_seen": 8442584,
      "step": 116
    },
    {
      "epoch": 1.9304257528556594,
      "grad_norm": 0.13535892963409424,
      "learning_rate": 4.8836354307975026e-05,
      "loss": 0.3078,
      "num_input_tokens_seen": 8506688,
      "step": 117
    },
    {
      "epoch": 1.9470404984423677,
      "grad_norm": 0.10383590310811996,
      "learning_rate": 4.881653703133966e-05,
      "loss": 0.2432,
      "num_input_tokens_seen": 8610712,
      "step": 118
    },
    {
      "epoch": 1.9636552440290758,
      "grad_norm": 0.12125886976718903,
      "learning_rate": 4.87965565188262e-05,
      "loss": 0.2915,
      "num_input_tokens_seen": 8692624,
      "step": 119
    },
    {
      "epoch": 1.980269989615784,
      "grad_norm": 0.1351424902677536,
      "learning_rate": 4.877641290737884e-05,
      "loss": 0.3006,
      "num_input_tokens_seen": 8772208,
      "step": 120
    },
    {
      "epoch": 1.9968847352024923,
      "grad_norm": 0.11472523212432861,
      "learning_rate": 4.8756106335059646e-05,
      "loss": 0.2774,
      "num_input_tokens_seen": 8854904,
      "step": 121
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.3606414794921875,
      "learning_rate": 4.87356369410476e-05,
      "loss": 0.2786,
      "num_input_tokens_seen": 8872656,
      "step": 122
    },
    {
      "epoch": 2.016614745586708,
      "grad_norm": 0.13124766945838928,
      "learning_rate": 4.8715004865637614e-05,
      "loss": 0.294,
      "num_input_tokens_seen": 8946480,
      "step": 123
    },
    {
      "epoch": 2.0332294911734166,
      "grad_norm": 0.12415049225091934,
      "learning_rate": 4.869421025023965e-05,
      "loss": 0.2931,
      "num_input_tokens_seen": 9023328,
      "step": 124
    },
    {
      "epoch": 2.0498442367601246,
      "grad_norm": 0.16626115143299103,
      "learning_rate": 4.867325323737765e-05,
      "loss": 0.2887,
      "num_input_tokens_seen": 9074320,
      "step": 125
    },
    {
      "epoch": 2.0664589823468327,
      "grad_norm": 0.153628870844841,
      "learning_rate": 4.8652133970688636e-05,
      "loss": 0.2776,
      "num_input_tokens_seen": 9148784,
      "step": 126
    },
    {
      "epoch": 2.083073727933541,
      "grad_norm": 0.12231138348579407,
      "learning_rate": 4.8630852594921706e-05,
      "loss": 0.3091,
      "num_input_tokens_seen": 9246624,
      "step": 127
    },
    {
      "epoch": 2.0996884735202492,
      "grad_norm": 0.15192057192325592,
      "learning_rate": 4.860940925593703e-05,
      "loss": 0.3354,
      "num_input_tokens_seen": 9328176,
      "step": 128
    },
    {
      "epoch": 2.1163032191069573,
      "grad_norm": 0.13820070028305054,
      "learning_rate": 4.8587804100704845e-05,
      "loss": 0.282,
      "num_input_tokens_seen": 9388936,
      "step": 129
    },
    {
      "epoch": 2.132917964693666,
      "grad_norm": 0.14466816186904907,
      "learning_rate": 4.856603727730447e-05,
      "loss": 0.2801,
      "num_input_tokens_seen": 9461664,
      "step": 130
    },
    {
      "epoch": 2.149532710280374,
      "grad_norm": 0.14671838283538818,
      "learning_rate": 4.854410893492326e-05,
      "loss": 0.2927,
      "num_input_tokens_seen": 9535000,
      "step": 131
    },
    {
      "epoch": 2.166147455867082,
      "grad_norm": 0.1757712960243225,
      "learning_rate": 4.852201922385564e-05,
      "loss": 0.2807,
      "num_input_tokens_seen": 9600296,
      "step": 132
    },
    {
      "epoch": 2.1827622014537904,
      "grad_norm": 0.17755423486232758,
      "learning_rate": 4.8499768295502004e-05,
      "loss": 0.2765,
      "num_input_tokens_seen": 9686784,
      "step": 133
    },
    {
      "epoch": 2.1993769470404985,
      "grad_norm": 0.13321827352046967,
      "learning_rate": 4.847735630236773e-05,
      "loss": 0.3068,
      "num_input_tokens_seen": 9781112,
      "step": 134
    },
    {
      "epoch": 2.2159916926272065,
      "grad_norm": 0.15012745559215546,
      "learning_rate": 4.8454783398062106e-05,
      "loss": 0.2737,
      "num_input_tokens_seen": 9849528,
      "step": 135
    },
    {
      "epoch": 2.232606438213915,
      "grad_norm": 0.14000360667705536,
      "learning_rate": 4.843204973729729e-05,
      "loss": 0.2831,
      "num_input_tokens_seen": 9931080,
      "step": 136
    },
    {
      "epoch": 2.249221183800623,
      "grad_norm": 0.14742712676525116,
      "learning_rate": 4.840915547588725e-05,
      "loss": 0.3047,
      "num_input_tokens_seen": 10011176,
      "step": 137
    },
    {
      "epoch": 2.265835929387331,
      "grad_norm": 0.16192346811294556,
      "learning_rate": 4.838610077074669e-05,
      "loss": 0.2759,
      "num_input_tokens_seen": 10084128,
      "step": 138
    },
    {
      "epoch": 2.2824506749740396,
      "grad_norm": 0.1502583771944046,
      "learning_rate": 4.836288577988996e-05,
      "loss": 0.298,
      "num_input_tokens_seen": 10155536,
      "step": 139
    },
    {
      "epoch": 2.2990654205607477,
      "grad_norm": 0.12661044299602509,
      "learning_rate": 4.8339510662430046e-05,
      "loss": 0.255,
      "num_input_tokens_seen": 10251160,
      "step": 140
    },
    {
      "epoch": 2.3156801661474558,
      "grad_norm": 0.14002998173236847,
      "learning_rate": 4.8315975578577355e-05,
      "loss": 0.2566,
      "num_input_tokens_seen": 10345864,
      "step": 141
    },
    {
      "epoch": 2.3322949117341643,
      "grad_norm": 0.17870523035526276,
      "learning_rate": 4.8292280689638725e-05,
      "loss": 0.4367,
      "num_input_tokens_seen": 10417616,
      "step": 142
    },
    {
      "epoch": 2.3489096573208723,
      "grad_norm": 0.17209866642951965,
      "learning_rate": 4.826842615801628e-05,
      "loss": 0.2954,
      "num_input_tokens_seen": 10481816,
      "step": 143
    },
    {
      "epoch": 2.3655244029075804,
      "grad_norm": 0.1665940284729004,
      "learning_rate": 4.8244412147206284e-05,
      "loss": 0.341,
      "num_input_tokens_seen": 10562056,
      "step": 144
    },
    {
      "epoch": 2.382139148494289,
      "grad_norm": 0.18919898569583893,
      "learning_rate": 4.822023882179811e-05,
      "loss": 0.2716,
      "num_input_tokens_seen": 10612808,
      "step": 145
    },
    {
      "epoch": 2.398753894080997,
      "grad_norm": 0.1681865006685257,
      "learning_rate": 4.8195906347473e-05,
      "loss": 0.2716,
      "num_input_tokens_seen": 10682328,
      "step": 146
    },
    {
      "epoch": 2.415368639667705,
      "grad_norm": 0.13141104578971863,
      "learning_rate": 4.817141489100302e-05,
      "loss": 0.2829,
      "num_input_tokens_seen": 10771912,
      "step": 147
    },
    {
      "epoch": 2.431983385254413,
      "grad_norm": 0.16544249653816223,
      "learning_rate": 4.814676462024988e-05,
      "loss": 0.3038,
      "num_input_tokens_seen": 10842232,
      "step": 148
    },
    {
      "epoch": 2.4485981308411215,
      "grad_norm": 0.17946277558803558,
      "learning_rate": 4.8121955704163745e-05,
      "loss": 0.2792,
      "num_input_tokens_seen": 10902264,
      "step": 149
    },
    {
      "epoch": 2.4652128764278296,
      "grad_norm": 0.14012685418128967,
      "learning_rate": 4.8096988312782174e-05,
      "loss": 0.2403,
      "num_input_tokens_seen": 10992744,
      "step": 150
    },
    {
      "epoch": 2.4818276220145377,
      "grad_norm": 0.103813536465168,
      "learning_rate": 4.8071862617228855e-05,
      "loss": 0.1605,
      "num_input_tokens_seen": 11090064,
      "step": 151
    },
    {
      "epoch": 2.498442367601246,
      "grad_norm": 0.1596001833677292,
      "learning_rate": 4.8046578789712515e-05,
      "loss": 0.2547,
      "num_input_tokens_seen": 11162864,
      "step": 152
    },
    {
      "epoch": 2.515057113187954,
      "grad_norm": 0.17366129159927368,
      "learning_rate": 4.8021137003525664e-05,
      "loss": 0.2676,
      "num_input_tokens_seen": 11224368,
      "step": 153
    },
    {
      "epoch": 2.5316718587746623,
      "grad_norm": 0.1615227609872818,
      "learning_rate": 4.7995537433043446e-05,
      "loss": 0.2898,
      "num_input_tokens_seen": 11291056,
      "step": 154
    },
    {
      "epoch": 2.5482866043613708,
      "grad_norm": 0.1951528787612915,
      "learning_rate": 4.796978025372246e-05,
      "loss": 0.2546,
      "num_input_tokens_seen": 11345464,
      "step": 155
    },
    {
      "epoch": 2.564901349948079,
      "grad_norm": 0.15065862238407135,
      "learning_rate": 4.794386564209953e-05,
      "loss": 0.3134,
      "num_input_tokens_seen": 11418912,
      "step": 156
    },
    {
      "epoch": 2.581516095534787,
      "grad_norm": 0.17094938457012177,
      "learning_rate": 4.79177937757905e-05,
      "loss": 0.2689,
      "num_input_tokens_seen": 11491216,
      "step": 157
    },
    {
      "epoch": 2.5981308411214954,
      "grad_norm": 0.16850312054157257,
      "learning_rate": 4.7891564833489035e-05,
      "loss": 0.2359,
      "num_input_tokens_seen": 11558016,
      "step": 158
    },
    {
      "epoch": 2.6147455867082035,
      "grad_norm": 0.16789822280406952,
      "learning_rate": 4.7865178994965344e-05,
      "loss": 0.2735,
      "num_input_tokens_seen": 11630432,
      "step": 159
    },
    {
      "epoch": 2.6313603322949115,
      "grad_norm": 0.19538354873657227,
      "learning_rate": 4.783863644106502e-05,
      "loss": 0.254,
      "num_input_tokens_seen": 11684624,
      "step": 160
    },
    {
      "epoch": 2.64797507788162,
      "grad_norm": 0.1609475016593933,
      "learning_rate": 4.781193735370777e-05,
      "loss": 0.2763,
      "num_input_tokens_seen": 11770232,
      "step": 161
    },
    {
      "epoch": 2.664589823468328,
      "grad_norm": 0.1964447796344757,
      "learning_rate": 4.7785081915886134e-05,
      "loss": 0.2663,
      "num_input_tokens_seen": 11828360,
      "step": 162
    },
    {
      "epoch": 2.681204569055036,
      "grad_norm": 0.18869946897029877,
      "learning_rate": 4.775807031166428e-05,
      "loss": 0.2625,
      "num_input_tokens_seen": 11915944,
      "step": 163
    },
    {
      "epoch": 2.6978193146417446,
      "grad_norm": 0.20539921522140503,
      "learning_rate": 4.773090272617672e-05,
      "loss": 0.2615,
      "num_input_tokens_seen": 11981792,
      "step": 164
    },
    {
      "epoch": 2.7144340602284527,
      "grad_norm": 0.1616145521402359,
      "learning_rate": 4.7703579345627035e-05,
      "loss": 0.3453,
      "num_input_tokens_seen": 12044024,
      "step": 165
    },
    {
      "epoch": 2.7310488058151607,
      "grad_norm": 0.22601978480815887,
      "learning_rate": 4.7676100357286624e-05,
      "loss": 0.3036,
      "num_input_tokens_seen": 12093424,
      "step": 166
    },
    {
      "epoch": 2.7476635514018692,
      "grad_norm": 0.15262462198734283,
      "learning_rate": 4.76484659494934e-05,
      "loss": 0.2523,
      "num_input_tokens_seen": 12167792,
      "step": 167
    },
    {
      "epoch": 2.7642782969885773,
      "grad_norm": 0.17928001284599304,
      "learning_rate": 4.762067631165049e-05,
      "loss": 0.2791,
      "num_input_tokens_seen": 12233712,
      "step": 168
    },
    {
      "epoch": 2.7808930425752854,
      "grad_norm": 0.15228766202926636,
      "learning_rate": 4.7592731634224966e-05,
      "loss": 0.2291,
      "num_input_tokens_seen": 12310544,
      "step": 169
    },
    {
      "epoch": 2.797507788161994,
      "grad_norm": 0.18862110376358032,
      "learning_rate": 4.756463210874652e-05,
      "loss": 0.2628,
      "num_input_tokens_seen": 12400160,
      "step": 170
    },
    {
      "epoch": 2.814122533748702,
      "grad_norm": 0.16640189290046692,
      "learning_rate": 4.753637792780614e-05,
      "loss": 0.2824,
      "num_input_tokens_seen": 12480432,
      "step": 171
    },
    {
      "epoch": 2.83073727933541,
      "grad_norm": 0.151117205619812,
      "learning_rate": 4.7507969285054845e-05,
      "loss": 0.2663,
      "num_input_tokens_seen": 12568064,
      "step": 172
    },
    {
      "epoch": 2.8473520249221185,
      "grad_norm": 0.26551589369773865,
      "learning_rate": 4.7479406375202264e-05,
      "loss": 0.28,
      "num_input_tokens_seen": 12647400,
      "step": 173
    },
    {
      "epoch": 2.8639667705088265,
      "grad_norm": 0.22416891157627106,
      "learning_rate": 4.745068939401539e-05,
      "loss": 0.2424,
      "num_input_tokens_seen": 12698208,
      "step": 174
    },
    {
      "epoch": 2.8805815160955346,
      "grad_norm": 0.2024654597043991,
      "learning_rate": 4.742181853831721e-05,
      "loss": 0.2518,
      "num_input_tokens_seen": 12758528,
      "step": 175
    },
    {
      "epoch": 2.897196261682243,
      "grad_norm": 0.18288369476795197,
      "learning_rate": 4.7392794005985326e-05,
      "loss": 0.259,
      "num_input_tokens_seen": 12837264,
      "step": 176
    },
    {
      "epoch": 2.913811007268951,
      "grad_norm": 0.18088208138942719,
      "learning_rate": 4.7363615995950626e-05,
      "loss": 0.247,
      "num_input_tokens_seen": 12902368,
      "step": 177
    },
    {
      "epoch": 2.930425752855659,
      "grad_norm": 0.16595424711704254,
      "learning_rate": 4.733428470819594e-05,
      "loss": 0.2438,
      "num_input_tokens_seen": 12974296,
      "step": 178
    },
    {
      "epoch": 2.9470404984423677,
      "grad_norm": 0.17989091575145721,
      "learning_rate": 4.730480034375462e-05,
      "loss": 0.2708,
      "num_input_tokens_seen": 13057280,
      "step": 179
    },
    {
      "epoch": 2.9636552440290758,
      "grad_norm": 0.16136637330055237,
      "learning_rate": 4.72751631047092e-05,
      "loss": 0.3171,
      "num_input_tokens_seen": 13158232,
      "step": 180
    }
  ],
  "logging_steps": 1.0,
  "max_steps": 1200,
  "num_input_tokens_seen": 13158232,
  "num_train_epochs": 20,
  "save_steps": 60,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 5.614082820239524e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}