{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.999259807549963,
  "eval_steps": 100,
  "global_step": 675,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.007401924500370096,
      "grad_norm": 2.6775108151736338,
      "learning_rate": 1.4705882352941177e-06,
      "loss": 1.0786,
      "step": 5
    },
    {
      "epoch": 0.014803849000740192,
      "grad_norm": 2.2851851588534995,
      "learning_rate": 2.9411764705882355e-06,
      "loss": 1.0902,
      "step": 10
    },
    {
      "epoch": 0.02220577350111029,
      "grad_norm": 1.417397856010511,
      "learning_rate": 4.411764705882353e-06,
      "loss": 1.0621,
      "step": 15
    },
    {
      "epoch": 0.029607698001480384,
      "grad_norm": 1.3738879264596535,
      "learning_rate": 5.882352941176471e-06,
      "loss": 1.0088,
      "step": 20
    },
    {
      "epoch": 0.037009622501850484,
      "grad_norm": 1.1109699834815427,
      "learning_rate": 7.352941176470589e-06,
      "loss": 0.9684,
      "step": 25
    },
    {
      "epoch": 0.04441154700222058,
      "grad_norm": 0.9463420086714082,
      "learning_rate": 8.823529411764707e-06,
      "loss": 0.9217,
      "step": 30
    },
    {
      "epoch": 0.05181347150259067,
      "grad_norm": 0.7526598257379206,
      "learning_rate": 1.0294117647058823e-05,
      "loss": 0.8859,
      "step": 35
    },
    {
      "epoch": 0.05921539600296077,
      "grad_norm": 0.6789903099544737,
      "learning_rate": 1.1764705882352942e-05,
      "loss": 0.8631,
      "step": 40
    },
    {
      "epoch": 0.06661732050333087,
      "grad_norm": 0.7365159287878734,
      "learning_rate": 1.323529411764706e-05,
      "loss": 0.8487,
      "step": 45
    },
    {
      "epoch": 0.07401924500370097,
      "grad_norm": 0.8042938213713893,
      "learning_rate": 1.4705882352941179e-05,
      "loss": 0.861,
      "step": 50
    },
    {
      "epoch": 0.08142116950407106,
      "grad_norm": 0.6165613278255507,
      "learning_rate": 1.6176470588235296e-05,
      "loss": 0.8545,
      "step": 55
    },
    {
      "epoch": 0.08882309400444116,
      "grad_norm": 0.7210736075544439,
      "learning_rate": 1.7647058823529414e-05,
      "loss": 0.8293,
      "step": 60
    },
    {
      "epoch": 0.09622501850481126,
      "grad_norm": 0.6666914117441736,
      "learning_rate": 1.911764705882353e-05,
      "loss": 0.8258,
      "step": 65
    },
    {
      "epoch": 0.10362694300518134,
      "grad_norm": 0.7516529208544338,
      "learning_rate": 1.9999464266898485e-05,
      "loss": 0.8209,
      "step": 70
    },
    {
      "epoch": 0.11102886750555144,
      "grad_norm": 0.9295745950078508,
      "learning_rate": 1.9993437928712977e-05,
      "loss": 0.8163,
      "step": 75
    },
    {
      "epoch": 0.11843079200592153,
      "grad_norm": 0.7963544491349841,
      "learning_rate": 1.998071963486563e-05,
      "loss": 0.806,
      "step": 80
    },
    {
      "epoch": 0.12583271650629163,
      "grad_norm": 0.6518245421854616,
      "learning_rate": 1.9961317901970953e-05,
      "loss": 0.7944,
      "step": 85
    },
    {
      "epoch": 0.13323464100666174,
      "grad_norm": 0.7301619917942446,
      "learning_rate": 1.993524572210807e-05,
      "loss": 0.7945,
      "step": 90
    },
    {
      "epoch": 0.14063656550703182,
      "grad_norm": 0.7939833726588112,
      "learning_rate": 1.990252055412077e-05,
      "loss": 0.7905,
      "step": 95
    },
    {
      "epoch": 0.14803849000740193,
      "grad_norm": 0.7581792486406171,
      "learning_rate": 1.9863164311926433e-05,
      "loss": 0.8172,
      "step": 100
    },
    {
      "epoch": 0.14803849000740193,
      "eval_loss": 0.8196660876274109,
      "eval_runtime": 3.1366,
      "eval_samples_per_second": 40.809,
      "eval_steps_per_second": 5.101,
      "step": 100
    },
    {
      "epoch": 0.15544041450777202,
      "grad_norm": 0.7016968966754382,
      "learning_rate": 1.981720334984174e-05,
      "loss": 0.7921,
      "step": 105
    },
    {
      "epoch": 0.16284233900814213,
      "grad_norm": 0.6900772941844979,
      "learning_rate": 1.9764668444934853e-05,
      "loss": 0.7859,
      "step": 110
    },
    {
      "epoch": 0.1702442635085122,
      "grad_norm": 0.7157070191788967,
      "learning_rate": 1.970559477641606e-05,
      "loss": 0.7632,
      "step": 115
    },
    {
      "epoch": 0.17764618800888232,
      "grad_norm": 0.712102923863696,
      "learning_rate": 1.9640021902080523e-05,
      "loss": 0.7929,
      "step": 120
    },
    {
      "epoch": 0.1850481125092524,
      "grad_norm": 0.6157872973388774,
      "learning_rate": 1.9567993731818988e-05,
      "loss": 0.7917,
      "step": 125
    },
    {
      "epoch": 0.19245003700962252,
      "grad_norm": 0.7204939438541231,
      "learning_rate": 1.9489558498214197e-05,
      "loss": 0.7841,
      "step": 130
    },
    {
      "epoch": 0.1998519615099926,
      "grad_norm": 0.6377400481525809,
      "learning_rate": 1.9404768724242667e-05,
      "loss": 0.7703,
      "step": 135
    },
    {
      "epoch": 0.20725388601036268,
      "grad_norm": 0.7051032220059237,
      "learning_rate": 1.931368118810346e-05,
      "loss": 0.7946,
      "step": 140
    },
    {
      "epoch": 0.2146558105107328,
      "grad_norm": 0.7323582491272024,
      "learning_rate": 1.92163568851975e-05,
      "loss": 0.7758,
      "step": 145
    },
    {
      "epoch": 0.22205773501110287,
      "grad_norm": 0.756759073944075,
      "learning_rate": 1.911286098728296e-05,
      "loss": 0.7719,
      "step": 150
    },
    {
      "epoch": 0.22945965951147299,
      "grad_norm": 0.6865799388160192,
      "learning_rate": 1.900326279883392e-05,
      "loss": 0.8015,
      "step": 155
    },
    {
      "epoch": 0.23686158401184307,
      "grad_norm": 0.6847504220269061,
      "learning_rate": 1.8887635710631716e-05,
      "loss": 0.8043,
      "step": 160
    },
    {
      "epoch": 0.24426350851221318,
      "grad_norm": 0.6700839600945346,
      "learning_rate": 1.8766057150619865e-05,
      "loss": 0.7775,
      "step": 165
    },
    {
      "epoch": 0.25166543301258326,
      "grad_norm": 0.6680950835148527,
      "learning_rate": 1.8638608532055635e-05,
      "loss": 0.7947,
      "step": 170
    },
    {
      "epoch": 0.25906735751295334,
      "grad_norm": 0.6402750693582475,
      "learning_rate": 1.8505375198992856e-05,
      "loss": 0.7829,
      "step": 175
    },
    {
      "epoch": 0.2664692820133235,
      "grad_norm": 0.6939213711416119,
      "learning_rate": 1.836644636913258e-05,
      "loss": 0.754,
      "step": 180
    },
    {
      "epoch": 0.27387120651369357,
      "grad_norm": 0.7356761514310879,
      "learning_rate": 1.8221915074079764e-05,
      "loss": 0.7777,
      "step": 185
    },
    {
      "epoch": 0.28127313101406365,
      "grad_norm": 0.802978821551516,
      "learning_rate": 1.8071878097046064e-05,
      "loss": 0.7563,
      "step": 190
    },
    {
      "epoch": 0.28867505551443373,
      "grad_norm": 0.6184097232596234,
      "learning_rate": 1.7916435908040413e-05,
      "loss": 0.7724,
      "step": 195
    },
    {
      "epoch": 0.29607698001480387,
      "grad_norm": 0.6385164678688181,
      "learning_rate": 1.7755692596590778e-05,
      "loss": 0.7745,
      "step": 200
    },
    {
      "epoch": 0.29607698001480387,
      "eval_loss": 0.789804220199585,
      "eval_runtime": 3.1175,
      "eval_samples_per_second": 41.059,
      "eval_steps_per_second": 5.132,
      "step": 200
    },
    {
      "epoch": 0.30347890451517395,
      "grad_norm": 0.6691522805213952,
      "learning_rate": 1.7589755802042188e-05,
      "loss": 0.7729,
      "step": 205
    },
    {
      "epoch": 0.31088082901554404,
      "grad_norm": 0.6636501965912796,
      "learning_rate": 1.7418736641477636e-05,
      "loss": 0.7561,
      "step": 210
    },
    {
      "epoch": 0.3182827535159141,
      "grad_norm": 0.611145630788428,
      "learning_rate": 1.7242749635310222e-05,
      "loss": 0.758,
      "step": 215
    },
    {
      "epoch": 0.32568467801628426,
      "grad_norm": 0.6305855837605814,
      "learning_rate": 1.7061912630596252e-05,
      "loss": 0.7603,
      "step": 220
    },
    {
      "epoch": 0.33308660251665434,
      "grad_norm": 0.7090046779427459,
      "learning_rate": 1.6876346722120747e-05,
      "loss": 0.7751,
      "step": 225
    },
    {
      "epoch": 0.3404885270170244,
      "grad_norm": 0.6532776336382563,
      "learning_rate": 1.6686176171308125e-05,
      "loss": 0.7977,
      "step": 230
    },
    {
      "epoch": 0.3478904515173945,
      "grad_norm": 0.6472281999169955,
      "learning_rate": 1.6491528323012412e-05,
      "loss": 0.7594,
      "step": 235
    },
    {
      "epoch": 0.35529237601776464,
      "grad_norm": 0.6244474985256501,
      "learning_rate": 1.6292533520242663e-05,
      "loss": 0.7623,
      "step": 240
    },
    {
      "epoch": 0.3626943005181347,
      "grad_norm": 0.617771152272651,
      "learning_rate": 1.6089325016880737e-05,
      "loss": 0.7526,
      "step": 245
    },
    {
      "epoch": 0.3700962250185048,
      "grad_norm": 0.662731194922601,
      "learning_rate": 1.588203888844982e-05,
      "loss": 0.768,
      "step": 250
    },
    {
      "epoch": 0.3774981495188749,
      "grad_norm": 0.6103795212497318,
      "learning_rate": 1.5670813940993504e-05,
      "loss": 0.7409,
      "step": 255
    },
    {
      "epoch": 0.38490007401924503,
      "grad_norm": 0.5749536608079369,
      "learning_rate": 1.5455791618126407e-05,
      "loss": 0.7332,
      "step": 260
    },
    {
      "epoch": 0.3923019985196151,
      "grad_norm": 0.602251187466067,
      "learning_rate": 1.5237115906318565e-05,
      "loss": 0.7571,
      "step": 265
    },
    {
      "epoch": 0.3997039230199852,
      "grad_norm": 0.6415472326761182,
      "learning_rate": 1.5014933238477069e-05,
      "loss": 0.7378,
      "step": 270
    },
    {
      "epoch": 0.4071058475203553,
      "grad_norm": 0.659239039689651,
      "learning_rate": 1.4789392395889468e-05,
      "loss": 0.7631,
      "step": 275
    },
    {
      "epoch": 0.41450777202072536,
      "grad_norm": 0.6281187227003566,
      "learning_rate": 1.4560644408594602e-05,
      "loss": 0.744,
      "step": 280
    },
    {
      "epoch": 0.4219096965210955,
      "grad_norm": 0.6384180417923108,
      "learning_rate": 1.432884245424761e-05,
      "loss": 0.7556,
      "step": 285
    },
    {
      "epoch": 0.4293116210214656,
      "grad_norm": 0.6391130592098728,
      "learning_rate": 1.4094141755546816e-05,
      "loss": 0.7831,
      "step": 290
    },
    {
      "epoch": 0.43671354552183567,
      "grad_norm": 0.6054256423919505,
      "learning_rate": 1.3856699476291176e-05,
      "loss": 0.7426,
      "step": 295
    },
    {
      "epoch": 0.44411547002220575,
      "grad_norm": 0.6630675705036657,
      "learning_rate": 1.3616674616137902e-05,
      "loss": 0.7643,
      "step": 300
    },
    {
      "epoch": 0.44411547002220575,
      "eval_loss": 0.7754666805267334,
      "eval_runtime": 3.1148,
      "eval_samples_per_second": 41.094,
      "eval_steps_per_second": 5.137,
      "step": 300
    },
    {
      "epoch": 0.4515173945225759,
      "grad_norm": 0.6288014510844427,
      "learning_rate": 1.3374227904130724e-05,
      "loss": 0.7547,
      "step": 305
    },
    {
      "epoch": 0.45891931902294597,
      "grad_norm": 0.6217403326546215,
      "learning_rate": 1.3129521691070108e-05,
      "loss": 0.7326,
      "step": 310
    },
    {
      "epoch": 0.46632124352331605,
      "grad_norm": 0.6800329210502148,
      "learning_rate": 1.2882719840797473e-05,
      "loss": 0.7511,
      "step": 315
    },
    {
      "epoch": 0.47372316802368614,
      "grad_norm": 0.6592173647356895,
      "learning_rate": 1.2633987620466229e-05,
      "loss": 0.7353,
      "step": 320
    },
    {
      "epoch": 0.4811250925240563,
      "grad_norm": 0.61539180630925,
      "learning_rate": 1.2383491589873122e-05,
      "loss": 0.7404,
      "step": 325
    },
    {
      "epoch": 0.48852701702442636,
      "grad_norm": 0.5886471962514216,
      "learning_rate": 1.213139948992394e-05,
      "loss": 0.7497,
      "step": 330
    },
    {
      "epoch": 0.49592894152479644,
      "grad_norm": 0.6761058710511431,
      "learning_rate": 1.187788013030837e-05,
      "loss": 0.7466,
      "step": 335
    },
    {
      "epoch": 0.5033308660251665,
      "grad_norm": 0.5915448374095998,
      "learning_rate": 1.1623103276459086e-05,
      "loss": 0.7505,
      "step": 340
    },
    {
      "epoch": 0.5107327905255367,
      "grad_norm": 0.613959957668376,
      "learning_rate": 1.1367239535870913e-05,
      "loss": 0.7424,
      "step": 345
    },
    {
      "epoch": 0.5181347150259067,
      "grad_norm": 0.6605036359255322,
      "learning_rate": 1.1110460243856051e-05,
      "loss": 0.73,
      "step": 350
    },
    {
      "epoch": 0.5255366395262768,
      "grad_norm": 0.6398359412241029,
      "learning_rate": 1.085293734881197e-05,
      "loss": 0.7466,
      "step": 355
    },
    {
      "epoch": 0.532938564026647,
      "grad_norm": 0.5798477551421798,
      "learning_rate": 1.0594843297078736e-05,
      "loss": 0.7658,
      "step": 360
    },
    {
      "epoch": 0.540340488527017,
      "grad_norm": 0.6712170318895565,
      "learning_rate": 1.0336350917462925e-05,
      "loss": 0.7557,
      "step": 365
    },
    {
      "epoch": 0.5477424130273871,
      "grad_norm": 0.5826431750982008,
      "learning_rate": 1.0077633305505402e-05,
      "loss": 0.7431,
      "step": 370
    },
    {
      "epoch": 0.5551443375277573,
      "grad_norm": 0.6066723768251325,
      "learning_rate": 9.818863707570476e-06,
      "loss": 0.7606,
      "step": 375
    },
    {
      "epoch": 0.5625462620281273,
      "grad_norm": 0.6314411822904215,
      "learning_rate": 9.560215404834094e-06,
      "loss": 0.7513,
      "step": 380
    },
    {
      "epoch": 0.5699481865284974,
      "grad_norm": 0.6118960640467581,
      "learning_rate": 9.30186159724869e-06,
      "loss": 0.7143,
      "step": 385
    },
    {
      "epoch": 0.5773501110288675,
      "grad_norm": 0.5791113121477641,
      "learning_rate": 9.043975287562443e-06,
      "loss": 0.7468,
      "step": 390
    },
    {
      "epoch": 0.5847520355292376,
      "grad_norm": 0.6539665929663285,
      "learning_rate": 8.786729165470584e-06,
      "loss": 0.7252,
      "step": 395
    },
    {
      "epoch": 0.5921539600296077,
      "grad_norm": 0.6570821598447145,
      "learning_rate": 8.530295491976338e-06,
      "loss": 0.7306,
      "step": 400
    },
    {
      "epoch": 0.5921539600296077,
      "eval_loss": 0.7634406089782715,
      "eval_runtime": 3.1189,
      "eval_samples_per_second": 41.04,
      "eval_steps_per_second": 5.13,
      "step": 400
    },
    {
      "epoch": 0.5995558845299778,
      "grad_norm": 0.5746384557086938,
      "learning_rate": 8.274845984038916e-06,
      "loss": 0.7172,
      "step": 405
    },
    {
      "epoch": 0.6069578090303479,
      "grad_norm": 0.6320655117299494,
      "learning_rate": 8.020551699585843e-06,
      "loss": 0.7466,
      "step": 410
    },
    {
      "epoch": 0.6143597335307179,
      "grad_norm": 0.5898386214469216,
      "learning_rate": 7.76758292296659e-06,
      "loss": 0.7263,
      "step": 415
    },
    {
      "epoch": 0.6217616580310881,
      "grad_norm": 0.6220035683197175,
      "learning_rate": 7.5161090509242005e-06,
      "loss": 0.7416,
      "step": 420
    },
    {
      "epoch": 0.6291635825314582,
      "grad_norm": 0.6329578494406625,
      "learning_rate": 7.2662984791613186e-06,
      "loss": 0.7342,
      "step": 425
    },
    {
      "epoch": 0.6365655070318282,
      "grad_norm": 0.6311281483111099,
      "learning_rate": 7.01831848957653e-06,
      "loss": 0.7487,
      "step": 430
    },
    {
      "epoch": 0.6439674315321984,
      "grad_norm": 0.5769698545850361,
      "learning_rate": 6.772335138246548e-06,
      "loss": 0.7468,
      "step": 435
    },
    {
      "epoch": 0.6513693560325685,
      "grad_norm": 0.6102214233524648,
      "learning_rate": 6.528513144229256e-06,
      "loss": 0.7426,
      "step": 440
    },
    {
      "epoch": 0.6587712805329385,
      "grad_norm": 0.5989944660076076,
      "learning_rate": 6.287015779262064e-06,
      "loss": 0.7487,
      "step": 445
    },
    {
      "epoch": 0.6661732050333087,
      "grad_norm": 0.6314411792160693,
      "learning_rate": 6.048004758429451e-06,
      "loss": 0.7272,
      "step": 450
    },
    {
      "epoch": 0.6735751295336787,
      "grad_norm": 0.5870201642987204,
      "learning_rate": 5.811640131872867e-06,
      "loss": 0.7496,
      "step": 455
    },
    {
      "epoch": 0.6809770540340488,
      "grad_norm": 0.5903691127260594,
      "learning_rate": 5.578080177615575e-06,
      "loss": 0.72,
      "step": 460
    },
    {
      "epoch": 0.688378978534419,
      "grad_norm": 0.6012396431395267,
      "learning_rate": 5.347481295574141e-06,
      "loss": 0.7169,
      "step": 465
    },
    {
      "epoch": 0.695780903034789,
      "grad_norm": 0.5964212989584798,
      "learning_rate": 5.119997902827584e-06,
      "loss": 0.7285,
      "step": 470
    },
    {
      "epoch": 0.7031828275351591,
      "grad_norm": 0.583131600111171,
      "learning_rate": 4.8957823302142916e-06,
      "loss": 0.7352,
      "step": 475
    },
    {
      "epoch": 0.7105847520355293,
      "grad_norm": 0.6015054612003533,
      "learning_rate": 4.674984720325961e-06,
      "loss": 0.7207,
      "step": 480
    },
    {
      "epoch": 0.7179866765358993,
      "grad_norm": 0.572136571323486,
      "learning_rate": 4.457752926966888e-06,
      "loss": 0.7148,
      "step": 485
    },
    {
      "epoch": 0.7253886010362695,
      "grad_norm": 0.56847509551642,
      "learning_rate": 4.244232416145839e-06,
      "loss": 0.7336,
      "step": 490
    },
    {
      "epoch": 0.7327905255366395,
      "grad_norm": 0.5757906808389407,
      "learning_rate": 4.0345661686669745e-06,
      "loss": 0.7268,
      "step": 495
    },
    {
      "epoch": 0.7401924500370096,
      "grad_norm": 0.5983359707263747,
      "learning_rate": 3.828894584384867e-06,
      "loss": 0.7354,
      "step": 500
    },
    {
      "epoch": 0.7401924500370096,
      "eval_loss": 0.7560299634933472,
      "eval_runtime": 3.1145,
      "eval_samples_per_second": 41.098,
      "eval_steps_per_second": 5.137,
      "step": 500
    },
    {
      "epoch": 0.7475943745373798,
      "grad_norm": 0.5419364484450241,
      "learning_rate": 3.62735538818787e-06,
      "loss": 0.7197,
      "step": 505
    },
    {
      "epoch": 0.7549962990377498,
      "grad_norm": 0.6155819372126309,
      "learning_rate": 3.4300835377726904e-06,
      "loss": 0.7231,
      "step": 510
    },
    {
      "epoch": 0.7623982235381199,
      "grad_norm": 0.5981808223491859,
      "learning_rate": 3.2372111332720045e-06,
      "loss": 0.7586,
      "step": 515
    },
    {
      "epoch": 0.7698001480384901,
      "grad_norm": 0.595017563636788,
      "learning_rate": 3.048867328795588e-06,
      "loss": 0.7155,
      "step": 520
    },
    {
      "epoch": 0.7772020725388601,
      "grad_norm": 0.5884490157915484,
      "learning_rate": 2.865178245944218e-06,
      "loss": 0.7143,
      "step": 525
    },
    {
      "epoch": 0.7846039970392302,
      "grad_norm": 0.5544892806408136,
      "learning_rate": 2.686266889354211e-06,
      "loss": 0.7373,
      "step": 530
    },
    {
      "epoch": 0.7920059215396003,
      "grad_norm": 0.5749123573168023,
      "learning_rate": 2.5122530643292274e-06,
      "loss": 0.7427,
      "step": 535
    },
    {
      "epoch": 0.7994078460399704,
      "grad_norm": 0.6154627187110704,
      "learning_rate": 2.3432532966144526e-06,
      "loss": 0.7322,
      "step": 540
    },
    {
      "epoch": 0.8068097705403405,
      "grad_norm": 0.5733911384614117,
      "learning_rate": 2.1793807543668857e-06,
      "loss": 0.7339,
      "step": 545
    },
    {
      "epoch": 0.8142116950407106,
      "grad_norm": 0.5381154079459709,
      "learning_rate": 2.0207451723739633e-06,
      "loss": 0.7257,
      "step": 550
    },
    {
      "epoch": 0.8216136195410807,
      "grad_norm": 0.5433117693556794,
      "learning_rate": 1.8674527785713247e-06,
      "loss": 0.7322,
      "step": 555
    },
    {
      "epoch": 0.8290155440414507,
      "grad_norm": 0.595956230708808,
      "learning_rate": 1.7196062229088606e-06,
      "loss": 0.6995,
      "step": 560
    },
    {
      "epoch": 0.8364174685418209,
      "grad_norm": 0.6085342637714587,
      "learning_rate": 1.577304508612717e-06,
      "loss": 0.7297,
      "step": 565
    },
    {
      "epoch": 0.843819393042191,
      "grad_norm": 0.5074631477260008,
      "learning_rate": 1.4406429258892762e-06,
      "loss": 0.7501,
      "step": 570
    },
    {
      "epoch": 0.851221317542561,
      "grad_norm": 0.5995398947896622,
      "learning_rate": 1.3097129881154936e-06,
      "loss": 0.7198,
      "step": 575
    },
    {
      "epoch": 0.8586232420429312,
      "grad_norm": 0.5695360882732575,
      "learning_rate": 1.1846023705583442e-06,
      "loss": 0.7162,
      "step": 580
    },
    {
      "epoch": 0.8660251665433013,
      "grad_norm": 0.5623090258953476,
      "learning_rate": 1.065394851664394e-06,
      "loss": 0.7343,
      "step": 585
    },
    {
      "epoch": 0.8734270910436713,
      "grad_norm": 0.5332283600389667,
      "learning_rate": 9.521702569588199e-07,
      "loss": 0.7534,
      "step": 590
    },
    {
      "epoch": 0.8808290155440415,
      "grad_norm": 0.5633894812870882,
      "learning_rate": 8.450044055914497e-07,
      "loss": 0.7219,
      "step": 595
    },
    {
      "epoch": 0.8882309400444115,
      "grad_norm": 0.5436178972328131,
      "learning_rate": 7.439690595656013e-07,
      "loss": 0.7444,
      "step": 600
    },
    {
      "epoch": 0.8882309400444115,
      "eval_loss": 0.753079891204834,
      "eval_runtime": 3.1158,
      "eval_samples_per_second": 41.081,
      "eval_steps_per_second": 5.135,
      "step": 600
    },
    {
      "epoch": 0.8956328645447816,
      "grad_norm": 0.6195975140382083,
      "learning_rate": 6.491318756837417e-07,
      "loss": 0.7298,
      "step": 605
    },
    {
      "epoch": 0.9030347890451518,
      "grad_norm": 0.5006646668310524,
      "learning_rate": 5.605563602421149e-07,
      "loss": 0.7057,
      "step": 610
    },
    {
      "epoch": 0.9104367135455218,
      "grad_norm": 0.5419782192826523,
      "learning_rate": 4.783018265047179e-07,
      "loss": 0.7555,
      "step": 615
    },
    {
      "epoch": 0.9178386380458919,
      "grad_norm": 0.5640257061503108,
      "learning_rate": 4.024233549850509e-07,
      "loss": 0.7435,
      "step": 620
    },
    {
      "epoch": 0.9252405625462621,
      "grad_norm": 0.5379715425608906,
      "learning_rate": 3.329717565622825e-07,
      "loss": 0.7403,
      "step": 625
    },
    {
      "epoch": 0.9326424870466321,
      "grad_norm": 0.5381235622677996,
      "learning_rate": 2.6999353845651113e-07,
      "loss": 0.724,
      "step": 630
    },
    {
      "epoch": 0.9400444115470022,
      "grad_norm": 0.5712874960471778,
      "learning_rate": 2.1353087308590314e-07,
      "loss": 0.7389,
      "step": 635
    },
    {
      "epoch": 0.9474463360473723,
      "grad_norm": 0.5442876768823635,
      "learning_rate": 1.6362156982656085e-07,
      "loss": 0.729,
      "step": 640
    },
    {
      "epoch": 0.9548482605477424,
      "grad_norm": 0.5108903031358344,
      "learning_rate": 1.2029904969404482e-07,
      "loss": 0.7124,
      "step": 645
    },
    {
      "epoch": 0.9622501850481125,
      "grad_norm": 0.5895082578648588,
      "learning_rate": 8.359232296349163e-08,
      "loss": 0.7163,
      "step": 650
    },
    {
      "epoch": 0.9696521095484826,
      "grad_norm": 0.5546776379603184,
      "learning_rate": 5.3525969743324356e-08,
      "loss": 0.732,
      "step": 655
    },
    {
      "epoch": 0.9770540340488527,
      "grad_norm": 0.5289857496809237,
      "learning_rate": 3.012012351554017e-08,
      "loss": 0.7063,
      "step": 660
    },
    {
      "epoch": 0.9844559585492227,
      "grad_norm": 0.5751846330162846,
      "learning_rate": 1.3390457653639221e-08,
      "loss": 0.7351,
      "step": 665
    },
    {
      "epoch": 0.9918578830495929,
      "grad_norm": 0.6323603287547246,
      "learning_rate": 3.3481749271768726e-09,
      "loss": 0.746,
      "step": 670
    },
    {
      "epoch": 0.999259807549963,
      "grad_norm": 0.5546360557881977,
      "learning_rate": 0.0,
      "loss": 0.7157,
      "step": 675
    },
    {
      "epoch": 0.999259807549963,
      "step": 675,
      "total_flos": 76888336760832.0,
      "train_loss": 0.7674887858496772,
      "train_runtime": 1975.5352,
      "train_samples_per_second": 10.939,
      "train_steps_per_second": 0.342
    }
  ],
  "logging_steps": 5,
  "max_steps": 675,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 76888336760832.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}
|