{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.8831531217300315,
  "global_step": 7000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03,
      "learning_rate": 4.4000000000000006e-05,
      "loss": 0.2783,
      "step": 50
    },
    {
      "epoch": 0.07,
      "learning_rate": 9.4e-05,
      "loss": 0.079,
      "step": 100
    },
    {
      "epoch": 0.1,
      "learning_rate": 9.937721160651098e-05,
      "loss": 0.0502,
      "step": 150
    },
    {
      "epoch": 0.14,
      "learning_rate": 9.86694975230007e-05,
      "loss": 0.0464,
      "step": 200
    },
    {
      "epoch": 0.14,
      "eval_loss": 0.029130851849913597,
      "eval_runtime": 1098.3952,
      "eval_samples_per_second": 6.711,
      "eval_steps_per_second": 0.28,
      "step": 200
    },
    {
      "epoch": 0.17,
      "learning_rate": 9.796178343949045e-05,
      "loss": 0.0331,
      "step": 250
    },
    {
      "epoch": 0.21,
      "learning_rate": 9.725406935598019e-05,
      "loss": 0.0292,
      "step": 300
    },
    {
      "epoch": 0.24,
      "learning_rate": 9.654635527246992e-05,
      "loss": 0.0292,
      "step": 350
    },
    {
      "epoch": 0.28,
      "learning_rate": 9.583864118895966e-05,
      "loss": 0.032,
      "step": 400
    },
    {
      "epoch": 0.28,
      "eval_loss": 0.02652278169989586,
      "eval_runtime": 1094.8086,
      "eval_samples_per_second": 6.733,
      "eval_steps_per_second": 0.281,
      "step": 400
    },
    {
      "epoch": 0.31,
      "learning_rate": 9.513092710544941e-05,
      "loss": 0.0272,
      "step": 450
    },
    {
      "epoch": 0.35,
      "learning_rate": 9.442321302193913e-05,
      "loss": 0.026,
      "step": 500
    },
    {
      "epoch": 0.38,
      "learning_rate": 9.371549893842888e-05,
      "loss": 0.0227,
      "step": 550
    },
    {
      "epoch": 0.42,
      "learning_rate": 9.300778485491862e-05,
      "loss": 0.0246,
      "step": 600
    },
    {
      "epoch": 0.42,
      "eval_loss": 0.02081386186182499,
      "eval_runtime": 1091.9223,
      "eval_samples_per_second": 6.75,
      "eval_steps_per_second": 0.282,
      "step": 600
    },
    {
      "epoch": 0.45,
      "learning_rate": 9.230007077140835e-05,
      "loss": 0.0263,
      "step": 650
    },
    {
      "epoch": 0.49,
      "learning_rate": 9.159235668789809e-05,
      "loss": 0.0252,
      "step": 700
    },
    {
      "epoch": 0.52,
      "learning_rate": 9.088464260438783e-05,
      "loss": 0.0253,
      "step": 750
    },
    {
      "epoch": 0.56,
      "learning_rate": 9.017692852087756e-05,
      "loss": 0.0274,
      "step": 800
    },
    {
      "epoch": 0.56,
      "eval_loss": 0.019421329721808434,
      "eval_runtime": 1091.9762,
      "eval_samples_per_second": 6.75,
      "eval_steps_per_second": 0.282,
      "step": 800
    },
    {
      "epoch": 0.59,
      "learning_rate": 8.946921443736731e-05,
      "loss": 0.0211,
      "step": 850
    },
    {
      "epoch": 0.63,
      "learning_rate": 8.876150035385705e-05,
      "loss": 0.0234,
      "step": 900
    },
    {
      "epoch": 0.66,
      "learning_rate": 8.805378627034678e-05,
      "loss": 0.0227,
      "step": 950
    },
    {
      "epoch": 0.7,
      "learning_rate": 8.734607218683652e-05,
      "loss": 0.02,
      "step": 1000
    },
    {
      "epoch": 0.7,
      "eval_loss": 0.01722061075270176,
      "eval_runtime": 1091.8825,
      "eval_samples_per_second": 6.751,
      "eval_steps_per_second": 0.282,
      "step": 1000
    },
    {
      "epoch": 0.73,
      "learning_rate": 8.663835810332625e-05,
      "loss": 0.0229,
      "step": 1050
    },
    {
      "epoch": 0.77,
      "learning_rate": 8.5930644019816e-05,
      "loss": 0.0189,
      "step": 1100
    },
    {
      "epoch": 0.8,
      "learning_rate": 8.522292993630574e-05,
      "loss": 0.0188,
      "step": 1150
    },
    {
      "epoch": 0.84,
      "learning_rate": 8.451521585279548e-05,
      "loss": 0.018,
      "step": 1200
    },
    {
      "epoch": 0.84,
      "eval_loss": 0.017043238505721092,
      "eval_runtime": 1091.8986,
      "eval_samples_per_second": 6.751,
      "eval_steps_per_second": 0.282,
      "step": 1200
    },
    {
      "epoch": 0.87,
      "learning_rate": 8.380750176928521e-05,
      "loss": 0.0183,
      "step": 1250
    },
    {
      "epoch": 0.91,
      "learning_rate": 8.309978768577495e-05,
      "loss": 0.0207,
      "step": 1300
    },
    {
      "epoch": 0.94,
      "learning_rate": 8.239207360226468e-05,
      "loss": 0.0147,
      "step": 1350
    },
    {
      "epoch": 0.98,
      "learning_rate": 8.168435951875443e-05,
      "loss": 0.0151,
      "step": 1400
    },
    {
      "epoch": 0.98,
      "eval_loss": 0.015169227495789528,
      "eval_runtime": 1091.8795,
      "eval_samples_per_second": 6.751,
      "eval_steps_per_second": 0.282,
      "step": 1400
    },
    {
      "epoch": 1.01,
      "learning_rate": 8.097664543524417e-05,
      "loss": 0.0148,
      "step": 1450
    },
    {
      "epoch": 1.05,
      "learning_rate": 8.02689313517339e-05,
      "loss": 0.0125,
      "step": 1500
    },
    {
      "epoch": 1.08,
      "learning_rate": 7.956121726822364e-05,
      "loss": 0.0129,
      "step": 1550
    },
    {
      "epoch": 1.12,
      "learning_rate": 7.885350318471339e-05,
      "loss": 0.0138,
      "step": 1600
    },
    {
      "epoch": 1.12,
      "eval_loss": 0.014357708394527435,
      "eval_runtime": 1091.7946,
      "eval_samples_per_second": 6.751,
      "eval_steps_per_second": 0.282,
      "step": 1600
    },
    {
      "epoch": 1.15,
      "learning_rate": 7.814578910120311e-05,
      "loss": 0.015,
      "step": 1650
    },
    {
      "epoch": 1.19,
      "learning_rate": 7.743807501769286e-05,
      "loss": 0.0152,
      "step": 1700
    },
    {
      "epoch": 1.22,
      "learning_rate": 7.67303609341826e-05,
      "loss": 0.014,
      "step": 1750
    },
    {
      "epoch": 1.26,
      "learning_rate": 7.602264685067233e-05,
      "loss": 0.015,
      "step": 1800
    },
    {
      "epoch": 1.26,
      "eval_loss": 0.013772633858025074,
      "eval_runtime": 1091.9078,
      "eval_samples_per_second": 6.751,
      "eval_steps_per_second": 0.282,
      "step": 1800
    },
    {
      "epoch": 1.29,
      "learning_rate": 7.531493276716207e-05,
      "loss": 0.0134,
      "step": 1850
    },
    {
      "epoch": 1.33,
      "learning_rate": 7.46072186836518e-05,
      "loss": 0.0116,
      "step": 1900
    },
    {
      "epoch": 1.36,
      "learning_rate": 7.389950460014154e-05,
      "loss": 0.0136,
      "step": 1950
    },
    {
      "epoch": 1.4,
      "learning_rate": 7.319179051663129e-05,
      "loss": 0.0134,
      "step": 2000
    },
    {
      "epoch": 1.4,
      "eval_loss": 0.013411927036941051,
      "eval_runtime": 1091.4452,
      "eval_samples_per_second": 6.753,
      "eval_steps_per_second": 0.282,
      "step": 2000
    },
    {
      "epoch": 1.43,
      "learning_rate": 7.248407643312101e-05,
      "loss": 0.0129,
      "step": 2050
    },
    {
      "epoch": 1.46,
      "learning_rate": 7.177636234961076e-05,
      "loss": 0.0144,
      "step": 2100
    },
    {
      "epoch": 1.5,
      "learning_rate": 7.10686482661005e-05,
      "loss": 0.0119,
      "step": 2150
    },
    {
      "epoch": 1.53,
      "learning_rate": 7.036093418259024e-05,
      "loss": 0.0152,
      "step": 2200
    },
    {
      "epoch": 1.53,
      "eval_loss": 0.014650107361376286,
      "eval_runtime": 1091.4623,
      "eval_samples_per_second": 6.753,
      "eval_steps_per_second": 0.282,
      "step": 2200
    },
    {
      "epoch": 1.57,
      "learning_rate": 6.965322009907997e-05,
      "loss": 0.0145,
      "step": 2250
    },
    {
      "epoch": 1.6,
      "learning_rate": 6.894550601556972e-05,
      "loss": 0.0154,
      "step": 2300
    },
    {
      "epoch": 1.64,
      "learning_rate": 6.823779193205944e-05,
      "loss": 0.013,
      "step": 2350
    },
    {
      "epoch": 1.67,
      "learning_rate": 6.75300778485492e-05,
      "loss": 0.0123,
      "step": 2400
    },
    {
      "epoch": 1.67,
      "eval_loss": 0.013516324572265148,
      "eval_runtime": 1091.733,
      "eval_samples_per_second": 6.752,
      "eval_steps_per_second": 0.282,
      "step": 2400
    },
    {
      "epoch": 1.71,
      "learning_rate": 6.682236376503893e-05,
      "loss": 0.0123,
      "step": 2450
    },
    {
      "epoch": 1.74,
      "learning_rate": 6.611464968152867e-05,
      "loss": 0.0132,
      "step": 2500
    },
    {
      "epoch": 1.78,
      "learning_rate": 6.54069355980184e-05,
      "loss": 0.0172,
      "step": 2550
    },
    {
      "epoch": 1.81,
      "learning_rate": 6.469922151450815e-05,
      "loss": 0.0121,
      "step": 2600
    },
    {
      "epoch": 1.81,
      "eval_loss": 0.013174451887607574,
      "eval_runtime": 1091.4369,
      "eval_samples_per_second": 6.753,
      "eval_steps_per_second": 0.282,
      "step": 2600
    },
    {
      "epoch": 1.85,
      "learning_rate": 6.399150743099787e-05,
      "loss": 0.0112,
      "step": 2650
    },
    {
      "epoch": 1.88,
      "learning_rate": 6.328379334748762e-05,
      "loss": 0.0137,
      "step": 2700
    },
    {
      "epoch": 1.92,
      "learning_rate": 6.257607926397736e-05,
      "loss": 0.0107,
      "step": 2750
    },
    {
      "epoch": 1.95,
      "learning_rate": 6.18683651804671e-05,
      "loss": 0.0115,
      "step": 2800
    },
    {
      "epoch": 1.95,
      "eval_loss": 0.01469574123620987,
      "eval_runtime": 1091.5407,
      "eval_samples_per_second": 6.753,
      "eval_steps_per_second": 0.282,
      "step": 2800
    },
    {
      "epoch": 1.99,
      "learning_rate": 6.116065109695683e-05,
      "loss": 0.015,
      "step": 2850
    },
    {
      "epoch": 2.02,
      "learning_rate": 6.045293701344657e-05,
      "loss": 0.0123,
      "step": 2900
    },
    {
      "epoch": 2.06,
      "learning_rate": 5.974522292993631e-05,
      "loss": 0.0101,
      "step": 2950
    },
    {
      "epoch": 2.09,
      "learning_rate": 5.9037508846426045e-05,
      "loss": 0.0115,
      "step": 3000
    },
    {
      "epoch": 2.09,
      "eval_loss": 0.01295243389904499,
      "eval_runtime": 1091.2144,
      "eval_samples_per_second": 6.755,
      "eval_steps_per_second": 0.282,
      "step": 3000
    },
    {
      "epoch": 2.13,
      "learning_rate": 5.832979476291578e-05,
      "loss": 0.0116,
      "step": 3050
    },
    {
      "epoch": 2.16,
      "learning_rate": 5.7622080679405524e-05,
      "loss": 0.0117,
      "step": 3100
    },
    {
      "epoch": 2.2,
      "learning_rate": 5.6914366595895267e-05,
      "loss": 0.0148,
      "step": 3150
    },
    {
      "epoch": 2.23,
      "learning_rate": 5.6206652512384996e-05,
      "loss": 0.0104,
      "step": 3200
    },
    {
      "epoch": 2.23,
      "eval_loss": 0.012959270738065243,
      "eval_runtime": 1091.2195,
      "eval_samples_per_second": 6.755,
      "eval_steps_per_second": 0.282,
      "step": 3200
    },
    {
      "epoch": 2.27,
      "learning_rate": 5.549893842887474e-05,
      "loss": 0.01,
      "step": 3250
    },
    {
      "epoch": 2.3,
      "learning_rate": 5.479122434536448e-05,
      "loss": 0.0095,
      "step": 3300
    },
    {
      "epoch": 2.34,
      "learning_rate": 5.408351026185421e-05,
      "loss": 0.0108,
      "step": 3350
    },
    {
      "epoch": 2.37,
      "learning_rate": 5.337579617834395e-05,
      "loss": 0.0112,
      "step": 3400
    },
    {
      "epoch": 2.37,
      "eval_loss": 0.013205363415181637,
      "eval_runtime": 1091.0349,
      "eval_samples_per_second": 6.756,
      "eval_steps_per_second": 0.282,
      "step": 3400
    },
    {
      "epoch": 2.41,
      "learning_rate": 5.2668082094833696e-05,
      "loss": 0.0119,
      "step": 3450
    },
    {
      "epoch": 2.44,
      "learning_rate": 5.1960368011323425e-05,
      "loss": 0.0107,
      "step": 3500
    },
    {
      "epoch": 2.48,
      "learning_rate": 5.125265392781317e-05,
      "loss": 0.0129,
      "step": 3550
    },
    {
      "epoch": 2.51,
      "learning_rate": 5.054493984430291e-05,
      "loss": 0.0103,
      "step": 3600
    },
    {
      "epoch": 2.51,
      "eval_loss": 0.013000305742025375,
      "eval_runtime": 1091.2105,
      "eval_samples_per_second": 6.755,
      "eval_steps_per_second": 0.282,
      "step": 3600
    },
    {
      "epoch": 2.55,
      "learning_rate": 4.9837225760792646e-05,
      "loss": 0.0114,
      "step": 3650
    },
    {
      "epoch": 2.58,
      "learning_rate": 4.912951167728238e-05,
      "loss": 0.0116,
      "step": 3700
    },
    {
      "epoch": 2.62,
      "learning_rate": 4.842179759377212e-05,
      "loss": 0.0106,
      "step": 3750
    },
    {
      "epoch": 2.65,
      "learning_rate": 4.771408351026186e-05,
      "loss": 0.0099,
      "step": 3800
    },
    {
      "epoch": 2.65,
      "eval_loss": 0.01273022498935461,
      "eval_runtime": 1091.0891,
      "eval_samples_per_second": 6.756,
      "eval_steps_per_second": 0.282,
      "step": 3800
    },
    {
      "epoch": 2.69,
      "learning_rate": 4.70063694267516e-05,
      "loss": 0.0109,
      "step": 3850
    },
    {
      "epoch": 2.72,
      "learning_rate": 4.629865534324133e-05,
      "loss": 0.0125,
      "step": 3900
    },
    {
      "epoch": 2.76,
      "learning_rate": 4.559094125973107e-05,
      "loss": 0.0135,
      "step": 3950
    },
    {
      "epoch": 2.79,
      "learning_rate": 4.488322717622081e-05,
      "loss": 0.0111,
      "step": 4000
    },
    {
      "epoch": 2.79,
      "eval_loss": 0.012479917146265507,
      "eval_runtime": 1091.0461,
      "eval_samples_per_second": 6.756,
      "eval_steps_per_second": 0.282,
      "step": 4000
    },
    {
      "epoch": 2.83,
      "learning_rate": 4.417551309271055e-05,
      "loss": 0.0106,
      "step": 4050
    },
    {
      "epoch": 2.86,
      "learning_rate": 4.3467799009200284e-05,
      "loss": 0.0115,
      "step": 4100
    },
    {
      "epoch": 2.9,
      "learning_rate": 4.2760084925690026e-05,
      "loss": 0.0107,
      "step": 4150
    },
    {
      "epoch": 2.93,
      "learning_rate": 4.205237084217976e-05,
      "loss": 0.0094,
      "step": 4200
    },
    {
      "epoch": 2.93,
      "eval_loss": 0.0125270402058959,
      "eval_runtime": 1091.2851,
      "eval_samples_per_second": 6.754,
      "eval_steps_per_second": 0.282,
      "step": 4200
    },
    {
      "epoch": 2.96,
      "learning_rate": 4.13446567586695e-05,
      "loss": 0.0127,
      "step": 4250
    },
    {
      "epoch": 3.0,
      "learning_rate": 4.063694267515924e-05,
      "loss": 0.0088,
      "step": 4300
    },
    {
      "epoch": 3.03,
      "learning_rate": 3.992922859164898e-05,
      "loss": 0.008,
      "step": 4350
    },
    {
      "epoch": 3.07,
      "learning_rate": 3.922151450813871e-05,
      "loss": 0.0083,
      "step": 4400
    },
    {
      "epoch": 3.07,
      "eval_loss": 0.012798724696040154,
      "eval_runtime": 1090.8595,
      "eval_samples_per_second": 6.757,
      "eval_steps_per_second": 0.282,
      "step": 4400
    },
    {
      "epoch": 3.1,
      "learning_rate": 3.8513800424628456e-05,
      "loss": 0.0101,
      "step": 4450
    },
    {
      "epoch": 3.14,
      "learning_rate": 3.780608634111819e-05,
      "loss": 0.0085,
      "step": 4500
    },
    {
      "epoch": 3.17,
      "learning_rate": 3.709837225760793e-05,
      "loss": 0.0083,
      "step": 4550
    },
    {
      "epoch": 3.21,
      "learning_rate": 3.6390658174097663e-05,
      "loss": 0.0093,
      "step": 4600
    },
    {
      "epoch": 3.21,
      "eval_loss": 0.012657725252211094,
      "eval_runtime": 1090.8271,
      "eval_samples_per_second": 6.757,
      "eval_steps_per_second": 0.282,
      "step": 4600
    },
    {
      "epoch": 3.24,
      "learning_rate": 3.5682944090587406e-05,
      "loss": 0.0099,
      "step": 4650
    },
    {
      "epoch": 3.28,
      "learning_rate": 3.497523000707714e-05,
      "loss": 0.0107,
      "step": 4700
    },
    {
      "epoch": 3.31,
      "learning_rate": 3.426751592356688e-05,
      "loss": 0.0097,
      "step": 4750
    },
    {
      "epoch": 3.35,
      "learning_rate": 3.355980184005662e-05,
      "loss": 0.0099,
      "step": 4800
    },
    {
      "epoch": 3.35,
      "eval_loss": 0.012795734219253063,
      "eval_runtime": 1090.6151,
      "eval_samples_per_second": 6.759,
      "eval_steps_per_second": 0.282,
      "step": 4800
    },
    {
      "epoch": 3.38,
      "learning_rate": 3.285208775654636e-05,
      "loss": 0.0094,
      "step": 4850
    },
    {
      "epoch": 3.42,
      "learning_rate": 3.214437367303609e-05,
      "loss": 0.0099,
      "step": 4900
    },
    {
      "epoch": 3.45,
      "learning_rate": 3.1436659589525835e-05,
      "loss": 0.0097,
      "step": 4950
    },
    {
      "epoch": 3.49,
      "learning_rate": 3.072894550601557e-05,
      "loss": 0.0109,
      "step": 5000
    },
    {
      "epoch": 3.49,
      "eval_loss": 0.01254407037049532,
      "eval_runtime": 1090.2952,
      "eval_samples_per_second": 6.761,
      "eval_steps_per_second": 0.282,
      "step": 5000
    },
    {
      "epoch": 3.52,
      "learning_rate": 3.0021231422505307e-05,
      "loss": 0.009,
      "step": 5050
    },
    {
      "epoch": 3.56,
      "learning_rate": 2.931351733899505e-05,
      "loss": 0.0106,
      "step": 5100
    },
    {
      "epoch": 3.59,
      "learning_rate": 2.8605803255484786e-05,
      "loss": 0.0112,
      "step": 5150
    },
    {
      "epoch": 3.63,
      "learning_rate": 2.7898089171974522e-05,
      "loss": 0.0089,
      "step": 5200
    },
    {
      "epoch": 3.63,
      "eval_loss": 0.012657931074500084,
      "eval_runtime": 1090.5111,
      "eval_samples_per_second": 6.759,
      "eval_steps_per_second": 0.282,
      "step": 5200
    },
    {
      "epoch": 3.66,
      "learning_rate": 2.7190375088464258e-05,
      "loss": 0.0088,
      "step": 5250
    },
    {
      "epoch": 3.7,
      "learning_rate": 2.6482661004954e-05,
      "loss": 0.0113,
      "step": 5300
    },
    {
      "epoch": 3.73,
      "learning_rate": 2.5774946921443737e-05,
      "loss": 0.0105,
      "step": 5350
    },
    {
      "epoch": 3.77,
      "learning_rate": 2.5067232837933473e-05,
      "loss": 0.0095,
      "step": 5400
    },
    {
      "epoch": 3.77,
      "eval_loss": 0.012662014923989773,
      "eval_runtime": 1090.6147,
      "eval_samples_per_second": 6.759,
      "eval_steps_per_second": 0.282,
      "step": 5400
    },
    {
      "epoch": 3.8,
      "learning_rate": 2.4359518754423212e-05,
      "loss": 0.0098,
      "step": 5450
    },
    {
      "epoch": 3.84,
      "learning_rate": 2.365180467091295e-05,
      "loss": 0.0083,
      "step": 5500
    },
    {
      "epoch": 3.87,
      "learning_rate": 2.294409058740269e-05,
      "loss": 0.0109,
      "step": 5550
    },
    {
      "epoch": 3.91,
      "learning_rate": 2.2236376503892427e-05,
      "loss": 0.0101,
      "step": 5600
    },
    {
      "epoch": 3.91,
      "eval_loss": 0.012734564021229744,
      "eval_runtime": 1090.6719,
      "eval_samples_per_second": 6.758,
      "eval_steps_per_second": 0.282,
      "step": 5600
    },
    {
      "epoch": 3.94,
      "learning_rate": 2.1528662420382166e-05,
      "loss": 0.0103,
      "step": 5650
    },
    {
      "epoch": 3.98,
      "learning_rate": 2.0820948336871905e-05,
      "loss": 0.0084,
      "step": 5700
    },
    {
      "epoch": 4.01,
      "learning_rate": 2.0113234253361645e-05,
      "loss": 0.0092,
      "step": 5750
    },
    {
      "epoch": 4.05,
      "learning_rate": 1.9405520169851384e-05,
      "loss": 0.0098,
      "step": 5800
    },
    {
      "epoch": 4.05,
      "eval_loss": 0.01291683316230774,
      "eval_runtime": 1090.3976,
      "eval_samples_per_second": 6.76,
      "eval_steps_per_second": 0.282,
      "step": 5800
    },
    {
      "epoch": 4.08,
      "learning_rate": 1.869780608634112e-05,
      "loss": 0.0081,
      "step": 5850
    },
    {
      "epoch": 4.12,
      "learning_rate": 1.799009200283086e-05,
      "loss": 0.0082,
      "step": 5900
    },
    {
      "epoch": 4.15,
      "learning_rate": 1.7282377919320595e-05,
      "loss": 0.0074,
      "step": 5950
    },
    {
      "epoch": 4.19,
      "learning_rate": 1.6574663835810334e-05,
      "loss": 0.0084,
      "step": 6000
    },
    {
      "epoch": 4.19,
      "eval_loss": 0.012655936181545258,
      "eval_runtime": 1090.3951,
      "eval_samples_per_second": 6.76,
      "eval_steps_per_second": 0.282,
      "step": 6000
    },
    {
      "epoch": 4.22,
      "learning_rate": 1.5866949752300074e-05,
      "loss": 0.0084,
      "step": 6050
    },
    {
      "epoch": 4.26,
      "learning_rate": 1.515923566878981e-05,
      "loss": 0.0095,
      "step": 6100
    },
    {
      "epoch": 4.29,
      "learning_rate": 1.4451521585279547e-05,
      "loss": 0.0108,
      "step": 6150
    },
    {
      "epoch": 4.33,
      "learning_rate": 1.3743807501769285e-05,
      "loss": 0.0082,
      "step": 6200
    },
    {
      "epoch": 4.33,
      "eval_loss": 0.012752115726470947,
      "eval_runtime": 1090.0724,
      "eval_samples_per_second": 6.762,
      "eval_steps_per_second": 0.283,
      "step": 6200
    },
    {
      "epoch": 4.36,
      "learning_rate": 1.3036093418259024e-05,
      "loss": 0.0091,
      "step": 6250
    },
    {
      "epoch": 4.39,
      "learning_rate": 1.2328379334748762e-05,
      "loss": 0.0083,
      "step": 6300
    },
    {
      "epoch": 4.43,
      "learning_rate": 1.1620665251238501e-05,
      "loss": 0.0082,
      "step": 6350
    },
    {
      "epoch": 4.46,
      "learning_rate": 1.0912951167728239e-05,
      "loss": 0.0088,
      "step": 6400
    },
    {
      "epoch": 4.46,
      "eval_loss": 0.013044040650129318,
      "eval_runtime": 1090.0452,
      "eval_samples_per_second": 6.762,
      "eval_steps_per_second": 0.283,
      "step": 6400
    },
    {
      "epoch": 4.5,
      "learning_rate": 1.0205237084217977e-05,
      "loss": 0.0084,
      "step": 6450
    },
    {
      "epoch": 4.53,
      "learning_rate": 9.497523000707714e-06,
      "loss": 0.0086,
      "step": 6500
    },
    {
      "epoch": 4.57,
      "learning_rate": 8.789808917197452e-06,
      "loss": 0.0089,
      "step": 6550
    },
    {
      "epoch": 4.6,
      "learning_rate": 8.082094833687191e-06,
      "loss": 0.0077,
      "step": 6600
    },
    {
      "epoch": 4.6,
      "eval_loss": 0.012679013423621655,
      "eval_runtime": 1089.906,
      "eval_samples_per_second": 6.763,
      "eval_steps_per_second": 0.283,
      "step": 6600
    },
    {
      "epoch": 4.64,
      "learning_rate": 7.374380750176929e-06,
      "loss": 0.0086,
      "step": 6650
    },
    {
      "epoch": 4.67,
      "learning_rate": 6.666666666666667e-06,
      "loss": 0.0075,
      "step": 6700
    },
    {
      "epoch": 4.71,
      "learning_rate": 5.958952583156405e-06,
      "loss": 0.008,
      "step": 6750
    },
    {
      "epoch": 4.74,
      "learning_rate": 5.251238499646143e-06,
      "loss": 0.0082,
      "step": 6800
    },
    {
      "epoch": 4.74,
      "eval_loss": 0.01266387477517128,
      "eval_runtime": 1090.0881,
      "eval_samples_per_second": 6.762,
      "eval_steps_per_second": 0.283,
      "step": 6800
    },
    {
      "epoch": 4.78,
      "learning_rate": 4.543524416135881e-06,
      "loss": 0.0075,
      "step": 6850
    },
    {
      "epoch": 4.81,
      "learning_rate": 3.83581033262562e-06,
      "loss": 0.0077,
      "step": 6900
    },
    {
      "epoch": 4.85,
      "learning_rate": 3.1280962491153574e-06,
      "loss": 0.0089,
      "step": 6950
    },
    {
      "epoch": 4.88,
      "learning_rate": 2.420382165605096e-06,
      "loss": 0.0093,
      "step": 7000
    },
    {
      "epoch": 4.88,
      "eval_loss": 0.012687072157859802,
      "eval_runtime": 1089.9152,
      "eval_samples_per_second": 6.763,
      "eval_steps_per_second": 0.283,
      "step": 7000
    }
  ],
  "max_steps": 7165,
  "num_train_epochs": 5,
  "total_flos": 1.675440592244441e+18,
  "trial_name": null,
  "trial_params": null
}