{ "best_global_step": 2000, "best_metric": 0.0787040963768959, "best_model_checkpoint": "/tmp/results/checkpoint-2000", "epoch": 2.220324979344533, "eval_steps": 200, "global_step": 2015, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.055081244836133296, "grad_norm": 0.15732981264591217, "learning_rate": 0.00019640234948604994, "loss": 1.2703, "step": 50 }, { "epoch": 0.11016248967226659, "grad_norm": 0.13457506895065308, "learning_rate": 0.00019273127753303965, "loss": 0.2101, "step": 100 }, { "epoch": 0.16524373450839988, "grad_norm": 0.11798923462629318, "learning_rate": 0.00018906020558002938, "loss": 0.1801, "step": 150 }, { "epoch": 0.22032497934453318, "grad_norm": 0.10562872886657715, "learning_rate": 0.00018538913362701911, "loss": 0.1621, "step": 200 }, { "epoch": 0.22032497934453318, "eval_loss": 0.15831586718559265, "eval_runtime": 95.9669, "eval_samples_per_second": 33.626, "eval_steps_per_second": 4.21, "step": 200 }, { "epoch": 0.27540622418066646, "grad_norm": 0.11607076972723007, "learning_rate": 0.00018171806167400882, "loss": 0.1526, "step": 250 }, { "epoch": 0.33048746901679976, "grad_norm": 0.20103491842746735, "learning_rate": 0.00017804698972099853, "loss": 0.1498, "step": 300 }, { "epoch": 0.38556871385293306, "grad_norm": 0.20092593133449554, "learning_rate": 0.00017437591776798826, "loss": 0.1481, "step": 350 }, { "epoch": 0.44064995868906637, "grad_norm": 0.15526321530342102, "learning_rate": 0.000170704845814978, "loss": 0.1533, "step": 400 }, { "epoch": 0.44064995868906637, "eval_loss": 0.13016295433044434, "eval_runtime": 96.1932, "eval_samples_per_second": 33.547, "eval_steps_per_second": 4.2, "step": 400 }, { "epoch": 0.49573120352519967, "grad_norm": 0.23773688077926636, "learning_rate": 0.0001670337738619677, "loss": 0.1434, "step": 450 }, { "epoch": 0.5508124483613329, "grad_norm": 0.14756543934345245, "learning_rate": 0.00016336270190895743, "loss": 0.1316, "step": 500 }, { "epoch": 0.6058936931974662, "grad_norm": 0.19601435959339142, "learning_rate": 0.00015969162995594716, "loss": 0.1301, "step": 550 }, { "epoch": 0.6609749380335995, "grad_norm": 0.1771874874830246, "learning_rate": 0.00015602055800293687, "loss": 0.1215, "step": 600 }, { "epoch": 0.6609749380335995, "eval_loss": 0.11715047806501389, "eval_runtime": 95.8683, "eval_samples_per_second": 33.661, "eval_steps_per_second": 4.214, "step": 600 }, { "epoch": 0.7160561828697328, "grad_norm": 0.16557306051254272, "learning_rate": 0.00015234948604992657, "loss": 0.1139, "step": 650 }, { "epoch": 0.7711374277058661, "grad_norm": 0.2927381992340088, "learning_rate": 0.0001486784140969163, "loss": 0.1214, "step": 700 }, { "epoch": 0.8262186725419994, "grad_norm": 0.3338780701160431, "learning_rate": 0.00014500734214390604, "loss": 0.1285, "step": 750 }, { "epoch": 0.8812999173781327, "grad_norm": 0.2900579273700714, "learning_rate": 0.00014133627019089574, "loss": 0.1274, "step": 800 }, { "epoch": 0.8812999173781327, "eval_loss": 0.10116679221391678, "eval_runtime": 94.9307, "eval_samples_per_second": 33.993, "eval_steps_per_second": 4.256, "step": 800 }, { "epoch": 0.936381162214266, "grad_norm": 0.2934402823448181, "learning_rate": 0.00013766519823788548, "loss": 0.1169, "step": 850 }, { "epoch": 0.9914624070503993, "grad_norm": 0.2068411260843277, "learning_rate": 0.0001339941262848752, "loss": 0.1179, "step": 900 }, { "epoch": 1.0473698705590746, "grad_norm": 0.10344358533620834, "learning_rate": 0.0001303230543318649, 
"loss": 0.1003, "step": 950 }, { "epoch": 1.102451115395208, "grad_norm": 0.24283887445926666, "learning_rate": 0.00012665198237885462, "loss": 0.1125, "step": 1000 }, { "epoch": 1.102451115395208, "eval_loss": 0.09346170723438263, "eval_runtime": 98.7546, "eval_samples_per_second": 32.677, "eval_steps_per_second": 4.091, "step": 1000 }, { "epoch": 1.1575323602313412, "grad_norm": 0.14652925729751587, "learning_rate": 0.00012298091042584435, "loss": 0.1058, "step": 1050 }, { "epoch": 1.2126136050674745, "grad_norm": 0.23155081272125244, "learning_rate": 0.00011930983847283408, "loss": 0.114, "step": 1100 }, { "epoch": 1.2676948499036078, "grad_norm": 0.22973254323005676, "learning_rate": 0.00011563876651982379, "loss": 0.1065, "step": 1150 }, { "epoch": 1.3227760947397411, "grad_norm": 0.35016024112701416, "learning_rate": 0.00011196769456681351, "loss": 0.117, "step": 1200 }, { "epoch": 1.3227760947397411, "eval_loss": 0.0892082005739212, "eval_runtime": 99.5776, "eval_samples_per_second": 32.407, "eval_steps_per_second": 4.057, "step": 1200 }, { "epoch": 1.3767557146791518, "grad_norm": 0.2444484978914261, "learning_rate": 0.00010829662261380324, "loss": 0.1042, "step": 1250 }, { "epoch": 1.4318369595152851, "grad_norm": 0.21167899668216705, "learning_rate": 0.00010462555066079296, "loss": 0.1111, "step": 1300 }, { "epoch": 1.4869182043514184, "grad_norm": 0.282637357711792, "learning_rate": 0.00010095447870778268, "loss": 0.0953, "step": 1350 }, { "epoch": 1.5419994491875517, "grad_norm": 0.159465953707695, "learning_rate": 9.72834067547724e-05, "loss": 0.1015, "step": 1400 }, { "epoch": 1.5419994491875517, "eval_loss": 0.08904121816158295, "eval_runtime": 98.1262, "eval_samples_per_second": 32.886, "eval_steps_per_second": 4.117, "step": 1400 }, { "epoch": 1.5970806940236848, "grad_norm": 0.30280402302742004, "learning_rate": 9.361233480176212e-05, "loss": 0.0955, "step": 1450 }, { "epoch": 1.6521619388598183, "grad_norm": 0.2655673623085022, "learning_rate": 8.994126284875184e-05, "loss": 0.0975, "step": 1500 }, { "epoch": 1.7072431836959514, "grad_norm": 0.3565061688423157, "learning_rate": 8.627019089574157e-05, "loss": 0.081, "step": 1550 }, { "epoch": 1.762324428532085, "grad_norm": 0.5034757852554321, "learning_rate": 8.259911894273127e-05, "loss": 0.1027, "step": 1600 }, { "epoch": 1.762324428532085, "eval_loss": 0.08446714282035828, "eval_runtime": 98.0553, "eval_samples_per_second": 32.91, "eval_steps_per_second": 4.12, "step": 1600 }, { "epoch": 1.817405673368218, "grad_norm": 0.20851312577724457, "learning_rate": 7.892804698972101e-05, "loss": 0.0979, "step": 1650 }, { "epoch": 1.8724869182043515, "grad_norm": 0.28781506419181824, "learning_rate": 7.525697503671073e-05, "loss": 0.1001, "step": 1700 }, { "epoch": 1.9275681630404846, "grad_norm": 0.2584012448787689, "learning_rate": 7.158590308370045e-05, "loss": 0.0978, "step": 1750 }, { "epoch": 1.9826494078766181, "grad_norm": 0.2780962288379669, "learning_rate": 6.791483113069016e-05, "loss": 0.0976, "step": 1800 }, { "epoch": 1.9826494078766181, "eval_loss": 0.08305753767490387, "eval_runtime": 98.1323, "eval_samples_per_second": 32.884, "eval_steps_per_second": 4.117, "step": 1800 }, { "epoch": 2.0385568713852935, "grad_norm": 0.15813182294368744, "learning_rate": 6.424375917767988e-05, "loss": 0.089, "step": 1850 }, { "epoch": 2.0936381162214266, "grad_norm": 0.2571295499801636, "learning_rate": 6.057268722466961e-05, "loss": 0.0912, "step": 1900 }, { "epoch": 2.1487193610575597, "grad_norm": 0.27553290128707886, 
"learning_rate": 5.690161527165933e-05, "loss": 0.0922, "step": 1950 }, { "epoch": 2.203800605893693, "grad_norm": 0.2373432070016861, "learning_rate": 5.3230543318649054e-05, "loss": 0.0963, "step": 2000 }, { "epoch": 2.203800605893693, "eval_loss": 0.0787040963768959, "eval_runtime": 96.6596, "eval_samples_per_second": 33.385, "eval_steps_per_second": 4.18, "step": 2000 } ], "logging_steps": 50, "max_steps": 2724, "num_input_tokens_seen": 0, "num_train_epochs": 3, "save_steps": 200, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 411986682593280.0, "train_batch_size": 8, "trial_name": null, "trial_params": null }