{ "best_metric": null, "best_model_checkpoint": null, "epoch": 2.0, "eval_steps": 500, "global_step": 6512, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.0, "grad_norm": 2.138775110244751, "learning_rate": 6.142506142506143e-09, "loss": 0.7144, "step": 1 }, { "epoch": 0.06, "grad_norm": 2.9382898807525635, "learning_rate": 1.2285012285012285e-06, "loss": 0.6919, "step": 200 }, { "epoch": 0.12, "grad_norm": 5.854135513305664, "learning_rate": 2.457002457002457e-06, "loss": 0.5159, "step": 400 }, { "epoch": 0.18, "grad_norm": 10.978944778442383, "learning_rate": 3.685503685503686e-06, "loss": 0.4512, "step": 600 }, { "epoch": 0.25, "grad_norm": 5.893263816833496, "learning_rate": 4.914004914004914e-06, "loss": 0.4188, "step": 800 }, { "epoch": 0.31, "grad_norm": 5.747641086578369, "learning_rate": 6.1425061425061425e-06, "loss": 0.3966, "step": 1000 }, { "epoch": 0.37, "grad_norm": 4.317325115203857, "learning_rate": 7.371007371007372e-06, "loss": 0.38, "step": 1200 }, { "epoch": 0.43, "grad_norm": 3.575576066970825, "learning_rate": 8.5995085995086e-06, "loss": 0.3664, "step": 1400 }, { "epoch": 0.49, "grad_norm": 4.714512825012207, "learning_rate": 9.828009828009828e-06, "loss": 0.3571, "step": 1600 }, { "epoch": 0.55, "grad_norm": 10.362600326538086, "learning_rate": 9.99660019816015e-06, "loss": 0.3477, "step": 1800 }, { "epoch": 0.61, "grad_norm": 7.849954605102539, "learning_rate": 9.984103498544067e-06, "loss": 0.344, "step": 2000 }, { "epoch": 0.68, "grad_norm": 7.9534077644348145, "learning_rate": 9.962442770775675e-06, "loss": 0.3286, "step": 2200 }, { "epoch": 0.74, "grad_norm": 8.997251510620117, "learning_rate": 9.931657841379274e-06, "loss": 0.3187, "step": 2400 }, { "epoch": 0.8, "grad_norm": 3.736711263656616, "learning_rate": 9.891805313103928e-06, "loss": 0.3098, "step": 2600 }, { "epoch": 0.86, "grad_norm": 4.244268417358398, "learning_rate": 9.842958460850763e-06, "loss": 0.3154, "step": 2800 }, { "epoch": 0.92, "grad_norm": 6.168263912200928, "learning_rate": 9.785207096945972e-06, "loss": 0.3013, "step": 3000 }, { "epoch": 0.98, "grad_norm": 5.536916732788086, "learning_rate": 9.718657406007276e-06, "loss": 0.306, "step": 3200 }, { "epoch": 1.04, "grad_norm": 4.842708587646484, "learning_rate": 9.643431749707433e-06, "loss": 0.2877, "step": 3400 }, { "epoch": 1.11, "grad_norm": 10.104561805725098, "learning_rate": 9.559668441793803e-06, "loss": 0.2735, "step": 3600 }, { "epoch": 1.17, "grad_norm": 3.159233808517456, "learning_rate": 9.467521493777597e-06, "loss": 0.2815, "step": 3800 }, { "epoch": 1.23, "grad_norm": 8.2909517288208, "learning_rate": 9.367160331760423e-06, "loss": 0.2771, "step": 4000 }, { "epoch": 1.29, "grad_norm": 6.18996000289917, "learning_rate": 9.258769484918773e-06, "loss": 0.2697, "step": 4200 }, { "epoch": 1.35, "grad_norm": 3.3391902446746826, "learning_rate": 9.142548246219212e-06, "loss": 0.2659, "step": 4400 }, { "epoch": 1.41, "grad_norm": 5.173523426055908, "learning_rate": 9.018710305988126e-06, "loss": 0.2657, "step": 4600 }, { "epoch": 1.47, "grad_norm": 2.5208077430725098, "learning_rate": 8.88748335900973e-06, "loss": 0.2558, "step": 4800 }, { "epoch": 1.54, "grad_norm": 7.448746681213379, "learning_rate": 8.749108685874764e-06, "loss": 0.2706, "step": 5000 }, { "epoch": 1.6, "grad_norm": 3.4153690338134766, "learning_rate": 8.60384070934964e-06, "loss": 0.2618, "step": 5200 }, { "epoch": 1.66, "grad_norm": 5.566818714141846, "learning_rate": 
8.4519465265817e-06, "loss": 0.2484, "step": 5400 }, { "epoch": 1.72, "grad_norm": 6.106826305389404, "learning_rate": 8.293705418000702e-06, "loss": 0.2625, "step": 5600 }, { "epoch": 1.78, "grad_norm": 3.50177001953125, "learning_rate": 8.129408333819524e-06, "loss": 0.2593, "step": 5800 }, { "epoch": 1.84, "grad_norm": 3.8748812675476074, "learning_rate": 7.959357359078174e-06, "loss": 0.257, "step": 6000 }, { "epoch": 1.9, "grad_norm": 2.6173479557037354, "learning_rate": 7.783865158214768e-06, "loss": 0.2505, "step": 6200 }, { "epoch": 1.97, "grad_norm": 3.0926144123077393, "learning_rate": 7.6032544001846476e-06, "loss": 0.2482, "step": 6400 } ], "logging_steps": 200, "max_steps": 16280, "num_input_tokens_seen": 0, "num_train_epochs": 5, "save_steps": 500, "total_flos": 2.193125396059259e+17, "train_batch_size": 16, "trial_name": null, "trial_params": null }