{ "best_metric": 1.2344011068344116, "best_model_checkpoint": "outputs/checkpoint-170", "epoch": 4.874551971326165, "eval_steps": 500, "global_step": 170, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.5734767025089605, "grad_norm": 1.3787349462509155, "learning_rate": 2e-05, "loss": 2.8783, "step": 20 }, { "epoch": 0.974910394265233, "eval_loss": 2.1124985218048096, "eval_runtime": 12.3452, "eval_samples_per_second": 30.133, "eval_steps_per_second": 3.807, "step": 34 }, { "epoch": 1.146953405017921, "grad_norm": 0.693749725818634, "learning_rate": 4e-05, "loss": 2.3799, "step": 40 }, { "epoch": 1.7204301075268817, "grad_norm": 0.6103734970092773, "learning_rate": 6e-05, "loss": 1.8956, "step": 60 }, { "epoch": 1.978494623655914, "eval_loss": 1.6946210861206055, "eval_runtime": 12.0355, "eval_samples_per_second": 30.909, "eval_steps_per_second": 3.905, "step": 69 }, { "epoch": 2.293906810035842, "grad_norm": 0.7133463025093079, "learning_rate": 8e-05, "loss": 1.708, "step": 80 }, { "epoch": 2.867383512544803, "grad_norm": 1.05070960521698, "learning_rate": 0.0001, "loss": 1.5119, "step": 100 }, { "epoch": 2.982078853046595, "eval_loss": 1.4306025505065918, "eval_runtime": 12.0287, "eval_samples_per_second": 30.926, "eval_steps_per_second": 3.907, "step": 104 }, { "epoch": 3.4408602150537635, "grad_norm": 1.3218475580215454, "learning_rate": 8.117449009293668e-05, "loss": 1.3232, "step": 120 }, { "epoch": 3.985663082437276, "eval_loss": 1.257010817527771, "eval_runtime": 12.0264, "eval_samples_per_second": 30.932, "eval_steps_per_second": 3.908, "step": 139 }, { "epoch": 4.014336917562724, "grad_norm": 1.4180114269256592, "learning_rate": 3.887395330218429e-05, "loss": 1.2124, "step": 140 }, { "epoch": 4.587813620071684, "grad_norm": 1.681579828262329, "learning_rate": 4.951556604879048e-06, "loss": 1.0872, "step": 160 }, { "epoch": 4.874551971326165, "eval_loss": 1.2344011068344116, "eval_runtime": 12.0272, "eval_samples_per_second": 30.93, "eval_steps_per_second": 3.908, "step": 170 } ], "logging_steps": 20, "max_steps": 170, "num_input_tokens_seen": 0, "num_train_epochs": 5, "save_steps": 500, "total_flos": 8953785954189312.0, "train_batch_size": 2, "trial_name": null, "trial_params": null }