{ "best_metric": 1.470105528831482, "best_model_checkpoint": "miner_id_24/checkpoint-50", "epoch": 3.0285714285714285, "eval_steps": 50, "global_step": 53, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.05714285714285714, "grad_norm": 7.248203277587891, "learning_rate": 1e-05, "loss": 2.647, "step": 1 }, { "epoch": 0.05714285714285714, "eval_loss": 3.8393256664276123, "eval_runtime": 0.5254, "eval_samples_per_second": 57.097, "eval_steps_per_second": 15.226, "step": 1 }, { "epoch": 0.11428571428571428, "grad_norm": 18.592464447021484, "learning_rate": 2e-05, "loss": 3.1974, "step": 2 }, { "epoch": 0.17142857142857143, "grad_norm": 52.620887756347656, "learning_rate": 3e-05, "loss": 5.3147, "step": 3 }, { "epoch": 0.22857142857142856, "grad_norm": 40.471134185791016, "learning_rate": 4e-05, "loss": 5.3507, "step": 4 }, { "epoch": 0.2857142857142857, "grad_norm": 2.791353940963745, "learning_rate": 5e-05, "loss": 2.3549, "step": 5 }, { "epoch": 0.34285714285714286, "grad_norm": 4.121531963348389, "learning_rate": 6e-05, "loss": 2.4909, "step": 6 }, { "epoch": 0.4, "grad_norm": 17.40095329284668, "learning_rate": 7e-05, "loss": 4.4181, "step": 7 }, { "epoch": 0.45714285714285713, "grad_norm": 18.52124786376953, "learning_rate": 8e-05, "loss": 4.5092, "step": 8 }, { "epoch": 0.5142857142857142, "grad_norm": 1.829758644104004, "learning_rate": 9e-05, "loss": 2.1481, "step": 9 }, { "epoch": 0.5714285714285714, "grad_norm": 2.442340850830078, "learning_rate": 0.0001, "loss": 2.1406, "step": 10 }, { "epoch": 0.6285714285714286, "grad_norm": 5.458558559417725, "learning_rate": 9.986661418317759e-05, "loss": 2.2616, "step": 11 }, { "epoch": 0.6857142857142857, "grad_norm": 11.321267127990723, "learning_rate": 9.946716840375551e-05, "loss": 3.0082, "step": 12 }, { "epoch": 0.7428571428571429, "grad_norm": 1.7342873811721802, "learning_rate": 9.880379387779637e-05, "loss": 1.8843, "step": 13 }, { "epoch": 0.8, "grad_norm": 2.1906113624572754, "learning_rate": 9.78800299954203e-05, "loss": 1.8916, "step": 14 }, { "epoch": 0.8571428571428571, "grad_norm": 4.323538780212402, "learning_rate": 9.67008054366274e-05, "loss": 1.9801, "step": 15 }, { "epoch": 0.9142857142857143, "grad_norm": 5.735661029815674, "learning_rate": 9.527241187465734e-05, "loss": 1.729, "step": 16 }, { "epoch": 0.9714285714285714, "grad_norm": 2.082949638366699, "learning_rate": 9.360247040719039e-05, "loss": 1.8639, "step": 17 }, { "epoch": 1.0285714285714285, "grad_norm": 6.3943190574646, "learning_rate": 9.16998908944939e-05, "loss": 2.438, "step": 18 }, { "epoch": 1.0857142857142856, "grad_norm": 1.7607228755950928, "learning_rate": 8.957482442146272e-05, "loss": 1.6208, "step": 19 }, { "epoch": 1.1428571428571428, "grad_norm": 3.193545341491699, "learning_rate": 8.72386091371891e-05, "loss": 1.682, "step": 20 }, { "epoch": 1.2, "grad_norm": 3.23166561126709, "learning_rate": 8.47037097610317e-05, "loss": 0.9162, "step": 21 }, { "epoch": 1.2571428571428571, "grad_norm": 3.0824878215789795, "learning_rate": 8.198365107794457e-05, "loss": 1.5992, "step": 22 }, { "epoch": 1.3142857142857143, "grad_norm": 1.696791172027588, "learning_rate": 7.909294577789766e-05, "loss": 1.4764, "step": 23 }, { "epoch": 1.3714285714285714, "grad_norm": 2.419218063354492, "learning_rate": 7.604701702439651e-05, "loss": 1.2588, "step": 24 }, { "epoch": 1.4285714285714286, "grad_norm": 3.271068572998047, "learning_rate": 7.286211616523193e-05, "loss": 0.8795, "step": 25 }, { 
"epoch": 1.4857142857142858, "grad_norm": 4.406923294067383, "learning_rate": 6.95552360245078e-05, "loss": 1.4232, "step": 26 }, { "epoch": 1.5428571428571427, "grad_norm": 1.7464239597320557, "learning_rate": 6.614402023857232e-05, "loss": 1.5745, "step": 27 }, { "epoch": 1.6, "grad_norm": 2.5956149101257324, "learning_rate": 6.264666911958404e-05, "loss": 0.9325, "step": 28 }, { "epoch": 1.657142857142857, "grad_norm": 3.6733956336975098, "learning_rate": 5.908184254897182e-05, "loss": 1.0339, "step": 29 }, { "epoch": 1.7142857142857144, "grad_norm": 2.5009891986846924, "learning_rate": 5.546856041889373e-05, "loss": 1.455, "step": 30 }, { "epoch": 1.7714285714285714, "grad_norm": 1.8941113948822021, "learning_rate": 5.182610115288295e-05, "loss": 1.4617, "step": 31 }, { "epoch": 1.8285714285714287, "grad_norm": 2.875288248062134, "learning_rate": 4.817389884711705e-05, "loss": 1.3048, "step": 32 }, { "epoch": 1.8857142857142857, "grad_norm": 3.436675548553467, "learning_rate": 4.4531439581106295e-05, "loss": 1.0613, "step": 33 }, { "epoch": 1.9428571428571428, "grad_norm": 2.804304838180542, "learning_rate": 4.0918157451028185e-05, "loss": 1.7596, "step": 34 }, { "epoch": 2.0, "grad_norm": 4.853079795837402, "learning_rate": 3.735333088041596e-05, "loss": 1.0828, "step": 35 }, { "epoch": 2.057142857142857, "grad_norm": 1.2410961389541626, "learning_rate": 3.38559797614277e-05, "loss": 1.448, "step": 36 }, { "epoch": 2.1142857142857143, "grad_norm": 1.4228153228759766, "learning_rate": 3.0444763975492208e-05, "loss": 1.1607, "step": 37 }, { "epoch": 2.1714285714285713, "grad_norm": 2.402080535888672, "learning_rate": 2.7137883834768073e-05, "loss": 1.0873, "step": 38 }, { "epoch": 2.2285714285714286, "grad_norm": 2.7407891750335693, "learning_rate": 2.3952982975603496e-05, "loss": 0.6478, "step": 39 }, { "epoch": 2.2857142857142856, "grad_norm": 1.181046962738037, "learning_rate": 2.090705422210237e-05, "loss": 1.4208, "step": 40 }, { "epoch": 2.342857142857143, "grad_norm": 1.7889063358306885, "learning_rate": 1.801634892205545e-05, "loss": 1.1349, "step": 41 }, { "epoch": 2.4, "grad_norm": 2.678232431411743, "learning_rate": 1.5296290238968303e-05, "loss": 0.665, "step": 42 }, { "epoch": 2.4571428571428573, "grad_norm": 4.582726955413818, "learning_rate": 1.2761390862810907e-05, "loss": 0.7553, "step": 43 }, { "epoch": 2.5142857142857142, "grad_norm": 1.157283067703247, "learning_rate": 1.0425175578537299e-05, "loss": 1.383, "step": 44 }, { "epoch": 2.571428571428571, "grad_norm": 1.7080718278884888, "learning_rate": 8.30010910550611e-06, "loss": 1.1187, "step": 45 }, { "epoch": 2.6285714285714286, "grad_norm": 2.5248517990112305, "learning_rate": 6.397529592809614e-06, "loss": 0.8633, "step": 46 }, { "epoch": 2.685714285714286, "grad_norm": 3.649900197982788, "learning_rate": 4.727588125342669e-06, "loss": 0.671, "step": 47 }, { "epoch": 2.742857142857143, "grad_norm": 1.1063644886016846, "learning_rate": 3.299194563372604e-06, "loss": 1.3111, "step": 48 }, { "epoch": 2.8, "grad_norm": 1.4684921503067017, "learning_rate": 2.1199700045797077e-06, "loss": 1.1218, "step": 49 }, { "epoch": 2.857142857142857, "grad_norm": 2.0206542015075684, "learning_rate": 1.196206122203647e-06, "loss": 1.0019, "step": 50 }, { "epoch": 2.857142857142857, "eval_loss": 1.470105528831482, "eval_runtime": 0.5267, "eval_samples_per_second": 56.963, "eval_steps_per_second": 15.19, "step": 50 }, { "epoch": 2.914285714285714, "grad_norm": 3.2814621925354004, "learning_rate": 5.328315962444874e-07, "loss": 
0.8637, "step": 51 }, { "epoch": 2.9714285714285715, "grad_norm": 1.216376781463623, "learning_rate": 1.333858168224178e-07, "loss": 1.1248, "step": 52 }, { "epoch": 3.0285714285714285, "grad_norm": 4.243889331817627, "learning_rate": 0.0, "loss": 1.5185, "step": 53 } ], "logging_steps": 1, "max_steps": 53, "num_input_tokens_seen": 0, "num_train_epochs": 4, "save_steps": 50, "stateful_callbacks": { "EarlyStoppingCallback": { "args": { "early_stopping_patience": 5, "early_stopping_threshold": 0.0 }, "attributes": { "early_stopping_patience_counter": 0 } }, "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 3497938460344320.0, "train_batch_size": 8, "trial_name": null, "trial_params": null }