{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.006916346785627831, "eval_steps": 25, "global_step": 50, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.00013832693571255662, "grad_norm": 0.7460320591926575, "learning_rate": 3.3333333333333335e-05, "loss": 0.0622, "step": 1 }, { "epoch": 0.00013832693571255662, "eval_loss": 0.3660264313220978, "eval_runtime": 470.1985, "eval_samples_per_second": 25.895, "eval_steps_per_second": 12.948, "step": 1 }, { "epoch": 0.00027665387142511324, "grad_norm": 0.5172986388206482, "learning_rate": 6.666666666666667e-05, "loss": 0.0654, "step": 2 }, { "epoch": 0.00041498080713766987, "grad_norm": 0.3715982437133789, "learning_rate": 0.0001, "loss": 0.0601, "step": 3 }, { "epoch": 0.0005533077428502265, "grad_norm": 0.22900886833667755, "learning_rate": 9.99524110790929e-05, "loss": 0.1241, "step": 4 }, { "epoch": 0.0006916346785627831, "grad_norm": 0.15617172420024872, "learning_rate": 9.980973490458728e-05, "loss": 0.1584, "step": 5 }, { "epoch": 0.0008299616142753397, "grad_norm": 0.22854697704315186, "learning_rate": 9.957224306869053e-05, "loss": 0.2273, "step": 6 }, { "epoch": 0.0009682885499878964, "grad_norm": 0.2444339096546173, "learning_rate": 9.924038765061042e-05, "loss": 0.1921, "step": 7 }, { "epoch": 0.001106615485700453, "grad_norm": 0.21913830935955048, "learning_rate": 9.881480035599667e-05, "loss": 0.2113, "step": 8 }, { "epoch": 0.0012449424214130097, "grad_norm": 0.1947929710149765, "learning_rate": 9.829629131445342e-05, "loss": 0.231, "step": 9 }, { "epoch": 0.0013832693571255662, "grad_norm": 0.18104974925518036, "learning_rate": 9.768584753741134e-05, "loss": 0.2594, "step": 10 }, { "epoch": 0.001521596292838123, "grad_norm": 0.18011099100112915, "learning_rate": 9.698463103929542e-05, "loss": 0.2716, "step": 11 }, { "epoch": 0.0016599232285506795, "grad_norm": 0.2006697952747345, "learning_rate": 9.619397662556435e-05, "loss": 0.3017, "step": 12 }, { "epoch": 0.0017982501642632362, "grad_norm": 0.19725461304187775, "learning_rate": 9.53153893518325e-05, "loss": 0.3147, "step": 13 }, { "epoch": 0.0019365770999757927, "grad_norm": 0.20050567388534546, "learning_rate": 9.435054165891109e-05, "loss": 0.3898, "step": 14 }, { "epoch": 0.0020749040356883492, "grad_norm": 0.18300771713256836, "learning_rate": 9.330127018922194e-05, "loss": 0.2822, "step": 15 }, { "epoch": 0.002213230971400906, "grad_norm": 0.16420985758304596, "learning_rate": 9.21695722906443e-05, "loss": 0.2893, "step": 16 }, { "epoch": 0.0023515579071134627, "grad_norm": 0.1584509164094925, "learning_rate": 9.09576022144496e-05, "loss": 0.283, "step": 17 }, { "epoch": 0.0024898848428260194, "grad_norm": 0.18125592172145844, "learning_rate": 8.966766701456177e-05, "loss": 0.3069, "step": 18 }, { "epoch": 0.0026282117785385757, "grad_norm": 0.16136623919010162, "learning_rate": 8.83022221559489e-05, "loss": 0.2682, "step": 19 }, { "epoch": 0.0027665387142511324, "grad_norm": 0.14875389635562897, "learning_rate": 8.68638668405062e-05, "loss": 0.2303, "step": 20 }, { "epoch": 0.002904865649963689, "grad_norm": 0.15708525478839874, "learning_rate": 8.535533905932738e-05, "loss": 0.2628, "step": 21 }, { "epoch": 0.003043192585676246, "grad_norm": 0.16460222005844116, "learning_rate": 8.377951038078302e-05, "loss": 0.2652, "step": 22 }, { "epoch": 0.0031815195213888026, "grad_norm": 0.15555903315544128, "learning_rate": 8.213938048432697e-05, "loss": 0.2429, "step": 23 }, { "epoch": 
0.003319846457101359, "grad_norm": 0.18757903575897217, "learning_rate": 8.043807145043604e-05, "loss": 0.3733, "step": 24 }, { "epoch": 0.0034581733928139157, "grad_norm": 0.18898479640483856, "learning_rate": 7.86788218175523e-05, "loss": 0.301, "step": 25 }, { "epoch": 0.0034581733928139157, "eval_loss": 0.26305505633354187, "eval_runtime": 469.7372, "eval_samples_per_second": 25.921, "eval_steps_per_second": 12.96, "step": 25 }, { "epoch": 0.0035965003285264724, "grad_norm": 0.19418561458587646, "learning_rate": 7.68649804173412e-05, "loss": 0.3394, "step": 26 }, { "epoch": 0.003734827264239029, "grad_norm": 0.17552641034126282, "learning_rate": 7.500000000000001e-05, "loss": 0.3258, "step": 27 }, { "epoch": 0.0038731541999515854, "grad_norm": 0.19901876151561737, "learning_rate": 7.308743066175172e-05, "loss": 0.3147, "step": 28 }, { "epoch": 0.004011481135664142, "grad_norm": 0.20790669322013855, "learning_rate": 7.113091308703498e-05, "loss": 0.3242, "step": 29 }, { "epoch": 0.0041498080713766984, "grad_norm": 0.21712379157543182, "learning_rate": 6.91341716182545e-05, "loss": 0.3609, "step": 30 }, { "epoch": 0.004288135007089256, "grad_norm": 0.1990925520658493, "learning_rate": 6.710100716628344e-05, "loss": 0.3226, "step": 31 }, { "epoch": 0.004426461942801812, "grad_norm": 0.20528583228588104, "learning_rate": 6.503528997521366e-05, "loss": 0.319, "step": 32 }, { "epoch": 0.004564788878514369, "grad_norm": 0.20410776138305664, "learning_rate": 6.294095225512603e-05, "loss": 0.2925, "step": 33 }, { "epoch": 0.004703115814226925, "grad_norm": 0.19837261736392975, "learning_rate": 6.0821980696905146e-05, "loss": 0.2745, "step": 34 }, { "epoch": 0.004841442749939482, "grad_norm": 0.22647859156131744, "learning_rate": 5.868240888334653e-05, "loss": 0.3222, "step": 35 }, { "epoch": 0.004979769685652039, "grad_norm": 0.20827357470989227, "learning_rate": 5.6526309611002594e-05, "loss": 0.3405, "step": 36 }, { "epoch": 0.005118096621364595, "grad_norm": 0.22090105712413788, "learning_rate": 5.435778713738292e-05, "loss": 0.3558, "step": 37 }, { "epoch": 0.005256423557077151, "grad_norm": 0.22308292984962463, "learning_rate": 5.218096936826681e-05, "loss": 0.3127, "step": 38 }, { "epoch": 0.005394750492789709, "grad_norm": 0.21499277651309967, "learning_rate": 5e-05, "loss": 0.3221, "step": 39 }, { "epoch": 0.005533077428502265, "grad_norm": 0.24013201892375946, "learning_rate": 4.781903063173321e-05, "loss": 0.3376, "step": 40 }, { "epoch": 0.005671404364214822, "grad_norm": 0.24089254438877106, "learning_rate": 4.564221286261709e-05, "loss": 0.2831, "step": 41 }, { "epoch": 0.005809731299927378, "grad_norm": 0.23052391409873962, "learning_rate": 4.347369038899744e-05, "loss": 0.2879, "step": 42 }, { "epoch": 0.005948058235639935, "grad_norm": 0.2739996314048767, "learning_rate": 4.131759111665349e-05, "loss": 0.3677, "step": 43 }, { "epoch": 0.006086385171352492, "grad_norm": 0.2731296718120575, "learning_rate": 3.917801930309486e-05, "loss": 0.288, "step": 44 }, { "epoch": 0.006224712107065048, "grad_norm": 0.26021909713745117, "learning_rate": 3.705904774487396e-05, "loss": 0.2641, "step": 45 }, { "epoch": 0.006363039042777605, "grad_norm": 0.2645817995071411, "learning_rate": 3.4964710024786354e-05, "loss": 0.293, "step": 46 }, { "epoch": 0.0065013659784901616, "grad_norm": 0.30752235651016235, "learning_rate": 3.289899283371657e-05, "loss": 0.3216, "step": 47 }, { "epoch": 0.006639692914202718, "grad_norm": 0.3008212745189667, "learning_rate": 3.086582838174551e-05, "loss": 0.2964, 
"step": 48 }, { "epoch": 0.006778019849915275, "grad_norm": 0.3753189146518707, "learning_rate": 2.886908691296504e-05, "loss": 0.3555, "step": 49 }, { "epoch": 0.006916346785627831, "grad_norm": 0.47307515144348145, "learning_rate": 2.6912569338248315e-05, "loss": 0.3284, "step": 50 }, { "epoch": 0.006916346785627831, "eval_loss": 0.24489927291870117, "eval_runtime": 470.1026, "eval_samples_per_second": 25.901, "eval_steps_per_second": 12.95, "step": 50 } ], "logging_steps": 1, "max_steps": 75, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 25, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 9837211652456448.0, "train_batch_size": 2, "trial_name": null, "trial_params": null }