{ "best_global_step": null, "best_metric": null, "best_model_checkpoint": null, "epoch": 3.0, "eval_steps": 500, "global_step": 87, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.03508771929824561, "grad_norm": 2.067296788002129, "learning_rate": 0.0, "loss": 0.866, "step": 1 }, { "epoch": 0.07017543859649122, "grad_norm": 2.290485750060095, "learning_rate": 7.142857142857143e-06, "loss": 1.043, "step": 2 }, { "epoch": 0.10526315789473684, "grad_norm": 1.8761831403153433, "learning_rate": 1.4285714285714285e-05, "loss": 0.9406, "step": 3 }, { "epoch": 0.14035087719298245, "grad_norm": 1.5348726385452636, "learning_rate": 2.1428571428571428e-05, "loss": 0.9753, "step": 4 }, { "epoch": 0.17543859649122806, "grad_norm": 2.451336617862282, "learning_rate": 2.857142857142857e-05, "loss": 0.9902, "step": 5 }, { "epoch": 0.21052631578947367, "grad_norm": 3.321340013917274, "learning_rate": 3.571428571428572e-05, "loss": 0.9761, "step": 6 }, { "epoch": 0.24561403508771928, "grad_norm": 2.813148280382836, "learning_rate": 4.2857142857142856e-05, "loss": 0.9736, "step": 7 }, { "epoch": 0.2807017543859649, "grad_norm": 1.5924965375006837, "learning_rate": 5e-05, "loss": 0.9838, "step": 8 }, { "epoch": 0.3157894736842105, "grad_norm": 1.5690611350244767, "learning_rate": 4.9993025930300686e-05, "loss": 0.9958, "step": 9 }, { "epoch": 0.3508771929824561, "grad_norm": 1.3166502579359358, "learning_rate": 4.99721076122146e-05, "loss": 0.9588, "step": 10 }, { "epoch": 0.38596491228070173, "grad_norm": 1.136691779583638, "learning_rate": 4.9937256716606394e-05, "loss": 0.8503, "step": 11 }, { "epoch": 0.42105263157894735, "grad_norm": 0.8350469358137226, "learning_rate": 4.9888492687682096e-05, "loss": 0.8724, "step": 12 }, { "epoch": 0.45614035087719296, "grad_norm": 0.7872759895415901, "learning_rate": 4.982584273214061e-05, "loss": 0.9391, "step": 13 }, { "epoch": 0.49122807017543857, "grad_norm": 0.7631657638189762, "learning_rate": 4.9749341803994465e-05, "loss": 0.8446, "step": 14 }, { "epoch": 0.5263157894736842, "grad_norm": 0.6807894928709346, "learning_rate": 4.965903258506806e-05, "loss": 0.8727, "step": 15 }, { "epoch": 0.5614035087719298, "grad_norm": 0.5683155258085846, "learning_rate": 4.955496546118439e-05, "loss": 0.8189, "step": 16 }, { "epoch": 0.5964912280701754, "grad_norm": 0.7801939464768173, "learning_rate": 4.9437198494053464e-05, "loss": 0.9303, "step": 17 }, { "epoch": 0.631578947368421, "grad_norm": 0.5867664533954665, "learning_rate": 4.9305797388878264e-05, "loss": 0.83, "step": 18 }, { "epoch": 0.6666666666666666, "grad_norm": 0.5567512617299766, "learning_rate": 4.916083545769607e-05, "loss": 0.8938, "step": 19 }, { "epoch": 0.7017543859649122, "grad_norm": 0.5135799351938194, "learning_rate": 4.9002393578475816e-05, "loss": 0.8907, "step": 20 }, { "epoch": 0.7368421052631579, "grad_norm": 0.5604879970565443, "learning_rate": 4.883056014999423e-05, "loss": 0.8582, "step": 21 }, { "epoch": 0.7719298245614035, "grad_norm": 0.5008974828940795, "learning_rate": 4.864543104251587e-05, "loss": 0.8132, "step": 22 }, { "epoch": 0.8070175438596491, "grad_norm": 0.5351207184045396, "learning_rate": 4.8447109544304636e-05, "loss": 0.8775, "step": 23 }, { "epoch": 0.8421052631578947, "grad_norm": 0.48534815393327974, "learning_rate": 4.823570630399665e-05, "loss": 0.8478, "step": 24 }, { "epoch": 0.8771929824561403, "grad_norm": 0.4766396695563186, "learning_rate": 4.8011339268866505e-05, "loss": 0.8363, 
"step": 25 }, { "epoch": 0.9122807017543859, "grad_norm": 0.4856803005330466, "learning_rate": 4.7774133619021514e-05, "loss": 0.9035, "step": 26 }, { "epoch": 0.9473684210526315, "grad_norm": 0.3964013618666619, "learning_rate": 4.752422169756048e-05, "loss": 0.8033, "step": 27 }, { "epoch": 0.9824561403508771, "grad_norm": 0.48912255110703906, "learning_rate": 4.726174293673612e-05, "loss": 0.8316, "step": 28 }, { "epoch": 1.0, "grad_norm": 0.48912255110703906, "learning_rate": 4.698684378016222e-05, "loss": 0.6913, "step": 29 }, { "epoch": 1.0350877192982457, "grad_norm": 0.9512574190630522, "learning_rate": 4.669967760110908e-05, "loss": 0.7426, "step": 30 }, { "epoch": 1.0701754385964912, "grad_norm": 0.5119153708536178, "learning_rate": 4.6400404616932505e-05, "loss": 0.705, "step": 31 }, { "epoch": 1.1052631578947367, "grad_norm": 0.5326897543607073, "learning_rate": 4.608919179968457e-05, "loss": 0.7502, "step": 32 }, { "epoch": 1.1403508771929824, "grad_norm": 0.7107030254650039, "learning_rate": 4.576621278295558e-05, "loss": 0.7215, "step": 33 }, { "epoch": 1.1754385964912282, "grad_norm": 0.6935703731339729, "learning_rate": 4.5431647764999455e-05, "loss": 0.6888, "step": 34 }, { "epoch": 1.2105263157894737, "grad_norm": 0.5468190267311956, "learning_rate": 4.5085683408196535e-05, "loss": 0.7342, "step": 35 }, { "epoch": 1.2456140350877192, "grad_norm": 0.6166217447509906, "learning_rate": 4.4728512734909844e-05, "loss": 0.7456, "step": 36 }, { "epoch": 1.280701754385965, "grad_norm": 0.9130217986036923, "learning_rate": 4.436033501979299e-05, "loss": 0.7106, "step": 37 }, { "epoch": 1.3157894736842106, "grad_norm": 0.5281267490436047, "learning_rate": 4.398135567860972e-05, "loss": 0.693, "step": 38 }, { "epoch": 1.3508771929824561, "grad_norm": 0.5213754312271124, "learning_rate": 4.3591786153627247e-05, "loss": 0.6418, "step": 39 }, { "epoch": 1.3859649122807016, "grad_norm": 0.5481623741259232, "learning_rate": 4.319184379564716e-05, "loss": 0.7452, "step": 40 }, { "epoch": 1.4210526315789473, "grad_norm": 0.6014133516410636, "learning_rate": 4.2781751742739885e-05, "loss": 0.686, "step": 41 }, { "epoch": 1.456140350877193, "grad_norm": 0.44416005870003245, "learning_rate": 4.2361738795750214e-05, "loss": 0.6433, "step": 42 }, { "epoch": 1.4912280701754386, "grad_norm": 0.46835862575073184, "learning_rate": 4.193203929064353e-05, "loss": 0.6381, "step": 43 }, { "epoch": 1.526315789473684, "grad_norm": 0.5015932878379395, "learning_rate": 4.1492892967763686e-05, "loss": 0.7615, "step": 44 }, { "epoch": 1.5614035087719298, "grad_norm": 0.49117089644567374, "learning_rate": 4.1044544838075794e-05, "loss": 0.7258, "step": 45 }, { "epoch": 1.5964912280701755, "grad_norm": 0.4310052429661563, "learning_rate": 4.058724504646834e-05, "loss": 0.6558, "step": 46 }, { "epoch": 1.631578947368421, "grad_norm": 0.46975098597348564, "learning_rate": 4.012124873219094e-05, "loss": 0.6587, "step": 47 }, { "epoch": 1.6666666666666665, "grad_norm": 0.46564012035747604, "learning_rate": 3.964681588650562e-05, "loss": 0.7071, "step": 48 }, { "epoch": 1.7017543859649122, "grad_norm": 0.4292070243922818, "learning_rate": 3.916421120763106e-05, "loss": 0.7292, "step": 49 }, { "epoch": 1.736842105263158, "grad_norm": 0.4700066192248402, "learning_rate": 3.867370395306068e-05, "loss": 0.7118, "step": 50 }, { "epoch": 1.7719298245614035, "grad_norm": 0.41414590310746596, "learning_rate": 3.817556778933698e-05, "loss": 0.7187, "step": 51 }, { "epoch": 1.807017543859649, "grad_norm": 
0.4934164034770572, "learning_rate": 3.7670080639366004e-05, "loss": 0.7425, "step": 52 }, { "epoch": 1.8421052631578947, "grad_norm": 0.4258237994121863, "learning_rate": 3.715752452735704e-05, "loss": 0.5943, "step": 53 }, { "epoch": 1.8771929824561404, "grad_norm": 0.5078698766882597, "learning_rate": 3.6638185421474084e-05, "loss": 0.7346, "step": 54 }, { "epoch": 1.912280701754386, "grad_norm": 0.44841215920564664, "learning_rate": 3.61123530742869e-05, "loss": 0.6742, "step": 55 }, { "epoch": 1.9473684210526314, "grad_norm": 0.39580481306947396, "learning_rate": 3.5580320861110625e-05, "loss": 0.7273, "step": 56 }, { "epoch": 1.9824561403508771, "grad_norm": 0.48313542261877046, "learning_rate": 3.504238561632424e-05, "loss": 0.732, "step": 57 }, { "epoch": 2.0, "grad_norm": 0.8524767663864421, "learning_rate": 3.4498847467759e-05, "loss": 0.6104, "step": 58 }, { "epoch": 2.0350877192982457, "grad_norm": 0.9497059015721913, "learning_rate": 3.3950009669249497e-05, "loss": 0.5313, "step": 59 }, { "epoch": 2.0701754385964914, "grad_norm": 2.6382740891339025, "learning_rate": 3.339617843144057e-05, "loss": 0.6382, "step": 60 }, { "epoch": 2.1052631578947367, "grad_norm": 2.9127895766727887, "learning_rate": 3.2837662750944535e-05, "loss": 0.557, "step": 61 }, { "epoch": 2.1403508771929824, "grad_norm": 0.5994326492557629, "learning_rate": 3.227477423794412e-05, "loss": 0.5275, "step": 62 }, { "epoch": 2.175438596491228, "grad_norm": 0.7402586486266488, "learning_rate": 3.170782694233712e-05, "loss": 0.5685, "step": 63 }, { "epoch": 2.2105263157894735, "grad_norm": 0.8114992716723906, "learning_rate": 3.1137137178519985e-05, "loss": 0.5608, "step": 64 }, { "epoch": 2.245614035087719, "grad_norm": 0.4638568586846813, "learning_rate": 3.056302334890786e-05, "loss": 0.5008, "step": 65 }, { "epoch": 2.280701754385965, "grad_norm": 0.5045959497131294, "learning_rate": 2.9985805766289817e-05, "loss": 0.5395, "step": 66 }, { "epoch": 2.3157894736842106, "grad_norm": 0.5579563118760068, "learning_rate": 2.9405806475118048e-05, "loss": 0.5138, "step": 67 }, { "epoch": 2.3508771929824563, "grad_norm": 0.5637466858166899, "learning_rate": 2.882334907183115e-05, "loss": 0.5698, "step": 68 }, { "epoch": 2.3859649122807016, "grad_norm": 0.4578787068510027, "learning_rate": 2.8238758524311314e-05, "loss": 0.5291, "step": 69 }, { "epoch": 2.4210526315789473, "grad_norm": 0.761131940332416, "learning_rate": 2.7652360990576453e-05, "loss": 0.6047, "step": 70 }, { "epoch": 2.456140350877193, "grad_norm": 0.5221912763262103, "learning_rate": 2.7064483636808313e-05, "loss": 0.5368, "step": 71 }, { "epoch": 2.4912280701754383, "grad_norm": 0.469481484634673, "learning_rate": 2.6475454454818073e-05, "loss": 0.5374, "step": 72 }, { "epoch": 2.526315789473684, "grad_norm": 0.48764324851448193, "learning_rate": 2.5885602079051353e-05, "loss": 0.4709, "step": 73 }, { "epoch": 2.56140350877193, "grad_norm": 0.5222547729689572, "learning_rate": 2.529525560323462e-05, "loss": 0.5577, "step": 74 }, { "epoch": 2.5964912280701755, "grad_norm": 0.48783850266999607, "learning_rate": 2.470474439676539e-05, "loss": 0.5425, "step": 75 }, { "epoch": 2.6315789473684212, "grad_norm": 0.5489576835164317, "learning_rate": 2.4114397920948657e-05, "loss": 0.5442, "step": 76 }, { "epoch": 2.6666666666666665, "grad_norm": 0.43701838993444003, "learning_rate": 2.3524545545181933e-05, "loss": 0.5711, "step": 77 }, { "epoch": 2.7017543859649122, "grad_norm": 0.3981876016589946, "learning_rate": 2.2935516363191693e-05, "loss": 0.5559, 
"step": 78 }, { "epoch": 2.736842105263158, "grad_norm": 0.43288188752005125, "learning_rate": 2.2347639009423553e-05, "loss": 0.5459, "step": 79 }, { "epoch": 2.7719298245614032, "grad_norm": 0.3879671575828641, "learning_rate": 2.1761241475688695e-05, "loss": 0.4994, "step": 80 }, { "epoch": 2.807017543859649, "grad_norm": 0.36710005595302997, "learning_rate": 2.117665092816885e-05, "loss": 0.4588, "step": 81 }, { "epoch": 2.8421052631578947, "grad_norm": 0.4298634595452112, "learning_rate": 2.059419352488196e-05, "loss": 0.5301, "step": 82 }, { "epoch": 2.8771929824561404, "grad_norm": 0.37298113858035087, "learning_rate": 2.0014194233710193e-05, "loss": 0.5323, "step": 83 }, { "epoch": 2.912280701754386, "grad_norm": 0.4497970060495327, "learning_rate": 1.9436976651092144e-05, "loss": 0.564, "step": 84 }, { "epoch": 2.9473684210526314, "grad_norm": 0.4115774413493238, "learning_rate": 1.8862862821480025e-05, "loss": 0.4881, "step": 85 }, { "epoch": 2.982456140350877, "grad_norm": 0.3499066739001436, "learning_rate": 1.829217305766289e-05, "loss": 0.5444, "step": 86 }, { "epoch": 3.0, "grad_norm": 0.3499066739001436, "learning_rate": 1.7725225762055887e-05, "loss": 0.3119, "step": 87 } ], "logging_steps": 1, "max_steps": 140, "num_input_tokens_seen": 0, "num_train_epochs": 5, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 39139677044736.0, "train_batch_size": 2, "trial_name": null, "trial_params": null }