{ "best_global_step": null, "best_metric": null, "best_model_checkpoint": null, "epoch": 2.0, "eval_steps": 500, "global_step": 70, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.028985507246376812, "grad_norm": 11.274454167048727, "learning_rate": 0.0, "loss": 0.6357, "step": 1 }, { "epoch": 0.057971014492753624, "grad_norm": 10.143348640958312, "learning_rate": 9.090909090909091e-07, "loss": 0.6666, "step": 2 }, { "epoch": 0.08695652173913043, "grad_norm": 9.086409087474683, "learning_rate": 1.8181818181818183e-06, "loss": 0.6451, "step": 3 }, { "epoch": 0.11594202898550725, "grad_norm": 8.70666602748876, "learning_rate": 2.7272727272727272e-06, "loss": 0.6048, "step": 4 }, { "epoch": 0.14492753623188406, "grad_norm": 8.976482480167713, "learning_rate": 3.6363636363636366e-06, "loss": 0.5937, "step": 5 }, { "epoch": 0.17391304347826086, "grad_norm": 6.543196052759934, "learning_rate": 4.5454545454545455e-06, "loss": 0.4943, "step": 6 }, { "epoch": 0.2028985507246377, "grad_norm": 7.511256155710179, "learning_rate": 5.4545454545454545e-06, "loss": 0.4247, "step": 7 }, { "epoch": 0.2318840579710145, "grad_norm": 3.274069972289533, "learning_rate": 6.363636363636364e-06, "loss": 0.302, "step": 8 }, { "epoch": 0.2608695652173913, "grad_norm": 5.507527578193957, "learning_rate": 7.272727272727273e-06, "loss": 0.2751, "step": 9 }, { "epoch": 0.2898550724637681, "grad_norm": 3.651481150079746, "learning_rate": 8.181818181818183e-06, "loss": 0.2311, "step": 10 }, { "epoch": 0.3188405797101449, "grad_norm": 2.9570009811876314, "learning_rate": 9.090909090909091e-06, "loss": 0.2133, "step": 11 }, { "epoch": 0.34782608695652173, "grad_norm": 2.5680951348234355, "learning_rate": 1e-05, "loss": 0.2021, "step": 12 }, { "epoch": 0.37681159420289856, "grad_norm": 2.4726534615132394, "learning_rate": 9.997020702755353e-06, "loss": 0.2086, "step": 13 }, { "epoch": 0.4057971014492754, "grad_norm": 2.0168121750509207, "learning_rate": 9.98808636150624e-06, "loss": 0.1844, "step": 14 }, { "epoch": 0.43478260869565216, "grad_norm": 1.988964439347895, "learning_rate": 9.973207623475964e-06, "loss": 0.1561, "step": 15 }, { "epoch": 0.463768115942029, "grad_norm": 1.703404936865681, "learning_rate": 9.952402219937817e-06, "loss": 0.1561, "step": 16 }, { "epoch": 0.4927536231884058, "grad_norm": 1.4224019007386586, "learning_rate": 9.925694945084369e-06, "loss": 0.1359, "step": 17 }, { "epoch": 0.5217391304347826, "grad_norm": 1.390079768168245, "learning_rate": 9.893117626479778e-06, "loss": 0.1422, "step": 18 }, { "epoch": 0.5507246376811594, "grad_norm": 1.363268530198362, "learning_rate": 9.854709087130261e-06, "loss": 0.1448, "step": 19 }, { "epoch": 0.5797101449275363, "grad_norm": 1.2167548650541202, "learning_rate": 9.810515099218004e-06, "loss": 0.1276, "step": 20 }, { "epoch": 0.6086956521739131, "grad_norm": 1.2149476390607494, "learning_rate": 9.76058832955357e-06, "loss": 0.118, "step": 21 }, { "epoch": 0.6376811594202898, "grad_norm": 1.1518569512633139, "learning_rate": 9.704988276811883e-06, "loss": 0.1088, "step": 22 }, { "epoch": 0.6666666666666666, "grad_norm": 1.1392991979040483, "learning_rate": 9.643781200626512e-06, "loss": 0.1287, "step": 23 }, { "epoch": 0.6956521739130435, "grad_norm": 1.2433820634735937, "learning_rate": 9.577040042626832e-06, "loss": 0.1186, "step": 24 }, { "epoch": 0.7246376811594203, "grad_norm": 1.230070806896417, "learning_rate": 9.504844339512096e-06, "loss": 0.1212, "step": 25 }, { 
"epoch": 0.7536231884057971, "grad_norm": 1.276827967155928, "learning_rate": 9.427280128266049e-06, "loss": 0.1173, "step": 26 }, { "epoch": 0.782608695652174, "grad_norm": 1.1539744752759111, "learning_rate": 9.344439843625034e-06, "loss": 0.1145, "step": 27 }, { "epoch": 0.8115942028985508, "grad_norm": 1.101305700093271, "learning_rate": 9.256422207921757e-06, "loss": 0.1246, "step": 28 }, { "epoch": 0.8405797101449275, "grad_norm": 1.0720557642935318, "learning_rate": 9.163332113436031e-06, "loss": 0.118, "step": 29 }, { "epoch": 0.8695652173913043, "grad_norm": 1.0709210914227136, "learning_rate": 9.065280497392663e-06, "loss": 0.1097, "step": 30 }, { "epoch": 0.8985507246376812, "grad_norm": 1.3387140042516312, "learning_rate": 8.962384209755453e-06, "loss": 0.119, "step": 31 }, { "epoch": 0.927536231884058, "grad_norm": 1.2000636204494934, "learning_rate": 8.854765873974898e-06, "loss": 0.1103, "step": 32 }, { "epoch": 0.9565217391304348, "grad_norm": 1.1660635680582712, "learning_rate": 8.742553740855507e-06, "loss": 0.1168, "step": 33 }, { "epoch": 0.9855072463768116, "grad_norm": 1.0295558925452102, "learning_rate": 8.625881535716883e-06, "loss": 0.1047, "step": 34 }, { "epoch": 1.0, "grad_norm": 1.0295558925452102, "learning_rate": 8.504888299030748e-06, "loss": 0.0989, "step": 35 }, { "epoch": 1.0289855072463767, "grad_norm": 1.506591807470232, "learning_rate": 8.379718220723772e-06, "loss": 0.0736, "step": 36 }, { "epoch": 1.0579710144927537, "grad_norm": 0.846102643595135, "learning_rate": 8.250520468343722e-06, "loss": 0.0876, "step": 37 }, { "epoch": 1.0869565217391304, "grad_norm": 0.8226526583596483, "learning_rate": 8.117449009293668e-06, "loss": 0.0773, "step": 38 }, { "epoch": 1.1159420289855073, "grad_norm": 1.0811685151313555, "learning_rate": 7.980662427346127e-06, "loss": 0.0855, "step": 39 }, { "epoch": 1.144927536231884, "grad_norm": 0.9305493105996977, "learning_rate": 7.84032373365578e-06, "loss": 0.0923, "step": 40 }, { "epoch": 1.1739130434782608, "grad_norm": 0.9787521121239098, "learning_rate": 7.696600172495997e-06, "loss": 0.0747, "step": 41 }, { "epoch": 1.2028985507246377, "grad_norm": 0.9440280872408277, "learning_rate": 7.5496630219506805e-06, "loss": 0.0624, "step": 42 }, { "epoch": 1.2318840579710144, "grad_norm": 1.0168023667319657, "learning_rate": 7.399687389798933e-06, "loss": 0.0862, "step": 43 }, { "epoch": 1.2608695652173914, "grad_norm": 1.0534520932326086, "learning_rate": 7.246852004835807e-06, "loss": 0.0832, "step": 44 }, { "epoch": 1.289855072463768, "grad_norm": 0.8939408658703963, "learning_rate": 7.091339003877826e-06, "loss": 0.0779, "step": 45 }, { "epoch": 1.318840579710145, "grad_norm": 0.8502452526837244, "learning_rate": 6.933333714707094e-06, "loss": 0.0808, "step": 46 }, { "epoch": 1.3478260869565217, "grad_norm": 0.838670630249861, "learning_rate": 6.773024435212678e-06, "loss": 0.0688, "step": 47 }, { "epoch": 1.3768115942028984, "grad_norm": 0.801607798732365, "learning_rate": 6.6106022089924535e-06, "loss": 0.0897, "step": 48 }, { "epoch": 1.4057971014492754, "grad_norm": 0.8006660956713604, "learning_rate": 6.4462605976828395e-06, "loss": 0.0742, "step": 49 }, { "epoch": 1.434782608695652, "grad_norm": 0.8042569883275191, "learning_rate": 6.280195450287736e-06, "loss": 0.0953, "step": 50 }, { "epoch": 1.463768115942029, "grad_norm": 0.9038324935270676, "learning_rate": 6.112604669781572e-06, "loss": 0.0839, "step": 51 }, { "epoch": 1.4927536231884058, "grad_norm": 0.8460692397365317, "learning_rate": 
5.943687977264584e-06, "loss": 0.0771, "step": 52 }, { "epoch": 1.5217391304347827, "grad_norm": 0.8300524398027148, "learning_rate": 5.773646673951406e-06, "loss": 0.0824, "step": 53 }, { "epoch": 1.5507246376811594, "grad_norm": 0.8512930075204654, "learning_rate": 5.6026834012766155e-06, "loss": 0.0835, "step": 54 }, { "epoch": 1.5797101449275361, "grad_norm": 0.7732172809054101, "learning_rate": 5.4310018994030974e-06, "loss": 0.0787, "step": 55 }, { "epoch": 1.608695652173913, "grad_norm": 0.8694598913019425, "learning_rate": 5.258806764421048e-06, "loss": 0.0764, "step": 56 }, { "epoch": 1.6376811594202898, "grad_norm": 0.9361374054111669, "learning_rate": 5.0863032045269435e-06, "loss": 0.0773, "step": 57 }, { "epoch": 1.6666666666666665, "grad_norm": 0.8070286203847241, "learning_rate": 4.913696795473058e-06, "loss": 0.0814, "step": 58 }, { "epoch": 1.6956521739130435, "grad_norm": 0.9276738248259214, "learning_rate": 4.741193235578953e-06, "loss": 0.0924, "step": 59 }, { "epoch": 1.7246376811594204, "grad_norm": 0.8188739154958693, "learning_rate": 4.568998100596903e-06, "loss": 0.0659, "step": 60 }, { "epoch": 1.7536231884057971, "grad_norm": 0.7835730541507403, "learning_rate": 4.397316598723385e-06, "loss": 0.0827, "step": 61 }, { "epoch": 1.7826086956521738, "grad_norm": 0.8254420427828969, "learning_rate": 4.226353326048594e-06, "loss": 0.0777, "step": 62 }, { "epoch": 1.8115942028985508, "grad_norm": 0.8014760946487198, "learning_rate": 4.056312022735417e-06, "loss": 0.0799, "step": 63 }, { "epoch": 1.8405797101449275, "grad_norm": 0.909284799411466, "learning_rate": 3.887395330218429e-06, "loss": 0.0769, "step": 64 }, { "epoch": 1.8695652173913042, "grad_norm": 0.9101996746678516, "learning_rate": 3.7198045497122647e-06, "loss": 0.0783, "step": 65 }, { "epoch": 1.8985507246376812, "grad_norm": 0.7981223504922568, "learning_rate": 3.553739402317162e-06, "loss": 0.0769, "step": 66 }, { "epoch": 1.927536231884058, "grad_norm": 0.8131851608148943, "learning_rate": 3.389397791007548e-06, "loss": 0.0975, "step": 67 }, { "epoch": 1.9565217391304348, "grad_norm": 0.7422094984620301, "learning_rate": 3.226975564787322e-06, "loss": 0.0755, "step": 68 }, { "epoch": 1.9855072463768115, "grad_norm": 0.8122097222072056, "learning_rate": 3.0666662852929063e-06, "loss": 0.079, "step": 69 }, { "epoch": 2.0, "grad_norm": 1.2716537005367277, "learning_rate": 2.9086609961221758e-06, "loss": 0.066, "step": 70 } ], "logging_steps": 1, "max_steps": 102, "num_input_tokens_seen": 0, "num_train_epochs": 3, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 6977636007936.0, "train_batch_size": 8, "trial_name": null, "trial_params": null }