{
"best_metric": 2.4975922107696533,
"best_model_checkpoint": "miner_id_24/checkpoint-100",
"epoch": 3.011764705882353,
"eval_steps": 50,
"global_step": 192,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01568627450980392,
"grad_norm": 0.9290716052055359,
"learning_rate": 1e-05,
"loss": 2.8736,
"step": 1
},
{
"epoch": 0.01568627450980392,
"eval_loss": 3.324666738510132,
"eval_runtime": 8.0111,
"eval_samples_per_second": 13.481,
"eval_steps_per_second": 3.37,
"step": 1
},
{
"epoch": 0.03137254901960784,
"grad_norm": 1.0439985990524292,
"learning_rate": 2e-05,
"loss": 2.8739,
"step": 2
},
{
"epoch": 0.047058823529411764,
"grad_norm": 1.0311352014541626,
"learning_rate": 3e-05,
"loss": 2.9072,
"step": 3
},
{
"epoch": 0.06274509803921569,
"grad_norm": 0.9413182735443115,
"learning_rate": 4e-05,
"loss": 2.9037,
"step": 4
},
{
"epoch": 0.0784313725490196,
"grad_norm": 0.9702916741371155,
"learning_rate": 5e-05,
"loss": 2.8073,
"step": 5
},
{
"epoch": 0.09411764705882353,
"grad_norm": 0.9648849964141846,
"learning_rate": 6e-05,
"loss": 2.9957,
"step": 6
},
{
"epoch": 0.10980392156862745,
"grad_norm": 0.8518514037132263,
"learning_rate": 7e-05,
"loss": 2.9002,
"step": 7
},
{
"epoch": 0.12549019607843137,
"grad_norm": 0.901975691318512,
"learning_rate": 8e-05,
"loss": 2.7139,
"step": 8
},
{
"epoch": 0.1411764705882353,
"grad_norm": 0.8090202808380127,
"learning_rate": 9e-05,
"loss": 2.7318,
"step": 9
},
{
"epoch": 0.1568627450980392,
"grad_norm": 0.8015203475952148,
"learning_rate": 0.0001,
"loss": 2.6717,
"step": 10
},
{
"epoch": 0.17254901960784313,
"grad_norm": 0.9336282014846802,
"learning_rate": 9.999255120204248e-05,
"loss": 2.6711,
"step": 11
},
{
"epoch": 0.18823529411764706,
"grad_norm": 1.0132514238357544,
"learning_rate": 9.997020702755353e-05,
"loss": 2.7361,
"step": 12
},
{
"epoch": 0.20392156862745098,
"grad_norm": 1.0461229085922241,
"learning_rate": 9.99329741340228e-05,
"loss": 2.6794,
"step": 13
},
{
"epoch": 0.2196078431372549,
"grad_norm": 1.241179347038269,
"learning_rate": 9.98808636150624e-05,
"loss": 2.6331,
"step": 14
},
{
"epoch": 0.23529411764705882,
"grad_norm": 1.49308180809021,
"learning_rate": 9.981389099710132e-05,
"loss": 3.0748,
"step": 15
},
{
"epoch": 0.25098039215686274,
"grad_norm": 0.5110021829605103,
"learning_rate": 9.973207623475965e-05,
"loss": 2.409,
"step": 16
},
{
"epoch": 0.26666666666666666,
"grad_norm": 0.6003811955451965,
"learning_rate": 9.96354437049027e-05,
"loss": 2.5082,
"step": 17
},
{
"epoch": 0.2823529411764706,
"grad_norm": 0.5671645998954773,
"learning_rate": 9.952402219937816e-05,
"loss": 2.5027,
"step": 18
},
{
"epoch": 0.2980392156862745,
"grad_norm": 0.5409471988677979,
"learning_rate": 9.939784491643734e-05,
"loss": 2.46,
"step": 19
},
{
"epoch": 0.3137254901960784,
"grad_norm": 0.5076523423194885,
"learning_rate": 9.92569494508437e-05,
"loss": 2.6268,
"step": 20
},
{
"epoch": 0.32941176470588235,
"grad_norm": 0.47187337279319763,
"learning_rate": 9.910137778267152e-05,
"loss": 2.5395,
"step": 21
},
{
"epoch": 0.34509803921568627,
"grad_norm": 0.4689958393573761,
"learning_rate": 9.893117626479777e-05,
"loss": 2.5222,
"step": 22
},
{
"epoch": 0.3607843137254902,
"grad_norm": 0.4607944190502167,
"learning_rate": 9.874639560909117e-05,
"loss": 2.4532,
"step": 23
},
{
"epoch": 0.3764705882352941,
"grad_norm": 0.5923854112625122,
"learning_rate": 9.85470908713026e-05,
"loss": 2.4775,
"step": 24
},
{
"epoch": 0.39215686274509803,
"grad_norm": 0.6515561938285828,
"learning_rate": 9.833332143466099e-05,
"loss": 2.5327,
"step": 25
},
{
"epoch": 0.40784313725490196,
"grad_norm": 0.5666857361793518,
"learning_rate": 9.810515099218003e-05,
"loss": 2.715,
"step": 26
},
{
"epoch": 0.4235294117647059,
"grad_norm": 0.6389107704162598,
"learning_rate": 9.78626475276808e-05,
"loss": 2.5737,
"step": 27
},
{
"epoch": 0.4392156862745098,
"grad_norm": 0.7637258172035217,
"learning_rate": 9.760588329553571e-05,
"loss": 2.6324,
"step": 28
},
{
"epoch": 0.4549019607843137,
"grad_norm": 0.8146405816078186,
"learning_rate": 9.73349347991403e-05,
"loss": 2.5954,
"step": 29
},
{
"epoch": 0.47058823529411764,
"grad_norm": 1.183030605316162,
"learning_rate": 9.704988276811883e-05,
"loss": 2.9496,
"step": 30
},
{
"epoch": 0.48627450980392156,
"grad_norm": 0.2972618639469147,
"learning_rate": 9.675081213427076e-05,
"loss": 2.3553,
"step": 31
},
{
"epoch": 0.5019607843137255,
"grad_norm": 0.4887743294239044,
"learning_rate": 9.643781200626511e-05,
"loss": 2.4257,
"step": 32
},
{
"epoch": 0.5176470588235295,
"grad_norm": 0.4153500497341156,
"learning_rate": 9.611097564309053e-05,
"loss": 2.5037,
"step": 33
},
{
"epoch": 0.5333333333333333,
"grad_norm": 0.41760119795799255,
"learning_rate": 9.577040042626833e-05,
"loss": 2.3801,
"step": 34
},
{
"epoch": 0.5490196078431373,
"grad_norm": 0.4032338857650757,
"learning_rate": 9.54161878308377e-05,
"loss": 2.4549,
"step": 35
},
{
"epoch": 0.5647058823529412,
"grad_norm": 0.4308023452758789,
"learning_rate": 9.504844339512095e-05,
"loss": 2.4362,
"step": 36
},
{
"epoch": 0.5803921568627451,
"grad_norm": 0.44621264934539795,
"learning_rate": 9.466727668927816e-05,
"loss": 2.5759,
"step": 37
},
{
"epoch": 0.596078431372549,
"grad_norm": 0.4338065981864929,
"learning_rate": 9.42728012826605e-05,
"loss": 2.4042,
"step": 38
},
{
"epoch": 0.611764705882353,
"grad_norm": 0.4249041974544525,
"learning_rate": 9.38651347099721e-05,
"loss": 2.3852,
"step": 39
},
{
"epoch": 0.6274509803921569,
"grad_norm": 0.46373647451400757,
"learning_rate": 9.344439843625034e-05,
"loss": 2.4953,
"step": 40
},
{
"epoch": 0.6431372549019608,
"grad_norm": 0.4927481412887573,
"learning_rate": 9.301071782067504e-05,
"loss": 2.4796,
"step": 41
},
{
"epoch": 0.6588235294117647,
"grad_norm": 0.6068108081817627,
"learning_rate": 9.256422207921757e-05,
"loss": 2.5241,
"step": 42
},
{
"epoch": 0.6745098039215687,
"grad_norm": 0.5682300925254822,
"learning_rate": 9.210504424614059e-05,
"loss": 2.4522,
"step": 43
},
{
"epoch": 0.6901960784313725,
"grad_norm": 0.6877283453941345,
"learning_rate": 9.163332113436032e-05,
"loss": 2.5321,
"step": 44
},
{
"epoch": 0.7058823529411765,
"grad_norm": 1.1677114963531494,
"learning_rate": 9.114919329468282e-05,
"loss": 2.8537,
"step": 45
},
{
"epoch": 0.7215686274509804,
"grad_norm": 0.38572782278060913,
"learning_rate": 9.065280497392663e-05,
"loss": 2.4599,
"step": 46
},
{
"epoch": 0.7372549019607844,
"grad_norm": 0.3719371557235718,
"learning_rate": 9.014430407194413e-05,
"loss": 2.4086,
"step": 47
},
{
"epoch": 0.7529411764705882,
"grad_norm": 0.3547096252441406,
"learning_rate": 8.962384209755452e-05,
"loss": 2.5402,
"step": 48
},
{
"epoch": 0.7686274509803922,
"grad_norm": 0.385681688785553,
"learning_rate": 8.90915741234015e-05,
"loss": 2.4216,
"step": 49
},
{
"epoch": 0.7843137254901961,
"grad_norm": 0.39732497930526733,
"learning_rate": 8.854765873974898e-05,
"loss": 2.597,
"step": 50
},
{
"epoch": 0.7843137254901961,
"eval_loss": 2.5103862285614014,
"eval_runtime": 8.1694,
"eval_samples_per_second": 13.22,
"eval_steps_per_second": 3.305,
"step": 50
},
{
"epoch": 0.8,
"grad_norm": 0.382482647895813,
"learning_rate": 8.799225800722895e-05,
"loss": 2.4412,
"step": 51
},
{
"epoch": 0.8156862745098039,
"grad_norm": 0.4492253065109253,
"learning_rate": 8.742553740855506e-05,
"loss": 2.4853,
"step": 52
},
{
"epoch": 0.8313725490196079,
"grad_norm": 0.40429365634918213,
"learning_rate": 8.684766579921684e-05,
"loss": 2.4598,
"step": 53
},
{
"epoch": 0.8470588235294118,
"grad_norm": 0.4485275447368622,
"learning_rate": 8.625881535716883e-05,
"loss": 2.4755,
"step": 54
},
{
"epoch": 0.8627450980392157,
"grad_norm": 0.44603973627090454,
"learning_rate": 8.565916153152983e-05,
"loss": 2.4749,
"step": 55
},
{
"epoch": 0.8784313725490196,
"grad_norm": 0.4761582612991333,
"learning_rate": 8.504888299030747e-05,
"loss": 2.6098,
"step": 56
},
{
"epoch": 0.8941176470588236,
"grad_norm": 0.510277271270752,
"learning_rate": 8.442816156716385e-05,
"loss": 2.5405,
"step": 57
},
{
"epoch": 0.9098039215686274,
"grad_norm": 0.5629740357398987,
"learning_rate": 8.379718220723773e-05,
"loss": 2.5641,
"step": 58
},
{
"epoch": 0.9254901960784314,
"grad_norm": 0.6572622060775757,
"learning_rate": 8.315613291203976e-05,
"loss": 2.5844,
"step": 59
},
{
"epoch": 0.9411764705882353,
"grad_norm": 0.9117251634597778,
"learning_rate": 8.250520468343722e-05,
"loss": 2.9142,
"step": 60
},
{
"epoch": 0.9568627450980393,
"grad_norm": 0.30293720960617065,
"learning_rate": 8.184459146674446e-05,
"loss": 2.3538,
"step": 61
},
{
"epoch": 0.9725490196078431,
"grad_norm": 0.37605491280555725,
"learning_rate": 8.117449009293668e-05,
"loss": 2.3623,
"step": 62
},
{
"epoch": 0.9882352941176471,
"grad_norm": 0.5062958002090454,
"learning_rate": 8.049510022000364e-05,
"loss": 2.5813,
"step": 63
},
{
"epoch": 1.003921568627451,
"grad_norm": 0.8249017596244812,
"learning_rate": 7.980662427346127e-05,
"loss": 3.606,
"step": 64
},
{
"epoch": 1.0196078431372548,
"grad_norm": 0.24899208545684814,
"learning_rate": 7.910926738603854e-05,
"loss": 2.1644,
"step": 65
},
{
"epoch": 1.035294117647059,
"grad_norm": 0.2910781502723694,
"learning_rate": 7.840323733655778e-05,
"loss": 2.2541,
"step": 66
},
{
"epoch": 1.0509803921568628,
"grad_norm": 0.3324323892593384,
"learning_rate": 7.768874448802665e-05,
"loss": 2.4114,
"step": 67
},
{
"epoch": 1.0666666666666667,
"grad_norm": 0.3214768171310425,
"learning_rate": 7.696600172495997e-05,
"loss": 2.2752,
"step": 68
},
{
"epoch": 1.0823529411764705,
"grad_norm": 0.34132134914398193,
"learning_rate": 7.62352243899504e-05,
"loss": 2.3824,
"step": 69
},
{
"epoch": 1.0980392156862746,
"grad_norm": 0.3731231689453125,
"learning_rate": 7.54966302195068e-05,
"loss": 2.4044,
"step": 70
},
{
"epoch": 1.1137254901960785,
"grad_norm": 0.3671027421951294,
"learning_rate": 7.475043927917907e-05,
"loss": 2.352,
"step": 71
},
{
"epoch": 1.1294117647058823,
"grad_norm": 0.3711940348148346,
"learning_rate": 7.399687389798933e-05,
"loss": 2.2605,
"step": 72
},
{
"epoch": 1.1450980392156862,
"grad_norm": 0.41749900579452515,
"learning_rate": 7.323615860218843e-05,
"loss": 2.36,
"step": 73
},
{
"epoch": 1.1607843137254903,
"grad_norm": 0.4730680286884308,
"learning_rate": 7.246852004835807e-05,
"loss": 2.3771,
"step": 74
},
{
"epoch": 1.1764705882352942,
"grad_norm": 0.482975035905838,
"learning_rate": 7.169418695587791e-05,
"loss": 2.3348,
"step": 75
},
{
"epoch": 1.192156862745098,
"grad_norm": 0.5463060140609741,
"learning_rate": 7.091339003877826e-05,
"loss": 2.318,
"step": 76
},
{
"epoch": 1.2078431372549019,
"grad_norm": 0.6054916381835938,
"learning_rate": 7.012636193699837e-05,
"loss": 2.1102,
"step": 77
},
{
"epoch": 1.223529411764706,
"grad_norm": 0.719634473323822,
"learning_rate": 6.933333714707094e-05,
"loss": 2.3898,
"step": 78
},
{
"epoch": 1.2392156862745098,
"grad_norm": 0.8080974221229553,
"learning_rate": 6.853455195225338e-05,
"loss": 2.6727,
"step": 79
},
{
"epoch": 1.2549019607843137,
"grad_norm": 0.33123213052749634,
"learning_rate": 6.773024435212678e-05,
"loss": 2.1137,
"step": 80
},
{
"epoch": 1.2705882352941176,
"grad_norm": 0.416637659072876,
"learning_rate": 6.692065399168352e-05,
"loss": 2.3176,
"step": 81
},
{
"epoch": 1.2862745098039214,
"grad_norm": 0.415536493062973,
"learning_rate": 6.610602208992454e-05,
"loss": 2.2767,
"step": 82
},
{
"epoch": 1.3019607843137255,
"grad_norm": 0.42290326952934265,
"learning_rate": 6.528659136798764e-05,
"loss": 2.277,
"step": 83
},
{
"epoch": 1.3176470588235294,
"grad_norm": 0.4520817697048187,
"learning_rate": 6.446260597682839e-05,
"loss": 2.3805,
"step": 84
},
{
"epoch": 1.3333333333333333,
"grad_norm": 0.46147623658180237,
"learning_rate": 6.363431142447469e-05,
"loss": 2.3688,
"step": 85
},
{
"epoch": 1.3490196078431373,
"grad_norm": 0.48055508732795715,
"learning_rate": 6.280195450287736e-05,
"loss": 2.3444,
"step": 86
},
{
"epoch": 1.3647058823529412,
"grad_norm": 0.477186381816864,
"learning_rate": 6.19657832143779e-05,
"loss": 2.4343,
"step": 87
},
{
"epoch": 1.380392156862745,
"grad_norm": 0.4650111794471741,
"learning_rate": 6.112604669781572e-05,
"loss": 2.2705,
"step": 88
},
{
"epoch": 1.396078431372549,
"grad_norm": 0.5212112069129944,
"learning_rate": 6.028299515429683e-05,
"loss": 2.3152,
"step": 89
},
{
"epoch": 1.4117647058823528,
"grad_norm": 0.5310537815093994,
"learning_rate": 5.943687977264584e-05,
"loss": 2.3124,
"step": 90
},
{
"epoch": 1.427450980392157,
"grad_norm": 0.60441654920578,
"learning_rate": 5.8587952654563817e-05,
"loss": 2.3926,
"step": 91
},
{
"epoch": 1.4431372549019608,
"grad_norm": 0.6553325057029724,
"learning_rate": 5.773646673951406e-05,
"loss": 2.2566,
"step": 92
},
{
"epoch": 1.4588235294117646,
"grad_norm": 0.8707178235054016,
"learning_rate": 5.688267572935842e-05,
"loss": 2.5228,
"step": 93
},
{
"epoch": 1.4745098039215687,
"grad_norm": 0.844674825668335,
"learning_rate": 5.602683401276615e-05,
"loss": 2.5466,
"step": 94
},
{
"epoch": 1.4901960784313726,
"grad_norm": 0.3334199786186218,
"learning_rate": 5.5169196589418504e-05,
"loss": 2.0263,
"step": 95
},
{
"epoch": 1.5058823529411764,
"grad_norm": 0.4047907888889313,
"learning_rate": 5.431001899403098e-05,
"loss": 2.3,
"step": 96
},
{
"epoch": 1.5215686274509803,
"grad_norm": 0.42536845803260803,
"learning_rate": 5.344955722021624e-05,
"loss": 2.3235,
"step": 97
},
{
"epoch": 1.5372549019607842,
"grad_norm": 0.42733073234558105,
"learning_rate": 5.258806764421048e-05,
"loss": 2.2457,
"step": 98
},
{
"epoch": 1.5529411764705883,
"grad_norm": 0.47130250930786133,
"learning_rate": 5.172580694848541e-05,
"loss": 2.3569,
"step": 99
},
{
"epoch": 1.5686274509803921,
"grad_norm": 0.49199551343917847,
"learning_rate": 5.086303204526943e-05,
"loss": 2.3505,
"step": 100
},
{
"epoch": 1.5686274509803921,
"eval_loss": 2.4975922107696533,
"eval_runtime": 8.1691,
"eval_samples_per_second": 13.221,
"eval_steps_per_second": 3.305,
"step": 100
},
{
"epoch": 1.5843137254901962,
"grad_norm": 0.5012519359588623,
"learning_rate": 5e-05,
"loss": 2.2786,
"step": 101
},
{
"epoch": 1.6,
"grad_norm": 0.5001832842826843,
"learning_rate": 4.913696795473058e-05,
"loss": 2.2554,
"step": 102
},
{
"epoch": 1.615686274509804,
"grad_norm": 0.5830171704292297,
"learning_rate": 4.827419305151461e-05,
"loss": 2.298,
"step": 103
},
{
"epoch": 1.6313725490196078,
"grad_norm": 0.5844489336013794,
"learning_rate": 4.741193235578952e-05,
"loss": 2.4365,
"step": 104
},
{
"epoch": 1.6470588235294117,
"grad_norm": 0.6321450471878052,
"learning_rate": 4.655044277978375e-05,
"loss": 2.3374,
"step": 105
},
{
"epoch": 1.6627450980392156,
"grad_norm": 0.6509003639221191,
"learning_rate": 4.568998100596903e-05,
"loss": 2.3349,
"step": 106
},
{
"epoch": 1.6784313725490196,
"grad_norm": 0.7752017378807068,
"learning_rate": 4.48308034105815e-05,
"loss": 2.2024,
"step": 107
},
{
"epoch": 1.6941176470588235,
"grad_norm": 0.8786823153495789,
"learning_rate": 4.397316598723385e-05,
"loss": 2.4999,
"step": 108
},
{
"epoch": 1.7098039215686276,
"grad_norm": 0.808777928352356,
"learning_rate": 4.31173242706416e-05,
"loss": 2.6242,
"step": 109
},
{
"epoch": 1.7254901960784315,
"grad_norm": 0.3257434666156769,
"learning_rate": 4.226353326048593e-05,
"loss": 1.9046,
"step": 110
},
{
"epoch": 1.7411764705882353,
"grad_norm": 0.4144049882888794,
"learning_rate": 4.1412047345436195e-05,
"loss": 2.2418,
"step": 111
},
{
"epoch": 1.7568627450980392,
"grad_norm": 0.4396112859249115,
"learning_rate": 4.056312022735417e-05,
"loss": 2.3271,
"step": 112
},
{
"epoch": 1.772549019607843,
"grad_norm": 0.4944157302379608,
"learning_rate": 3.971700484570318e-05,
"loss": 2.3628,
"step": 113
},
{
"epoch": 1.788235294117647,
"grad_norm": 0.4805627465248108,
"learning_rate": 3.887395330218429e-05,
"loss": 2.3454,
"step": 114
},
{
"epoch": 1.803921568627451,
"grad_norm": 0.5068280100822449,
"learning_rate": 3.803421678562213e-05,
"loss": 2.213,
"step": 115
},
{
"epoch": 1.8196078431372549,
"grad_norm": 0.533676028251648,
"learning_rate": 3.719804549712265e-05,
"loss": 2.2378,
"step": 116
},
{
"epoch": 1.835294117647059,
"grad_norm": 0.5281825065612793,
"learning_rate": 3.6365688575525315e-05,
"loss": 2.2165,
"step": 117
},
{
"epoch": 1.8509803921568628,
"grad_norm": 0.5699700713157654,
"learning_rate": 3.553739402317162e-05,
"loss": 2.3169,
"step": 118
},
{
"epoch": 1.8666666666666667,
"grad_norm": 0.590506911277771,
"learning_rate": 3.471340863201237e-05,
"loss": 2.2439,
"step": 119
},
{
"epoch": 1.8823529411764706,
"grad_norm": 0.6972747445106506,
"learning_rate": 3.389397791007548e-05,
"loss": 2.2499,
"step": 120
},
{
"epoch": 1.8980392156862744,
"grad_norm": 0.7098711133003235,
"learning_rate": 3.307934600831648e-05,
"loss": 2.1955,
"step": 121
},
{
"epoch": 1.9137254901960783,
"grad_norm": 0.8078807592391968,
"learning_rate": 3.226975564787322e-05,
"loss": 2.3609,
"step": 122
},
{
"epoch": 1.9294117647058824,
"grad_norm": 0.9425795674324036,
"learning_rate": 3.146544804774663e-05,
"loss": 2.2705,
"step": 123
},
{
"epoch": 1.9450980392156862,
"grad_norm": 0.9871799349784851,
"learning_rate": 3.066666285292906e-05,
"loss": 2.5466,
"step": 124
},
{
"epoch": 1.9607843137254903,
"grad_norm": 0.38264453411102295,
"learning_rate": 2.9873638063001628e-05,
"loss": 1.9602,
"step": 125
},
{
"epoch": 1.9764705882352942,
"grad_norm": 0.5518760085105896,
"learning_rate": 2.9086609961221755e-05,
"loss": 2.3357,
"step": 126
},
{
"epoch": 1.992156862745098,
"grad_norm": 0.9333047866821289,
"learning_rate": 2.8305813044122097e-05,
"loss": 2.5903,
"step": 127
},
{
"epoch": 2.007843137254902,
"grad_norm": 1.0014146566390991,
"learning_rate": 2.7531479951641924e-05,
"loss": 2.6325,
"step": 128
},
{
"epoch": 2.023529411764706,
"grad_norm": 0.38848909735679626,
"learning_rate": 2.6763841397811573e-05,
"loss": 2.0197,
"step": 129
},
{
"epoch": 2.0392156862745097,
"grad_norm": 0.4253176748752594,
"learning_rate": 2.6003126102010695e-05,
"loss": 2.1717,
"step": 130
},
{
"epoch": 2.0549019607843135,
"grad_norm": 0.44695788621902466,
"learning_rate": 2.5249560720820932e-05,
"loss": 2.2973,
"step": 131
},
{
"epoch": 2.070588235294118,
"grad_norm": 0.4659847319126129,
"learning_rate": 2.450336978049322e-05,
"loss": 2.1405,
"step": 132
},
{
"epoch": 2.0862745098039217,
"grad_norm": 0.4852655529975891,
"learning_rate": 2.37647756100496e-05,
"loss": 2.1411,
"step": 133
},
{
"epoch": 2.1019607843137256,
"grad_norm": 0.5167310833930969,
"learning_rate": 2.3033998275040046e-05,
"loss": 2.2418,
"step": 134
},
{
"epoch": 2.1176470588235294,
"grad_norm": 0.5107938647270203,
"learning_rate": 2.2311255511973345e-05,
"loss": 2.0234,
"step": 135
},
{
"epoch": 2.1333333333333333,
"grad_norm": 0.5627231597900391,
"learning_rate": 2.1596762663442218e-05,
"loss": 2.2439,
"step": 136
},
{
"epoch": 2.149019607843137,
"grad_norm": 0.5920283794403076,
"learning_rate": 2.0890732613961478e-05,
"loss": 2.1049,
"step": 137
},
{
"epoch": 2.164705882352941,
"grad_norm": 0.6250417232513428,
"learning_rate": 2.0193375726538737e-05,
"loss": 2.0543,
"step": 138
},
{
"epoch": 2.180392156862745,
"grad_norm": 0.7671651840209961,
"learning_rate": 1.9504899779996355e-05,
"loss": 2.013,
"step": 139
},
{
"epoch": 2.196078431372549,
"grad_norm": 0.7899158000946045,
"learning_rate": 1.8825509907063327e-05,
"loss": 1.9573,
"step": 140
},
{
"epoch": 2.211764705882353,
"grad_norm": 0.935232937335968,
"learning_rate": 1.8155408533255553e-05,
"loss": 2.1216,
"step": 141
},
{
"epoch": 2.227450980392157,
"grad_norm": 1.1227110624313354,
"learning_rate": 1.749479531656279e-05,
"loss": 2.005,
"step": 142
},
{
"epoch": 2.243137254901961,
"grad_norm": 0.7702217698097229,
"learning_rate": 1.684386708796025e-05,
"loss": 2.2587,
"step": 143
},
{
"epoch": 2.2588235294117647,
"grad_norm": 0.4433023929595947,
"learning_rate": 1.6202817792762282e-05,
"loss": 2.1471,
"step": 144
},
{
"epoch": 2.2745098039215685,
"grad_norm": 0.48942095041275024,
"learning_rate": 1.557183843283614e-05,
"loss": 2.2903,
"step": 145
},
{
"epoch": 2.2901960784313724,
"grad_norm": 0.5090365409851074,
"learning_rate": 1.4951117009692528e-05,
"loss": 2.2312,
"step": 146
},
{
"epoch": 2.3058823529411763,
"grad_norm": 0.5861947536468506,
"learning_rate": 1.4340838468470197e-05,
"loss": 2.1935,
"step": 147
},
{
"epoch": 2.3215686274509806,
"grad_norm": 0.6123110055923462,
"learning_rate": 1.3741184642831189e-05,
"loss": 2.3119,
"step": 148
},
{
"epoch": 2.3372549019607844,
"grad_norm": 0.6334862112998962,
"learning_rate": 1.3152334200783167e-05,
"loss": 2.2283,
"step": 149
},
{
"epoch": 2.3529411764705883,
"grad_norm": 0.6538926362991333,
"learning_rate": 1.257446259144494e-05,
"loss": 2.1705,
"step": 150
},
{
"epoch": 2.3529411764705883,
"eval_loss": 2.53293514251709,
"eval_runtime": 8.1691,
"eval_samples_per_second": 13.221,
"eval_steps_per_second": 3.305,
"step": 150
},
{
"epoch": 2.368627450980392,
"grad_norm": 0.6534598469734192,
"learning_rate": 1.2007741992771065e-05,
"loss": 2.0549,
"step": 151
},
{
"epoch": 2.384313725490196,
"grad_norm": 0.7089661359786987,
"learning_rate": 1.145234126025102e-05,
"loss": 2.11,
"step": 152
},
{
"epoch": 2.4,
"grad_norm": 0.7214992642402649,
"learning_rate": 1.090842587659851e-05,
"loss": 2.0214,
"step": 153
},
{
"epoch": 2.4156862745098038,
"grad_norm": 0.8083807826042175,
"learning_rate": 1.0376157902445488e-05,
"loss": 1.9978,
"step": 154
},
{
"epoch": 2.431372549019608,
"grad_norm": 0.8732563853263855,
"learning_rate": 9.85569592805588e-06,
"loss": 2.0557,
"step": 155
},
{
"epoch": 2.447058823529412,
"grad_norm": 0.9465264678001404,
"learning_rate": 9.347195026073369e-06,
"loss": 1.9518,
"step": 156
},
{
"epoch": 2.462745098039216,
"grad_norm": 1.2992017269134521,
"learning_rate": 8.850806705317183e-06,
"loss": 2.0409,
"step": 157
},
{
"epoch": 2.4784313725490197,
"grad_norm": 0.8976895809173584,
"learning_rate": 8.366678865639688e-06,
"loss": 2.1384,
"step": 158
},
{
"epoch": 2.4941176470588236,
"grad_norm": 0.4451674520969391,
"learning_rate": 7.894955753859413e-06,
"loss": 1.9495,
"step": 159
},
{
"epoch": 2.5098039215686274,
"grad_norm": 0.5084771513938904,
"learning_rate": 7.435777920782444e-06,
"loss": 2.1625,
"step": 160
},
{
"epoch": 2.5254901960784313,
"grad_norm": 0.5429627895355225,
"learning_rate": 6.989282179324963e-06,
"loss": 2.1547,
"step": 161
},
{
"epoch": 2.541176470588235,
"grad_norm": 0.5458650588989258,
"learning_rate": 6.555601563749675e-06,
"loss": 2.1326,
"step": 162
},
{
"epoch": 2.556862745098039,
"grad_norm": 0.6028746366500854,
"learning_rate": 6.1348652900279025e-06,
"loss": 2.2729,
"step": 163
},
{
"epoch": 2.572549019607843,
"grad_norm": 0.6017650365829468,
"learning_rate": 5.727198717339511e-06,
"loss": 2.1029,
"step": 164
},
{
"epoch": 2.588235294117647,
"grad_norm": 0.6778936982154846,
"learning_rate": 5.332723310721854e-06,
"loss": 2.155,
"step": 165
},
{
"epoch": 2.603921568627451,
"grad_norm": 0.716666042804718,
"learning_rate": 4.951556604879048e-06,
"loss": 2.1131,
"step": 166
},
{
"epoch": 2.619607843137255,
"grad_norm": 0.7246668338775635,
"learning_rate": 4.5838121691623e-06,
"loss": 2.0944,
"step": 167
},
{
"epoch": 2.635294117647059,
"grad_norm": 0.8063449263572693,
"learning_rate": 4.229599573731685e-06,
"loss": 2.1857,
"step": 168
},
{
"epoch": 2.6509803921568627,
"grad_norm": 0.8714430332183838,
"learning_rate": 3.8890243569094874e-06,
"loss": 2.2817,
"step": 169
},
{
"epoch": 2.6666666666666665,
"grad_norm": 0.9104402661323547,
"learning_rate": 3.5621879937348836e-06,
"loss": 2.0639,
"step": 170
},
{
"epoch": 2.682352941176471,
"grad_norm": 0.9760646820068359,
"learning_rate": 3.249187865729264e-06,
"loss": 1.984,
"step": 171
},
{
"epoch": 2.6980392156862747,
"grad_norm": 1.343449354171753,
"learning_rate": 2.950117231881183e-06,
"loss": 2.0159,
"step": 172
},
{
"epoch": 2.7137254901960786,
"grad_norm": 0.9933635592460632,
"learning_rate": 2.6650652008597068e-06,
"loss": 2.3773,
"step": 173
},
{
"epoch": 2.7294117647058824,
"grad_norm": 0.46657246351242065,
"learning_rate": 2.3941167044642944e-06,
"loss": 1.8924,
"step": 174
},
{
"epoch": 2.7450980392156863,
"grad_norm": 0.540969967842102,
"learning_rate": 2.137352472319215e-06,
"loss": 2.2537,
"step": 175
},
{
"epoch": 2.76078431372549,
"grad_norm": 0.5452009439468384,
"learning_rate": 1.8948490078199764e-06,
"loss": 2.2074,
"step": 176
},
{
"epoch": 2.776470588235294,
"grad_norm": 0.5477240681648254,
"learning_rate": 1.6666785653390249e-06,
"loss": 2.1253,
"step": 177
},
{
"epoch": 2.792156862745098,
"grad_norm": 0.6009814739227295,
"learning_rate": 1.4529091286973995e-06,
"loss": 2.2472,
"step": 178
},
{
"epoch": 2.8078431372549018,
"grad_norm": 0.6575978994369507,
"learning_rate": 1.2536043909088191e-06,
"loss": 2.2573,
"step": 179
},
{
"epoch": 2.8235294117647056,
"grad_norm": 0.6617922782897949,
"learning_rate": 1.0688237352022345e-06,
"loss": 2.2366,
"step": 180
},
{
"epoch": 2.83921568627451,
"grad_norm": 0.6877896785736084,
"learning_rate": 8.986222173284875e-07,
"loss": 2.1161,
"step": 181
},
{
"epoch": 2.854901960784314,
"grad_norm": 0.7269079089164734,
"learning_rate": 7.4305054915631e-07,
"loss": 2.1263,
"step": 182
},
{
"epoch": 2.8705882352941177,
"grad_norm": 0.7962198257446289,
"learning_rate": 6.021550835626777e-07,
"loss": 2.1707,
"step": 183
},
{
"epoch": 2.8862745098039215,
"grad_norm": 0.856226921081543,
"learning_rate": 4.7597780062184073e-07,
"loss": 2.1721,
"step": 184
},
{
"epoch": 2.9019607843137254,
"grad_norm": 0.9060380458831787,
"learning_rate": 3.6455629509730136e-07,
"loss": 1.9191,
"step": 185
},
{
"epoch": 2.9176470588235293,
"grad_norm": 0.9597678184509277,
"learning_rate": 2.6792376524036877e-07,
"loss": 1.8294,
"step": 186
},
{
"epoch": 2.9333333333333336,
"grad_norm": 1.1927050352096558,
"learning_rate": 1.8610900289867673e-07,
"loss": 2.0034,
"step": 187
},
{
"epoch": 2.9490196078431374,
"grad_norm": 0.9366448521614075,
"learning_rate": 1.191363849376237e-07,
"loss": 2.2449,
"step": 188
},
{
"epoch": 2.9647058823529413,
"grad_norm": 0.531377375125885,
"learning_rate": 6.702586597719385e-08,
"loss": 1.838,
"step": 189
},
{
"epoch": 2.980392156862745,
"grad_norm": 0.7152462005615234,
"learning_rate": 2.9792972446479605e-08,
"loss": 2.2187,
"step": 190
},
{
"epoch": 2.996078431372549,
"grad_norm": 1.2911417484283447,
"learning_rate": 7.448797957526621e-09,
"loss": 2.4433,
"step": 191
},
{
"epoch": 3.011764705882353,
"grad_norm": 0.6130143404006958,
"learning_rate": 0.0,
"loss": 2.2335,
"step": 192
}
],
"logging_steps": 1,
"max_steps": 192,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 1
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.62298517110784e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}