{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.842105263157895,
"eval_steps": 500,
"global_step": 140,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03508771929824561,
"grad_norm": 2.0673070244813063,
"learning_rate": 0.0,
"loss": 0.866,
"step": 1
},
{
"epoch": 0.07017543859649122,
"grad_norm": 2.290645149886701,
"learning_rate": 1.4285714285714286e-06,
"loss": 1.043,
"step": 2
},
{
"epoch": 0.10526315789473684,
"grad_norm": 2.0384407294665747,
"learning_rate": 2.8571428571428573e-06,
"loss": 0.9475,
"step": 3
},
{
"epoch": 0.14035087719298245,
"grad_norm": 2.0751386251214314,
"learning_rate": 4.2857142857142855e-06,
"loss": 1.0015,
"step": 4
},
{
"epoch": 0.17543859649122806,
"grad_norm": 1.8611754309476487,
"learning_rate": 5.7142857142857145e-06,
"loss": 1.0106,
"step": 5
},
{
"epoch": 0.21052631578947367,
"grad_norm": 1.5605070320275363,
"learning_rate": 7.1428571428571436e-06,
"loss": 0.9839,
"step": 6
},
{
"epoch": 0.24561403508771928,
"grad_norm": 1.323817282105345,
"learning_rate": 8.571428571428571e-06,
"loss": 0.9658,
"step": 7
},
{
"epoch": 0.2807017543859649,
"grad_norm": 1.1766396808562414,
"learning_rate": 1e-05,
"loss": 0.9877,
"step": 8
},
{
"epoch": 0.3157894736842105,
"grad_norm": 1.920335999253467,
"learning_rate": 9.998605186060138e-06,
"loss": 1.0056,
"step": 9
},
{
"epoch": 0.3508771929824561,
"grad_norm": 1.8982677371348105,
"learning_rate": 9.99442152244292e-06,
"loss": 0.9781,
"step": 10
},
{
"epoch": 0.38596491228070173,
"grad_norm": 1.7435846632830103,
"learning_rate": 9.98745134332128e-06,
"loss": 0.8702,
"step": 11
},
{
"epoch": 0.42105263157894735,
"grad_norm": 1.8684092704303354,
"learning_rate": 9.97769853753642e-06,
"loss": 0.8934,
"step": 12
},
{
"epoch": 0.45614035087719296,
"grad_norm": 1.761909365090902,
"learning_rate": 9.965168546428122e-06,
"loss": 0.9624,
"step": 13
},
{
"epoch": 0.49122807017543857,
"grad_norm": 1.3688611326166515,
"learning_rate": 9.949868360798893e-06,
"loss": 0.8695,
"step": 14
},
{
"epoch": 0.5263157894736842,
"grad_norm": 1.1087321036703561,
"learning_rate": 9.931806517013612e-06,
"loss": 0.8931,
"step": 15
},
{
"epoch": 0.5614035087719298,
"grad_norm": 0.8075436412081172,
"learning_rate": 9.910993092236878e-06,
"loss": 0.8353,
"step": 16
},
{
"epoch": 0.5964912280701754,
"grad_norm": 0.836233225723369,
"learning_rate": 9.887439698810694e-06,
"loss": 0.9445,
"step": 17
},
{
"epoch": 0.631578947368421,
"grad_norm": 0.715862149797478,
"learning_rate": 9.861159477775653e-06,
"loss": 0.8434,
"step": 18
},
{
"epoch": 0.6666666666666666,
"grad_norm": 0.7888763860979434,
"learning_rate": 9.832167091539215e-06,
"loss": 0.9075,
"step": 19
},
{
"epoch": 0.7017543859649122,
"grad_norm": 0.7782919163094735,
"learning_rate": 9.800478715695165e-06,
"loss": 0.9018,
"step": 20
},
{
"epoch": 0.7368421052631579,
"grad_norm": 0.8319523753602327,
"learning_rate": 9.766112029998847e-06,
"loss": 0.869,
"step": 21
},
{
"epoch": 0.7719298245614035,
"grad_norm": 0.7124372949071385,
"learning_rate": 9.729086208503174e-06,
"loss": 0.822,
"step": 22
},
{
"epoch": 0.8070175438596491,
"grad_norm": 0.7500909047451202,
"learning_rate": 9.689421908860928e-06,
"loss": 0.8867,
"step": 23
},
{
"epoch": 0.8421052631578947,
"grad_norm": 0.5886562659462773,
"learning_rate": 9.64714126079933e-06,
"loss": 0.8565,
"step": 24
},
{
"epoch": 0.8771929824561403,
"grad_norm": 0.5469375245913981,
"learning_rate": 9.602267853773301e-06,
"loss": 0.8436,
"step": 25
},
{
"epoch": 0.9122807017543859,
"grad_norm": 0.48742789244387086,
"learning_rate": 9.554826723804304e-06,
"loss": 0.91,
"step": 26
},
{
"epoch": 0.9473684210526315,
"grad_norm": 0.481986089398419,
"learning_rate": 9.504844339512096e-06,
"loss": 0.808,
"step": 27
},
{
"epoch": 0.9824561403508771,
"grad_norm": 0.5338814362033426,
"learning_rate": 9.452348587347224e-06,
"loss": 0.8373,
"step": 28
},
{
"epoch": 1.0,
"grad_norm": 0.5338814362033426,
"learning_rate": 9.397368756032445e-06,
"loss": 0.7497,
"step": 29
},
{
"epoch": 1.0350877192982457,
"grad_norm": 0.9216087684021367,
"learning_rate": 9.339935520221816e-06,
"loss": 0.8368,
"step": 30
},
{
"epoch": 1.0701754385964912,
"grad_norm": 0.5517402285558708,
"learning_rate": 9.280080923386501e-06,
"loss": 0.803,
"step": 31
},
{
"epoch": 1.1052631578947367,
"grad_norm": 0.4780695144971756,
"learning_rate": 9.217838359936914e-06,
"loss": 0.8483,
"step": 32
},
{
"epoch": 1.1403508771929824,
"grad_norm": 0.4352106079609395,
"learning_rate": 9.153242556591115e-06,
"loss": 0.8255,
"step": 33
},
{
"epoch": 1.1754385964912282,
"grad_norm": 0.41608709718428166,
"learning_rate": 9.08632955299989e-06,
"loss": 0.7893,
"step": 34
},
{
"epoch": 1.2105263157894737,
"grad_norm": 0.39539913867385906,
"learning_rate": 9.017136681639307e-06,
"loss": 0.8458,
"step": 35
},
{
"epoch": 1.2456140350877192,
"grad_norm": 0.4147873782216342,
"learning_rate": 8.94570254698197e-06,
"loss": 0.8336,
"step": 36
},
{
"epoch": 1.280701754385965,
"grad_norm": 0.4859811666166825,
"learning_rate": 8.872067003958597e-06,
"loss": 0.8216,
"step": 37
},
{
"epoch": 1.3157894736842106,
"grad_norm": 0.3825847458202985,
"learning_rate": 8.796271135721944e-06,
"loss": 0.7914,
"step": 38
},
{
"epoch": 1.3508771929824561,
"grad_norm": 0.38418403992840294,
"learning_rate": 8.71835723072545e-06,
"loss": 0.7468,
"step": 39
},
{
"epoch": 1.3859649122807016,
"grad_norm": 0.37144282673439744,
"learning_rate": 8.638368759129433e-06,
"loss": 0.8526,
"step": 40
},
{
"epoch": 1.4210526315789473,
"grad_norm": 0.4263001257500465,
"learning_rate": 8.556350348547978e-06,
"loss": 0.7826,
"step": 41
},
{
"epoch": 1.456140350877193,
"grad_norm": 0.3725947130107881,
"learning_rate": 8.472347759150044e-06,
"loss": 0.7509,
"step": 42
},
{
"epoch": 1.4912280701754386,
"grad_norm": 0.35479611609977757,
"learning_rate": 8.386407858128707e-06,
"loss": 0.7388,
"step": 43
},
{
"epoch": 1.526315789473684,
"grad_norm": 0.3523727317535909,
"learning_rate": 8.298578593552737e-06,
"loss": 0.8733,
"step": 44
},
{
"epoch": 1.5614035087719298,
"grad_norm": 0.35090923069349866,
"learning_rate": 8.208908967615159e-06,
"loss": 0.8275,
"step": 45
},
{
"epoch": 1.5964912280701755,
"grad_norm": 0.3363022296982819,
"learning_rate": 8.117449009293668e-06,
"loss": 0.7592,
"step": 46
},
{
"epoch": 1.631578947368421,
"grad_norm": 0.33371096186357996,
"learning_rate": 8.024249746438189e-06,
"loss": 0.7492,
"step": 47
},
{
"epoch": 1.6666666666666665,
"grad_norm": 0.3503795383305235,
"learning_rate": 7.929363177301124e-06,
"loss": 0.8032,
"step": 48
},
{
"epoch": 1.7017543859649122,
"grad_norm": 0.31772810586348393,
"learning_rate": 7.832842241526212e-06,
"loss": 0.8147,
"step": 49
},
{
"epoch": 1.736842105263158,
"grad_norm": 0.34558444572109065,
"learning_rate": 7.734740790612137e-06,
"loss": 0.8039,
"step": 50
},
{
"epoch": 1.7719298245614035,
"grad_norm": 0.3683078602862912,
"learning_rate": 7.635113557867395e-06,
"loss": 0.821,
"step": 51
},
{
"epoch": 1.807017543859649,
"grad_norm": 0.3779532923279179,
"learning_rate": 7.5340161278732e-06,
"loss": 0.8398,
"step": 52
},
{
"epoch": 1.8421052631578947,
"grad_norm": 0.34514628086251675,
"learning_rate": 7.431504905471407e-06,
"loss": 0.6881,
"step": 53
},
{
"epoch": 1.8771929824561404,
"grad_norm": 0.3766820846229699,
"learning_rate": 7.327637084294818e-06,
"loss": 0.8313,
"step": 54
},
{
"epoch": 1.912280701754386,
"grad_norm": 0.3715998928933267,
"learning_rate": 7.22247061485738e-06,
"loss": 0.7854,
"step": 55
},
{
"epoch": 1.9473684210526314,
"grad_norm": 0.3507736964447335,
"learning_rate": 7.1160641722221255e-06,
"loss": 0.8326,
"step": 56
},
{
"epoch": 1.9824561403508771,
"grad_norm": 0.34806693367183694,
"learning_rate": 7.008477123264849e-06,
"loss": 0.8366,
"step": 57
},
{
"epoch": 2.0,
"grad_norm": 0.5059705893154623,
"learning_rate": 6.8997694935518e-06,
"loss": 0.8023,
"step": 58
},
{
"epoch": 2.0350877192982457,
"grad_norm": 0.36732538969218886,
"learning_rate": 6.7900019338499005e-06,
"loss": 0.7269,
"step": 59
},
{
"epoch": 2.0701754385964914,
"grad_norm": 0.38816138679916923,
"learning_rate": 6.6792356862881144e-06,
"loss": 0.8553,
"step": 60
},
{
"epoch": 2.1052631578947367,
"grad_norm": 0.2980417664812599,
"learning_rate": 6.567532550188908e-06,
"loss": 0.7446,
"step": 61
},
{
"epoch": 2.1403508771929824,
"grad_norm": 0.33750149488854814,
"learning_rate": 6.454954847588824e-06,
"loss": 0.7341,
"step": 62
},
{
"epoch": 2.175438596491228,
"grad_norm": 0.3235359591643424,
"learning_rate": 6.341565388467425e-06,
"loss": 0.7762,
"step": 63
},
{
"epoch": 2.2105263157894735,
"grad_norm": 0.3188121927886282,
"learning_rate": 6.227427435703997e-06,
"loss": 0.7962,
"step": 64
},
{
"epoch": 2.245614035087719,
"grad_norm": 0.3250154832460792,
"learning_rate": 6.112604669781572e-06,
"loss": 0.691,
"step": 65
},
{
"epoch": 2.280701754385965,
"grad_norm": 0.34832993543964275,
"learning_rate": 5.997161153257963e-06,
"loss": 0.7472,
"step": 66
},
{
"epoch": 2.3157894736842106,
"grad_norm": 0.3346266356546261,
"learning_rate": 5.88116129502361e-06,
"loss": 0.7332,
"step": 67
},
{
"epoch": 2.3508771929824563,
"grad_norm": 0.3380898664917708,
"learning_rate": 5.764669814366231e-06,
"loss": 0.7963,
"step": 68
},
{
"epoch": 2.3859649122807016,
"grad_norm": 0.2938364946133426,
"learning_rate": 5.647751704862263e-06,
"loss": 0.7254,
"step": 69
},
{
"epoch": 2.4210526315789473,
"grad_norm": 0.33097036890842274,
"learning_rate": 5.530472198115291e-06,
"loss": 0.8319,
"step": 70
},
{
"epoch": 2.456140350877193,
"grad_norm": 0.3203526720947049,
"learning_rate": 5.412896727361663e-06,
"loss": 0.759,
"step": 71
},
{
"epoch": 2.4912280701754383,
"grad_norm": 0.30349016859987393,
"learning_rate": 5.2950908909636144e-06,
"loss": 0.7713,
"step": 72
},
{
"epoch": 2.526315789473684,
"grad_norm": 0.3129174721997617,
"learning_rate": 5.177120415810271e-06,
"loss": 0.701,
"step": 73
},
{
"epoch": 2.56140350877193,
"grad_norm": 0.311973264845113,
"learning_rate": 5.059051120646924e-06,
"loss": 0.7475,
"step": 74
},
{
"epoch": 2.5964912280701755,
"grad_norm": 0.3229947499800019,
"learning_rate": 4.940948879353078e-06,
"loss": 0.7584,
"step": 75
},
{
"epoch": 2.6315789473684212,
"grad_norm": 0.5066103233489299,
"learning_rate": 4.822879584189732e-06,
"loss": 0.7755,
"step": 76
},
{
"epoch": 2.6666666666666665,
"grad_norm": 0.31244071017364927,
"learning_rate": 4.704909109036387e-06,
"loss": 0.7986,
"step": 77
},
{
"epoch": 2.7017543859649122,
"grad_norm": 0.32284665228956844,
"learning_rate": 4.587103272638339e-06,
"loss": 0.7757,
"step": 78
},
{
"epoch": 2.736842105263158,
"grad_norm": 0.3186010169256378,
"learning_rate": 4.46952780188471e-06,
"loss": 0.7521,
"step": 79
},
{
"epoch": 2.7719298245614032,
"grad_norm": 0.28846269969958793,
"learning_rate": 4.352248295137739e-06,
"loss": 0.7074,
"step": 80
},
{
"epoch": 2.807017543859649,
"grad_norm": 0.3048547667354007,
"learning_rate": 4.23533018563377e-06,
"loss": 0.6802,
"step": 81
},
{
"epoch": 2.8421052631578947,
"grad_norm": 0.3386181101278266,
"learning_rate": 4.118838704976392e-06,
"loss": 0.7647,
"step": 82
},
{
"epoch": 2.8771929824561404,
"grad_norm": 0.3271528578378757,
"learning_rate": 4.002838846742039e-06,
"loss": 0.7617,
"step": 83
},
{
"epoch": 2.912280701754386,
"grad_norm": 0.32075888680959214,
"learning_rate": 3.887395330218429e-06,
"loss": 0.7706,
"step": 84
},
{
"epoch": 2.9473684210526314,
"grad_norm": 0.29833302106635273,
"learning_rate": 3.7725725642960047e-06,
"loss": 0.7099,
"step": 85
},
{
"epoch": 2.982456140350877,
"grad_norm": 0.3062957423483349,
"learning_rate": 3.658434611532578e-06,
"loss": 0.7638,
"step": 86
},
{
"epoch": 3.0,
"grad_norm": 0.3062957423483349,
"learning_rate": 3.545045152411178e-06,
"loss": 0.6088,
"step": 87
},
{
"epoch": 3.0350877192982457,
"grad_norm": 0.5482814905537367,
"learning_rate": 3.4324674498110956e-06,
"loss": 0.7223,
"step": 88
},
{
"epoch": 3.0701754385964914,
"grad_norm": 0.41862251591120203,
"learning_rate": 3.3207643137118872e-06,
"loss": 0.7913,
"step": 89
},
{
"epoch": 3.1052631578947367,
"grad_norm": 0.3244934215825421,
"learning_rate": 3.2099980661501016e-06,
"loss": 0.7557,
"step": 90
},
{
"epoch": 3.1403508771929824,
"grad_norm": 0.3969196085560306,
"learning_rate": 3.1002305064482006e-06,
"loss": 0.7263,
"step": 91
},
{
"epoch": 3.175438596491228,
"grad_norm": 0.43992957203194183,
"learning_rate": 2.991522876735154e-06,
"loss": 0.7216,
"step": 92
},
{
"epoch": 3.2105263157894735,
"grad_norm": 0.41651928918050923,
"learning_rate": 2.8839358277778758e-06,
"loss": 0.8204,
"step": 93
},
{
"epoch": 3.245614035087719,
"grad_norm": 0.2986644588563747,
"learning_rate": 2.7775293851426233e-06,
"loss": 0.6971,
"step": 94
},
{
"epoch": 3.280701754385965,
"grad_norm": 0.2802522062222716,
"learning_rate": 2.6723629157051844e-06,
"loss": 0.6917,
"step": 95
},
{
"epoch": 3.3157894736842106,
"grad_norm": 0.40318713749421614,
"learning_rate": 2.5684950945285937e-06,
"loss": 0.7642,
"step": 96
},
{
"epoch": 3.3508771929824563,
"grad_norm": 0.3425889553518609,
"learning_rate": 2.4659838721268005e-06,
"loss": 0.7384,
"step": 97
},
{
"epoch": 3.3859649122807016,
"grad_norm": 0.30677645652568974,
"learning_rate": 2.364886442132606e-06,
"loss": 0.6411,
"step": 98
},
{
"epoch": 3.4210526315789473,
"grad_norm": 0.30070095711232125,
"learning_rate": 2.265259209387867e-06,
"loss": 0.6619,
"step": 99
},
{
"epoch": 3.456140350877193,
"grad_norm": 0.3218588836728724,
"learning_rate": 2.16715775847379e-06,
"loss": 0.6622,
"step": 100
},
{
"epoch": 3.4912280701754383,
"grad_norm": 0.2879660334650254,
"learning_rate": 2.0706368226988772e-06,
"loss": 0.6959,
"step": 101
},
{
"epoch": 3.526315789473684,
"grad_norm": 0.34066475296129334,
"learning_rate": 1.9757502535618137e-06,
"loss": 0.6848,
"step": 102
},
{
"epoch": 3.56140350877193,
"grad_norm": 0.2796823907822852,
"learning_rate": 1.8825509907063328e-06,
"loss": 0.6986,
"step": 103
},
{
"epoch": 3.5964912280701755,
"grad_norm": 0.34218464755974587,
"learning_rate": 1.7910910323848435e-06,
"loss": 0.6903,
"step": 104
},
{
"epoch": 3.6315789473684212,
"grad_norm": 0.3201128199990079,
"learning_rate": 1.7014214064472646e-06,
"loss": 0.614,
"step": 105
},
{
"epoch": 3.6666666666666665,
"grad_norm": 0.30125374157139934,
"learning_rate": 1.6135921418712959e-06,
"loss": 0.6915,
"step": 106
},
{
"epoch": 3.7017543859649122,
"grad_norm": 0.304680180290739,
"learning_rate": 1.5276522408499567e-06,
"loss": 0.6943,
"step": 107
},
{
"epoch": 3.736842105263158,
"grad_norm": 0.3621378255456493,
"learning_rate": 1.4436496514520253e-06,
"loss": 0.7568,
"step": 108
},
{
"epoch": 3.7719298245614032,
"grad_norm": 0.30215239608253547,
"learning_rate": 1.361631240870569e-06,
"loss": 0.7791,
"step": 109
},
{
"epoch": 3.807017543859649,
"grad_norm": 0.2989892612179986,
"learning_rate": 1.281642769274552e-06,
"loss": 0.7399,
"step": 110
},
{
"epoch": 3.8421052631578947,
"grad_norm": 0.31314273131226966,
"learning_rate": 1.2037288642780575e-06,
"loss": 0.7435,
"step": 111
},
{
"epoch": 3.8771929824561404,
"grad_norm": 0.3142500922087154,
"learning_rate": 1.1279329960414047e-06,
"loss": 0.6847,
"step": 112
},
{
"epoch": 3.912280701754386,
"grad_norm": 0.29926703136108684,
"learning_rate": 1.0542974530180327e-06,
"loss": 0.6994,
"step": 113
},
{
"epoch": 3.9473684210526314,
"grad_norm": 0.27501540645654055,
"learning_rate": 9.82863318360695e-07,
"loss": 0.7181,
"step": 114
},
{
"epoch": 3.982456140350877,
"grad_norm": 0.2895489612529346,
"learning_rate": 9.136704470001101e-07,
"loss": 0.7339,
"step": 115
},
{
"epoch": 4.0,
"grad_norm": 0.49026129635730115,
"learning_rate": 8.46757443408886e-07,
"loss": 0.6741,
"step": 116
},
{
"epoch": 4.035087719298246,
"grad_norm": 0.3178598574719659,
"learning_rate": 7.821616400630866e-07,
"loss": 0.6855,
"step": 117
},
{
"epoch": 4.0701754385964914,
"grad_norm": 0.363536923752346,
"learning_rate": 7.199190766135001e-07,
"loss": 0.6937,
"step": 118
},
{
"epoch": 4.105263157894737,
"grad_norm": 0.3170276756005372,
"learning_rate": 6.600644797781847e-07,
"loss": 0.7208,
"step": 119
},
{
"epoch": 4.140350877192983,
"grad_norm": 0.31066860165653054,
"learning_rate": 6.026312439675553e-07,
"loss": 0.6932,
"step": 120
},
{
"epoch": 4.175438596491228,
"grad_norm": 0.31518476145134705,
"learning_rate": 5.476514126527771e-07,
"loss": 0.6702,
"step": 121
},
{
"epoch": 4.2105263157894735,
"grad_norm": 0.2992971948787984,
"learning_rate": 4.951556604879049e-07,
"loss": 0.6438,
"step": 122
},
{
"epoch": 4.245614035087719,
"grad_norm": 0.4156443389342463,
"learning_rate": 4.4517327619569784e-07,
"loss": 0.6876,
"step": 123
},
{
"epoch": 4.280701754385965,
"grad_norm": 0.3213893768577189,
"learning_rate": 3.9773214622669974e-07,
"loss": 0.7109,
"step": 124
},
{
"epoch": 4.315789473684211,
"grad_norm": 0.29988541816282643,
"learning_rate": 3.528587392006716e-07,
"loss": 0.7561,
"step": 125
},
{
"epoch": 4.350877192982456,
"grad_norm": 0.3129487444676496,
"learning_rate": 3.105780911390738e-07,
"loss": 0.7051,
"step": 126
},
{
"epoch": 4.385964912280702,
"grad_norm": 0.3592055992496657,
"learning_rate": 2.7091379149682683e-07,
"loss": 0.7052,
"step": 127
},
{
"epoch": 4.421052631578947,
"grad_norm": 0.2912336365524275,
"learning_rate": 2.3388797000115427e-07,
"loss": 0.7159,
"step": 128
},
{
"epoch": 4.456140350877193,
"grad_norm": 0.28213898200794857,
"learning_rate": 1.9952128430483718e-07,
"loss": 0.7201,
"step": 129
},
{
"epoch": 4.491228070175438,
"grad_norm": 0.287153447612373,
"learning_rate": 1.6783290846078714e-07,
"loss": 0.772,
"step": 130
},
{
"epoch": 4.526315789473684,
"grad_norm": 0.2975453038642912,
"learning_rate": 1.388405222243472e-07,
"loss": 0.6801,
"step": 131
},
{
"epoch": 4.56140350877193,
"grad_norm": 0.30216919034385753,
"learning_rate": 1.1256030118930727e-07,
"loss": 0.7095,
"step": 132
},
{
"epoch": 4.5964912280701755,
"grad_norm": 0.33725665688405765,
"learning_rate": 8.900690776312282e-08,
"loss": 0.6186,
"step": 133
},
{
"epoch": 4.631578947368421,
"grad_norm": 0.2885141464272949,
"learning_rate": 6.819348298638839e-08,
"loss": 0.618,
"step": 134
},
{
"epoch": 4.666666666666667,
"grad_norm": 0.3151545606250234,
"learning_rate": 5.013163920110864e-08,
"loss": 0.6912,
"step": 135
},
{
"epoch": 4.701754385964913,
"grad_norm": 0.3210608790204161,
"learning_rate": 3.483145357187967e-08,
"loss": 0.6663,
"step": 136
},
{
"epoch": 4.7368421052631575,
"grad_norm": 0.29638656127915125,
"learning_rate": 2.230146246358256e-08,
"loss": 0.7137,
"step": 137
},
{
"epoch": 4.771929824561403,
"grad_norm": 0.2842147099084156,
"learning_rate": 1.2548656678721404e-08,
"loss": 0.7475,
"step": 138
},
{
"epoch": 4.807017543859649,
"grad_norm": 0.33549357153059817,
"learning_rate": 5.578477557081074e-09,
"loss": 0.6913,
"step": 139
},
{
"epoch": 4.842105263157895,
"grad_norm": 0.28359612174705795,
"learning_rate": 1.3948139398628492e-09,
"loss": 0.6749,
"step": 140
}
],
"logging_steps": 1,
"max_steps": 140,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 63037458677760.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}