{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 0,
"global_step": 216,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.004629629629629629,
"grad_norm": 1.2840936183929443,
"learning_rate": 1e-05,
"loss": 2.592,
"step": 1
},
{
"epoch": 0.009259259259259259,
"grad_norm": 1.2497975826263428,
"learning_rate": 9.953703703703704e-06,
"loss": 2.5955,
"step": 2
},
{
"epoch": 0.013888888888888888,
"grad_norm": 1.1559158563613892,
"learning_rate": 9.907407407407408e-06,
"loss": 2.5476,
"step": 3
},
{
"epoch": 0.018518518518518517,
"grad_norm": 1.2224756479263306,
"learning_rate": 9.861111111111112e-06,
"loss": 2.5497,
"step": 4
},
{
"epoch": 0.023148148148148147,
"grad_norm": 1.1082508563995361,
"learning_rate": 9.814814814814815e-06,
"loss": 2.5263,
"step": 5
},
{
"epoch": 0.027777777777777776,
"grad_norm": 1.0577224493026733,
"learning_rate": 9.768518518518519e-06,
"loss": 2.4677,
"step": 6
},
{
"epoch": 0.032407407407407406,
"grad_norm": 1.0209864377975464,
"learning_rate": 9.722222222222223e-06,
"loss": 2.4699,
"step": 7
},
{
"epoch": 0.037037037037037035,
"grad_norm": 1.0040022134780884,
"learning_rate": 9.675925925925926e-06,
"loss": 2.4408,
"step": 8
},
{
"epoch": 0.041666666666666664,
"grad_norm": 1.003764271736145,
"learning_rate": 9.62962962962963e-06,
"loss": 2.4429,
"step": 9
},
{
"epoch": 0.046296296296296294,
"grad_norm": 0.9816301465034485,
"learning_rate": 9.583333333333335e-06,
"loss": 2.4405,
"step": 10
},
{
"epoch": 0.05092592592592592,
"grad_norm": 0.9731383323669434,
"learning_rate": 9.537037037037037e-06,
"loss": 2.3876,
"step": 11
},
{
"epoch": 0.05555555555555555,
"grad_norm": 0.9626306891441345,
"learning_rate": 9.490740740740741e-06,
"loss": 2.458,
"step": 12
},
{
"epoch": 0.06018518518518518,
"grad_norm": 0.8909545540809631,
"learning_rate": 9.444444444444445e-06,
"loss": 2.3516,
"step": 13
},
{
"epoch": 0.06481481481481481,
"grad_norm": 0.863825261592865,
"learning_rate": 9.398148148148148e-06,
"loss": 2.3272,
"step": 14
},
{
"epoch": 0.06944444444444445,
"grad_norm": 0.8534576892852783,
"learning_rate": 9.351851851851854e-06,
"loss": 2.3088,
"step": 15
},
{
"epoch": 0.07407407407407407,
"grad_norm": 0.8410207033157349,
"learning_rate": 9.305555555555557e-06,
"loss": 2.3867,
"step": 16
},
{
"epoch": 0.0787037037037037,
"grad_norm": 0.8216672539710999,
"learning_rate": 9.25925925925926e-06,
"loss": 2.2946,
"step": 17
},
{
"epoch": 0.08333333333333333,
"grad_norm": 0.7917430400848389,
"learning_rate": 9.212962962962963e-06,
"loss": 2.3206,
"step": 18
},
{
"epoch": 0.08796296296296297,
"grad_norm": 0.7604931592941284,
"learning_rate": 9.166666666666666e-06,
"loss": 2.2694,
"step": 19
},
{
"epoch": 0.09259259259259259,
"grad_norm": 0.7862806916236877,
"learning_rate": 9.120370370370372e-06,
"loss": 2.3059,
"step": 20
},
{
"epoch": 0.09722222222222222,
"grad_norm": 0.7812303304672241,
"learning_rate": 9.074074074074075e-06,
"loss": 2.2944,
"step": 21
},
{
"epoch": 0.10185185185185185,
"grad_norm": 0.6887988448143005,
"learning_rate": 9.027777777777779e-06,
"loss": 2.1687,
"step": 22
},
{
"epoch": 0.10648148148148148,
"grad_norm": 0.7094734907150269,
"learning_rate": 8.981481481481483e-06,
"loss": 2.25,
"step": 23
},
{
"epoch": 0.1111111111111111,
"grad_norm": 0.735929548740387,
"learning_rate": 8.935185185185186e-06,
"loss": 2.2761,
"step": 24
},
{
"epoch": 0.11574074074074074,
"grad_norm": 0.7055741548538208,
"learning_rate": 8.888888888888888e-06,
"loss": 2.22,
"step": 25
},
{
"epoch": 0.12037037037037036,
"grad_norm": 0.6612609028816223,
"learning_rate": 8.842592592592594e-06,
"loss": 2.1327,
"step": 26
},
{
"epoch": 0.125,
"grad_norm": 0.6664066910743713,
"learning_rate": 8.796296296296297e-06,
"loss": 2.2184,
"step": 27
},
{
"epoch": 0.12962962962962962,
"grad_norm": 0.6584565043449402,
"learning_rate": 8.750000000000001e-06,
"loss": 2.2032,
"step": 28
},
{
"epoch": 0.13425925925925927,
"grad_norm": 0.653407096862793,
"learning_rate": 8.703703703703705e-06,
"loss": 2.1957,
"step": 29
},
{
"epoch": 0.1388888888888889,
"grad_norm": 0.6251302361488342,
"learning_rate": 8.657407407407408e-06,
"loss": 2.1389,
"step": 30
},
{
"epoch": 0.14351851851851852,
"grad_norm": 0.6295129656791687,
"learning_rate": 8.611111111111112e-06,
"loss": 2.1333,
"step": 31
},
{
"epoch": 0.14814814814814814,
"grad_norm": 0.6065976619720459,
"learning_rate": 8.564814814814816e-06,
"loss": 2.1016,
"step": 32
},
{
"epoch": 0.1527777777777778,
"grad_norm": 0.5833759307861328,
"learning_rate": 8.518518518518519e-06,
"loss": 2.1216,
"step": 33
},
{
"epoch": 0.1574074074074074,
"grad_norm": 0.580999493598938,
"learning_rate": 8.472222222222223e-06,
"loss": 2.1216,
"step": 34
},
{
"epoch": 0.16203703703703703,
"grad_norm": 0.5744687914848328,
"learning_rate": 8.425925925925926e-06,
"loss": 2.0704,
"step": 35
},
{
"epoch": 0.16666666666666666,
"grad_norm": 0.5671817660331726,
"learning_rate": 8.37962962962963e-06,
"loss": 2.1015,
"step": 36
},
{
"epoch": 0.1712962962962963,
"grad_norm": 0.5558776259422302,
"learning_rate": 8.333333333333334e-06,
"loss": 2.107,
"step": 37
},
{
"epoch": 0.17592592592592593,
"grad_norm": 0.5685739517211914,
"learning_rate": 8.287037037037037e-06,
"loss": 2.1195,
"step": 38
},
{
"epoch": 0.18055555555555555,
"grad_norm": 0.5277007818222046,
"learning_rate": 8.240740740740741e-06,
"loss": 2.0058,
"step": 39
},
{
"epoch": 0.18518518518518517,
"grad_norm": 0.5531312227249146,
"learning_rate": 8.194444444444445e-06,
"loss": 2.0461,
"step": 40
},
{
"epoch": 0.18981481481481483,
"grad_norm": 0.5435283780097961,
"learning_rate": 8.148148148148148e-06,
"loss": 2.0629,
"step": 41
},
{
"epoch": 0.19444444444444445,
"grad_norm": 0.5239248275756836,
"learning_rate": 8.101851851851854e-06,
"loss": 2.0484,
"step": 42
},
{
"epoch": 0.19907407407407407,
"grad_norm": 0.5030505061149597,
"learning_rate": 8.055555555555557e-06,
"loss": 2.0164,
"step": 43
},
{
"epoch": 0.2037037037037037,
"grad_norm": 0.5091701149940491,
"learning_rate": 8.00925925925926e-06,
"loss": 2.028,
"step": 44
},
{
"epoch": 0.20833333333333334,
"grad_norm": 0.5030836462974548,
"learning_rate": 7.962962962962963e-06,
"loss": 2.0184,
"step": 45
},
{
"epoch": 0.21296296296296297,
"grad_norm": 0.47289565205574036,
"learning_rate": 7.916666666666667e-06,
"loss": 2.009,
"step": 46
},
{
"epoch": 0.2175925925925926,
"grad_norm": 0.5161042809486389,
"learning_rate": 7.870370370370372e-06,
"loss": 2.0182,
"step": 47
},
{
"epoch": 0.2222222222222222,
"grad_norm": 0.48933666944503784,
"learning_rate": 7.824074074074076e-06,
"loss": 1.9673,
"step": 48
},
{
"epoch": 0.22685185185185186,
"grad_norm": 0.4742085933685303,
"learning_rate": 7.77777777777778e-06,
"loss": 2.0158,
"step": 49
},
{
"epoch": 0.23148148148148148,
"grad_norm": 0.47005364298820496,
"learning_rate": 7.731481481481483e-06,
"loss": 1.9924,
"step": 50
},
{
"epoch": 0.2361111111111111,
"grad_norm": 0.4893855154514313,
"learning_rate": 7.685185185185185e-06,
"loss": 1.9469,
"step": 51
},
{
"epoch": 0.24074074074074073,
"grad_norm": 0.4696884751319885,
"learning_rate": 7.638888888888888e-06,
"loss": 1.9769,
"step": 52
},
{
"epoch": 0.24537037037037038,
"grad_norm": 0.46378329396247864,
"learning_rate": 7.592592592592594e-06,
"loss": 1.9459,
"step": 53
},
{
"epoch": 0.25,
"grad_norm": 0.4593982994556427,
"learning_rate": 7.546296296296297e-06,
"loss": 1.999,
"step": 54
},
{
"epoch": 0.25462962962962965,
"grad_norm": 0.4580429494380951,
"learning_rate": 7.500000000000001e-06,
"loss": 1.9357,
"step": 55
},
{
"epoch": 0.25925925925925924,
"grad_norm": 0.46820005774497986,
"learning_rate": 7.453703703703704e-06,
"loss": 1.9138,
"step": 56
},
{
"epoch": 0.2638888888888889,
"grad_norm": 0.45933252573013306,
"learning_rate": 7.4074074074074075e-06,
"loss": 1.936,
"step": 57
},
{
"epoch": 0.26851851851851855,
"grad_norm": 0.5902265310287476,
"learning_rate": 7.361111111111112e-06,
"loss": 1.969,
"step": 58
},
{
"epoch": 0.27314814814814814,
"grad_norm": 0.46466007828712463,
"learning_rate": 7.314814814814816e-06,
"loss": 1.9416,
"step": 59
},
{
"epoch": 0.2777777777777778,
"grad_norm": 0.45180487632751465,
"learning_rate": 7.268518518518519e-06,
"loss": 1.944,
"step": 60
},
{
"epoch": 0.2824074074074074,
"grad_norm": 0.46131211519241333,
"learning_rate": 7.222222222222223e-06,
"loss": 1.9654,
"step": 61
},
{
"epoch": 0.28703703703703703,
"grad_norm": 0.45331230759620667,
"learning_rate": 7.1759259259259266e-06,
"loss": 1.9138,
"step": 62
},
{
"epoch": 0.2916666666666667,
"grad_norm": 0.43383753299713135,
"learning_rate": 7.129629629629629e-06,
"loss": 1.882,
"step": 63
},
{
"epoch": 0.2962962962962963,
"grad_norm": 0.4365609288215637,
"learning_rate": 7.083333333333335e-06,
"loss": 1.8858,
"step": 64
},
{
"epoch": 0.30092592592592593,
"grad_norm": 0.4502002000808716,
"learning_rate": 7.0370370370370375e-06,
"loss": 1.8772,
"step": 65
},
{
"epoch": 0.3055555555555556,
"grad_norm": 0.465324729681015,
"learning_rate": 6.990740740740741e-06,
"loss": 1.9398,
"step": 66
},
{
"epoch": 0.3101851851851852,
"grad_norm": 0.4249098598957062,
"learning_rate": 6.944444444444445e-06,
"loss": 1.8745,
"step": 67
},
{
"epoch": 0.3148148148148148,
"grad_norm": 0.436286062002182,
"learning_rate": 6.898148148148148e-06,
"loss": 1.9138,
"step": 68
},
{
"epoch": 0.3194444444444444,
"grad_norm": 0.43040260672569275,
"learning_rate": 6.851851851851853e-06,
"loss": 1.8652,
"step": 69
},
{
"epoch": 0.32407407407407407,
"grad_norm": 0.44443291425704956,
"learning_rate": 6.8055555555555566e-06,
"loss": 1.883,
"step": 70
},
{
"epoch": 0.3287037037037037,
"grad_norm": 0.44603466987609863,
"learning_rate": 6.75925925925926e-06,
"loss": 1.8576,
"step": 71
},
{
"epoch": 0.3333333333333333,
"grad_norm": 0.42672744393348694,
"learning_rate": 6.712962962962963e-06,
"loss": 1.8487,
"step": 72
},
{
"epoch": 0.33796296296296297,
"grad_norm": 0.42773088812828064,
"learning_rate": 6.666666666666667e-06,
"loss": 1.8638,
"step": 73
},
{
"epoch": 0.3425925925925926,
"grad_norm": 0.43208903074264526,
"learning_rate": 6.620370370370371e-06,
"loss": 1.8965,
"step": 74
},
{
"epoch": 0.3472222222222222,
"grad_norm": 0.454003244638443,
"learning_rate": 6.574074074074075e-06,
"loss": 1.9215,
"step": 75
},
{
"epoch": 0.35185185185185186,
"grad_norm": 0.43666955828666687,
"learning_rate": 6.5277777777777784e-06,
"loss": 1.8701,
"step": 76
},
{
"epoch": 0.35648148148148145,
"grad_norm": 0.4259486198425293,
"learning_rate": 6.481481481481482e-06,
"loss": 1.8115,
"step": 77
},
{
"epoch": 0.3611111111111111,
"grad_norm": 0.421562135219574,
"learning_rate": 6.435185185185186e-06,
"loss": 1.8425,
"step": 78
},
{
"epoch": 0.36574074074074076,
"grad_norm": 0.4166297912597656,
"learning_rate": 6.3888888888888885e-06,
"loss": 1.8147,
"step": 79
},
{
"epoch": 0.37037037037037035,
"grad_norm": 0.44431668519973755,
"learning_rate": 6.342592592592594e-06,
"loss": 1.8654,
"step": 80
},
{
"epoch": 0.375,
"grad_norm": 0.4198393225669861,
"learning_rate": 6.296296296296297e-06,
"loss": 1.884,
"step": 81
},
{
"epoch": 0.37962962962962965,
"grad_norm": 0.42111140489578247,
"learning_rate": 6.25e-06,
"loss": 1.8586,
"step": 82
},
{
"epoch": 0.38425925925925924,
"grad_norm": 0.4411139190196991,
"learning_rate": 6.203703703703704e-06,
"loss": 1.7706,
"step": 83
},
{
"epoch": 0.3888888888888889,
"grad_norm": 0.41565534472465515,
"learning_rate": 6.157407407407408e-06,
"loss": 1.8398,
"step": 84
},
{
"epoch": 0.39351851851851855,
"grad_norm": 0.43128618597984314,
"learning_rate": 6.111111111111112e-06,
"loss": 1.8371,
"step": 85
},
{
"epoch": 0.39814814814814814,
"grad_norm": 0.4438124895095825,
"learning_rate": 6.064814814814816e-06,
"loss": 1.868,
"step": 86
},
{
"epoch": 0.4027777777777778,
"grad_norm": 0.42422446608543396,
"learning_rate": 6.018518518518519e-06,
"loss": 1.834,
"step": 87
},
{
"epoch": 0.4074074074074074,
"grad_norm": 0.41801929473876953,
"learning_rate": 5.972222222222222e-06,
"loss": 1.8652,
"step": 88
},
{
"epoch": 0.41203703703703703,
"grad_norm": 0.42849692702293396,
"learning_rate": 5.925925925925926e-06,
"loss": 1.8603,
"step": 89
},
{
"epoch": 0.4166666666666667,
"grad_norm": 0.42593345046043396,
"learning_rate": 5.8796296296296295e-06,
"loss": 1.8334,
"step": 90
},
{
"epoch": 0.4212962962962963,
"grad_norm": 0.42619192600250244,
"learning_rate": 5.833333333333334e-06,
"loss": 1.7856,
"step": 91
},
{
"epoch": 0.42592592592592593,
"grad_norm": 0.4271165430545807,
"learning_rate": 5.787037037037038e-06,
"loss": 1.7967,
"step": 92
},
{
"epoch": 0.4305555555555556,
"grad_norm": 0.44702404737472534,
"learning_rate": 5.740740740740741e-06,
"loss": 1.8364,
"step": 93
},
{
"epoch": 0.4351851851851852,
"grad_norm": 0.4314418137073517,
"learning_rate": 5.694444444444445e-06,
"loss": 1.7939,
"step": 94
},
{
"epoch": 0.4398148148148148,
"grad_norm": 0.4346453547477722,
"learning_rate": 5.6481481481481485e-06,
"loss": 1.7767,
"step": 95
},
{
"epoch": 0.4444444444444444,
"grad_norm": 0.4198530614376068,
"learning_rate": 5.601851851851853e-06,
"loss": 1.7897,
"step": 96
},
{
"epoch": 0.44907407407407407,
"grad_norm": 0.449927419424057,
"learning_rate": 5.555555555555557e-06,
"loss": 1.798,
"step": 97
},
{
"epoch": 0.4537037037037037,
"grad_norm": 0.43222421407699585,
"learning_rate": 5.5092592592592595e-06,
"loss": 1.7874,
"step": 98
},
{
"epoch": 0.4583333333333333,
"grad_norm": 0.43431252241134644,
"learning_rate": 5.462962962962963e-06,
"loss": 1.78,
"step": 99
},
{
"epoch": 0.46296296296296297,
"grad_norm": 0.45050230622291565,
"learning_rate": 5.416666666666667e-06,
"loss": 1.768,
"step": 100
},
{
"epoch": 0.4675925925925926,
"grad_norm": 0.42318132519721985,
"learning_rate": 5.370370370370371e-06,
"loss": 1.7958,
"step": 101
},
{
"epoch": 0.4722222222222222,
"grad_norm": 0.4494766592979431,
"learning_rate": 5.324074074074075e-06,
"loss": 1.7956,
"step": 102
},
{
"epoch": 0.47685185185185186,
"grad_norm": 0.43541356921195984,
"learning_rate": 5.2777777777777785e-06,
"loss": 1.7959,
"step": 103
},
{
"epoch": 0.48148148148148145,
"grad_norm": 0.4341510832309723,
"learning_rate": 5.231481481481482e-06,
"loss": 1.8226,
"step": 104
},
{
"epoch": 0.4861111111111111,
"grad_norm": 0.43825647234916687,
"learning_rate": 5.185185185185185e-06,
"loss": 1.8148,
"step": 105
},
{
"epoch": 0.49074074074074076,
"grad_norm": 0.42040881514549255,
"learning_rate": 5.138888888888889e-06,
"loss": 1.7834,
"step": 106
},
{
"epoch": 0.49537037037037035,
"grad_norm": 0.4249044954776764,
"learning_rate": 5.092592592592593e-06,
"loss": 1.7924,
"step": 107
},
{
"epoch": 0.5,
"grad_norm": 0.4380439817905426,
"learning_rate": 5.046296296296297e-06,
"loss": 1.7732,
"step": 108
},
{
"epoch": 0.5046296296296297,
"grad_norm": 0.42866915464401245,
"learning_rate": 5e-06,
"loss": 1.7667,
"step": 109
},
{
"epoch": 0.5092592592592593,
"grad_norm": 0.46283015608787537,
"learning_rate": 4.953703703703704e-06,
"loss": 1.8107,
"step": 110
},
{
"epoch": 0.5138888888888888,
"grad_norm": 0.44372284412384033,
"learning_rate": 4.907407407407408e-06,
"loss": 1.7735,
"step": 111
},
{
"epoch": 0.5185185185185185,
"grad_norm": 0.4327738285064697,
"learning_rate": 4.861111111111111e-06,
"loss": 1.7344,
"step": 112
},
{
"epoch": 0.5231481481481481,
"grad_norm": 0.4242445230484009,
"learning_rate": 4.814814814814815e-06,
"loss": 1.7834,
"step": 113
},
{
"epoch": 0.5277777777777778,
"grad_norm": 0.4237624406814575,
"learning_rate": 4.768518518518519e-06,
"loss": 1.7874,
"step": 114
},
{
"epoch": 0.5324074074074074,
"grad_norm": 0.44031742215156555,
"learning_rate": 4.722222222222222e-06,
"loss": 1.754,
"step": 115
},
{
"epoch": 0.5370370370370371,
"grad_norm": 0.44006913900375366,
"learning_rate": 4.675925925925927e-06,
"loss": 1.8411,
"step": 116
},
{
"epoch": 0.5416666666666666,
"grad_norm": 0.42018765211105347,
"learning_rate": 4.62962962962963e-06,
"loss": 1.7611,
"step": 117
},
{
"epoch": 0.5462962962962963,
"grad_norm": 0.4189184308052063,
"learning_rate": 4.583333333333333e-06,
"loss": 1.7608,
"step": 118
},
{
"epoch": 0.5509259259259259,
"grad_norm": 0.4487444758415222,
"learning_rate": 4.537037037037038e-06,
"loss": 1.7308,
"step": 119
},
{
"epoch": 0.5555555555555556,
"grad_norm": 0.43615803122520447,
"learning_rate": 4.490740740740741e-06,
"loss": 1.7991,
"step": 120
},
{
"epoch": 0.5601851851851852,
"grad_norm": 0.44524088501930237,
"learning_rate": 4.444444444444444e-06,
"loss": 1.6982,
"step": 121
},
{
"epoch": 0.5648148148148148,
"grad_norm": 0.41494986414909363,
"learning_rate": 4.398148148148149e-06,
"loss": 1.7386,
"step": 122
},
{
"epoch": 0.5694444444444444,
"grad_norm": 0.42478981614112854,
"learning_rate": 4.351851851851852e-06,
"loss": 1.7154,
"step": 123
},
{
"epoch": 0.5740740740740741,
"grad_norm": 0.4177737832069397,
"learning_rate": 4.305555555555556e-06,
"loss": 1.7567,
"step": 124
},
{
"epoch": 0.5787037037037037,
"grad_norm": 0.41471073031425476,
"learning_rate": 4.2592592592592596e-06,
"loss": 1.757,
"step": 125
},
{
"epoch": 0.5833333333333334,
"grad_norm": 0.3962550461292267,
"learning_rate": 4.212962962962963e-06,
"loss": 1.7559,
"step": 126
},
{
"epoch": 0.5879629629629629,
"grad_norm": 0.4219653904438019,
"learning_rate": 4.166666666666667e-06,
"loss": 1.7109,
"step": 127
},
{
"epoch": 0.5925925925925926,
"grad_norm": 0.4559638798236847,
"learning_rate": 4.1203703703703705e-06,
"loss": 1.7746,
"step": 128
},
{
"epoch": 0.5972222222222222,
"grad_norm": 0.46450746059417725,
"learning_rate": 4.074074074074074e-06,
"loss": 1.7442,
"step": 129
},
{
"epoch": 0.6018518518518519,
"grad_norm": 0.4612331688404083,
"learning_rate": 4.027777777777779e-06,
"loss": 1.7993,
"step": 130
},
{
"epoch": 0.6064814814814815,
"grad_norm": 0.40595942735671997,
"learning_rate": 3.9814814814814814e-06,
"loss": 1.7207,
"step": 131
},
{
"epoch": 0.6111111111111112,
"grad_norm": 0.444697767496109,
"learning_rate": 3.935185185185186e-06,
"loss": 1.7626,
"step": 132
},
{
"epoch": 0.6157407407407407,
"grad_norm": 0.4202960133552551,
"learning_rate": 3.88888888888889e-06,
"loss": 1.7015,
"step": 133
},
{
"epoch": 0.6203703703703703,
"grad_norm": 0.425169974565506,
"learning_rate": 3.842592592592592e-06,
"loss": 1.7348,
"step": 134
},
{
"epoch": 0.625,
"grad_norm": 0.4239872694015503,
"learning_rate": 3.796296296296297e-06,
"loss": 1.7044,
"step": 135
},
{
"epoch": 0.6296296296296297,
"grad_norm": 0.4289877116680145,
"learning_rate": 3.7500000000000005e-06,
"loss": 1.7418,
"step": 136
},
{
"epoch": 0.6342592592592593,
"grad_norm": 0.4335390627384186,
"learning_rate": 3.7037037037037037e-06,
"loss": 1.7311,
"step": 137
},
{
"epoch": 0.6388888888888888,
"grad_norm": 0.44100385904312134,
"learning_rate": 3.657407407407408e-06,
"loss": 1.6752,
"step": 138
},
{
"epoch": 0.6435185185185185,
"grad_norm": 0.4602786600589752,
"learning_rate": 3.6111111111111115e-06,
"loss": 1.7786,
"step": 139
},
{
"epoch": 0.6481481481481481,
"grad_norm": 0.43911105394363403,
"learning_rate": 3.5648148148148147e-06,
"loss": 1.7343,
"step": 140
},
{
"epoch": 0.6527777777777778,
"grad_norm": 0.4818420708179474,
"learning_rate": 3.5185185185185187e-06,
"loss": 1.6351,
"step": 141
},
{
"epoch": 0.6574074074074074,
"grad_norm": 0.4308430552482605,
"learning_rate": 3.4722222222222224e-06,
"loss": 1.7021,
"step": 142
},
{
"epoch": 0.6620370370370371,
"grad_norm": 0.4618017077445984,
"learning_rate": 3.4259259259259265e-06,
"loss": 1.7931,
"step": 143
},
{
"epoch": 0.6666666666666666,
"grad_norm": 0.418113648891449,
"learning_rate": 3.37962962962963e-06,
"loss": 1.6651,
"step": 144
},
{
"epoch": 0.6712962962962963,
"grad_norm": 0.43931904435157776,
"learning_rate": 3.3333333333333333e-06,
"loss": 1.7363,
"step": 145
},
{
"epoch": 0.6759259259259259,
"grad_norm": 0.42020654678344727,
"learning_rate": 3.2870370370370374e-06,
"loss": 1.7501,
"step": 146
},
{
"epoch": 0.6805555555555556,
"grad_norm": 0.41723671555519104,
"learning_rate": 3.240740740740741e-06,
"loss": 1.7354,
"step": 147
},
{
"epoch": 0.6851851851851852,
"grad_norm": 0.44364774227142334,
"learning_rate": 3.1944444444444443e-06,
"loss": 1.6479,
"step": 148
},
{
"epoch": 0.6898148148148148,
"grad_norm": 0.41947969794273376,
"learning_rate": 3.1481481481481483e-06,
"loss": 1.7081,
"step": 149
},
{
"epoch": 0.6944444444444444,
"grad_norm": 0.42689067125320435,
"learning_rate": 3.101851851851852e-06,
"loss": 1.6716,
"step": 150
},
{
"epoch": 0.6990740740740741,
"grad_norm": 0.4190499484539032,
"learning_rate": 3.055555555555556e-06,
"loss": 1.6715,
"step": 151
},
{
"epoch": 0.7037037037037037,
"grad_norm": 0.4444749355316162,
"learning_rate": 3.0092592592592597e-06,
"loss": 1.7352,
"step": 152
},
{
"epoch": 0.7083333333333334,
"grad_norm": 0.4346008002758026,
"learning_rate": 2.962962962962963e-06,
"loss": 1.6889,
"step": 153
},
{
"epoch": 0.7129629629629629,
"grad_norm": 0.4162415862083435,
"learning_rate": 2.916666666666667e-06,
"loss": 1.6959,
"step": 154
},
{
"epoch": 0.7175925925925926,
"grad_norm": 0.41898995637893677,
"learning_rate": 2.8703703703703706e-06,
"loss": 1.6694,
"step": 155
},
{
"epoch": 0.7222222222222222,
"grad_norm": 0.41438576579093933,
"learning_rate": 2.8240740740740743e-06,
"loss": 1.6653,
"step": 156
},
{
"epoch": 0.7268518518518519,
"grad_norm": 0.5035732388496399,
"learning_rate": 2.7777777777777783e-06,
"loss": 1.7819,
"step": 157
},
{
"epoch": 0.7314814814814815,
"grad_norm": 0.4908410310745239,
"learning_rate": 2.7314814814814816e-06,
"loss": 1.7884,
"step": 158
},
{
"epoch": 0.7361111111111112,
"grad_norm": 0.4252499043941498,
"learning_rate": 2.6851851851851856e-06,
"loss": 1.6733,
"step": 159
},
{
"epoch": 0.7407407407407407,
"grad_norm": 0.42570921778678894,
"learning_rate": 2.6388888888888893e-06,
"loss": 1.706,
"step": 160
},
{
"epoch": 0.7453703703703703,
"grad_norm": 0.4818575978279114,
"learning_rate": 2.5925925925925925e-06,
"loss": 1.6386,
"step": 161
},
{
"epoch": 0.75,
"grad_norm": 0.4613746106624603,
"learning_rate": 2.5462962962962966e-06,
"loss": 1.7099,
"step": 162
},
{
"epoch": 0.7546296296296297,
"grad_norm": 0.4468544125556946,
"learning_rate": 2.5e-06,
"loss": 1.6778,
"step": 163
},
{
"epoch": 0.7592592592592593,
"grad_norm": 0.43980488181114197,
"learning_rate": 2.453703703703704e-06,
"loss": 1.6743,
"step": 164
},
{
"epoch": 0.7638888888888888,
"grad_norm": 0.4213511645793915,
"learning_rate": 2.4074074074074075e-06,
"loss": 1.6769,
"step": 165
},
{
"epoch": 0.7685185185185185,
"grad_norm": 0.43999916315078735,
"learning_rate": 2.361111111111111e-06,
"loss": 1.6725,
"step": 166
},
{
"epoch": 0.7731481481481481,
"grad_norm": 0.44871917366981506,
"learning_rate": 2.314814814814815e-06,
"loss": 1.713,
"step": 167
},
{
"epoch": 0.7777777777777778,
"grad_norm": 0.4404929578304291,
"learning_rate": 2.268518518518519e-06,
"loss": 1.6882,
"step": 168
},
{
"epoch": 0.7824074074074074,
"grad_norm": 0.4368396997451782,
"learning_rate": 2.222222222222222e-06,
"loss": 1.6467,
"step": 169
},
{
"epoch": 0.7870370370370371,
"grad_norm": 0.4457816183567047,
"learning_rate": 2.175925925925926e-06,
"loss": 1.6818,
"step": 170
},
{
"epoch": 0.7916666666666666,
"grad_norm": 0.43753132224082947,
"learning_rate": 2.1296296296296298e-06,
"loss": 1.7128,
"step": 171
},
{
"epoch": 0.7962962962962963,
"grad_norm": 0.44547879695892334,
"learning_rate": 2.0833333333333334e-06,
"loss": 1.694,
"step": 172
},
{
"epoch": 0.8009259259259259,
"grad_norm": 0.41959348320961,
"learning_rate": 2.037037037037037e-06,
"loss": 1.6486,
"step": 173
},
{
"epoch": 0.8055555555555556,
"grad_norm": 0.46483269333839417,
"learning_rate": 1.9907407407407407e-06,
"loss": 1.7278,
"step": 174
},
{
"epoch": 0.8101851851851852,
"grad_norm": 0.4479513168334961,
"learning_rate": 1.944444444444445e-06,
"loss": 1.7069,
"step": 175
},
{
"epoch": 0.8148148148148148,
"grad_norm": 0.4265024960041046,
"learning_rate": 1.8981481481481484e-06,
"loss": 1.718,
"step": 176
},
{
"epoch": 0.8194444444444444,
"grad_norm": 0.4072514474391937,
"learning_rate": 1.8518518518518519e-06,
"loss": 1.6924,
"step": 177
},
{
"epoch": 0.8240740740740741,
"grad_norm": 0.43383556604385376,
"learning_rate": 1.8055555555555557e-06,
"loss": 1.6466,
"step": 178
},
{
"epoch": 0.8287037037037037,
"grad_norm": 0.41848164796829224,
"learning_rate": 1.7592592592592594e-06,
"loss": 1.7023,
"step": 179
},
{
"epoch": 0.8333333333333334,
"grad_norm": 0.45098960399627686,
"learning_rate": 1.7129629629629632e-06,
"loss": 1.7352,
"step": 180
},
{
"epoch": 0.8379629629629629,
"grad_norm": 0.4191969335079193,
"learning_rate": 1.6666666666666667e-06,
"loss": 1.6721,
"step": 181
},
{
"epoch": 0.8425925925925926,
"grad_norm": 0.4636867344379425,
"learning_rate": 1.6203703703703705e-06,
"loss": 1.6469,
"step": 182
},
{
"epoch": 0.8472222222222222,
"grad_norm": 0.42679235339164734,
"learning_rate": 1.5740740740740742e-06,
"loss": 1.7365,
"step": 183
},
{
"epoch": 0.8518518518518519,
"grad_norm": 0.416758269071579,
"learning_rate": 1.527777777777778e-06,
"loss": 1.6835,
"step": 184
},
{
"epoch": 0.8564814814814815,
"grad_norm": 0.4596608281135559,
"learning_rate": 1.4814814814814815e-06,
"loss": 1.705,
"step": 185
},
{
"epoch": 0.8611111111111112,
"grad_norm": 0.4271976947784424,
"learning_rate": 1.4351851851851853e-06,
"loss": 1.6272,
"step": 186
},
{
"epoch": 0.8657407407407407,
"grad_norm": 0.42482131719589233,
"learning_rate": 1.3888888888888892e-06,
"loss": 1.6447,
"step": 187
},
{
"epoch": 0.8703703703703703,
"grad_norm": 0.40397384762763977,
"learning_rate": 1.3425925925925928e-06,
"loss": 1.6812,
"step": 188
},
{
"epoch": 0.875,
"grad_norm": 0.6595266461372375,
"learning_rate": 1.2962962962962962e-06,
"loss": 1.7041,
"step": 189
},
{
"epoch": 0.8796296296296297,
"grad_norm": 0.49033576250076294,
"learning_rate": 1.25e-06,
"loss": 1.7392,
"step": 190
},
{
"epoch": 0.8842592592592593,
"grad_norm": 0.4406794011592865,
"learning_rate": 1.2037037037037037e-06,
"loss": 1.7136,
"step": 191
},
{
"epoch": 0.8888888888888888,
"grad_norm": 0.4479171335697174,
"learning_rate": 1.1574074074074076e-06,
"loss": 1.7089,
"step": 192
},
{
"epoch": 0.8935185185185185,
"grad_norm": 0.4189581871032715,
"learning_rate": 1.111111111111111e-06,
"loss": 1.6785,
"step": 193
},
{
"epoch": 0.8981481481481481,
"grad_norm": 0.4524936079978943,
"learning_rate": 1.0648148148148149e-06,
"loss": 1.7061,
"step": 194
},
{
"epoch": 0.9027777777777778,
"grad_norm": 0.42082199454307556,
"learning_rate": 1.0185185185185185e-06,
"loss": 1.691,
"step": 195
},
{
"epoch": 0.9074074074074074,
"grad_norm": 0.4415839910507202,
"learning_rate": 9.722222222222224e-07,
"loss": 1.7295,
"step": 196
},
{
"epoch": 0.9120370370370371,
"grad_norm": 0.41390758752822876,
"learning_rate": 9.259259259259259e-07,
"loss": 1.7035,
"step": 197
},
{
"epoch": 0.9166666666666666,
"grad_norm": 0.42637038230895996,
"learning_rate": 8.796296296296297e-07,
"loss": 1.6505,
"step": 198
},
{
"epoch": 0.9212962962962963,
"grad_norm": 0.4274970591068268,
"learning_rate": 8.333333333333333e-07,
"loss": 1.6936,
"step": 199
},
{
"epoch": 0.9259259259259259,
"grad_norm": 0.42553502321243286,
"learning_rate": 7.870370370370371e-07,
"loss": 1.6519,
"step": 200
},
{
"epoch": 0.9305555555555556,
"grad_norm": 0.44404858350753784,
"learning_rate": 7.407407407407407e-07,
"loss": 1.6605,
"step": 201
},
{
"epoch": 0.9351851851851852,
"grad_norm": 0.4228724539279938,
"learning_rate": 6.944444444444446e-07,
"loss": 1.6861,
"step": 202
},
{
"epoch": 0.9398148148148148,
"grad_norm": 0.4268285036087036,
"learning_rate": 6.481481481481481e-07,
"loss": 1.6696,
"step": 203
},
{
"epoch": 0.9444444444444444,
"grad_norm": 0.6097678542137146,
"learning_rate": 6.018518518518519e-07,
"loss": 1.6768,
"step": 204
},
{
"epoch": 0.9490740740740741,
"grad_norm": 0.434126079082489,
"learning_rate": 5.555555555555555e-07,
"loss": 1.6355,
"step": 205
},
{
"epoch": 0.9537037037037037,
"grad_norm": 0.4083269238471985,
"learning_rate": 5.092592592592593e-07,
"loss": 1.6595,
"step": 206
},
{
"epoch": 0.9583333333333334,
"grad_norm": 0.4251357316970825,
"learning_rate": 4.6296296296296297e-07,
"loss": 1.7208,
"step": 207
},
{
"epoch": 0.9629629629629629,
"grad_norm": 0.4255342185497284,
"learning_rate": 4.1666666666666667e-07,
"loss": 1.7005,
"step": 208
},
{
"epoch": 0.9675925925925926,
"grad_norm": 0.4373617172241211,
"learning_rate": 3.7037037037037036e-07,
"loss": 1.7133,
"step": 209
},
{
"epoch": 0.9722222222222222,
"grad_norm": 0.4113195836544037,
"learning_rate": 3.2407407407407406e-07,
"loss": 1.6353,
"step": 210
},
{
"epoch": 0.9768518518518519,
"grad_norm": 0.4072490930557251,
"learning_rate": 2.7777777777777776e-07,
"loss": 1.6936,
"step": 211
},
{
"epoch": 0.9814814814814815,
"grad_norm": 0.44550320506095886,
"learning_rate": 2.3148148148148148e-07,
"loss": 1.6657,
"step": 212
},
{
"epoch": 0.9861111111111112,
"grad_norm": 0.4239332675933838,
"learning_rate": 1.8518518518518518e-07,
"loss": 1.6619,
"step": 213
},
{
"epoch": 0.9907407407407407,
"grad_norm": 0.4457761347293854,
"learning_rate": 1.3888888888888888e-07,
"loss": 1.6724,
"step": 214
},
{
"epoch": 0.9953703703703703,
"grad_norm": 0.44246068596839905,
"learning_rate": 9.259259259259259e-08,
"loss": 1.6661,
"step": 215
},
{
"epoch": 1.0,
"grad_norm": 0.43305960297584534,
"learning_rate": 4.6296296296296295e-08,
"loss": 1.7103,
"step": 216
}
],
"logging_steps": 1.0,
"max_steps": 216,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 0,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.0138967789797376e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}