{
"best_metric": 10.350366592407227,
"best_model_checkpoint": "miner_id_24/checkpoint-200",
"epoch": 0.05534034311012728,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0002767017155506364,
"grad_norm": 0.046716853976249695,
"learning_rate": 1.004e-05,
"loss": 10.3814,
"step": 1
},
{
"epoch": 0.0002767017155506364,
"eval_loss": 10.376320838928223,
"eval_runtime": 15.9184,
"eval_samples_per_second": 95.613,
"eval_steps_per_second": 23.935,
"step": 1
},
{
"epoch": 0.0005534034311012728,
"grad_norm": 0.04734182357788086,
"learning_rate": 2.008e-05,
"loss": 10.3809,
"step": 2
},
{
"epoch": 0.0008301051466519093,
"grad_norm": 0.04102853313088417,
"learning_rate": 3.012e-05,
"loss": 10.3778,
"step": 3
},
{
"epoch": 0.0011068068622025456,
"grad_norm": 0.041233424097299576,
"learning_rate": 4.016e-05,
"loss": 10.3751,
"step": 4
},
{
"epoch": 0.001383508577753182,
"grad_norm": 0.05065109208226204,
"learning_rate": 5.02e-05,
"loss": 10.3812,
"step": 5
},
{
"epoch": 0.0016602102933038186,
"grad_norm": 0.05412878468632698,
"learning_rate": 6.024e-05,
"loss": 10.3768,
"step": 6
},
{
"epoch": 0.001936912008854455,
"grad_norm": 0.04957752302289009,
"learning_rate": 7.028e-05,
"loss": 10.3776,
"step": 7
},
{
"epoch": 0.002213613724405091,
"grad_norm": 0.05057466775178909,
"learning_rate": 8.032e-05,
"loss": 10.3766,
"step": 8
},
{
"epoch": 0.0024903154399557276,
"grad_norm": 0.05437202751636505,
"learning_rate": 9.036000000000001e-05,
"loss": 10.3739,
"step": 9
},
{
"epoch": 0.002767017155506364,
"grad_norm": 0.0426546111702919,
"learning_rate": 0.0001004,
"loss": 10.3796,
"step": 10
},
{
"epoch": 0.0030437188710570003,
"grad_norm": 0.051293499767780304,
"learning_rate": 9.987157894736842e-05,
"loss": 10.3754,
"step": 11
},
{
"epoch": 0.003320420586607637,
"grad_norm": 0.05515759438276291,
"learning_rate": 9.934315789473684e-05,
"loss": 10.3752,
"step": 12
},
{
"epoch": 0.0035971223021582736,
"grad_norm": 0.04741832986474037,
"learning_rate": 9.881473684210525e-05,
"loss": 10.3754,
"step": 13
},
{
"epoch": 0.00387382401770891,
"grad_norm": 0.05781078711152077,
"learning_rate": 9.828631578947369e-05,
"loss": 10.3728,
"step": 14
},
{
"epoch": 0.004150525733259546,
"grad_norm": 0.0660712867975235,
"learning_rate": 9.77578947368421e-05,
"loss": 10.3796,
"step": 15
},
{
"epoch": 0.004427227448810182,
"grad_norm": 0.07684075087308884,
"learning_rate": 9.722947368421052e-05,
"loss": 10.3778,
"step": 16
},
{
"epoch": 0.004703929164360819,
"grad_norm": 0.0709812268614769,
"learning_rate": 9.670105263157895e-05,
"loss": 10.3737,
"step": 17
},
{
"epoch": 0.004980630879911455,
"grad_norm": 0.061015963554382324,
"learning_rate": 9.617263157894737e-05,
"loss": 10.3762,
"step": 18
},
{
"epoch": 0.005257332595462092,
"grad_norm": 0.07617278397083282,
"learning_rate": 9.564421052631579e-05,
"loss": 10.3813,
"step": 19
},
{
"epoch": 0.005534034311012728,
"grad_norm": 0.08400306850671768,
"learning_rate": 9.511578947368421e-05,
"loss": 10.3741,
"step": 20
},
{
"epoch": 0.005810736026563365,
"grad_norm": 0.0774376168847084,
"learning_rate": 9.458736842105264e-05,
"loss": 10.3732,
"step": 21
},
{
"epoch": 0.006087437742114001,
"grad_norm": 0.10185623168945312,
"learning_rate": 9.405894736842106e-05,
"loss": 10.3749,
"step": 22
},
{
"epoch": 0.0063641394576646375,
"grad_norm": 0.08428505808115005,
"learning_rate": 9.353052631578947e-05,
"loss": 10.3778,
"step": 23
},
{
"epoch": 0.006640841173215274,
"grad_norm": 0.08830801397562027,
"learning_rate": 9.300210526315789e-05,
"loss": 10.3672,
"step": 24
},
{
"epoch": 0.00691754288876591,
"grad_norm": 0.07905872166156769,
"learning_rate": 9.247368421052631e-05,
"loss": 10.3702,
"step": 25
},
{
"epoch": 0.007194244604316547,
"grad_norm": 0.08956081420183182,
"learning_rate": 9.194526315789473e-05,
"loss": 10.3715,
"step": 26
},
{
"epoch": 0.007470946319867183,
"grad_norm": 0.0873691737651825,
"learning_rate": 9.141684210526316e-05,
"loss": 10.3684,
"step": 27
},
{
"epoch": 0.00774764803541782,
"grad_norm": 0.10989569872617722,
"learning_rate": 9.088842105263158e-05,
"loss": 10.3697,
"step": 28
},
{
"epoch": 0.008024349750968456,
"grad_norm": 0.14163243770599365,
"learning_rate": 9.036000000000001e-05,
"loss": 10.3718,
"step": 29
},
{
"epoch": 0.008301051466519093,
"grad_norm": 0.12550966441631317,
"learning_rate": 8.983157894736843e-05,
"loss": 10.3708,
"step": 30
},
{
"epoch": 0.00857775318206973,
"grad_norm": 0.1360364854335785,
"learning_rate": 8.930315789473684e-05,
"loss": 10.3644,
"step": 31
},
{
"epoch": 0.008854454897620365,
"grad_norm": 0.1274397373199463,
"learning_rate": 8.877473684210526e-05,
"loss": 10.3699,
"step": 32
},
{
"epoch": 0.009131156613171001,
"grad_norm": 0.12157135456800461,
"learning_rate": 8.824631578947368e-05,
"loss": 10.3672,
"step": 33
},
{
"epoch": 0.009407858328721638,
"grad_norm": 0.19225279986858368,
"learning_rate": 8.771789473684211e-05,
"loss": 10.3583,
"step": 34
},
{
"epoch": 0.009684560044272275,
"grad_norm": 0.15720342099666595,
"learning_rate": 8.718947368421053e-05,
"loss": 10.3649,
"step": 35
},
{
"epoch": 0.00996126175982291,
"grad_norm": 0.13139154016971588,
"learning_rate": 8.666105263157895e-05,
"loss": 10.3724,
"step": 36
},
{
"epoch": 0.010237963475373547,
"grad_norm": 0.14383310079574585,
"learning_rate": 8.613263157894737e-05,
"loss": 10.3627,
"step": 37
},
{
"epoch": 0.010514665190924184,
"grad_norm": 0.14333775639533997,
"learning_rate": 8.560421052631578e-05,
"loss": 10.3654,
"step": 38
},
{
"epoch": 0.01079136690647482,
"grad_norm": 0.16654908657073975,
"learning_rate": 8.50757894736842e-05,
"loss": 10.3671,
"step": 39
},
{
"epoch": 0.011068068622025456,
"grad_norm": 0.16506536304950714,
"learning_rate": 8.454736842105263e-05,
"loss": 10.3597,
"step": 40
},
{
"epoch": 0.011344770337576093,
"grad_norm": 0.15948215126991272,
"learning_rate": 8.401894736842106e-05,
"loss": 10.3626,
"step": 41
},
{
"epoch": 0.01162147205312673,
"grad_norm": 0.17302924394607544,
"learning_rate": 8.349052631578948e-05,
"loss": 10.3545,
"step": 42
},
{
"epoch": 0.011898173768677366,
"grad_norm": 0.15607234835624695,
"learning_rate": 8.29621052631579e-05,
"loss": 10.3536,
"step": 43
},
{
"epoch": 0.012174875484228001,
"grad_norm": 0.20499636232852936,
"learning_rate": 8.243368421052632e-05,
"loss": 10.364,
"step": 44
},
{
"epoch": 0.012451577199778638,
"grad_norm": 0.1812644749879837,
"learning_rate": 8.190526315789474e-05,
"loss": 10.3575,
"step": 45
},
{
"epoch": 0.012728278915329275,
"grad_norm": 0.2293720543384552,
"learning_rate": 8.137684210526315e-05,
"loss": 10.3609,
"step": 46
},
{
"epoch": 0.013004980630879912,
"grad_norm": 0.1858595609664917,
"learning_rate": 8.084842105263157e-05,
"loss": 10.3587,
"step": 47
},
{
"epoch": 0.013281682346430549,
"grad_norm": 0.17006252706050873,
"learning_rate": 8.032e-05,
"loss": 10.356,
"step": 48
},
{
"epoch": 0.013558384061981184,
"grad_norm": 0.1877298206090927,
"learning_rate": 7.979157894736842e-05,
"loss": 10.3568,
"step": 49
},
{
"epoch": 0.01383508577753182,
"grad_norm": 0.25946682691574097,
"learning_rate": 7.926315789473684e-05,
"loss": 10.3564,
"step": 50
},
{
"epoch": 0.01383508577753182,
"eval_loss": 10.358550071716309,
"eval_runtime": 15.8823,
"eval_samples_per_second": 95.83,
"eval_steps_per_second": 23.989,
"step": 50
},
{
"epoch": 0.014111787493082457,
"grad_norm": 0.11852291226387024,
"learning_rate": 7.873473684210526e-05,
"loss": 10.3728,
"step": 51
},
{
"epoch": 0.014388489208633094,
"grad_norm": 0.15151363611221313,
"learning_rate": 7.820631578947369e-05,
"loss": 10.3692,
"step": 52
},
{
"epoch": 0.01466519092418373,
"grad_norm": 0.11771726608276367,
"learning_rate": 7.76778947368421e-05,
"loss": 10.3745,
"step": 53
},
{
"epoch": 0.014941892639734366,
"grad_norm": 0.1431136578321457,
"learning_rate": 7.714947368421052e-05,
"loss": 10.365,
"step": 54
},
{
"epoch": 0.015218594355285003,
"grad_norm": 0.12969781458377838,
"learning_rate": 7.662105263157896e-05,
"loss": 10.3685,
"step": 55
},
{
"epoch": 0.01549529607083564,
"grad_norm": 0.11919277906417847,
"learning_rate": 7.609263157894737e-05,
"loss": 10.3637,
"step": 56
},
{
"epoch": 0.015771997786386275,
"grad_norm": 0.13429664075374603,
"learning_rate": 7.556421052631579e-05,
"loss": 10.3578,
"step": 57
},
{
"epoch": 0.016048699501936912,
"grad_norm": 0.1704893857240677,
"learning_rate": 7.503578947368421e-05,
"loss": 10.3618,
"step": 58
},
{
"epoch": 0.01632540121748755,
"grad_norm": 0.11079560965299606,
"learning_rate": 7.450736842105263e-05,
"loss": 10.357,
"step": 59
},
{
"epoch": 0.016602102933038185,
"grad_norm": 0.15897336602210999,
"learning_rate": 7.397894736842105e-05,
"loss": 10.361,
"step": 60
},
{
"epoch": 0.016878804648588822,
"grad_norm": 0.14476776123046875,
"learning_rate": 7.345052631578948e-05,
"loss": 10.3518,
"step": 61
},
{
"epoch": 0.01715550636413946,
"grad_norm": 0.0889514610171318,
"learning_rate": 7.29221052631579e-05,
"loss": 10.3566,
"step": 62
},
{
"epoch": 0.017432208079690092,
"grad_norm": 0.1303914487361908,
"learning_rate": 7.239368421052631e-05,
"loss": 10.358,
"step": 63
},
{
"epoch": 0.01770890979524073,
"grad_norm": 0.10085094720125198,
"learning_rate": 7.186526315789474e-05,
"loss": 10.3564,
"step": 64
},
{
"epoch": 0.017985611510791366,
"grad_norm": 0.1356130838394165,
"learning_rate": 7.133684210526316e-05,
"loss": 10.3594,
"step": 65
},
{
"epoch": 0.018262313226342003,
"grad_norm": 0.14437317848205566,
"learning_rate": 7.080842105263158e-05,
"loss": 10.3588,
"step": 66
},
{
"epoch": 0.01853901494189264,
"grad_norm": 0.14342361688613892,
"learning_rate": 7.028e-05,
"loss": 10.3508,
"step": 67
},
{
"epoch": 0.018815716657443277,
"grad_norm": 0.10618889331817627,
"learning_rate": 6.975157894736843e-05,
"loss": 10.3556,
"step": 68
},
{
"epoch": 0.019092418372993913,
"grad_norm": 0.0874563455581665,
"learning_rate": 6.922315789473685e-05,
"loss": 10.3545,
"step": 69
},
{
"epoch": 0.01936912008854455,
"grad_norm": 0.08817718178033829,
"learning_rate": 6.869473684210527e-05,
"loss": 10.3491,
"step": 70
},
{
"epoch": 0.019645821804095187,
"grad_norm": 0.08227040618658066,
"learning_rate": 6.816631578947368e-05,
"loss": 10.3534,
"step": 71
},
{
"epoch": 0.01992252351964582,
"grad_norm": 0.09500890225172043,
"learning_rate": 6.76378947368421e-05,
"loss": 10.3512,
"step": 72
},
{
"epoch": 0.020199225235196457,
"grad_norm": 0.1020575761795044,
"learning_rate": 6.710947368421052e-05,
"loss": 10.3534,
"step": 73
},
{
"epoch": 0.020475926950747094,
"grad_norm": 0.11576978862285614,
"learning_rate": 6.658105263157894e-05,
"loss": 10.3497,
"step": 74
},
{
"epoch": 0.02075262866629773,
"grad_norm": 0.1325415074825287,
"learning_rate": 6.605263157894737e-05,
"loss": 10.3571,
"step": 75
},
{
"epoch": 0.021029330381848368,
"grad_norm": 0.12589293718338013,
"learning_rate": 6.55242105263158e-05,
"loss": 10.3612,
"step": 76
},
{
"epoch": 0.021306032097399005,
"grad_norm": 0.11694139987230301,
"learning_rate": 6.499578947368422e-05,
"loss": 10.3498,
"step": 77
},
{
"epoch": 0.02158273381294964,
"grad_norm": 0.1161419078707695,
"learning_rate": 6.446736842105264e-05,
"loss": 10.3576,
"step": 78
},
{
"epoch": 0.021859435528500278,
"grad_norm": 0.1140846386551857,
"learning_rate": 6.393894736842105e-05,
"loss": 10.3473,
"step": 79
},
{
"epoch": 0.02213613724405091,
"grad_norm": 0.13525035977363586,
"learning_rate": 6.341052631578947e-05,
"loss": 10.3515,
"step": 80
},
{
"epoch": 0.02241283895960155,
"grad_norm": 0.0944841131567955,
"learning_rate": 6.288210526315789e-05,
"loss": 10.3523,
"step": 81
},
{
"epoch": 0.022689540675152185,
"grad_norm": 0.08372361958026886,
"learning_rate": 6.235368421052632e-05,
"loss": 10.3504,
"step": 82
},
{
"epoch": 0.022966242390702822,
"grad_norm": 0.13337409496307373,
"learning_rate": 6.182526315789474e-05,
"loss": 10.3573,
"step": 83
},
{
"epoch": 0.02324294410625346,
"grad_norm": 0.11504900455474854,
"learning_rate": 6.129684210526316e-05,
"loss": 10.3491,
"step": 84
},
{
"epoch": 0.023519645821804096,
"grad_norm": 0.16143518686294556,
"learning_rate": 6.076842105263158e-05,
"loss": 10.3413,
"step": 85
},
{
"epoch": 0.023796347537354733,
"grad_norm": 0.11960137635469437,
"learning_rate": 6.024e-05,
"loss": 10.3512,
"step": 86
},
{
"epoch": 0.02407304925290537,
"grad_norm": 0.1277434378862381,
"learning_rate": 5.971157894736842e-05,
"loss": 10.3543,
"step": 87
},
{
"epoch": 0.024349750968456003,
"grad_norm": 0.09521263837814331,
"learning_rate": 5.9183157894736835e-05,
"loss": 10.3453,
"step": 88
},
{
"epoch": 0.02462645268400664,
"grad_norm": 0.10589306801557541,
"learning_rate": 5.8654736842105267e-05,
"loss": 10.3464,
"step": 89
},
{
"epoch": 0.024903154399557276,
"grad_norm": 0.1161879375576973,
"learning_rate": 5.8126315789473684e-05,
"loss": 10.3511,
"step": 90
},
{
"epoch": 0.025179856115107913,
"grad_norm": 0.12451572716236115,
"learning_rate": 5.759789473684211e-05,
"loss": 10.349,
"step": 91
},
{
"epoch": 0.02545655783065855,
"grad_norm": 0.13021744787693024,
"learning_rate": 5.706947368421053e-05,
"loss": 10.3477,
"step": 92
},
{
"epoch": 0.025733259546209187,
"grad_norm": 0.15881933271884918,
"learning_rate": 5.6541052631578945e-05,
"loss": 10.3487,
"step": 93
},
{
"epoch": 0.026009961261759824,
"grad_norm": 0.1306811273097992,
"learning_rate": 5.601263157894736e-05,
"loss": 10.3508,
"step": 94
},
{
"epoch": 0.02628666297731046,
"grad_norm": 0.15235261619091034,
"learning_rate": 5.5484210526315794e-05,
"loss": 10.3483,
"step": 95
},
{
"epoch": 0.026563364692861097,
"grad_norm": 0.12139783054590225,
"learning_rate": 5.495578947368421e-05,
"loss": 10.3531,
"step": 96
},
{
"epoch": 0.02684006640841173,
"grad_norm": 0.18180833756923676,
"learning_rate": 5.442736842105264e-05,
"loss": 10.353,
"step": 97
},
{
"epoch": 0.027116768123962368,
"grad_norm": 0.18144264817237854,
"learning_rate": 5.3898947368421055e-05,
"loss": 10.351,
"step": 98
},
{
"epoch": 0.027393469839513004,
"grad_norm": 0.2712603211402893,
"learning_rate": 5.337052631578947e-05,
"loss": 10.3511,
"step": 99
},
{
"epoch": 0.02767017155506364,
"grad_norm": 0.17104001343250275,
"learning_rate": 5.284210526315789e-05,
"loss": 10.3478,
"step": 100
},
{
"epoch": 0.02767017155506364,
"eval_loss": 10.351698875427246,
"eval_runtime": 15.8519,
"eval_samples_per_second": 96.014,
"eval_steps_per_second": 24.035,
"step": 100
},
{
"epoch": 0.027946873270614278,
"grad_norm": 0.14280512928962708,
"learning_rate": 5.231368421052631e-05,
"loss": 10.3587,
"step": 101
},
{
"epoch": 0.028223574986164915,
"grad_norm": 0.11392911523580551,
"learning_rate": 5.178526315789474e-05,
"loss": 10.361,
"step": 102
},
{
"epoch": 0.02850027670171555,
"grad_norm": 0.11263832449913025,
"learning_rate": 5.1256842105263165e-05,
"loss": 10.3613,
"step": 103
},
{
"epoch": 0.02877697841726619,
"grad_norm": 0.13879412412643433,
"learning_rate": 5.072842105263158e-05,
"loss": 10.3606,
"step": 104
},
{
"epoch": 0.029053680132816822,
"grad_norm": 0.12684401869773865,
"learning_rate": 5.02e-05,
"loss": 10.3576,
"step": 105
},
{
"epoch": 0.02933038184836746,
"grad_norm": 0.11932656913995743,
"learning_rate": 4.967157894736842e-05,
"loss": 10.3547,
"step": 106
},
{
"epoch": 0.029607083563918096,
"grad_norm": 0.10179711133241653,
"learning_rate": 4.914315789473684e-05,
"loss": 10.3571,
"step": 107
},
{
"epoch": 0.029883785279468732,
"grad_norm": 0.12076177448034286,
"learning_rate": 4.861473684210526e-05,
"loss": 10.357,
"step": 108
},
{
"epoch": 0.03016048699501937,
"grad_norm": 0.07408250123262405,
"learning_rate": 4.8086315789473686e-05,
"loss": 10.3571,
"step": 109
},
{
"epoch": 0.030437188710570006,
"grad_norm": 0.12372420728206635,
"learning_rate": 4.7557894736842104e-05,
"loss": 10.3543,
"step": 110
},
{
"epoch": 0.030713890426120643,
"grad_norm": 0.08638908714056015,
"learning_rate": 4.702947368421053e-05,
"loss": 10.3485,
"step": 111
},
{
"epoch": 0.03099059214167128,
"grad_norm": 0.12098588794469833,
"learning_rate": 4.6501052631578946e-05,
"loss": 10.3535,
"step": 112
},
{
"epoch": 0.03126729385722191,
"grad_norm": 0.10554546117782593,
"learning_rate": 4.5972631578947364e-05,
"loss": 10.3475,
"step": 113
},
{
"epoch": 0.03154399557277255,
"grad_norm": 0.07617288827896118,
"learning_rate": 4.544421052631579e-05,
"loss": 10.3547,
"step": 114
},
{
"epoch": 0.03182069728832319,
"grad_norm": 0.10432225465774536,
"learning_rate": 4.4915789473684213e-05,
"loss": 10.3502,
"step": 115
},
{
"epoch": 0.032097399003873824,
"grad_norm": 0.09122877568006516,
"learning_rate": 4.438736842105263e-05,
"loss": 10.3551,
"step": 116
},
{
"epoch": 0.03237410071942446,
"grad_norm": 0.09355830401182175,
"learning_rate": 4.3858947368421056e-05,
"loss": 10.3508,
"step": 117
},
{
"epoch": 0.0326508024349751,
"grad_norm": 0.11909081041812897,
"learning_rate": 4.3330526315789474e-05,
"loss": 10.3474,
"step": 118
},
{
"epoch": 0.032927504150525734,
"grad_norm": 0.0938921570777893,
"learning_rate": 4.280210526315789e-05,
"loss": 10.3453,
"step": 119
},
{
"epoch": 0.03320420586607637,
"grad_norm": 0.09045830368995667,
"learning_rate": 4.2273684210526317e-05,
"loss": 10.3474,
"step": 120
},
{
"epoch": 0.03348090758162701,
"grad_norm": 0.11095123738050461,
"learning_rate": 4.174526315789474e-05,
"loss": 10.3544,
"step": 121
},
{
"epoch": 0.033757609297177645,
"grad_norm": 0.08681843429803848,
"learning_rate": 4.121684210526316e-05,
"loss": 10.3565,
"step": 122
},
{
"epoch": 0.03403431101272828,
"grad_norm": 0.06601341813802719,
"learning_rate": 4.068842105263158e-05,
"loss": 10.3457,
"step": 123
},
{
"epoch": 0.03431101272827892,
"grad_norm": 0.08264271169900894,
"learning_rate": 4.016e-05,
"loss": 10.3572,
"step": 124
},
{
"epoch": 0.034587714443829555,
"grad_norm": 0.06502602994441986,
"learning_rate": 3.963157894736842e-05,
"loss": 10.3484,
"step": 125
},
{
"epoch": 0.034864416159380185,
"grad_norm": 0.08391563594341278,
"learning_rate": 3.9103157894736844e-05,
"loss": 10.3514,
"step": 126
},
{
"epoch": 0.03514111787493082,
"grad_norm": 0.08374013006687164,
"learning_rate": 3.857473684210526e-05,
"loss": 10.3485,
"step": 127
},
{
"epoch": 0.03541781959048146,
"grad_norm": 0.09567815065383911,
"learning_rate": 3.804631578947369e-05,
"loss": 10.3476,
"step": 128
},
{
"epoch": 0.035694521306032095,
"grad_norm": 0.09184671938419342,
"learning_rate": 3.7517894736842105e-05,
"loss": 10.3477,
"step": 129
},
{
"epoch": 0.03597122302158273,
"grad_norm": 0.09309260547161102,
"learning_rate": 3.698947368421052e-05,
"loss": 10.3535,
"step": 130
},
{
"epoch": 0.03624792473713337,
"grad_norm": 0.11253216862678528,
"learning_rate": 3.646105263157895e-05,
"loss": 10.3553,
"step": 131
},
{
"epoch": 0.036524626452684006,
"grad_norm": 0.07496535778045654,
"learning_rate": 3.593263157894737e-05,
"loss": 10.3417,
"step": 132
},
{
"epoch": 0.03680132816823464,
"grad_norm": 0.11139296740293503,
"learning_rate": 3.540421052631579e-05,
"loss": 10.3447,
"step": 133
},
{
"epoch": 0.03707802988378528,
"grad_norm": 0.10377196967601776,
"learning_rate": 3.4875789473684215e-05,
"loss": 10.3484,
"step": 134
},
{
"epoch": 0.037354731599335916,
"grad_norm": 0.09641505777835846,
"learning_rate": 3.434736842105263e-05,
"loss": 10.3467,
"step": 135
},
{
"epoch": 0.03763143331488655,
"grad_norm": 0.1256895661354065,
"learning_rate": 3.381894736842105e-05,
"loss": 10.3542,
"step": 136
},
{
"epoch": 0.03790813503043719,
"grad_norm": 0.10770992189645767,
"learning_rate": 3.329052631578947e-05,
"loss": 10.3487,
"step": 137
},
{
"epoch": 0.03818483674598783,
"grad_norm": 0.11906769126653671,
"learning_rate": 3.27621052631579e-05,
"loss": 10.3482,
"step": 138
},
{
"epoch": 0.038461538461538464,
"grad_norm": 0.1397065669298172,
"learning_rate": 3.223368421052632e-05,
"loss": 10.353,
"step": 139
},
{
"epoch": 0.0387382401770891,
"grad_norm": 0.10772430896759033,
"learning_rate": 3.1705263157894736e-05,
"loss": 10.3423,
"step": 140
},
{
"epoch": 0.03901494189263974,
"grad_norm": 0.10745060443878174,
"learning_rate": 3.117684210526316e-05,
"loss": 10.351,
"step": 141
},
{
"epoch": 0.039291643608190374,
"grad_norm": 0.09514959156513214,
"learning_rate": 3.064842105263158e-05,
"loss": 10.3492,
"step": 142
},
{
"epoch": 0.039568345323741004,
"grad_norm": 0.16483455896377563,
"learning_rate": 3.012e-05,
"loss": 10.3444,
"step": 143
},
{
"epoch": 0.03984504703929164,
"grad_norm": 0.14660082757472992,
"learning_rate": 2.9591578947368418e-05,
"loss": 10.3414,
"step": 144
},
{
"epoch": 0.04012174875484228,
"grad_norm": 0.14518006145954132,
"learning_rate": 2.9063157894736842e-05,
"loss": 10.3534,
"step": 145
},
{
"epoch": 0.040398450470392915,
"grad_norm": 0.11700974404811859,
"learning_rate": 2.8534736842105264e-05,
"loss": 10.3541,
"step": 146
},
{
"epoch": 0.04067515218594355,
"grad_norm": 0.1969076544046402,
"learning_rate": 2.800631578947368e-05,
"loss": 10.3477,
"step": 147
},
{
"epoch": 0.04095185390149419,
"grad_norm": 0.18599028885364532,
"learning_rate": 2.7477894736842106e-05,
"loss": 10.3502,
"step": 148
},
{
"epoch": 0.041228555617044825,
"grad_norm": 0.12765298783779144,
"learning_rate": 2.6949473684210527e-05,
"loss": 10.3463,
"step": 149
},
{
"epoch": 0.04150525733259546,
"grad_norm": 0.22577303647994995,
"learning_rate": 2.6421052631578945e-05,
"loss": 10.3555,
"step": 150
},
{
"epoch": 0.04150525733259546,
"eval_loss": 10.350841522216797,
"eval_runtime": 15.8399,
"eval_samples_per_second": 96.087,
"eval_steps_per_second": 24.053,
"step": 150
},
{
"epoch": 0.0417819590481461,
"grad_norm": 0.13452677428722382,
"learning_rate": 2.589263157894737e-05,
"loss": 10.3604,
"step": 151
},
{
"epoch": 0.042058660763696736,
"grad_norm": 0.0959853008389473,
"learning_rate": 2.536421052631579e-05,
"loss": 10.3649,
"step": 152
},
{
"epoch": 0.04233536247924737,
"grad_norm": 0.0890377089381218,
"learning_rate": 2.483578947368421e-05,
"loss": 10.3554,
"step": 153
},
{
"epoch": 0.04261206419479801,
"grad_norm": 0.1184496209025383,
"learning_rate": 2.430736842105263e-05,
"loss": 10.3566,
"step": 154
},
{
"epoch": 0.042888765910348646,
"grad_norm": 0.13035479187965393,
"learning_rate": 2.3778947368421052e-05,
"loss": 10.3568,
"step": 155
},
{
"epoch": 0.04316546762589928,
"grad_norm": 0.1071864515542984,
"learning_rate": 2.3250526315789473e-05,
"loss": 10.3556,
"step": 156
},
{
"epoch": 0.04344216934144992,
"grad_norm": 0.06960742175579071,
"learning_rate": 2.2722105263157894e-05,
"loss": 10.352,
"step": 157
},
{
"epoch": 0.043718871057000556,
"grad_norm": 0.14237448573112488,
"learning_rate": 2.2193684210526316e-05,
"loss": 10.3574,
"step": 158
},
{
"epoch": 0.043995572772551186,
"grad_norm": 0.1190297082066536,
"learning_rate": 2.1665263157894737e-05,
"loss": 10.3545,
"step": 159
},
{
"epoch": 0.04427227448810182,
"grad_norm": 0.08851869404315948,
"learning_rate": 2.1136842105263158e-05,
"loss": 10.3461,
"step": 160
},
{
"epoch": 0.04454897620365246,
"grad_norm": 0.09421070665121078,
"learning_rate": 2.060842105263158e-05,
"loss": 10.352,
"step": 161
},
{
"epoch": 0.0448256779192031,
"grad_norm": 0.07470674067735672,
"learning_rate": 2.008e-05,
"loss": 10.3497,
"step": 162
},
{
"epoch": 0.045102379634753734,
"grad_norm": 0.09015386551618576,
"learning_rate": 1.9551578947368422e-05,
"loss": 10.3558,
"step": 163
},
{
"epoch": 0.04537908135030437,
"grad_norm": 0.0824267715215683,
"learning_rate": 1.9023157894736843e-05,
"loss": 10.3504,
"step": 164
},
{
"epoch": 0.04565578306585501,
"grad_norm": 0.11378297209739685,
"learning_rate": 1.849473684210526e-05,
"loss": 10.348,
"step": 165
},
{
"epoch": 0.045932484781405644,
"grad_norm": 0.08602961897850037,
"learning_rate": 1.7966315789473686e-05,
"loss": 10.3516,
"step": 166
},
{
"epoch": 0.04620918649695628,
"grad_norm": 0.0837302878499031,
"learning_rate": 1.7437894736842107e-05,
"loss": 10.3514,
"step": 167
},
{
"epoch": 0.04648588821250692,
"grad_norm": 0.143371120095253,
"learning_rate": 1.6909473684210525e-05,
"loss": 10.3527,
"step": 168
},
{
"epoch": 0.046762589928057555,
"grad_norm": 0.10025764256715775,
"learning_rate": 1.638105263157895e-05,
"loss": 10.3492,
"step": 169
},
{
"epoch": 0.04703929164360819,
"grad_norm": 0.10547541826963425,
"learning_rate": 1.5852631578947368e-05,
"loss": 10.3493,
"step": 170
},
{
"epoch": 0.04731599335915883,
"grad_norm": 0.10732828825712204,
"learning_rate": 1.532421052631579e-05,
"loss": 10.3481,
"step": 171
},
{
"epoch": 0.047592695074709465,
"grad_norm": 0.12645092606544495,
"learning_rate": 1.4795789473684209e-05,
"loss": 10.3532,
"step": 172
},
{
"epoch": 0.0478693967902601,
"grad_norm": 0.10770411044359207,
"learning_rate": 1.4267368421052632e-05,
"loss": 10.3473,
"step": 173
},
{
"epoch": 0.04814609850581074,
"grad_norm": 0.08041389286518097,
"learning_rate": 1.3738947368421053e-05,
"loss": 10.3516,
"step": 174
},
{
"epoch": 0.048422800221361376,
"grad_norm": 0.0928083285689354,
"learning_rate": 1.3210526315789473e-05,
"loss": 10.3511,
"step": 175
},
{
"epoch": 0.048699501936912006,
"grad_norm": 0.08434465527534485,
"learning_rate": 1.2682105263157896e-05,
"loss": 10.3501,
"step": 176
},
{
"epoch": 0.04897620365246264,
"grad_norm": 0.12187798321247101,
"learning_rate": 1.2153684210526315e-05,
"loss": 10.3501,
"step": 177
},
{
"epoch": 0.04925290536801328,
"grad_norm": 0.08970190584659576,
"learning_rate": 1.1625263157894737e-05,
"loss": 10.3475,
"step": 178
},
{
"epoch": 0.049529607083563916,
"grad_norm": 0.09689393639564514,
"learning_rate": 1.1096842105263158e-05,
"loss": 10.3447,
"step": 179
},
{
"epoch": 0.04980630879911455,
"grad_norm": 0.09856870025396347,
"learning_rate": 1.0568421052631579e-05,
"loss": 10.3467,
"step": 180
},
{
"epoch": 0.05008301051466519,
"grad_norm": 0.11689821630716324,
"learning_rate": 1.004e-05,
"loss": 10.353,
"step": 181
},
{
"epoch": 0.050359712230215826,
"grad_norm": 0.12540104985237122,
"learning_rate": 9.511578947368422e-06,
"loss": 10.3499,
"step": 182
},
{
"epoch": 0.05063641394576646,
"grad_norm": 0.10547714680433273,
"learning_rate": 8.983157894736843e-06,
"loss": 10.3543,
"step": 183
},
{
"epoch": 0.0509131156613171,
"grad_norm": 0.10751640051603317,
"learning_rate": 8.454736842105263e-06,
"loss": 10.3483,
"step": 184
},
{
"epoch": 0.05118981737686774,
"grad_norm": 0.12528543174266815,
"learning_rate": 7.926315789473684e-06,
"loss": 10.3456,
"step": 185
},
{
"epoch": 0.051466519092418374,
"grad_norm": 0.09134452790021896,
"learning_rate": 7.397894736842104e-06,
"loss": 10.3548,
"step": 186
},
{
"epoch": 0.05174322080796901,
"grad_norm": 0.08841915428638458,
"learning_rate": 6.8694736842105265e-06,
"loss": 10.3462,
"step": 187
},
{
"epoch": 0.05201992252351965,
"grad_norm": 0.1365995854139328,
"learning_rate": 6.341052631578948e-06,
"loss": 10.3504,
"step": 188
},
{
"epoch": 0.052296624239070284,
"grad_norm": 0.1279522180557251,
"learning_rate": 5.812631578947368e-06,
"loss": 10.3554,
"step": 189
},
{
"epoch": 0.05257332595462092,
"grad_norm": 0.10719075798988342,
"learning_rate": 5.2842105263157896e-06,
"loss": 10.346,
"step": 190
},
{
"epoch": 0.05285002767017156,
"grad_norm": 0.10895779728889465,
"learning_rate": 4.755789473684211e-06,
"loss": 10.3515,
"step": 191
},
{
"epoch": 0.053126729385722195,
"grad_norm": 0.09727931022644043,
"learning_rate": 4.227368421052631e-06,
"loss": 10.3445,
"step": 192
},
{
"epoch": 0.053403431101272825,
"grad_norm": 0.09920754283666611,
"learning_rate": 3.698947368421052e-06,
"loss": 10.3509,
"step": 193
},
{
"epoch": 0.05368013281682346,
"grad_norm": 0.12822380661964417,
"learning_rate": 3.170526315789474e-06,
"loss": 10.3469,
"step": 194
},
{
"epoch": 0.0539568345323741,
"grad_norm": 0.13037635385990143,
"learning_rate": 2.6421052631578948e-06,
"loss": 10.3498,
"step": 195
},
{
"epoch": 0.054233536247924735,
"grad_norm": 0.1337699145078659,
"learning_rate": 2.1136842105263157e-06,
"loss": 10.3471,
"step": 196
},
{
"epoch": 0.05451023796347537,
"grad_norm": 0.14256015419960022,
"learning_rate": 1.585263157894737e-06,
"loss": 10.3399,
"step": 197
},
{
"epoch": 0.05478693967902601,
"grad_norm": 0.23247487843036652,
"learning_rate": 1.0568421052631578e-06,
"loss": 10.3549,
"step": 198
},
{
"epoch": 0.055063641394576646,
"grad_norm": 0.19181080162525177,
"learning_rate": 5.284210526315789e-07,
"loss": 10.3417,
"step": 199
},
{
"epoch": 0.05534034311012728,
"grad_norm": 0.28272467851638794,
"learning_rate": 0.0,
"loss": 10.3468,
"step": 200
},
{
"epoch": 0.05534034311012728,
"eval_loss": 10.350366592407227,
"eval_runtime": 15.8466,
"eval_samples_per_second": 96.046,
"eval_steps_per_second": 24.043,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5605420695552.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}