{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 313,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0032,
"grad_norm": 15.524961471557617,
"learning_rate": 0.0,
"loss": 1.9735,
"step": 1
},
{
"epoch": 0.0064,
"grad_norm": 13.42629623413086,
"learning_rate": 1.5625e-06,
"loss": 1.7784,
"step": 2
},
{
"epoch": 0.0096,
"grad_norm": 13.618926048278809,
"learning_rate": 3.125e-06,
"loss": 1.8501,
"step": 3
},
{
"epoch": 0.0128,
"grad_norm": 13.631352424621582,
"learning_rate": 4.6875000000000004e-06,
"loss": 1.8382,
"step": 4
},
{
"epoch": 0.016,
"grad_norm": 12.379663467407227,
"learning_rate": 6.25e-06,
"loss": 1.7936,
"step": 5
},
{
"epoch": 0.0192,
"grad_norm": 9.427712440490723,
"learning_rate": 7.8125e-06,
"loss": 1.6236,
"step": 6
},
{
"epoch": 0.0224,
"grad_norm": 8.059680938720703,
"learning_rate": 9.375000000000001e-06,
"loss": 1.4075,
"step": 7
},
{
"epoch": 0.0256,
"grad_norm": 8.344525337219238,
"learning_rate": 1.09375e-05,
"loss": 1.5812,
"step": 8
},
{
"epoch": 0.0288,
"grad_norm": 8.296407699584961,
"learning_rate": 1.25e-05,
"loss": 1.4497,
"step": 9
},
{
"epoch": 0.032,
"grad_norm": 8.533961296081543,
"learning_rate": 1.4062500000000001e-05,
"loss": 1.3985,
"step": 10
},
{
"epoch": 0.0352,
"grad_norm": 6.942614555358887,
"learning_rate": 1.5625e-05,
"loss": 1.3606,
"step": 11
},
{
"epoch": 0.0384,
"grad_norm": 7.6024088859558105,
"learning_rate": 1.71875e-05,
"loss": 1.3757,
"step": 12
},
{
"epoch": 0.0416,
"grad_norm": 7.377843856811523,
"learning_rate": 1.8750000000000002e-05,
"loss": 1.3528,
"step": 13
},
{
"epoch": 0.0448,
"grad_norm": 7.739803791046143,
"learning_rate": 2.0312500000000002e-05,
"loss": 1.246,
"step": 14
},
{
"epoch": 0.048,
"grad_norm": 6.864417552947998,
"learning_rate": 2.1875e-05,
"loss": 1.2315,
"step": 15
},
{
"epoch": 0.0512,
"grad_norm": 7.203100681304932,
"learning_rate": 2.34375e-05,
"loss": 1.1748,
"step": 16
},
{
"epoch": 0.0544,
"grad_norm": 8.64692497253418,
"learning_rate": 2.5e-05,
"loss": 1.1669,
"step": 17
},
{
"epoch": 0.0576,
"grad_norm": 6.809157848358154,
"learning_rate": 2.6562500000000002e-05,
"loss": 1.0799,
"step": 18
},
{
"epoch": 0.0608,
"grad_norm": 8.925545692443848,
"learning_rate": 2.8125000000000003e-05,
"loss": 1.0562,
"step": 19
},
{
"epoch": 0.064,
"grad_norm": 5.894463539123535,
"learning_rate": 2.96875e-05,
"loss": 0.9397,
"step": 20
},
{
"epoch": 0.0672,
"grad_norm": 4.393126010894775,
"learning_rate": 3.125e-05,
"loss": 0.9909,
"step": 21
},
{
"epoch": 0.0704,
"grad_norm": 5.031020164489746,
"learning_rate": 3.2812500000000005e-05,
"loss": 0.9835,
"step": 22
},
{
"epoch": 0.0736,
"grad_norm": 5.137537956237793,
"learning_rate": 3.4375e-05,
"loss": 1.011,
"step": 23
},
{
"epoch": 0.0768,
"grad_norm": 4.404738903045654,
"learning_rate": 3.59375e-05,
"loss": 0.985,
"step": 24
},
{
"epoch": 0.08,
"grad_norm": 4.291604518890381,
"learning_rate": 3.7500000000000003e-05,
"loss": 0.933,
"step": 25
},
{
"epoch": 0.0832,
"grad_norm": 3.8908796310424805,
"learning_rate": 3.90625e-05,
"loss": 0.8894,
"step": 26
},
{
"epoch": 0.0864,
"grad_norm": 5.403290271759033,
"learning_rate": 4.0625000000000005e-05,
"loss": 0.9313,
"step": 27
},
{
"epoch": 0.0896,
"grad_norm": 4.857142448425293,
"learning_rate": 4.21875e-05,
"loss": 0.8802,
"step": 28
},
{
"epoch": 0.0928,
"grad_norm": 5.22381067276001,
"learning_rate": 4.375e-05,
"loss": 0.925,
"step": 29
},
{
"epoch": 0.096,
"grad_norm": 5.229774475097656,
"learning_rate": 4.5312500000000004e-05,
"loss": 0.9258,
"step": 30
},
{
"epoch": 0.0992,
"grad_norm": 4.439915180206299,
"learning_rate": 4.6875e-05,
"loss": 0.8377,
"step": 31
},
{
"epoch": 0.1024,
"grad_norm": 4.758205413818359,
"learning_rate": 4.8437500000000005e-05,
"loss": 0.8652,
"step": 32
},
{
"epoch": 0.1056,
"grad_norm": 4.30954122543335,
"learning_rate": 5e-05,
"loss": 0.884,
"step": 33
},
{
"epoch": 0.1088,
"grad_norm": 4.055234909057617,
"learning_rate": 4.999843759868819e-05,
"loss": 0.7903,
"step": 34
},
{
"epoch": 0.112,
"grad_norm": 5.193882942199707,
"learning_rate": 4.9993750590040575e-05,
"loss": 0.8985,
"step": 35
},
{
"epoch": 0.1152,
"grad_norm": 4.579808235168457,
"learning_rate": 4.998593955989626e-05,
"loss": 0.8734,
"step": 36
},
{
"epoch": 0.1184,
"grad_norm": 3.9402709007263184,
"learning_rate": 4.9975005484572305e-05,
"loss": 0.8567,
"step": 37
},
{
"epoch": 0.1216,
"grad_norm": 4.5101704597473145,
"learning_rate": 4.996094973074183e-05,
"loss": 0.9164,
"step": 38
},
{
"epoch": 0.1248,
"grad_norm": 4.826318264007568,
"learning_rate": 4.994377405526308e-05,
"loss": 0.8821,
"step": 39
},
{
"epoch": 0.128,
"grad_norm": 3.9617154598236084,
"learning_rate": 4.992348060495989e-05,
"loss": 0.883,
"step": 40
},
{
"epoch": 0.1312,
"grad_norm": 3.356626272201538,
"learning_rate": 4.990007191635334e-05,
"loss": 0.7852,
"step": 41
},
{
"epoch": 0.1344,
"grad_norm": 3.9243972301483154,
"learning_rate": 4.987355091534468e-05,
"loss": 0.8668,
"step": 42
},
{
"epoch": 0.1376,
"grad_norm": 3.6170945167541504,
"learning_rate": 4.9843920916849645e-05,
"loss": 0.8517,
"step": 43
},
{
"epoch": 0.1408,
"grad_norm": 4.146400451660156,
"learning_rate": 4.981118562438414e-05,
"loss": 0.9207,
"step": 44
},
{
"epoch": 0.144,
"grad_norm": 4.426783561706543,
"learning_rate": 4.9775349129601243e-05,
"loss": 0.8918,
"step": 45
},
{
"epoch": 0.1472,
"grad_norm": 3.632885217666626,
"learning_rate": 4.973641591177991e-05,
"loss": 0.8404,
"step": 46
},
{
"epoch": 0.1504,
"grad_norm": 4.035740375518799,
"learning_rate": 4.969439083726496e-05,
"loss": 0.8945,
"step": 47
},
{
"epoch": 0.1536,
"grad_norm": 3.7760114669799805,
"learning_rate": 4.964927915885893e-05,
"loss": 0.7685,
"step": 48
},
{
"epoch": 0.1568,
"grad_norm": 3.9631412029266357,
"learning_rate": 4.960108651516545e-05,
"loss": 0.8538,
"step": 49
},
{
"epoch": 0.16,
"grad_norm": 3.716055393218994,
"learning_rate": 4.954981892988451e-05,
"loss": 0.8328,
"step": 50
},
{
"epoch": 0.1632,
"grad_norm": 4.943089485168457,
"learning_rate": 4.949548281105951e-05,
"loss": 0.8006,
"step": 51
},
{
"epoch": 0.1664,
"grad_norm": 3.410830497741699,
"learning_rate": 4.943808495027631e-05,
"loss": 0.8173,
"step": 52
},
{
"epoch": 0.1696,
"grad_norm": 3.5364482402801514,
"learning_rate": 4.937763252181434e-05,
"loss": 0.8017,
"step": 53
},
{
"epoch": 0.1728,
"grad_norm": 3.6835520267486572,
"learning_rate": 4.93141330817499e-05,
"loss": 0.8256,
"step": 54
},
{
"epoch": 0.176,
"grad_norm": 3.966698408126831,
"learning_rate": 4.924759456701167e-05,
"loss": 0.8032,
"step": 55
},
{
"epoch": 0.1792,
"grad_norm": 4.649932384490967,
"learning_rate": 4.917802529438864e-05,
"loss": 0.9344,
"step": 56
},
{
"epoch": 0.1824,
"grad_norm": 3.5674755573272705,
"learning_rate": 4.910543395949067e-05,
"loss": 0.8222,
"step": 57
},
{
"epoch": 0.1856,
"grad_norm": 4.073423862457275,
"learning_rate": 4.9029829635661475e-05,
"loss": 0.8949,
"step": 58
},
{
"epoch": 0.1888,
"grad_norm": 4.209728717803955,
"learning_rate": 4.895122177284465e-05,
"loss": 0.8474,
"step": 59
},
{
"epoch": 0.192,
"grad_norm": 4.795137882232666,
"learning_rate": 4.8869620196402436e-05,
"loss": 0.861,
"step": 60
},
{
"epoch": 0.1952,
"grad_norm": 3.300403594970703,
"learning_rate": 4.878503510588765e-05,
"loss": 0.828,
"step": 61
},
{
"epoch": 0.1984,
"grad_norm": 3.2910377979278564,
"learning_rate": 4.8697477073768766e-05,
"loss": 0.8032,
"step": 62
},
{
"epoch": 0.2016,
"grad_norm": 3.4420082569122314,
"learning_rate": 4.8606957044108556e-05,
"loss": 0.7789,
"step": 63
},
{
"epoch": 0.2048,
"grad_norm": 3.449355363845825,
"learning_rate": 4.851348633119606e-05,
"loss": 0.6904,
"step": 64
},
{
"epoch": 0.208,
"grad_norm": 3.5272228717803955,
"learning_rate": 4.8417076618132426e-05,
"loss": 0.7907,
"step": 65
},
{
"epoch": 0.2112,
"grad_norm": 3.2754251956939697,
"learning_rate": 4.8317739955370636e-05,
"loss": 0.7932,
"step": 66
},
{
"epoch": 0.2144,
"grad_norm": 3.522357702255249,
"learning_rate": 4.821548875920927e-05,
"loss": 0.7782,
"step": 67
},
{
"epoch": 0.2176,
"grad_norm": 3.652512311935425,
"learning_rate": 4.811033581024056e-05,
"loss": 0.7733,
"step": 68
},
{
"epoch": 0.2208,
"grad_norm": 3.8177380561828613,
"learning_rate": 4.800229425175294e-05,
"loss": 0.7116,
"step": 69
},
{
"epoch": 0.224,
"grad_norm": 4.430614948272705,
"learning_rate": 4.7891377588088223e-05,
"loss": 0.8089,
"step": 70
},
{
"epoch": 0.2272,
"grad_norm": 4.25429630279541,
"learning_rate": 4.777759968295369e-05,
"loss": 0.7455,
"step": 71
},
{
"epoch": 0.2304,
"grad_norm": 3.8671886920928955,
"learning_rate": 4.766097475768919e-05,
"loss": 0.8566,
"step": 72
},
{
"epoch": 0.2336,
"grad_norm": 4.336380481719971,
"learning_rate": 4.754151738948962e-05,
"loss": 0.8277,
"step": 73
},
{
"epoch": 0.2368,
"grad_norm": 5.870267391204834,
"learning_rate": 4.741924250958289e-05,
"loss": 0.7712,
"step": 74
},
{
"epoch": 0.24,
"grad_norm": 5.58464241027832,
"learning_rate": 4.729416540136361e-05,
"loss": 0.837,
"step": 75
},
{
"epoch": 0.2432,
"grad_norm": 6.278571128845215,
"learning_rate": 4.7166301698482815e-05,
"loss": 0.7804,
"step": 76
},
{
"epoch": 0.2464,
"grad_norm": 3.8727006912231445,
"learning_rate": 4.703566738289389e-05,
"loss": 0.7113,
"step": 77
},
{
"epoch": 0.2496,
"grad_norm": 2.9273529052734375,
"learning_rate": 4.69022787828549e-05,
"loss": 0.7402,
"step": 78
},
{
"epoch": 0.2528,
"grad_norm": 4.868607997894287,
"learning_rate": 4.676615257088776e-05,
"loss": 0.7648,
"step": 79
},
{
"epoch": 0.256,
"grad_norm": 4.873749732971191,
"learning_rate": 4.662730576169423e-05,
"loss": 0.8317,
"step": 80
},
{
"epoch": 0.2592,
"grad_norm": 3.7857329845428467,
"learning_rate": 4.6485755710029256e-05,
"loss": 0.7691,
"step": 81
},
{
"epoch": 0.2624,
"grad_norm": 4.516591548919678,
"learning_rate": 4.6341520108531746e-05,
"loss": 0.8074,
"step": 82
},
{
"epoch": 0.2656,
"grad_norm": 2.922186851501465,
"learning_rate": 4.619461698551315e-05,
"loss": 0.6979,
"step": 83
},
{
"epoch": 0.2688,
"grad_norm": 3.1487746238708496,
"learning_rate": 4.604506470270403e-05,
"loss": 0.8106,
"step": 84
},
{
"epoch": 0.272,
"grad_norm": 3.4952852725982666,
"learning_rate": 4.589288195295901e-05,
"loss": 0.8193,
"step": 85
},
{
"epoch": 0.2752,
"grad_norm": 3.446680784225464,
"learning_rate": 4.573808775792033e-05,
"loss": 0.6519,
"step": 86
},
{
"epoch": 0.2784,
"grad_norm": 4.01785135269165,
"learning_rate": 4.5580701465640254e-05,
"loss": 0.7972,
"step": 87
},
{
"epoch": 0.2816,
"grad_norm": 3.0481715202331543,
"learning_rate": 4.5420742748162734e-05,
"loss": 0.8058,
"step": 88
},
{
"epoch": 0.2848,
"grad_norm": 3.555839776992798,
"learning_rate": 4.525823159906459e-05,
"loss": 0.8275,
"step": 89
},
{
"epoch": 0.288,
"grad_norm": 3.641484260559082,
"learning_rate": 4.509318833095642e-05,
"loss": 0.7904,
"step": 90
},
{
"epoch": 0.2912,
"grad_norm": 3.2470881938934326,
"learning_rate": 4.492563357294369e-05,
"loss": 0.7142,
"step": 91
},
{
"epoch": 0.2944,
"grad_norm": 2.9820683002471924,
"learning_rate": 4.475558826804833e-05,
"loss": 0.7862,
"step": 92
},
{
"epoch": 0.2976,
"grad_norm": 3.468200206756592,
"learning_rate": 4.458307367059092e-05,
"loss": 0.7648,
"step": 93
},
{
"epoch": 0.3008,
"grad_norm": 3.363412380218506,
"learning_rate": 4.440811134353412e-05,
"loss": 0.7217,
"step": 94
},
{
"epoch": 0.304,
"grad_norm": 3.7876627445220947,
"learning_rate": 4.42307231557875e-05,
"loss": 0.7541,
"step": 95
},
{
"epoch": 0.3072,
"grad_norm": 3.116093397140503,
"learning_rate": 4.4050931279474015e-05,
"loss": 0.7603,
"step": 96
},
{
"epoch": 0.3104,
"grad_norm": 3.113248109817505,
"learning_rate": 4.386875818715874e-05,
"loss": 0.6,
"step": 97
},
{
"epoch": 0.3136,
"grad_norm": 3.3155691623687744,
"learning_rate": 4.368422664903997e-05,
"loss": 0.7264,
"step": 98
},
{
"epoch": 0.3168,
"grad_norm": 4.281957626342773,
"learning_rate": 4.349735973010305e-05,
"loss": 0.8178,
"step": 99
},
{
"epoch": 0.32,
"grad_norm": 4.118067264556885,
"learning_rate": 4.330818078723755e-05,
"loss": 0.7394,
"step": 100
},
{
"epoch": 0.3232,
"grad_norm": 3.7865848541259766,
"learning_rate": 4.311671346631774e-05,
"loss": 0.8166,
"step": 101
},
{
"epoch": 0.3264,
"grad_norm": 3.1080141067504883,
"learning_rate": 4.292298169924709e-05,
"loss": 0.6196,
"step": 102
},
{
"epoch": 0.3296,
"grad_norm": 2.8744795322418213,
"learning_rate": 4.272700970096696e-05,
"loss": 0.7371,
"step": 103
},
{
"epoch": 0.3328,
"grad_norm": 3.6204240322113037,
"learning_rate": 4.252882196642992e-05,
"loss": 0.7253,
"step": 104
},
{
"epoch": 0.336,
"grad_norm": 2.7233681678771973,
"learning_rate": 4.23284432675381e-05,
"loss": 0.6528,
"step": 105
},
{
"epoch": 0.3392,
"grad_norm": 2.825315237045288,
"learning_rate": 4.212589865004684e-05,
"loss": 0.6597,
"step": 106
},
{
"epoch": 0.3424,
"grad_norm": 3.6910030841827393,
"learning_rate": 4.192121343043424e-05,
"loss": 0.6703,
"step": 107
},
{
"epoch": 0.3456,
"grad_norm": 3.4581615924835205,
"learning_rate": 4.1714413192736754e-05,
"loss": 0.759,
"step": 108
},
{
"epoch": 0.3488,
"grad_norm": 4.329128265380859,
"learning_rate": 4.150552378535137e-05,
"loss": 0.7174,
"step": 109
},
{
"epoch": 0.352,
"grad_norm": 3.2338504791259766,
"learning_rate": 4.1294571317804854e-05,
"loss": 0.7708,
"step": 110
},
{
"epoch": 0.3552,
"grad_norm": 3.127150774002075,
"learning_rate": 4.108158215749014e-05,
"loss": 0.6826,
"step": 111
},
{
"epoch": 0.3584,
"grad_norm": 3.5068857669830322,
"learning_rate": 4.0866582926370725e-05,
"loss": 0.6749,
"step": 112
},
{
"epoch": 0.3616,
"grad_norm": 3.0821373462677,
"learning_rate": 4.064960049765304e-05,
"loss": 0.7902,
"step": 113
},
{
"epoch": 0.3648,
"grad_norm": 3.2901744842529297,
"learning_rate": 4.043066199242762e-05,
"loss": 0.7017,
"step": 114
},
{
"epoch": 0.368,
"grad_norm": 3.2184112071990967,
"learning_rate": 4.020979477627907e-05,
"loss": 0.7647,
"step": 115
},
{
"epoch": 0.3712,
"grad_norm": 3.092893600463867,
"learning_rate": 3.998702645586565e-05,
"loss": 0.6897,
"step": 116
},
{
"epoch": 0.3744,
"grad_norm": 3.3295419216156006,
"learning_rate": 3.976238487546864e-05,
"loss": 0.8187,
"step": 117
},
{
"epoch": 0.3776,
"grad_norm": 2.6530091762542725,
"learning_rate": 3.953589811351204e-05,
"loss": 0.7974,
"step": 118
},
{
"epoch": 0.3808,
"grad_norm": 3.290668487548828,
"learning_rate": 3.930759447905298e-05,
"loss": 0.7172,
"step": 119
},
{
"epoch": 0.384,
"grad_norm": 2.70243763923645,
"learning_rate": 3.907750250824327e-05,
"loss": 0.6843,
"step": 120
},
{
"epoch": 0.3872,
"grad_norm": 3.1110892295837402,
"learning_rate": 3.884565096076269e-05,
"loss": 0.7896,
"step": 121
},
{
"epoch": 0.3904,
"grad_norm": 2.7878258228302,
"learning_rate": 3.861206881622419e-05,
"loss": 0.7505,
"step": 122
},
{
"epoch": 0.3936,
"grad_norm": 3.118669271469116,
"learning_rate": 3.837678527055168e-05,
"loss": 0.6734,
"step": 123
},
{
"epoch": 0.3968,
"grad_norm": 3.1683592796325684,
"learning_rate": 3.813982973233083e-05,
"loss": 0.7451,
"step": 124
},
{
"epoch": 0.4,
"grad_norm": 4.442941665649414,
"learning_rate": 3.7901231819133105e-05,
"loss": 0.8121,
"step": 125
},
{
"epoch": 0.4032,
"grad_norm": 2.806699275970459,
"learning_rate": 3.766102135381393e-05,
"loss": 0.6691,
"step": 126
},
{
"epoch": 0.4064,
"grad_norm": 3.6106672286987305,
"learning_rate": 3.741922836078499e-05,
"loss": 0.6728,
"step": 127
},
{
"epoch": 0.4096,
"grad_norm": 3.241565465927124,
"learning_rate": 3.717588306226143e-05,
"loss": 0.7116,
"step": 128
},
{
"epoch": 0.4128,
"grad_norm": 2.966460704803467,
"learning_rate": 3.693101587448436e-05,
"loss": 0.6842,
"step": 129
},
{
"epoch": 0.416,
"grad_norm": 2.9147496223449707,
"learning_rate": 3.6684657403919005e-05,
"loss": 0.725,
"step": 130
},
{
"epoch": 0.4192,
"grad_norm": 3.226004123687744,
"learning_rate": 3.6436838443429175e-05,
"loss": 0.6485,
"step": 131
},
{
"epoch": 0.4224,
"grad_norm": 4.062314510345459,
"learning_rate": 3.618758996842839e-05,
"loss": 0.7387,
"step": 132
},
{
"epoch": 0.4256,
"grad_norm": 2.843627691268921,
"learning_rate": 3.5936943133008183e-05,
"loss": 0.6621,
"step": 133
},
{
"epoch": 0.4288,
"grad_norm": 3.151275396347046,
"learning_rate": 3.568492926604412e-05,
"loss": 0.733,
"step": 134
},
{
"epoch": 0.432,
"grad_norm": 2.40847110748291,
"learning_rate": 3.5431579867279905e-05,
"loss": 0.6433,
"step": 135
},
{
"epoch": 0.4352,
"grad_norm": 3.5088706016540527,
"learning_rate": 3.517692660339018e-05,
"loss": 0.8031,
"step": 136
},
{
"epoch": 0.4384,
"grad_norm": 2.9651691913604736,
"learning_rate": 3.492100130402242e-05,
"loss": 0.6583,
"step": 137
},
{
"epoch": 0.4416,
"grad_norm": 2.8937885761260986,
"learning_rate": 3.4663835957818515e-05,
"loss": 0.6887,
"step": 138
},
{
"epoch": 0.4448,
"grad_norm": 3.493434429168701,
"learning_rate": 3.440546270841639e-05,
"loss": 0.7076,
"step": 139
},
{
"epoch": 0.448,
"grad_norm": 2.6669135093688965,
"learning_rate": 3.414591385043237e-05,
"loss": 0.7377,
"step": 140
},
{
"epoch": 0.4512,
"grad_norm": 3.7485995292663574,
"learning_rate": 3.3885221825424537e-05,
"loss": 0.6898,
"step": 141
},
{
"epoch": 0.4544,
"grad_norm": 2.8648860454559326,
"learning_rate": 3.362341921783784e-05,
"loss": 0.7099,
"step": 142
},
{
"epoch": 0.4576,
"grad_norm": 3.085545063018799,
"learning_rate": 3.336053875093128e-05,
"loss": 0.6491,
"step": 143
},
{
"epoch": 0.4608,
"grad_norm": 2.9615533351898193,
"learning_rate": 3.309661328268776e-05,
"loss": 0.6848,
"step": 144
},
{
"epoch": 0.464,
"grad_norm": 2.989152431488037,
"learning_rate": 3.283167580170712e-05,
"loss": 0.753,
"step": 145
},
{
"epoch": 0.4672,
"grad_norm": 2.6297624111175537,
"learning_rate": 3.256575942308278e-05,
"loss": 0.6575,
"step": 146
},
{
"epoch": 0.4704,
"grad_norm": 3.077404022216797,
"learning_rate": 3.229889738426264e-05,
"loss": 0.7943,
"step": 147
},
{
"epoch": 0.4736,
"grad_norm": 3.0161232948303223,
"learning_rate": 3.203112304089466e-05,
"loss": 0.665,
"step": 148
},
{
"epoch": 0.4768,
"grad_norm": 3.0550014972686768,
"learning_rate": 3.176246986265767e-05,
"loss": 0.7091,
"step": 149
},
{
"epoch": 0.48,
"grad_norm": 3.3446528911590576,
"learning_rate": 3.149297142907792e-05,
"loss": 0.7586,
"step": 150
},
{
"epoch": 0.4832,
"grad_norm": 2.50750470161438,
"learning_rate": 3.122266142533191e-05,
"loss": 0.6006,
"step": 151
},
{
"epoch": 0.4864,
"grad_norm": 2.2805991172790527,
"learning_rate": 3.095157363803598e-05,
"loss": 0.6918,
"step": 152
},
{
"epoch": 0.4896,
"grad_norm": 2.982093572616577,
"learning_rate": 3.06797419510233e-05,
"loss": 0.6261,
"step": 153
},
{
"epoch": 0.4928,
"grad_norm": 3.545555591583252,
"learning_rate": 3.0407200341108617e-05,
"loss": 0.5964,
"step": 154
},
{
"epoch": 0.496,
"grad_norm": 3.1337227821350098,
"learning_rate": 3.013398287384144e-05,
"loss": 0.6798,
"step": 155
},
{
"epoch": 0.4992,
"grad_norm": 3.5548670291900635,
"learning_rate": 2.986012369924811e-05,
"loss": 0.72,
"step": 156
},
{
"epoch": 0.5024,
"grad_norm": 2.6750857830047607,
"learning_rate": 2.9585657047563315e-05,
"loss": 0.7358,
"step": 157
},
{
"epoch": 0.5056,
"grad_norm": 3.0403218269348145,
"learning_rate": 2.931061722495159e-05,
"loss": 0.6558,
"step": 158
},
{
"epoch": 0.5088,
"grad_norm": 2.5860700607299805,
"learning_rate": 2.9035038609219306e-05,
"loss": 0.6607,
"step": 159
},
{
"epoch": 0.512,
"grad_norm": 2.983950138092041,
"learning_rate": 2.875895564551772e-05,
"loss": 0.7403,
"step": 160
},
{
"epoch": 0.5152,
"grad_norm": 2.4601857662200928,
"learning_rate": 2.8482402842037614e-05,
"loss": 0.581,
"step": 161
},
{
"epoch": 0.5184,
"grad_norm": 2.6015350818634033,
"learning_rate": 2.8205414765696003e-05,
"loss": 0.6462,
"step": 162
},
{
"epoch": 0.5216,
"grad_norm": 3.2562873363494873,
"learning_rate": 2.792802603781562e-05,
"loss": 0.6778,
"step": 163
},
{
"epoch": 0.5248,
"grad_norm": 3.0855162143707275,
"learning_rate": 2.7650271329797427e-05,
"loss": 0.6187,
"step": 164
},
{
"epoch": 0.528,
"grad_norm": 2.281909942626953,
"learning_rate": 2.737218535878705e-05,
"loss": 0.6209,
"step": 165
},
{
"epoch": 0.5312,
"grad_norm": 2.689497232437134,
"learning_rate": 2.7093802883335357e-05,
"loss": 0.6529,
"step": 166
},
{
"epoch": 0.5344,
"grad_norm": 2.919034719467163,
"learning_rate": 2.6815158699053932e-05,
"loss": 0.6751,
"step": 167
},
{
"epoch": 0.5376,
"grad_norm": 2.32546067237854,
"learning_rate": 2.6536287634265918e-05,
"loss": 0.5795,
"step": 168
},
{
"epoch": 0.5408,
"grad_norm": 2.528740167617798,
"learning_rate": 2.6257224545652688e-05,
"loss": 0.6675,
"step": 169
},
{
"epoch": 0.544,
"grad_norm": 2.651212692260742,
"learning_rate": 2.5978004313897104e-05,
"loss": 0.6048,
"step": 170
},
{
"epoch": 0.5472,
"grad_norm": 2.5883514881134033,
"learning_rate": 2.569866183932368e-05,
"loss": 0.584,
"step": 171
},
{
"epoch": 0.5504,
"grad_norm": 2.2905187606811523,
"learning_rate": 2.5419232037536316e-05,
"loss": 0.6269,
"step": 172
},
{
"epoch": 0.5536,
"grad_norm": 3.2044761180877686,
"learning_rate": 2.5139749835054123e-05,
"loss": 0.6114,
"step": 173
},
{
"epoch": 0.5568,
"grad_norm": 2.599435806274414,
"learning_rate": 2.4860250164945876e-05,
"loss": 0.6962,
"step": 174
},
{
"epoch": 0.56,
"grad_norm": 2.6137402057647705,
"learning_rate": 2.4580767962463687e-05,
"loss": 0.614,
"step": 175
},
{
"epoch": 0.5632,
"grad_norm": 2.9657676219940186,
"learning_rate": 2.4301338160676324e-05,
"loss": 0.6616,
"step": 176
},
{
"epoch": 0.5664,
"grad_norm": 2.5792715549468994,
"learning_rate": 2.40219956861029e-05,
"loss": 0.6584,
"step": 177
},
{
"epoch": 0.5696,
"grad_norm": 2.874328851699829,
"learning_rate": 2.374277545434732e-05,
"loss": 0.6192,
"step": 178
},
{
"epoch": 0.5728,
"grad_norm": 2.860802412033081,
"learning_rate": 2.346371236573409e-05,
"loss": 0.7215,
"step": 179
},
{
"epoch": 0.576,
"grad_norm": 2.5315871238708496,
"learning_rate": 2.318484130094607e-05,
"loss": 0.6511,
"step": 180
},
{
"epoch": 0.5792,
"grad_norm": 2.4689619541168213,
"learning_rate": 2.2906197116664653e-05,
"loss": 0.5986,
"step": 181
},
{
"epoch": 0.5824,
"grad_norm": 2.6381800174713135,
"learning_rate": 2.262781464121296e-05,
"loss": 0.6328,
"step": 182
},
{
"epoch": 0.5856,
"grad_norm": 2.6276397705078125,
"learning_rate": 2.2349728670202582e-05,
"loss": 0.6409,
"step": 183
},
{
"epoch": 0.5888,
"grad_norm": 2.783932685852051,
"learning_rate": 2.2071973962184384e-05,
"loss": 0.6556,
"step": 184
},
{
"epoch": 0.592,
"grad_norm": 3.184654474258423,
"learning_rate": 2.1794585234303993e-05,
"loss": 0.6706,
"step": 185
},
{
"epoch": 0.5952,
"grad_norm": 2.7625272274017334,
"learning_rate": 2.1517597157962392e-05,
"loss": 0.5842,
"step": 186
},
{
"epoch": 0.5984,
"grad_norm": 2.9620823860168457,
"learning_rate": 2.124104435448228e-05,
"loss": 0.622,
"step": 187
},
{
"epoch": 0.6016,
"grad_norm": 3.4759445190429688,
"learning_rate": 2.0964961390780703e-05,
"loss": 0.6734,
"step": 188
},
{
"epoch": 0.6048,
"grad_norm": 2.639575481414795,
"learning_rate": 2.0689382775048418e-05,
"loss": 0.6005,
"step": 189
},
{
"epoch": 0.608,
"grad_norm": 2.5835771560668945,
"learning_rate": 2.0414342952436694e-05,
"loss": 0.6009,
"step": 190
},
{
"epoch": 0.6112,
"grad_norm": 2.8783106803894043,
"learning_rate": 2.0139876300751904e-05,
"loss": 0.6493,
"step": 191
},
{
"epoch": 0.6144,
"grad_norm": 2.6388046741485596,
"learning_rate": 1.9866017126158574e-05,
"loss": 0.591,
"step": 192
},
{
"epoch": 0.6176,
"grad_norm": 2.5275888442993164,
"learning_rate": 1.9592799658891385e-05,
"loss": 0.5525,
"step": 193
},
{
"epoch": 0.6208,
"grad_norm": 2.582974672317505,
"learning_rate": 1.9320258048976702e-05,
"loss": 0.5525,
"step": 194
},
{
"epoch": 0.624,
"grad_norm": 2.4387340545654297,
"learning_rate": 1.904842636196402e-05,
"loss": 0.6502,
"step": 195
},
{
"epoch": 0.6272,
"grad_norm": 3.4080708026885986,
"learning_rate": 1.8777338574668095e-05,
"loss": 0.5925,
"step": 196
},
{
"epoch": 0.6304,
"grad_norm": 2.7693426609039307,
"learning_rate": 1.850702857092208e-05,
"loss": 0.6189,
"step": 197
},
{
"epoch": 0.6336,
"grad_norm": 2.9749882221221924,
"learning_rate": 1.8237530137342335e-05,
"loss": 0.6079,
"step": 198
},
{
"epoch": 0.6368,
"grad_norm": 2.405189275741577,
"learning_rate": 1.796887695910535e-05,
"loss": 0.4882,
"step": 199
},
{
"epoch": 0.64,
"grad_norm": 2.856356143951416,
"learning_rate": 1.7701102615737368e-05,
"loss": 0.662,
"step": 200
},
{
"epoch": 0.6432,
"grad_norm": 3.5571553707122803,
"learning_rate": 1.7434240576917226e-05,
"loss": 0.6705,
"step": 201
},
{
"epoch": 0.6464,
"grad_norm": 2.9038398265838623,
"learning_rate": 1.7168324198292888e-05,
"loss": 0.647,
"step": 202
},
{
"epoch": 0.6496,
"grad_norm": 2.6858866214752197,
"learning_rate": 1.6903386717312236e-05,
"loss": 0.5854,
"step": 203
},
{
"epoch": 0.6528,
"grad_norm": 2.881903648376465,
"learning_rate": 1.6639461249068726e-05,
"loss": 0.6802,
"step": 204
},
{
"epoch": 0.656,
"grad_norm": 2.7893338203430176,
"learning_rate": 1.637658078216217e-05,
"loss": 0.6101,
"step": 205
},
{
"epoch": 0.6592,
"grad_norm": 2.7626793384552,
"learning_rate": 1.6114778174575473e-05,
"loss": 0.5851,
"step": 206
},
{
"epoch": 0.6624,
"grad_norm": 2.4826924800872803,
"learning_rate": 1.585408614956763e-05,
"loss": 0.5924,
"step": 207
},
{
"epoch": 0.6656,
"grad_norm": 2.7100701332092285,
"learning_rate": 1.559453729158361e-05,
"loss": 0.5931,
"step": 208
},
{
"epoch": 0.6688,
"grad_norm": 2.625746488571167,
"learning_rate": 1.5336164042181494e-05,
"loss": 0.5665,
"step": 209
},
{
"epoch": 0.672,
"grad_norm": 2.8717494010925293,
"learning_rate": 1.5078998695977586e-05,
"loss": 0.6568,
"step": 210
},
{
"epoch": 0.6752,
"grad_norm": 2.610119581222534,
"learning_rate": 1.482307339660983e-05,
"loss": 0.5354,
"step": 211
},
{
"epoch": 0.6784,
"grad_norm": 2.783958911895752,
"learning_rate": 1.4568420132720106e-05,
"loss": 0.5658,
"step": 212
},
{
"epoch": 0.6816,
"grad_norm": 2.3880062103271484,
"learning_rate": 1.4315070733955888e-05,
"loss": 0.5361,
"step": 213
},
{
"epoch": 0.6848,
"grad_norm": 2.3239946365356445,
"learning_rate": 1.4063056866991826e-05,
"loss": 0.5477,
"step": 214
},
{
"epoch": 0.688,
"grad_norm": 3.000854730606079,
"learning_rate": 1.381241003157162e-05,
"loss": 0.5555,
"step": 215
},
{
"epoch": 0.6912,
"grad_norm": 2.6521692276000977,
"learning_rate": 1.3563161556570826e-05,
"loss": 0.5702,
"step": 216
},
{
"epoch": 0.6944,
"grad_norm": 2.789949417114258,
"learning_rate": 1.3315342596080996e-05,
"loss": 0.5894,
"step": 217
},
{
"epoch": 0.6976,
"grad_norm": 2.630718231201172,
"learning_rate": 1.3068984125515644e-05,
"loss": 0.5601,
"step": 218
},
{
"epoch": 0.7008,
"grad_norm": 2.7398900985717773,
"learning_rate": 1.2824116937738579e-05,
"loss": 0.6077,
"step": 219
},
{
"epoch": 0.704,
"grad_norm": 2.9107697010040283,
"learning_rate": 1.2580771639215027e-05,
"loss": 0.5389,
"step": 220
},
{
"epoch": 0.7072,
"grad_norm": 5.506183624267578,
"learning_rate": 1.2338978646186084e-05,
"loss": 0.565,
"step": 221
},
{
"epoch": 0.7104,
"grad_norm": 2.896214008331299,
"learning_rate": 1.2098768180866895e-05,
"loss": 0.5993,
"step": 222
},
{
"epoch": 0.7136,
"grad_norm": 2.5421102046966553,
"learning_rate": 1.1860170267669174e-05,
"loss": 0.6117,
"step": 223
},
{
"epoch": 0.7168,
"grad_norm": 3.075676679611206,
"learning_rate": 1.1623214729448317e-05,
"loss": 0.5713,
"step": 224
},
{
"epoch": 0.72,
"grad_norm": 2.6561267375946045,
"learning_rate": 1.1387931183775822e-05,
"loss": 0.553,
"step": 225
},
{
"epoch": 0.7232,
"grad_norm": 2.5555975437164307,
"learning_rate": 1.1154349039237322e-05,
"loss": 0.5496,
"step": 226
},
{
"epoch": 0.7264,
"grad_norm": 2.842125177383423,
"learning_rate": 1.0922497491756734e-05,
"loss": 0.6698,
"step": 227
},
{
"epoch": 0.7296,
"grad_norm": 3.4003820419311523,
"learning_rate": 1.0692405520947028e-05,
"loss": 0.6218,
"step": 228
},
{
"epoch": 0.7328,
"grad_norm": 2.400970697402954,
"learning_rate": 1.0464101886487958e-05,
"loss": 0.5709,
"step": 229
},
{
"epoch": 0.736,
"grad_norm": 2.4426651000976562,
"learning_rate": 1.0237615124531363e-05,
"loss": 0.5921,
"step": 230
},
{
"epoch": 0.7392,
"grad_norm": 3.67607045173645,
"learning_rate": 1.0012973544134358e-05,
"loss": 0.5904,
"step": 231
},
{
"epoch": 0.7424,
"grad_norm": 2.6775803565979004,
"learning_rate": 9.79020522372093e-06,
"loss": 0.6506,
"step": 232
},
{
"epoch": 0.7456,
"grad_norm": 2.234372854232788,
"learning_rate": 9.569338007572382e-06,
"loss": 0.5061,
"step": 233
},
{
"epoch": 0.7488,
"grad_norm": 3.26226806640625,
"learning_rate": 9.35039950234696e-06,
"loss": 0.5994,
"step": 234
},
{
"epoch": 0.752,
"grad_norm": 2.563432216644287,
"learning_rate": 9.133417073629289e-06,
"loss": 0.5684,
"step": 235
},
{
"epoch": 0.7552,
"grad_norm": 2.5276975631713867,
"learning_rate": 8.918417842509867e-06,
"loss": 0.5555,
"step": 236
},
{
"epoch": 0.7584,
"grad_norm": 2.4704692363739014,
"learning_rate": 8.705428682195155e-06,
"loss": 0.5411,
"step": 237
},
{
"epoch": 0.7616,
"grad_norm": 2.7508208751678467,
"learning_rate": 8.494476214648626e-06,
"loss": 0.6805,
"step": 238
},
{
"epoch": 0.7648,
"grad_norm": 2.4423398971557617,
"learning_rate": 8.285586807263254e-06,
"loss": 0.5083,
"step": 239
},
{
"epoch": 0.768,
"grad_norm": 3.0647759437561035,
"learning_rate": 8.078786569565763e-06,
"loss": 0.6004,
"step": 240
},
{
"epoch": 0.7712,
"grad_norm": 2.466031312942505,
"learning_rate": 7.874101349953167e-06,
"loss": 0.5198,
"step": 241
},
{
"epoch": 0.7744,
"grad_norm": 2.9429163932800293,
"learning_rate": 7.671556732461905e-06,
"loss": 0.5538,
"step": 242
},
{
"epoch": 0.7776,
"grad_norm": 2.379636526107788,
"learning_rate": 7.471178033570081e-06,
"loss": 0.4544,
"step": 243
},
{
"epoch": 0.7808,
"grad_norm": 2.734103202819824,
"learning_rate": 7.272990299033045e-06,
"loss": 0.541,
"step": 244
},
{
"epoch": 0.784,
"grad_norm": 2.27902889251709,
"learning_rate": 7.077018300752916e-06,
"loss": 0.5235,
"step": 245
},
{
"epoch": 0.7872,
"grad_norm": 2.284369468688965,
"learning_rate": 6.883286533682265e-06,
"loss": 0.49,
"step": 246
},
{
"epoch": 0.7904,
"grad_norm": 2.76302170753479,
"learning_rate": 6.691819212762454e-06,
"loss": 0.6323,
"step": 247
},
{
"epoch": 0.7936,
"grad_norm": 3.0082130432128906,
"learning_rate": 6.502640269896953e-06,
"loss": 0.503,
"step": 248
},
{
"epoch": 0.7968,
"grad_norm": 2.735734462738037,
"learning_rate": 6.3157733509600355e-06,
"loss": 0.5215,
"step": 249
},
{
"epoch": 0.8,
"grad_norm": 3.3777284622192383,
"learning_rate": 6.1312418128412565e-06,
"loss": 0.5809,
"step": 250
},
{
"epoch": 0.8032,
"grad_norm": 2.971823215484619,
"learning_rate": 5.949068720525991e-06,
"loss": 0.5922,
"step": 251
},
{
"epoch": 0.8064,
"grad_norm": 2.5565667152404785,
"learning_rate": 5.769276844212501e-06,
"loss": 0.5451,
"step": 252
},
{
"epoch": 0.8096,
"grad_norm": 2.3006818294525146,
"learning_rate": 5.591888656465874e-06,
"loss": 0.4815,
"step": 253
},
{
"epoch": 0.8128,
"grad_norm": 3.275210380554199,
"learning_rate": 5.416926329409083e-06,
"loss": 0.5855,
"step": 254
},
{
"epoch": 0.816,
"grad_norm": 2.7305386066436768,
"learning_rate": 5.244411731951671e-06,
"loss": 0.5612,
"step": 255
},
{
"epoch": 0.8192,
"grad_norm": 2.7912111282348633,
"learning_rate": 5.074366427056309e-06,
"loss": 0.5502,
"step": 256
},
{
"epoch": 0.8224,
"grad_norm": 2.3598272800445557,
"learning_rate": 4.90681166904359e-06,
"loss": 0.4626,
"step": 257
},
{
"epoch": 0.8256,
"grad_norm": 2.215691566467285,
"learning_rate": 4.741768400935417e-06,
"loss": 0.5392,
"step": 258
},
{
"epoch": 0.8288,
"grad_norm": 3.136962652206421,
"learning_rate": 4.579257251837271e-06,
"loss": 0.6118,
"step": 259
},
{
"epoch": 0.832,
"grad_norm": 2.4404520988464355,
"learning_rate": 4.419298534359759e-06,
"loss": 0.5114,
"step": 260
},
{
"epoch": 0.8352,
"grad_norm": 2.8911097049713135,
"learning_rate": 4.261912242079674e-06,
"loss": 0.5594,
"step": 261
},
{
"epoch": 0.8384,
"grad_norm": 2.625516653060913,
"learning_rate": 4.107118047040995e-06,
"loss": 0.5123,
"step": 262
},
{
"epoch": 0.8416,
"grad_norm": 3.0125982761383057,
"learning_rate": 3.954935297295975e-06,
"loss": 0.5648,
"step": 263
},
{
"epoch": 0.8448,
"grad_norm": 2.3485519886016846,
"learning_rate": 3.8053830144868547e-06,
"loss": 0.5347,
"step": 264
},
{
"epoch": 0.848,
"grad_norm": 3.10200572013855,
"learning_rate": 3.6584798914682582e-06,
"loss": 0.4884,
"step": 265
},
{
"epoch": 0.8512,
"grad_norm": 2.7132773399353027,
"learning_rate": 3.514244289970753e-06,
"loss": 0.5918,
"step": 266
},
{
"epoch": 0.8544,
"grad_norm": 2.857553005218506,
"learning_rate": 3.3726942383057763e-06,
"loss": 0.6052,
"step": 267
},
{
"epoch": 0.8576,
"grad_norm": 2.5855906009674072,
"learning_rate": 3.233847429112244e-06,
"loss": 0.5894,
"step": 268
},
{
"epoch": 0.8608,
"grad_norm": 2.1608877182006836,
"learning_rate": 3.0977212171451e-06,
"loss": 0.4059,
"step": 269
},
{
"epoch": 0.864,
"grad_norm": 2.8646678924560547,
"learning_rate": 2.9643326171061165e-06,
"loss": 0.5117,
"step": 270
},
{
"epoch": 0.8672,
"grad_norm": 3.1131443977355957,
"learning_rate": 2.833698301517185e-06,
"loss": 0.5713,
"step": 271
},
{
"epoch": 0.8704,
"grad_norm": 2.588733434677124,
"learning_rate": 2.7058345986363974e-06,
"loss": 0.5429,
"step": 272
},
{
"epoch": 0.8736,
"grad_norm": 3.296912908554077,
"learning_rate": 2.5807574904171155e-06,
"loss": 0.5944,
"step": 273
},
{
"epoch": 0.8768,
"grad_norm": 2.6463258266448975,
"learning_rate": 2.4584826105103764e-06,
"loss": 0.5025,
"step": 274
},
{
"epoch": 0.88,
"grad_norm": 2.728508710861206,
"learning_rate": 2.3390252423108076e-06,
"loss": 0.514,
"step": 275
},
{
"epoch": 0.8832,
"grad_norm": 2.978658437728882,
"learning_rate": 2.222400317046308e-06,
"loss": 0.5277,
"step": 276
},
{
"epoch": 0.8864,
"grad_norm": 2.75089955329895,
"learning_rate": 2.108622411911773e-06,
"loss": 0.5267,
"step": 277
},
{
"epoch": 0.8896,
"grad_norm": 2.917853593826294,
"learning_rate": 1.997705748247067e-06,
"loss": 0.5621,
"step": 278
},
{
"epoch": 0.8928,
"grad_norm": 2.567577838897705,
"learning_rate": 1.8896641897594492e-06,
"loss": 0.5322,
"step": 279
},
{
"epoch": 0.896,
"grad_norm": 2.7284419536590576,
"learning_rate": 1.78451124079074e-06,
"loss": 0.5843,
"step": 280
},
{
"epoch": 0.8992,
"grad_norm": 3.4555678367614746,
"learning_rate": 1.6822600446293636e-06,
"loss": 0.5511,
"step": 281
},
{
"epoch": 0.9024,
"grad_norm": 2.928220272064209,
"learning_rate": 1.5829233818675766e-06,
"loss": 0.5303,
"step": 282
},
{
"epoch": 0.9056,
"grad_norm": 2.5740914344787598,
"learning_rate": 1.486513668803946e-06,
"loss": 0.5162,
"step": 283
},
{
"epoch": 0.9088,
"grad_norm": 3.3031325340270996,
"learning_rate": 1.3930429558914494e-06,
"loss": 0.5586,
"step": 284
},
{
"epoch": 0.912,
"grad_norm": 3.0398950576782227,
"learning_rate": 1.3025229262312366e-06,
"loss": 0.5671,
"step": 285
},
{
"epoch": 0.9152,
"grad_norm": 2.4170124530792236,
"learning_rate": 1.214964894112361e-06,
"loss": 0.4867,
"step": 286
},
{
"epoch": 0.9184,
"grad_norm": 2.8461062908172607,
"learning_rate": 1.1303798035975643e-06,
"loss": 0.528,
"step": 287
},
{
"epoch": 0.9216,
"grad_norm": 2.772353172302246,
"learning_rate": 1.0487782271553504e-06,
"loss": 0.5361,
"step": 288
},
{
"epoch": 0.9248,
"grad_norm": 2.490363597869873,
"learning_rate": 9.701703643385295e-07,
"loss": 0.5397,
"step": 289
},
{
"epoch": 0.928,
"grad_norm": 2.0275142192840576,
"learning_rate": 8.94566040509337e-07,
"loss": 0.4032,
"step": 290
},
{
"epoch": 0.9312,
"grad_norm": 2.7005605697631836,
"learning_rate": 8.219747056113586e-07,
"loss": 0.528,
"step": 291
},
{
"epoch": 0.9344,
"grad_norm": 2.321662187576294,
"learning_rate": 7.524054329883346e-07,
"loss": 0.4782,
"step": 292
},
{
"epoch": 0.9376,
"grad_norm": 3.391035318374634,
"learning_rate": 6.858669182500971e-07,
"loss": 0.5802,
"step": 293
},
{
"epoch": 0.9408,
"grad_norm": 2.873091697692871,
"learning_rate": 6.223674781856592e-07,
"loss": 0.6049,
"step": 294
},
{
"epoch": 0.944,
"grad_norm": 3.041048288345337,
"learning_rate": 5.619150497236992e-07,
"loss": 0.5748,
"step": 295
},
{
"epoch": 0.9472,
"grad_norm": 2.43678617477417,
"learning_rate": 5.045171889404954e-07,
"loss": 0.4379,
"step": 296
},
{
"epoch": 0.9504,
"grad_norm": 2.705655336380005,
"learning_rate": 4.501810701154907e-07,
"loss": 0.4788,
"step": 297
},
{
"epoch": 0.9536,
"grad_norm": 3.0620837211608887,
"learning_rate": 3.98913484834551e-07,
"loss": 0.5725,
"step": 298
},
{
"epoch": 0.9568,
"grad_norm": 2.623995542526245,
"learning_rate": 3.507208411410778e-07,
"loss": 0.509,
"step": 299
},
{
"epoch": 0.96,
"grad_norm": 3.280472755432129,
"learning_rate": 3.0560916273504325e-07,
"loss": 0.5453,
"step": 300
},
{
"epoch": 0.9632,
"grad_norm": 2.764864683151245,
"learning_rate": 2.635840882200924e-07,
"loss": 0.4987,
"step": 301
},
{
"epoch": 0.9664,
"grad_norm": 2.27424955368042,
"learning_rate": 2.246508703987543e-07,
"loss": 0.4541,
"step": 302
},
{
"epoch": 0.9696,
"grad_norm": 2.5994205474853516,
"learning_rate": 1.8881437561586722e-07,
"loss": 0.5156,
"step": 303
},
{
"epoch": 0.9728,
"grad_norm": 2.2249081134796143,
"learning_rate": 1.5607908315035667e-07,
"loss": 0.3998,
"step": 304
},
{
"epoch": 0.976,
"grad_norm": 2.5577194690704346,
"learning_rate": 1.264490846553279e-07,
"loss": 0.47,
"step": 305
},
{
"epoch": 0.9792,
"grad_norm": 2.661684036254883,
"learning_rate": 9.992808364666373e-08,
"loss": 0.5783,
"step": 306
},
{
"epoch": 0.9824,
"grad_norm": 2.729504346847534,
"learning_rate": 7.651939504010885e-08,
"loss": 0.5448,
"step": 307
},
{
"epoch": 0.9856,
"grad_norm": 2.986076831817627,
"learning_rate": 5.622594473692067e-08,
"loss": 0.5982,
"step": 308
},
{
"epoch": 0.9888,
"grad_norm": 3.096761703491211,
"learning_rate": 3.90502692581729e-08,
"loss": 0.5652,
"step": 309
},
{
"epoch": 0.992,
"grad_norm": 2.4944214820861816,
"learning_rate": 2.4994515427695374e-08,
"loss": 0.4978,
"step": 310
},
{
"epoch": 0.9952,
"grad_norm": 2.4917850494384766,
"learning_rate": 1.4060440103746964e-08,
"loss": 0.537,
"step": 311
},
{
"epoch": 0.9984,
"grad_norm": 2.160087823867798,
"learning_rate": 6.249409959421803e-09,
"loss": 0.4661,
"step": 312
},
{
"epoch": 1.0,
"grad_norm": 4.158653259277344,
"learning_rate": 1.5624013118137326e-09,
"loss": 0.5729,
"step": 313
}
],
"logging_steps": 1,
"max_steps": 313,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.2702536303312896e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}