{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9984,
"eval_steps": 500,
"global_step": 156,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0064,
"grad_norm": 4.704057693481445,
"learning_rate": 0.0,
"loss": 3.1525,
"step": 1
},
{
"epoch": 0.0128,
"grad_norm": 5.1675567626953125,
"learning_rate": 3.125e-06,
"loss": 3.0046,
"step": 2
},
{
"epoch": 0.0192,
"grad_norm": 4.877377986907959,
"learning_rate": 6.25e-06,
"loss": 3.0847,
"step": 3
},
{
"epoch": 0.0256,
"grad_norm": 3.2780067920684814,
"learning_rate": 9.375000000000001e-06,
"loss": 2.429,
"step": 4
},
{
"epoch": 0.032,
"grad_norm": 5.668214797973633,
"learning_rate": 1.25e-05,
"loss": 3.2744,
"step": 5
},
{
"epoch": 0.0384,
"grad_norm": 3.768420934677124,
"learning_rate": 1.5625e-05,
"loss": 2.6841,
"step": 6
},
{
"epoch": 0.0448,
"grad_norm": 4.710444927215576,
"learning_rate": 1.8750000000000002e-05,
"loss": 2.9005,
"step": 7
},
{
"epoch": 0.0512,
"grad_norm": 4.031246662139893,
"learning_rate": 2.1875e-05,
"loss": 2.9428,
"step": 8
},
{
"epoch": 0.0576,
"grad_norm": 5.702942848205566,
"learning_rate": 2.5e-05,
"loss": 3.152,
"step": 9
},
{
"epoch": 0.064,
"grad_norm": 4.755662441253662,
"learning_rate": 2.8125000000000003e-05,
"loss": 2.9298,
"step": 10
},
{
"epoch": 0.0704,
"grad_norm": 5.924014568328857,
"learning_rate": 3.125e-05,
"loss": 2.9489,
"step": 11
},
{
"epoch": 0.0768,
"grad_norm": 3.938117265701294,
"learning_rate": 3.4375e-05,
"loss": 2.7606,
"step": 12
},
{
"epoch": 0.0832,
"grad_norm": 3.8817155361175537,
"learning_rate": 3.7500000000000003e-05,
"loss": 2.7327,
"step": 13
},
{
"epoch": 0.0896,
"grad_norm": 3.159653425216675,
"learning_rate": 4.0625000000000005e-05,
"loss": 2.3963,
"step": 14
},
{
"epoch": 0.096,
"grad_norm": 3.476162910461426,
"learning_rate": 4.375e-05,
"loss": 2.6892,
"step": 15
},
{
"epoch": 0.1024,
"grad_norm": 4.5705180168151855,
"learning_rate": 4.6875e-05,
"loss": 2.8817,
"step": 16
},
{
"epoch": 0.1088,
"grad_norm": 4.219945430755615,
"learning_rate": 5e-05,
"loss": 2.2068,
"step": 17
},
{
"epoch": 0.1152,
"grad_norm": 4.645051956176758,
"learning_rate": 4.9993705873562665e-05,
"loss": 2.7778,
"step": 18
},
{
"epoch": 0.1216,
"grad_norm": 3.949077606201172,
"learning_rate": 4.997482666353287e-05,
"loss": 2.6612,
"step": 19
},
{
"epoch": 0.128,
"grad_norm": 3.4730210304260254,
"learning_rate": 4.99433718761614e-05,
"loss": 2.3016,
"step": 20
},
{
"epoch": 0.1344,
"grad_norm": 3.7095143795013428,
"learning_rate": 4.989935734988098e-05,
"loss": 2.101,
"step": 21
},
{
"epoch": 0.1408,
"grad_norm": 3.7890281677246094,
"learning_rate": 4.984280524733107e-05,
"loss": 2.7039,
"step": 22
},
{
"epoch": 0.1472,
"grad_norm": 2.9922304153442383,
"learning_rate": 4.977374404419837e-05,
"loss": 2.2395,
"step": 23
},
{
"epoch": 0.1536,
"grad_norm": 3.832943916320801,
"learning_rate": 4.9692208514878444e-05,
"loss": 1.8696,
"step": 24
},
{
"epoch": 0.16,
"grad_norm": 3.6049652099609375,
"learning_rate": 4.959823971496574e-05,
"loss": 2.3086,
"step": 25
},
{
"epoch": 0.1664,
"grad_norm": 3.174640655517578,
"learning_rate": 4.9491884960580894e-05,
"loss": 2.3692,
"step": 26
},
{
"epoch": 0.1728,
"grad_norm": 3.7016489505767822,
"learning_rate": 4.937319780454559e-05,
"loss": 2.0063,
"step": 27
},
{
"epoch": 0.1792,
"grad_norm": 3.894601583480835,
"learning_rate": 4.9242238009417175e-05,
"loss": 2.7018,
"step": 28
},
{
"epoch": 0.1856,
"grad_norm": 2.9560084342956543,
"learning_rate": 4.909907151739633e-05,
"loss": 2.1114,
"step": 29
},
{
"epoch": 0.192,
"grad_norm": 3.3287124633789062,
"learning_rate": 4.894377041712326e-05,
"loss": 2.2772,
"step": 30
},
{
"epoch": 0.1984,
"grad_norm": 3.333181142807007,
"learning_rate": 4.877641290737884e-05,
"loss": 2.0911,
"step": 31
},
{
"epoch": 0.2048,
"grad_norm": 3.5498669147491455,
"learning_rate": 4.8597083257709194e-05,
"loss": 2.4118,
"step": 32
},
{
"epoch": 0.2112,
"grad_norm": 3.9147493839263916,
"learning_rate": 4.8405871765993433e-05,
"loss": 2.1987,
"step": 33
},
{
"epoch": 0.2176,
"grad_norm": 3.2478370666503906,
"learning_rate": 4.820287471297598e-05,
"loss": 2.2038,
"step": 34
},
{
"epoch": 0.224,
"grad_norm": 3.533186435699463,
"learning_rate": 4.7988194313786275e-05,
"loss": 2.4584,
"step": 35
},
{
"epoch": 0.2304,
"grad_norm": 3.903890609741211,
"learning_rate": 4.7761938666470403e-05,
"loss": 2.5773,
"step": 36
},
{
"epoch": 0.2368,
"grad_norm": 3.5339035987854004,
"learning_rate": 4.752422169756048e-05,
"loss": 2.3672,
"step": 37
},
{
"epoch": 0.2432,
"grad_norm": 3.715303897857666,
"learning_rate": 4.72751631047092e-05,
"loss": 2.6468,
"step": 38
},
{
"epoch": 0.2496,
"grad_norm": 3.1525583267211914,
"learning_rate": 4.701488829641845e-05,
"loss": 2.0683,
"step": 39
},
{
"epoch": 0.256,
"grad_norm": 3.6886160373687744,
"learning_rate": 4.674352832889239e-05,
"loss": 2.0946,
"step": 40
},
{
"epoch": 0.2624,
"grad_norm": 3.839430570602417,
"learning_rate": 4.6461219840046654e-05,
"loss": 2.1582,
"step": 41
},
{
"epoch": 0.2688,
"grad_norm": 3.764770269393921,
"learning_rate": 4.6168104980707107e-05,
"loss": 2.2363,
"step": 42
},
{
"epoch": 0.2752,
"grad_norm": 2.832568407058716,
"learning_rate": 4.586433134303257e-05,
"loss": 1.837,
"step": 43
},
{
"epoch": 0.2816,
"grad_norm": 3.857421875,
"learning_rate": 4.5550051886197754e-05,
"loss": 2.4129,
"step": 44
},
{
"epoch": 0.288,
"grad_norm": 4.4628682136535645,
"learning_rate": 4.522542485937369e-05,
"loss": 2.2797,
"step": 45
},
{
"epoch": 0.2944,
"grad_norm": 2.5777652263641357,
"learning_rate": 4.489061372204453e-05,
"loss": 2.0681,
"step": 46
},
{
"epoch": 0.3008,
"grad_norm": 3.1532561779022217,
"learning_rate": 4.454578706170075e-05,
"loss": 1.953,
"step": 47
},
{
"epoch": 0.3072,
"grad_norm": 2.7607555389404297,
"learning_rate": 4.419111850895028e-05,
"loss": 1.5028,
"step": 48
},
{
"epoch": 0.3136,
"grad_norm": 2.608351945877075,
"learning_rate": 4.382678665009028e-05,
"loss": 2.0067,
"step": 49
},
{
"epoch": 0.32,
"grad_norm": 3.18412184715271,
"learning_rate": 4.345297493718352e-05,
"loss": 2.1055,
"step": 50
},
{
"epoch": 0.3264,
"grad_norm": 2.8627803325653076,
"learning_rate": 4.306987159568479e-05,
"loss": 1.9178,
"step": 51
},
{
"epoch": 0.3328,
"grad_norm": 4.6859965324401855,
"learning_rate": 4.267766952966369e-05,
"loss": 2.7699,
"step": 52
},
{
"epoch": 0.3392,
"grad_norm": 3.795930862426758,
"learning_rate": 4.227656622467162e-05,
"loss": 2.0893,
"step": 53
},
{
"epoch": 0.3456,
"grad_norm": 3.7162909507751465,
"learning_rate": 4.186676364830186e-05,
"loss": 1.8699,
"step": 54
},
{
"epoch": 0.352,
"grad_norm": 2.9527604579925537,
"learning_rate": 4.144846814849282e-05,
"loss": 1.6775,
"step": 55
},
{
"epoch": 0.3584,
"grad_norm": 3.3146612644195557,
"learning_rate": 4.10218903496256e-05,
"loss": 2.1451,
"step": 56
},
{
"epoch": 0.3648,
"grad_norm": 3.666790723800659,
"learning_rate": 4.058724504646834e-05,
"loss": 2.5592,
"step": 57
},
{
"epoch": 0.3712,
"grad_norm": 3.4549150466918945,
"learning_rate": 4.01447510960205e-05,
"loss": 1.9437,
"step": 58
},
{
"epoch": 0.3776,
"grad_norm": 3.1792612075805664,
"learning_rate": 3.969463130731183e-05,
"loss": 1.9685,
"step": 59
},
{
"epoch": 0.384,
"grad_norm": 3.402393341064453,
"learning_rate": 3.92371123292113e-05,
"loss": 2.4101,
"step": 60
},
{
"epoch": 0.3904,
"grad_norm": 2.981243848800659,
"learning_rate": 3.8772424536302564e-05,
"loss": 1.618,
"step": 61
},
{
"epoch": 0.3968,
"grad_norm": 3.2454357147216797,
"learning_rate": 3.830080191288342e-05,
"loss": 2.2354,
"step": 62
},
{
"epoch": 0.4032,
"grad_norm": 2.588693618774414,
"learning_rate": 3.782248193514766e-05,
"loss": 1.9035,
"step": 63
},
{
"epoch": 0.4096,
"grad_norm": 3.0176501274108887,
"learning_rate": 3.7337705451608674e-05,
"loss": 1.8453,
"step": 64
},
{
"epoch": 0.416,
"grad_norm": 2.457637071609497,
"learning_rate": 3.6846716561824965e-05,
"loss": 1.8545,
"step": 65
},
{
"epoch": 0.4224,
"grad_norm": 3.0648324489593506,
"learning_rate": 3.634976249348867e-05,
"loss": 1.7224,
"step": 66
},
{
"epoch": 0.4288,
"grad_norm": 3.6240382194519043,
"learning_rate": 3.5847093477938956e-05,
"loss": 2.0639,
"step": 67
},
{
"epoch": 0.4352,
"grad_norm": 2.4212806224823,
"learning_rate": 3.533896262416302e-05,
"loss": 1.6711,
"step": 68
},
{
"epoch": 0.4416,
"grad_norm": 2.8671603202819824,
"learning_rate": 3.4825625791348096e-05,
"loss": 1.8226,
"step": 69
},
{
"epoch": 0.448,
"grad_norm": 2.922027349472046,
"learning_rate": 3.4307341460048633e-05,
"loss": 1.4442,
"step": 70
},
{
"epoch": 0.4544,
"grad_norm": 4.1288838386535645,
"learning_rate": 3.378437060203357e-05,
"loss": 2.022,
"step": 71
},
{
"epoch": 0.4608,
"grad_norm": 3.45540189743042,
"learning_rate": 3.3256976548879184e-05,
"loss": 2.1066,
"step": 72
},
{
"epoch": 0.4672,
"grad_norm": 2.9696531295776367,
"learning_rate": 3.272542485937369e-05,
"loss": 1.9677,
"step": 73
},
{
"epoch": 0.4736,
"grad_norm": 2.752713680267334,
"learning_rate": 3.218998318580043e-05,
"loss": 1.537,
"step": 74
},
{
"epoch": 0.48,
"grad_norm": 2.8638994693756104,
"learning_rate": 3.165092113916688e-05,
"loss": 1.7642,
"step": 75
},
{
"epoch": 0.4864,
"grad_norm": 2.7981038093566895,
"learning_rate": 3.110851015344735e-05,
"loss": 1.7863,
"step": 76
},
{
"epoch": 0.4928,
"grad_norm": 3.0902326107025146,
"learning_rate": 3.056302334890786e-05,
"loss": 2.0919,
"step": 77
},
{
"epoch": 0.4992,
"grad_norm": 2.8343629837036133,
"learning_rate": 3.0014735394581823e-05,
"loss": 2.2425,
"step": 78
},
{
"epoch": 0.5056,
"grad_norm": 2.7442476749420166,
"learning_rate": 2.9463922369965917e-05,
"loss": 1.5119,
"step": 79
},
{
"epoch": 0.512,
"grad_norm": 3.2816710472106934,
"learning_rate": 2.8910861626005776e-05,
"loss": 2.4317,
"step": 80
},
{
"epoch": 0.5184,
"grad_norm": 2.9412364959716797,
"learning_rate": 2.8355831645441388e-05,
"loss": 1.7199,
"step": 81
},
{
"epoch": 0.5248,
"grad_norm": 2.4085612297058105,
"learning_rate": 2.7799111902582696e-05,
"loss": 1.6436,
"step": 82
},
{
"epoch": 0.5312,
"grad_norm": 3.3494606018066406,
"learning_rate": 2.724098272258584e-05,
"loss": 1.9426,
"step": 83
},
{
"epoch": 0.5376,
"grad_norm": 3.2767434120178223,
"learning_rate": 2.6681725140300997e-05,
"loss": 1.7712,
"step": 84
},
{
"epoch": 0.544,
"grad_norm": 3.9682724475860596,
"learning_rate": 2.6121620758762877e-05,
"loss": 2.0355,
"step": 85
},
{
"epoch": 0.5504,
"grad_norm": 2.4859561920166016,
"learning_rate": 2.556095160739513e-05,
"loss": 1.7458,
"step": 86
},
{
"epoch": 0.5568,
"grad_norm": 3.25398588180542,
"learning_rate": 2.5e-05,
"loss": 1.7167,
"step": 87
},
{
"epoch": 0.5632,
"grad_norm": 2.7223641872406006,
"learning_rate": 2.443904839260488e-05,
"loss": 1.8083,
"step": 88
},
{
"epoch": 0.5696,
"grad_norm": 3.3979554176330566,
"learning_rate": 2.3878379241237136e-05,
"loss": 1.8491,
"step": 89
},
{
"epoch": 0.576,
"grad_norm": 2.537713050842285,
"learning_rate": 2.331827485969901e-05,
"loss": 1.4982,
"step": 90
},
{
"epoch": 0.5824,
"grad_norm": 2.481809616088867,
"learning_rate": 2.2759017277414166e-05,
"loss": 1.6553,
"step": 91
},
{
"epoch": 0.5888,
"grad_norm": 2.704338312149048,
"learning_rate": 2.2200888097417307e-05,
"loss": 1.3204,
"step": 92
},
{
"epoch": 0.5952,
"grad_norm": 3.5501091480255127,
"learning_rate": 2.164416835455862e-05,
"loss": 2.2412,
"step": 93
},
{
"epoch": 0.6016,
"grad_norm": 2.8751282691955566,
"learning_rate": 2.1089138373994223e-05,
"loss": 1.6818,
"step": 94
},
{
"epoch": 0.608,
"grad_norm": 3.134270191192627,
"learning_rate": 2.0536077630034086e-05,
"loss": 1.7345,
"step": 95
},
{
"epoch": 0.6144,
"grad_norm": 3.4039204120635986,
"learning_rate": 1.9985264605418183e-05,
"loss": 1.8876,
"step": 96
},
{
"epoch": 0.6208,
"grad_norm": 2.858107566833496,
"learning_rate": 1.9436976651092144e-05,
"loss": 1.3758,
"step": 97
},
{
"epoch": 0.6272,
"grad_norm": 3.3995652198791504,
"learning_rate": 1.8891489846552646e-05,
"loss": 1.7547,
"step": 98
},
{
"epoch": 0.6336,
"grad_norm": 3.344923257827759,
"learning_rate": 1.8349078860833123e-05,
"loss": 1.6181,
"step": 99
},
{
"epoch": 0.64,
"grad_norm": 2.736323118209839,
"learning_rate": 1.781001681419957e-05,
"loss": 1.3949,
"step": 100
},
{
"epoch": 0.6464,
"grad_norm": 3.303896427154541,
"learning_rate": 1.7274575140626318e-05,
"loss": 1.6077,
"step": 101
},
{
"epoch": 0.6528,
"grad_norm": 2.998396158218384,
"learning_rate": 1.6743023451120832e-05,
"loss": 1.613,
"step": 102
},
{
"epoch": 0.6592,
"grad_norm": 4.653803825378418,
"learning_rate": 1.621562939796643e-05,
"loss": 1.9022,
"step": 103
},
{
"epoch": 0.6656,
"grad_norm": 2.9627840518951416,
"learning_rate": 1.5692658539951372e-05,
"loss": 1.6514,
"step": 104
},
{
"epoch": 0.672,
"grad_norm": 4.12007474899292,
"learning_rate": 1.5174374208651912e-05,
"loss": 2.0834,
"step": 105
},
{
"epoch": 0.6784,
"grad_norm": 3.2830817699432373,
"learning_rate": 1.466103737583699e-05,
"loss": 1.7247,
"step": 106
},
{
"epoch": 0.6848,
"grad_norm": 2.853792667388916,
"learning_rate": 1.4152906522061048e-05,
"loss": 1.6993,
"step": 107
},
{
"epoch": 0.6912,
"grad_norm": 2.8175644874572754,
"learning_rate": 1.3650237506511331e-05,
"loss": 1.5805,
"step": 108
},
{
"epoch": 0.6976,
"grad_norm": 2.7665114402770996,
"learning_rate": 1.3153283438175034e-05,
"loss": 1.4819,
"step": 109
},
{
"epoch": 0.704,
"grad_norm": 2.951678514480591,
"learning_rate": 1.2662294548391328e-05,
"loss": 1.4978,
"step": 110
},
{
"epoch": 0.7104,
"grad_norm": 3.3238184452056885,
"learning_rate": 1.217751806485235e-05,
"loss": 1.8354,
"step": 111
},
{
"epoch": 0.7168,
"grad_norm": 2.4784748554229736,
"learning_rate": 1.1699198087116589e-05,
"loss": 1.3185,
"step": 112
},
{
"epoch": 0.7232,
"grad_norm": 3.491586923599243,
"learning_rate": 1.122757546369744e-05,
"loss": 2.1179,
"step": 113
},
{
"epoch": 0.7296,
"grad_norm": 3.4382336139678955,
"learning_rate": 1.0762887670788702e-05,
"loss": 1.7461,
"step": 114
},
{
"epoch": 0.736,
"grad_norm": 3.145657777786255,
"learning_rate": 1.0305368692688174e-05,
"loss": 1.5797,
"step": 115
},
{
"epoch": 0.7424,
"grad_norm": 3.600944757461548,
"learning_rate": 9.855248903979506e-06,
"loss": 1.5077,
"step": 116
},
{
"epoch": 0.7488,
"grad_norm": 3.1408603191375732,
"learning_rate": 9.412754953531663e-06,
"loss": 1.6516,
"step": 117
},
{
"epoch": 0.7552,
"grad_norm": 3.197890043258667,
"learning_rate": 8.978109650374397e-06,
"loss": 1.743,
"step": 118
},
{
"epoch": 0.7616,
"grad_norm": 2.4245102405548096,
"learning_rate": 8.551531851507186e-06,
"loss": 1.443,
"step": 119
},
{
"epoch": 0.768,
"grad_norm": 2.798938512802124,
"learning_rate": 8.133236351698143e-06,
"loss": 1.3257,
"step": 120
},
{
"epoch": 0.7744,
"grad_norm": 3.1017847061157227,
"learning_rate": 7.723433775328384e-06,
"loss": 1.6557,
"step": 121
},
{
"epoch": 0.7808,
"grad_norm": 2.4948604106903076,
"learning_rate": 7.3223304703363135e-06,
"loss": 1.6317,
"step": 122
},
{
"epoch": 0.7872,
"grad_norm": 2.6814234256744385,
"learning_rate": 6.930128404315214e-06,
"loss": 1.6922,
"step": 123
},
{
"epoch": 0.7936,
"grad_norm": 3.2181453704833984,
"learning_rate": 6.547025062816486e-06,
"loss": 1.6152,
"step": 124
},
{
"epoch": 0.8,
"grad_norm": 2.9685909748077393,
"learning_rate": 6.173213349909729e-06,
"loss": 1.6251,
"step": 125
},
{
"epoch": 0.8064,
"grad_norm": 3.1302011013031006,
"learning_rate": 5.808881491049723e-06,
"loss": 1.821,
"step": 126
},
{
"epoch": 0.8128,
"grad_norm": 3.1760427951812744,
"learning_rate": 5.454212938299255e-06,
"loss": 1.9791,
"step": 127
},
{
"epoch": 0.8192,
"grad_norm": 2.8934803009033203,
"learning_rate": 5.1093862779554776e-06,
"loss": 1.356,
"step": 128
},
{
"epoch": 0.8256,
"grad_norm": 2.6532275676727295,
"learning_rate": 4.7745751406263165e-06,
"loss": 1.3942,
"step": 129
},
{
"epoch": 0.832,
"grad_norm": 2.6779580116271973,
"learning_rate": 4.4499481138022544e-06,
"loss": 1.2577,
"step": 130
},
{
"epoch": 0.8384,
"grad_norm": 2.9538562297821045,
"learning_rate": 4.135668656967434e-06,
"loss": 1.5183,
"step": 131
},
{
"epoch": 0.8448,
"grad_norm": 2.1692054271698,
"learning_rate": 3.831895019292897e-06,
"loss": 1.0417,
"step": 132
},
{
"epoch": 0.8512,
"grad_norm": 3.9423463344573975,
"learning_rate": 3.5387801599533475e-06,
"loss": 2.0205,
"step": 133
},
{
"epoch": 0.8576,
"grad_norm": 3.072523593902588,
"learning_rate": 3.2564716711076167e-06,
"loss": 1.2959,
"step": 134
},
{
"epoch": 0.864,
"grad_norm": 2.2884180545806885,
"learning_rate": 2.98511170358155e-06,
"loss": 1.2522,
"step": 135
},
{
"epoch": 0.8704,
"grad_norm": 3.3118743896484375,
"learning_rate": 2.7248368952908053e-06,
"loss": 1.6146,
"step": 136
},
{
"epoch": 0.8768,
"grad_norm": 3.355278968811035,
"learning_rate": 2.475778302439524e-06,
"loss": 1.6541,
"step": 137
},
{
"epoch": 0.8832,
"grad_norm": 2.1355369091033936,
"learning_rate": 2.2380613335296036e-06,
"loss": 1.0463,
"step": 138
},
{
"epoch": 0.8896,
"grad_norm": 2.4513282775878906,
"learning_rate": 2.0118056862137357e-06,
"loss": 1.1024,
"step": 139
},
{
"epoch": 0.896,
"grad_norm": 3.1772351264953613,
"learning_rate": 1.7971252870240291e-06,
"loss": 1.2631,
"step": 140
},
{
"epoch": 0.9024,
"grad_norm": 2.525724411010742,
"learning_rate": 1.59412823400657e-06,
"loss": 1.2131,
"step": 141
},
{
"epoch": 0.9088,
"grad_norm": 4.4134416580200195,
"learning_rate": 1.4029167422908107e-06,
"loss": 1.4394,
"step": 142
},
{
"epoch": 0.9152,
"grad_norm": 2.139984607696533,
"learning_rate": 1.2235870926211619e-06,
"loss": 1.1603,
"step": 143
},
{
"epoch": 0.9216,
"grad_norm": 2.7187249660491943,
"learning_rate": 1.0562295828767387e-06,
"loss": 1.283,
"step": 144
},
{
"epoch": 0.928,
"grad_norm": 4.029594898223877,
"learning_rate": 9.009284826036691e-07,
"loss": 1.855,
"step": 145
},
{
"epoch": 0.9344,
"grad_norm": 2.671586036682129,
"learning_rate": 7.577619905828282e-07,
"loss": 1.3775,
"step": 146
},
{
"epoch": 0.9408,
"grad_norm": 3.0629734992980957,
"learning_rate": 6.268021954544096e-07,
"loss": 1.4093,
"step": 147
},
{
"epoch": 0.9472,
"grad_norm": 2.15995454788208,
"learning_rate": 5.08115039419113e-07,
"loss": 1.1758,
"step": 148
},
{
"epoch": 0.9536,
"grad_norm": 2.610565423965454,
"learning_rate": 4.0176028503425835e-07,
"loss": 1.6003,
"step": 149
},
{
"epoch": 0.96,
"grad_norm": 3.3255887031555176,
"learning_rate": 3.077914851215585e-07,
"loss": 1.3682,
"step": 150
},
{
"epoch": 0.9664,
"grad_norm": 3.8929100036621094,
"learning_rate": 2.262559558016325e-07,
"loss": 1.7135,
"step": 151
},
{
"epoch": 0.9728,
"grad_norm": 2.5593698024749756,
"learning_rate": 1.571947526689349e-07,
"loss": 1.497,
"step": 152
},
{
"epoch": 0.9792,
"grad_norm": 2.687330961227417,
"learning_rate": 1.006426501190233e-07,
"loss": 1.3305,
"step": 153
},
{
"epoch": 0.9856,
"grad_norm": 2.625413656234741,
"learning_rate": 5.662812383859795e-08,
"loss": 1.3444,
"step": 154
},
{
"epoch": 0.992,
"grad_norm": 2.951202154159546,
"learning_rate": 2.5173336467135267e-08,
"loss": 1.3641,
"step": 155
},
{
"epoch": 0.9984,
"grad_norm": 2.8701841831207275,
"learning_rate": 6.294126437336734e-09,
"loss": 1.561,
"step": 156
}
],
"logging_steps": 1,
"max_steps": 156,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3416669053378560.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}