armenian-ner / checkpoint-12292 / trainer_state.json
{
"best_global_step": 12292,
"best_metric": 0.9563834240267894,
"best_model_checkpoint": "C:\\Users\\Shara\\projects\\models\\run_16-lr_2e-05-acc_1-wd_0.01-bs_8-ep_7\\checkpoint-12292",
"epoch": 7.0,
"eval_steps": 500,
"global_step": 12292,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02847380410022779,
"grad_norm": 4.8302321434021,
"learning_rate": 1.992840872111943e-05,
"loss": 0.9994,
"step": 50
},
{
"epoch": 0.05694760820045558,
"grad_norm": 3.4978649616241455,
"learning_rate": 1.9847054995118778e-05,
"loss": 0.3939,
"step": 100
},
{
"epoch": 0.08542141230068337,
"grad_norm": 8.353672981262207,
"learning_rate": 1.9765701269118128e-05,
"loss": 0.2882,
"step": 150
},
{
"epoch": 0.11389521640091116,
"grad_norm": 9.822871208190918,
"learning_rate": 1.9684347543117477e-05,
"loss": 0.1728,
"step": 200
},
{
"epoch": 0.14236902050113895,
"grad_norm": 3.1411221027374268,
"learning_rate": 1.9602993817116827e-05,
"loss": 0.1251,
"step": 250
},
{
"epoch": 0.17084282460136674,
"grad_norm": 8.493424415588379,
"learning_rate": 1.9521640091116173e-05,
"loss": 0.1405,
"step": 300
},
{
"epoch": 0.19931662870159453,
"grad_norm": 4.757007122039795,
"learning_rate": 1.9440286365115523e-05,
"loss": 0.1431,
"step": 350
},
{
"epoch": 0.22779043280182232,
"grad_norm": 4.339049816131592,
"learning_rate": 1.9358932639114873e-05,
"loss": 0.0912,
"step": 400
},
{
"epoch": 0.25626423690205014,
"grad_norm": 14.540772438049316,
"learning_rate": 1.9277578913114222e-05,
"loss": 0.1048,
"step": 450
},
{
"epoch": 0.2847380410022779,
"grad_norm": 10.503073692321777,
"learning_rate": 1.9196225187113572e-05,
"loss": 0.1224,
"step": 500
},
{
"epoch": 0.3132118451025057,
"grad_norm": 2.738131284713745,
"learning_rate": 1.911487146111292e-05,
"loss": 0.1025,
"step": 550
},
{
"epoch": 0.3416856492027335,
"grad_norm": 6.681102752685547,
"learning_rate": 1.903351773511227e-05,
"loss": 0.0869,
"step": 600
},
{
"epoch": 0.3701594533029613,
"grad_norm": 1.4330339431762695,
"learning_rate": 1.895216400911162e-05,
"loss": 0.052,
"step": 650
},
{
"epoch": 0.39863325740318906,
"grad_norm": 2.581470012664795,
"learning_rate": 1.8870810283110967e-05,
"loss": 0.0808,
"step": 700
},
{
"epoch": 0.4271070615034169,
"grad_norm": 5.825100898742676,
"learning_rate": 1.8789456557110317e-05,
"loss": 0.0716,
"step": 750
},
{
"epoch": 0.45558086560364464,
"grad_norm": 1.74166738986969,
"learning_rate": 1.8708102831109666e-05,
"loss": 0.0806,
"step": 800
},
{
"epoch": 0.48405466970387245,
"grad_norm": 7.3097405433654785,
"learning_rate": 1.8626749105109016e-05,
"loss": 0.0666,
"step": 850
},
{
"epoch": 0.5125284738041003,
"grad_norm": 2.220766067504883,
"learning_rate": 1.8545395379108362e-05,
"loss": 0.0768,
"step": 900
},
{
"epoch": 0.541002277904328,
"grad_norm": 4.0566511154174805,
"learning_rate": 1.8464041653107715e-05,
"loss": 0.0707,
"step": 950
},
{
"epoch": 0.5694760820045558,
"grad_norm": 0.2833240330219269,
"learning_rate": 1.8382687927107065e-05,
"loss": 0.0858,
"step": 1000
},
{
"epoch": 0.5979498861047836,
"grad_norm": 21.131481170654297,
"learning_rate": 1.8301334201106415e-05,
"loss": 0.0595,
"step": 1050
},
{
"epoch": 0.6264236902050114,
"grad_norm": 27.655433654785156,
"learning_rate": 1.821998047510576e-05,
"loss": 0.0466,
"step": 1100
},
{
"epoch": 0.6548974943052391,
"grad_norm": 3.4721717834472656,
"learning_rate": 1.813862674910511e-05,
"loss": 0.0776,
"step": 1150
},
{
"epoch": 0.683371298405467,
"grad_norm": 2.4811441898345947,
"learning_rate": 1.805727302310446e-05,
"loss": 0.0647,
"step": 1200
},
{
"epoch": 0.7118451025056948,
"grad_norm": 6.169066429138184,
"learning_rate": 1.797591929710381e-05,
"loss": 0.0724,
"step": 1250
},
{
"epoch": 0.7403189066059226,
"grad_norm": 8.984748840332031,
"learning_rate": 1.7894565571103156e-05,
"loss": 0.0838,
"step": 1300
},
{
"epoch": 0.7687927107061503,
"grad_norm": 5.944505214691162,
"learning_rate": 1.7813211845102506e-05,
"loss": 0.0683,
"step": 1350
},
{
"epoch": 0.7972665148063781,
"grad_norm": 0.28812381625175476,
"learning_rate": 1.7731858119101856e-05,
"loss": 0.0614,
"step": 1400
},
{
"epoch": 0.8257403189066059,
"grad_norm": 6.176011085510254,
"learning_rate": 1.7650504393101205e-05,
"loss": 0.0615,
"step": 1450
},
{
"epoch": 0.8542141230068337,
"grad_norm": 2.5244405269622803,
"learning_rate": 1.7569150667100555e-05,
"loss": 0.0714,
"step": 1500
},
{
"epoch": 0.8826879271070615,
"grad_norm": 3.3508074283599854,
"learning_rate": 1.7487796941099904e-05,
"loss": 0.0563,
"step": 1550
},
{
"epoch": 0.9111617312072893,
"grad_norm": 9.980842590332031,
"learning_rate": 1.7406443215099254e-05,
"loss": 0.0647,
"step": 1600
},
{
"epoch": 0.9396355353075171,
"grad_norm": 3.6959774494171143,
"learning_rate": 1.7325089489098604e-05,
"loss": 0.0682,
"step": 1650
},
{
"epoch": 0.9681093394077449,
"grad_norm": 0.626620888710022,
"learning_rate": 1.724373576309795e-05,
"loss": 0.0499,
"step": 1700
},
{
"epoch": 0.9965831435079726,
"grad_norm": 7.145672798156738,
"learning_rate": 1.71623820370973e-05,
"loss": 0.0569,
"step": 1750
},
{
"epoch": 1.0,
"eval_f1": 0.9242246437552389,
"eval_loss": 0.05600914731621742,
"eval_precision": 0.9206746826987308,
"eval_recall": 0.927802086839448,
"eval_runtime": 3.7538,
"eval_samples_per_second": 865.798,
"eval_steps_per_second": 108.425,
"step": 1756
},
{
"epoch": 1.0250569476082005,
"grad_norm": 4.280463695526123,
"learning_rate": 1.708102831109665e-05,
"loss": 0.0673,
"step": 1800
},
{
"epoch": 1.0535307517084282,
"grad_norm": 0.06692535430192947,
"learning_rate": 1.6999674585096e-05,
"loss": 0.0403,
"step": 1850
},
{
"epoch": 1.082004555808656,
"grad_norm": 0.03789375349879265,
"learning_rate": 1.691832085909535e-05,
"loss": 0.0496,
"step": 1900
},
{
"epoch": 1.1104783599088839,
"grad_norm": 3.698348045349121,
"learning_rate": 1.6836967133094698e-05,
"loss": 0.0381,
"step": 1950
},
{
"epoch": 1.1389521640091116,
"grad_norm": 2.753032684326172,
"learning_rate": 1.6755613407094048e-05,
"loss": 0.0432,
"step": 2000
},
{
"epoch": 1.1674259681093395,
"grad_norm": 0.06336130946874619,
"learning_rate": 1.6674259681093398e-05,
"loss": 0.0482,
"step": 2050
},
{
"epoch": 1.1958997722095672,
"grad_norm": 3.9503254890441895,
"learning_rate": 1.6592905955092744e-05,
"loss": 0.0436,
"step": 2100
},
{
"epoch": 1.224373576309795,
"grad_norm": 0.08679741621017456,
"learning_rate": 1.6511552229092093e-05,
"loss": 0.0346,
"step": 2150
},
{
"epoch": 1.2528473804100229,
"grad_norm": 2.1995127201080322,
"learning_rate": 1.6430198503091443e-05,
"loss": 0.0312,
"step": 2200
},
{
"epoch": 1.2813211845102506,
"grad_norm": 5.7709808349609375,
"learning_rate": 1.634884477709079e-05,
"loss": 0.032,
"step": 2250
},
{
"epoch": 1.3097949886104785,
"grad_norm": 0.2031829059123993,
"learning_rate": 1.626749105109014e-05,
"loss": 0.053,
"step": 2300
},
{
"epoch": 1.3382687927107062,
"grad_norm": 4.292157173156738,
"learning_rate": 1.6186137325089492e-05,
"loss": 0.0413,
"step": 2350
},
{
"epoch": 1.366742596810934,
"grad_norm": 0.07821401953697205,
"learning_rate": 1.6104783599088842e-05,
"loss": 0.0304,
"step": 2400
},
{
"epoch": 1.3952164009111616,
"grad_norm": 0.5930206179618835,
"learning_rate": 1.6023429873088188e-05,
"loss": 0.0525,
"step": 2450
},
{
"epoch": 1.4236902050113895,
"grad_norm": 1.6114712953567505,
"learning_rate": 1.5942076147087538e-05,
"loss": 0.0391,
"step": 2500
},
{
"epoch": 1.4521640091116172,
"grad_norm": 0.28632065653800964,
"learning_rate": 1.5860722421086887e-05,
"loss": 0.0491,
"step": 2550
},
{
"epoch": 1.4806378132118452,
"grad_norm": 1.0017669200897217,
"learning_rate": 1.5779368695086237e-05,
"loss": 0.0395,
"step": 2600
},
{
"epoch": 1.5091116173120729,
"grad_norm": 0.03409272059798241,
"learning_rate": 1.5698014969085583e-05,
"loss": 0.0399,
"step": 2650
},
{
"epoch": 1.5375854214123006,
"grad_norm": 3.5108702182769775,
"learning_rate": 1.5616661243084933e-05,
"loss": 0.0424,
"step": 2700
},
{
"epoch": 1.5660592255125285,
"grad_norm": 3.5875349044799805,
"learning_rate": 1.5535307517084283e-05,
"loss": 0.0497,
"step": 2750
},
{
"epoch": 1.5945330296127562,
"grad_norm": 0.10498251020908356,
"learning_rate": 1.5453953791083632e-05,
"loss": 0.0461,
"step": 2800
},
{
"epoch": 1.6230068337129842,
"grad_norm": 7.618963718414307,
"learning_rate": 1.5374227139602996e-05,
"loss": 0.0394,
"step": 2850
},
{
"epoch": 1.6514806378132119,
"grad_norm": 3.3920793533325195,
"learning_rate": 1.5292873413602346e-05,
"loss": 0.0339,
"step": 2900
},
{
"epoch": 1.6799544419134396,
"grad_norm": 0.04736129939556122,
"learning_rate": 1.5211519687601692e-05,
"loss": 0.0358,
"step": 2950
},
{
"epoch": 1.7084282460136673,
"grad_norm": 0.17463508248329163,
"learning_rate": 1.5130165961601042e-05,
"loss": 0.034,
"step": 3000
},
{
"epoch": 1.7369020501138952,
"grad_norm": 0.058148205280303955,
"learning_rate": 1.5048812235600392e-05,
"loss": 0.0317,
"step": 3050
},
{
"epoch": 1.7653758542141231,
"grad_norm": 3.5262815952301025,
"learning_rate": 1.4967458509599741e-05,
"loss": 0.0477,
"step": 3100
},
{
"epoch": 1.7938496583143508,
"grad_norm": 2.971937894821167,
"learning_rate": 1.488610478359909e-05,
"loss": 0.0364,
"step": 3150
},
{
"epoch": 1.8223234624145785,
"grad_norm": 0.05181724950671196,
"learning_rate": 1.4804751057598439e-05,
"loss": 0.0394,
"step": 3200
},
{
"epoch": 1.8507972665148062,
"grad_norm": 0.2712370455265045,
"learning_rate": 1.4723397331597789e-05,
"loss": 0.0456,
"step": 3250
},
{
"epoch": 1.8792710706150342,
"grad_norm": 1.608622431755066,
"learning_rate": 1.4642043605597138e-05,
"loss": 0.05,
"step": 3300
},
{
"epoch": 1.907744874715262,
"grad_norm": 5.2423529624938965,
"learning_rate": 1.4560689879596486e-05,
"loss": 0.0371,
"step": 3350
},
{
"epoch": 1.9362186788154898,
"grad_norm": 0.04786211624741554,
"learning_rate": 1.4479336153595836e-05,
"loss": 0.0329,
"step": 3400
},
{
"epoch": 1.9646924829157175,
"grad_norm": 0.5264931917190552,
"learning_rate": 1.4397982427595185e-05,
"loss": 0.0379,
"step": 3450
},
{
"epoch": 1.9931662870159452,
"grad_norm": 0.236006498336792,
"learning_rate": 1.4316628701594535e-05,
"loss": 0.0253,
"step": 3500
},
{
"epoch": 2.0,
"eval_f1": 0.9406438631790744,
"eval_loss": 0.05223705992102623,
"eval_precision": 0.9371867691279653,
"eval_recall": 0.9441265567149109,
"eval_runtime": 3.7109,
"eval_samples_per_second": 875.791,
"eval_steps_per_second": 109.676,
"step": 3512
},
{
"epoch": 2.021640091116173,
"grad_norm": 4.225963115692139,
"learning_rate": 1.4235274975593883e-05,
"loss": 0.0373,
"step": 3550
},
{
"epoch": 2.050113895216401,
"grad_norm": 0.15388111770153046,
"learning_rate": 1.4153921249593233e-05,
"loss": 0.0258,
"step": 3600
},
{
"epoch": 2.078587699316629,
"grad_norm": 6.3911895751953125,
"learning_rate": 1.4072567523592582e-05,
"loss": 0.0217,
"step": 3650
},
{
"epoch": 2.1070615034168565,
"grad_norm": 0.03075530007481575,
"learning_rate": 1.3991213797591932e-05,
"loss": 0.019,
"step": 3700
},
{
"epoch": 2.135535307517084,
"grad_norm": 0.16612432897090912,
"learning_rate": 1.3909860071591278e-05,
"loss": 0.0175,
"step": 3750
},
{
"epoch": 2.164009111617312,
"grad_norm": 0.14543022215366364,
"learning_rate": 1.382850634559063e-05,
"loss": 0.0289,
"step": 3800
},
{
"epoch": 2.19248291571754,
"grad_norm": 2.9880359172821045,
"learning_rate": 1.374715261958998e-05,
"loss": 0.0208,
"step": 3850
},
{
"epoch": 2.2209567198177678,
"grad_norm": 9.878133773803711,
"learning_rate": 1.3665798893589329e-05,
"loss": 0.021,
"step": 3900
},
{
"epoch": 2.2494305239179955,
"grad_norm": 9.684176445007324,
"learning_rate": 1.3584445167588675e-05,
"loss": 0.0267,
"step": 3950
},
{
"epoch": 2.277904328018223,
"grad_norm": 33.15859603881836,
"learning_rate": 1.3503091441588025e-05,
"loss": 0.0295,
"step": 4000
},
{
"epoch": 2.306378132118451,
"grad_norm": 2.559544086456299,
"learning_rate": 1.3421737715587374e-05,
"loss": 0.0292,
"step": 4050
},
{
"epoch": 2.334851936218679,
"grad_norm": 0.07091552764177322,
"learning_rate": 1.3340383989586724e-05,
"loss": 0.0262,
"step": 4100
},
{
"epoch": 2.3633257403189067,
"grad_norm": 1.8119585514068604,
"learning_rate": 1.3259030263586072e-05,
"loss": 0.03,
"step": 4150
},
{
"epoch": 2.3917995444191344,
"grad_norm": 0.1476636379957199,
"learning_rate": 1.3177676537585422e-05,
"loss": 0.0196,
"step": 4200
},
{
"epoch": 2.420273348519362,
"grad_norm": 0.04482650011777878,
"learning_rate": 1.3096322811584771e-05,
"loss": 0.0263,
"step": 4250
},
{
"epoch": 2.44874715261959,
"grad_norm": 0.09201560169458389,
"learning_rate": 1.3014969085584121e-05,
"loss": 0.0172,
"step": 4300
},
{
"epoch": 2.477220956719818,
"grad_norm": 0.02215876244008541,
"learning_rate": 1.2933615359583469e-05,
"loss": 0.0149,
"step": 4350
},
{
"epoch": 2.5056947608200457,
"grad_norm": 0.6993932127952576,
"learning_rate": 1.2852261633582819e-05,
"loss": 0.0331,
"step": 4400
},
{
"epoch": 2.5341685649202734,
"grad_norm": 0.030453965067863464,
"learning_rate": 1.2770907907582168e-05,
"loss": 0.0307,
"step": 4450
},
{
"epoch": 2.562642369020501,
"grad_norm": 0.22211593389511108,
"learning_rate": 1.2689554181581518e-05,
"loss": 0.0183,
"step": 4500
},
{
"epoch": 2.591116173120729,
"grad_norm": 1.2842738628387451,
"learning_rate": 1.2608200455580866e-05,
"loss": 0.0226,
"step": 4550
},
{
"epoch": 2.619589977220957,
"grad_norm": 3.9048423767089844,
"learning_rate": 1.2526846729580216e-05,
"loss": 0.0239,
"step": 4600
},
{
"epoch": 2.6480637813211843,
"grad_norm": 0.9887784719467163,
"learning_rate": 1.2445493003579565e-05,
"loss": 0.0271,
"step": 4650
},
{
"epoch": 2.6765375854214124,
"grad_norm": 4.759728908538818,
"learning_rate": 1.2364139277578915e-05,
"loss": 0.0201,
"step": 4700
},
{
"epoch": 2.70501138952164,
"grad_norm": 0.2671191096305847,
"learning_rate": 1.2282785551578263e-05,
"loss": 0.0212,
"step": 4750
},
{
"epoch": 2.733485193621868,
"grad_norm": 0.036626849323511124,
"learning_rate": 1.2201431825577612e-05,
"loss": 0.0155,
"step": 4800
},
{
"epoch": 2.7619589977220955,
"grad_norm": 6.877408027648926,
"learning_rate": 1.2120078099576962e-05,
"loss": 0.0266,
"step": 4850
},
{
"epoch": 2.7904328018223232,
"grad_norm": 4.725896835327148,
"learning_rate": 1.2038724373576312e-05,
"loss": 0.0163,
"step": 4900
},
{
"epoch": 2.8189066059225514,
"grad_norm": 0.11965059489011765,
"learning_rate": 1.195737064757566e-05,
"loss": 0.0246,
"step": 4950
},
{
"epoch": 2.847380410022779,
"grad_norm": 0.6634079813957214,
"learning_rate": 1.187601692157501e-05,
"loss": 0.0304,
"step": 5000
},
{
"epoch": 2.875854214123007,
"grad_norm": 3.952694892883301,
"learning_rate": 1.1794663195574359e-05,
"loss": 0.0183,
"step": 5050
},
{
"epoch": 2.9043280182232345,
"grad_norm": 19.788942337036133,
"learning_rate": 1.1713309469573709e-05,
"loss": 0.0193,
"step": 5100
},
{
"epoch": 2.932801822323462,
"grad_norm": 4.989261627197266,
"learning_rate": 1.1631955743573055e-05,
"loss": 0.023,
"step": 5150
},
{
"epoch": 2.9612756264236904,
"grad_norm": 9.020853996276855,
"learning_rate": 1.1550602017572406e-05,
"loss": 0.0231,
"step": 5200
},
{
"epoch": 2.989749430523918,
"grad_norm": 0.9151140451431274,
"learning_rate": 1.1469248291571756e-05,
"loss": 0.0314,
"step": 5250
},
{
"epoch": 3.0,
"eval_f1": 0.9470420646891236,
"eval_loss": 0.04870199039578438,
"eval_precision": 0.943090787716956,
"eval_recall": 0.9510265903736116,
"eval_runtime": 3.7494,
"eval_samples_per_second": 866.805,
"eval_steps_per_second": 108.551,
"step": 5268
},
{
"epoch": 3.0182232346241458,
"grad_norm": 8.62992000579834,
"learning_rate": 1.1387894565571106e-05,
"loss": 0.0194,
"step": 5300
},
{
"epoch": 3.0466970387243735,
"grad_norm": 0.5615554451942444,
"learning_rate": 1.1306540839570452e-05,
"loss": 0.0114,
"step": 5350
},
{
"epoch": 3.075170842824601,
"grad_norm": 2.5735936164855957,
"learning_rate": 1.1225187113569801e-05,
"loss": 0.0163,
"step": 5400
},
{
"epoch": 3.1036446469248293,
"grad_norm": 6.504094123840332,
"learning_rate": 1.1143833387569151e-05,
"loss": 0.0135,
"step": 5450
},
{
"epoch": 3.132118451025057,
"grad_norm": 14.477537155151367,
"learning_rate": 1.10624796615685e-05,
"loss": 0.0129,
"step": 5500
},
{
"epoch": 3.1605922551252847,
"grad_norm": 0.04929841682314873,
"learning_rate": 1.0981125935567849e-05,
"loss": 0.0263,
"step": 5550
},
{
"epoch": 3.1890660592255125,
"grad_norm": 0.08875144273042679,
"learning_rate": 1.0899772209567198e-05,
"loss": 0.0134,
"step": 5600
},
{
"epoch": 3.21753986332574,
"grad_norm": 0.6238455772399902,
"learning_rate": 1.0818418483566548e-05,
"loss": 0.0107,
"step": 5650
},
{
"epoch": 3.2460136674259683,
"grad_norm": 0.004948179703205824,
"learning_rate": 1.0737064757565898e-05,
"loss": 0.0167,
"step": 5700
},
{
"epoch": 3.274487471526196,
"grad_norm": 0.017031218856573105,
"learning_rate": 1.0655711031565246e-05,
"loss": 0.0085,
"step": 5750
},
{
"epoch": 3.3029612756264237,
"grad_norm": 2.9840469360351562,
"learning_rate": 1.0574357305564595e-05,
"loss": 0.0158,
"step": 5800
},
{
"epoch": 3.3314350797266514,
"grad_norm": 1.7477540969848633,
"learning_rate": 1.0493003579563945e-05,
"loss": 0.0157,
"step": 5850
},
{
"epoch": 3.359908883826879,
"grad_norm": 0.03963543102145195,
"learning_rate": 1.0411649853563295e-05,
"loss": 0.0201,
"step": 5900
},
{
"epoch": 3.3883826879271073,
"grad_norm": 0.16669808328151703,
"learning_rate": 1.0330296127562643e-05,
"loss": 0.0159,
"step": 5950
},
{
"epoch": 3.416856492027335,
"grad_norm": 0.01953568309545517,
"learning_rate": 1.0248942401561992e-05,
"loss": 0.0117,
"step": 6000
},
{
"epoch": 3.4453302961275627,
"grad_norm": 0.07708246260881424,
"learning_rate": 1.0167588675561342e-05,
"loss": 0.0117,
"step": 6050
},
{
"epoch": 3.4738041002277904,
"grad_norm": 2.423590898513794,
"learning_rate": 1.0086234949560691e-05,
"loss": 0.0111,
"step": 6100
},
{
"epoch": 3.502277904328018,
"grad_norm": 0.006337775848805904,
"learning_rate": 1.000488122356004e-05,
"loss": 0.0052,
"step": 6150
},
{
"epoch": 3.5307517084282463,
"grad_norm": 0.01373753696680069,
"learning_rate": 9.923527497559389e-06,
"loss": 0.009,
"step": 6200
},
{
"epoch": 3.559225512528474,
"grad_norm": 0.11260247975587845,
"learning_rate": 9.842173771558739e-06,
"loss": 0.0208,
"step": 6250
},
{
"epoch": 3.5876993166287017,
"grad_norm": 1.240822196006775,
"learning_rate": 9.760820045558087e-06,
"loss": 0.0157,
"step": 6300
},
{
"epoch": 3.6161731207289294,
"grad_norm": 0.05271293595433235,
"learning_rate": 9.679466319557436e-06,
"loss": 0.0175,
"step": 6350
},
{
"epoch": 3.644646924829157,
"grad_norm": 24.546663284301758,
"learning_rate": 9.598112593556786e-06,
"loss": 0.015,
"step": 6400
},
{
"epoch": 3.6731207289293852,
"grad_norm": 5.120173931121826,
"learning_rate": 9.516758867556136e-06,
"loss": 0.017,
"step": 6450
},
{
"epoch": 3.7015945330296125,
"grad_norm": 1.1922008991241455,
"learning_rate": 9.435405141555484e-06,
"loss": 0.0194,
"step": 6500
},
{
"epoch": 3.7300683371298406,
"grad_norm": 0.039439987391233444,
"learning_rate": 9.354051415554833e-06,
"loss": 0.0309,
"step": 6550
},
{
"epoch": 3.7585421412300684,
"grad_norm": 0.04055279493331909,
"learning_rate": 9.272697689554181e-06,
"loss": 0.0195,
"step": 6600
},
{
"epoch": 3.787015945330296,
"grad_norm": 0.5971085429191589,
"learning_rate": 9.191343963553533e-06,
"loss": 0.0156,
"step": 6650
},
{
"epoch": 3.8154897494305238,
"grad_norm": 0.2191866636276245,
"learning_rate": 9.10999023755288e-06,
"loss": 0.0174,
"step": 6700
},
{
"epoch": 3.8439635535307515,
"grad_norm": 0.033559828996658325,
"learning_rate": 9.02863651155223e-06,
"loss": 0.0149,
"step": 6750
},
{
"epoch": 3.8724373576309796,
"grad_norm": 3.7128634452819824,
"learning_rate": 8.947282785551578e-06,
"loss": 0.0221,
"step": 6800
},
{
"epoch": 3.9009111617312073,
"grad_norm": 0.08547580987215042,
"learning_rate": 8.865929059550928e-06,
"loss": 0.0153,
"step": 6850
},
{
"epoch": 3.929384965831435,
"grad_norm": 0.058124080300331116,
"learning_rate": 8.784575333550277e-06,
"loss": 0.0161,
"step": 6900
},
{
"epoch": 3.9578587699316627,
"grad_norm": 0.015579139813780785,
"learning_rate": 8.703221607549627e-06,
"loss": 0.011,
"step": 6950
},
{
"epoch": 3.9863325740318905,
"grad_norm": 0.12764935195446014,
"learning_rate": 8.621867881548975e-06,
"loss": 0.0166,
"step": 7000
},
{
"epoch": 4.0,
"eval_f1": 0.9525329748802823,
"eval_loss": 0.04983380436897278,
"eval_precision": 0.9510149303808086,
"eval_recall": 0.9540558734432851,
"eval_runtime": 3.7495,
"eval_samples_per_second": 866.79,
"eval_steps_per_second": 108.549,
"step": 7024
},
{
"epoch": 4.014806378132119,
"grad_norm": 0.04521024227142334,
"learning_rate": 8.540514155548325e-06,
"loss": 0.0102,
"step": 7050
},
{
"epoch": 4.043280182232346,
"grad_norm": 0.015535669401288033,
"learning_rate": 8.459160429547674e-06,
"loss": 0.0068,
"step": 7100
},
{
"epoch": 4.071753986332574,
"grad_norm": 12.566010475158691,
"learning_rate": 8.377806703547024e-06,
"loss": 0.0151,
"step": 7150
},
{
"epoch": 4.100227790432802,
"grad_norm": 0.027761396020650864,
"learning_rate": 8.296452977546372e-06,
"loss": 0.0053,
"step": 7200
},
{
"epoch": 4.128701594533029,
"grad_norm": 1.8846938610076904,
"learning_rate": 8.215099251545722e-06,
"loss": 0.0077,
"step": 7250
},
{
"epoch": 4.157175398633258,
"grad_norm": 0.005013479385524988,
"learning_rate": 8.13374552554507e-06,
"loss": 0.0073,
"step": 7300
},
{
"epoch": 4.185649202733485,
"grad_norm": 0.32696235179901123,
"learning_rate": 8.052391799544421e-06,
"loss": 0.0092,
"step": 7350
},
{
"epoch": 4.214123006833713,
"grad_norm": 0.014357910491526127,
"learning_rate": 7.971038073543769e-06,
"loss": 0.0164,
"step": 7400
},
{
"epoch": 4.242596810933941,
"grad_norm": 0.010845329612493515,
"learning_rate": 7.889684347543118e-06,
"loss": 0.0123,
"step": 7450
},
{
"epoch": 4.271070615034168,
"grad_norm": 0.6969354748725891,
"learning_rate": 7.808330621542466e-06,
"loss": 0.0103,
"step": 7500
},
{
"epoch": 4.2995444191343966,
"grad_norm": 0.3575742840766907,
"learning_rate": 7.726976895541816e-06,
"loss": 0.0161,
"step": 7550
},
{
"epoch": 4.328018223234624,
"grad_norm": 0.008076228201389313,
"learning_rate": 7.645623169541166e-06,
"loss": 0.0122,
"step": 7600
},
{
"epoch": 4.356492027334852,
"grad_norm": 0.029101597145199776,
"learning_rate": 7.564269443540515e-06,
"loss": 0.0071,
"step": 7650
},
{
"epoch": 4.38496583143508,
"grad_norm": 0.06841142475605011,
"learning_rate": 7.482915717539863e-06,
"loss": 0.0091,
"step": 7700
},
{
"epoch": 4.413439635535307,
"grad_norm": 0.04133535176515579,
"learning_rate": 7.401561991539213e-06,
"loss": 0.0073,
"step": 7750
},
{
"epoch": 4.4419134396355355,
"grad_norm": 0.03571132943034172,
"learning_rate": 7.320208265538562e-06,
"loss": 0.0131,
"step": 7800
},
{
"epoch": 4.470387243735763,
"grad_norm": 2.8286924362182617,
"learning_rate": 7.2388545395379114e-06,
"loss": 0.0043,
"step": 7850
},
{
"epoch": 4.498861047835991,
"grad_norm": 1.3785158395767212,
"learning_rate": 7.15750081353726e-06,
"loss": 0.0103,
"step": 7900
},
{
"epoch": 4.527334851936219,
"grad_norm": 0.05979786813259125,
"learning_rate": 7.07614708753661e-06,
"loss": 0.006,
"step": 7950
},
{
"epoch": 4.555808656036446,
"grad_norm": 1.3434393405914307,
"learning_rate": 6.994793361535959e-06,
"loss": 0.0148,
"step": 8000
},
{
"epoch": 4.5842824601366745,
"grad_norm": 0.019605603069067,
"learning_rate": 6.913439635535308e-06,
"loss": 0.0068,
"step": 8050
},
{
"epoch": 4.612756264236902,
"grad_norm": 0.0028891051188111305,
"learning_rate": 6.832085909534657e-06,
"loss": 0.0075,
"step": 8100
},
{
"epoch": 4.64123006833713,
"grad_norm": 0.014856363646686077,
"learning_rate": 6.750732183534007e-06,
"loss": 0.0073,
"step": 8150
},
{
"epoch": 4.669703872437358,
"grad_norm": 0.0037149768322706223,
"learning_rate": 6.669378457533356e-06,
"loss": 0.0066,
"step": 8200
},
{
"epoch": 4.698177676537585,
"grad_norm": 0.008534993045032024,
"learning_rate": 6.588024731532705e-06,
"loss": 0.0121,
"step": 8250
},
{
"epoch": 4.7266514806378135,
"grad_norm": 0.013920712284743786,
"learning_rate": 6.506671005532054e-06,
"loss": 0.013,
"step": 8300
},
{
"epoch": 4.755125284738041,
"grad_norm": 0.10121840238571167,
"learning_rate": 6.425317279531404e-06,
"loss": 0.0087,
"step": 8350
},
{
"epoch": 4.783599088838269,
"grad_norm": 0.07937771081924438,
"learning_rate": 6.343963553530752e-06,
"loss": 0.0086,
"step": 8400
},
{
"epoch": 4.812072892938497,
"grad_norm": 0.07674901187419891,
"learning_rate": 6.262609827530101e-06,
"loss": 0.0052,
"step": 8450
},
{
"epoch": 4.840546697038724,
"grad_norm": 0.06683178246021271,
"learning_rate": 6.18125610152945e-06,
"loss": 0.015,
"step": 8500
},
{
"epoch": 4.8690205011389525,
"grad_norm": 0.08861212432384491,
"learning_rate": 6.0999023755288e-06,
"loss": 0.0109,
"step": 8550
},
{
"epoch": 4.89749430523918,
"grad_norm": 0.006198623217642307,
"learning_rate": 6.018548649528149e-06,
"loss": 0.0181,
"step": 8600
},
{
"epoch": 4.925968109339408,
"grad_norm": 0.009029284119606018,
"learning_rate": 5.937194923527498e-06,
"loss": 0.0043,
"step": 8650
},
{
"epoch": 4.954441913439636,
"grad_norm": 1.1225602626800537,
"learning_rate": 5.855841197526847e-06,
"loss": 0.0121,
"step": 8700
},
{
"epoch": 4.982915717539863,
"grad_norm": 0.01218325924128294,
"learning_rate": 5.774487471526197e-06,
"loss": 0.0115,
"step": 8750
},
{
"epoch": 5.0,
"eval_f1": 0.9533444816053511,
"eval_loss": 0.04722925275564194,
"eval_precision": 0.9473246925889,
"eval_recall": 0.9594412655671491,
"eval_runtime": 3.7358,
"eval_samples_per_second": 869.968,
"eval_steps_per_second": 108.947,
"step": 8780
},
{
"epoch": 5.011389521640091,
"grad_norm": 0.8161097764968872,
"learning_rate": 5.6931337455255455e-06,
"loss": 0.0127,
"step": 8800
},
{
"epoch": 5.039863325740319,
"grad_norm": 0.048089127987623215,
"learning_rate": 5.611780019524895e-06,
"loss": 0.0116,
"step": 8850
},
{
"epoch": 5.068337129840547,
"grad_norm": 1.7576072216033936,
"learning_rate": 5.530426293524244e-06,
"loss": 0.0093,
"step": 8900
},
{
"epoch": 5.096810933940774,
"grad_norm": 0.07270015776157379,
"learning_rate": 5.449072567523594e-06,
"loss": 0.0043,
"step": 8950
},
{
"epoch": 5.125284738041002,
"grad_norm": 0.2580782175064087,
"learning_rate": 5.367718841522942e-06,
"loss": 0.0058,
"step": 9000
},
{
"epoch": 5.15375854214123,
"grad_norm": 0.12649740278720856,
"learning_rate": 5.286365115522292e-06,
"loss": 0.0033,
"step": 9050
},
{
"epoch": 5.182232346241458,
"grad_norm": 0.06850716471672058,
"learning_rate": 5.20501138952164e-06,
"loss": 0.003,
"step": 9100
},
{
"epoch": 5.210706150341686,
"grad_norm": 0.01197959017008543,
"learning_rate": 5.12365766352099e-06,
"loss": 0.0029,
"step": 9150
},
{
"epoch": 5.239179954441913,
"grad_norm": 0.10818086564540863,
"learning_rate": 5.0423039375203385e-06,
"loss": 0.0025,
"step": 9200
},
{
"epoch": 5.267653758542141,
"grad_norm": 0.035460665822029114,
"learning_rate": 4.960950211519688e-06,
"loss": 0.0047,
"step": 9250
},
{
"epoch": 5.296127562642369,
"grad_norm": 4.532562255859375,
"learning_rate": 4.879596485519037e-06,
"loss": 0.0086,
"step": 9300
},
{
"epoch": 5.324601366742597,
"grad_norm": 0.013236219063401222,
"learning_rate": 4.7982427595183866e-06,
"loss": 0.0027,
"step": 9350
},
{
"epoch": 5.353075170842825,
"grad_norm": 0.018059909343719482,
"learning_rate": 4.716889033517735e-06,
"loss": 0.0065,
"step": 9400
},
{
"epoch": 5.381548974943052,
"grad_norm": 0.019096272066235542,
"learning_rate": 4.635535307517084e-06,
"loss": 0.0053,
"step": 9450
},
{
"epoch": 5.41002277904328,
"grad_norm": 0.10389436781406403,
"learning_rate": 4.554181581516434e-06,
"loss": 0.0029,
"step": 9500
},
{
"epoch": 5.438496583143508,
"grad_norm": 0.0029252381063997746,
"learning_rate": 4.472827855515783e-06,
"loss": 0.0045,
"step": 9550
},
{
"epoch": 5.466970387243736,
"grad_norm": 0.004542892333120108,
"learning_rate": 4.391474129515132e-06,
"loss": 0.0102,
"step": 9600
},
{
"epoch": 5.495444191343964,
"grad_norm": 0.130916565656662,
"learning_rate": 4.310120403514481e-06,
"loss": 0.0064,
"step": 9650
},
{
"epoch": 5.523917995444191,
"grad_norm": 0.008013393729925156,
"learning_rate": 4.22876667751383e-06,
"loss": 0.005,
"step": 9700
},
{
"epoch": 5.552391799544419,
"grad_norm": 0.012782514095306396,
"learning_rate": 4.1474129515131795e-06,
"loss": 0.0122,
"step": 9750
},
{
"epoch": 5.5808656036446465,
"grad_norm": 0.9617053866386414,
"learning_rate": 4.066059225512528e-06,
"loss": 0.0063,
"step": 9800
},
{
"epoch": 5.609339407744875,
"grad_norm": 0.014953136444091797,
"learning_rate": 3.984705499511878e-06,
"loss": 0.0046,
"step": 9850
},
{
"epoch": 5.637813211845103,
"grad_norm": 1.0275633335113525,
"learning_rate": 3.903351773511227e-06,
"loss": 0.0075,
"step": 9900
},
{
"epoch": 5.66628701594533,
"grad_norm": 0.007999264635145664,
"learning_rate": 3.8219980475105764e-06,
"loss": 0.0031,
"step": 9950
},
{
"epoch": 5.694760820045558,
"grad_norm": 0.008241960778832436,
"learning_rate": 3.7406443215099252e-06,
"loss": 0.0065,
"step": 10000
},
{
"epoch": 5.723234624145785,
"grad_norm": 2.578386068344116,
"learning_rate": 3.6592905955092745e-06,
"loss": 0.0057,
"step": 10050
},
{
"epoch": 5.751708428246014,
"grad_norm": 2.8855443000793457,
"learning_rate": 3.5779368695086237e-06,
"loss": 0.0038,
"step": 10100
},
{
"epoch": 5.780182232346242,
"grad_norm": 0.016262667253613472,
"learning_rate": 3.496583143507973e-06,
"loss": 0.0128,
"step": 10150
},
{
"epoch": 5.808656036446469,
"grad_norm": 0.045168060809373856,
"learning_rate": 3.415229417507322e-06,
"loss": 0.0036,
"step": 10200
},
{
"epoch": 5.837129840546697,
"grad_norm": 0.1192622259259224,
"learning_rate": 3.3355027660266842e-06,
"loss": 0.0111,
"step": 10250
},
{
"epoch": 5.865603644646924,
"grad_norm": 0.08544190227985382,
"learning_rate": 3.2541490400260335e-06,
"loss": 0.0051,
"step": 10300
},
{
"epoch": 5.894077448747153,
"grad_norm": 3.493161678314209,
"learning_rate": 3.1727953140253827e-06,
"loss": 0.0054,
"step": 10350
},
{
"epoch": 5.922551252847381,
"grad_norm": 0.022789066657423973,
"learning_rate": 3.0914415880247315e-06,
"loss": 0.0033,
"step": 10400
},
{
"epoch": 5.951025056947608,
"grad_norm": 12.131625175476074,
"learning_rate": 3.0100878620240807e-06,
"loss": 0.0023,
"step": 10450
},
{
"epoch": 5.979498861047836,
"grad_norm": 0.03329641371965408,
"learning_rate": 2.92873413602343e-06,
"loss": 0.0058,
"step": 10500
},
{
"epoch": 6.0,
"eval_f1": 0.9557566616390145,
"eval_loss": 0.05410688370466232,
"eval_precision": 0.9517690253671562,
"eval_recall": 0.9597778525748906,
"eval_runtime": 3.6572,
"eval_samples_per_second": 888.662,
"eval_steps_per_second": 111.288,
"step": 10536
},
{
"epoch": 6.007972665148063,
"grad_norm": 0.008557640947401524,
"learning_rate": 2.847380410022779e-06,
"loss": 0.0152,
"step": 10550
},
{
"epoch": 6.0364464692482915,
"grad_norm": 0.08105529844760895,
"learning_rate": 2.7660266840221284e-06,
"loss": 0.0025,
"step": 10600
},
{
"epoch": 6.06492027334852,
"grad_norm": 1.100066065788269,
"learning_rate": 2.6846729580214776e-06,
"loss": 0.0015,
"step": 10650
},
{
"epoch": 6.093394077448747,
"grad_norm": 1.96909761428833,
"learning_rate": 2.603319232020827e-06,
"loss": 0.0021,
"step": 10700
},
{
"epoch": 6.121867881548975,
"grad_norm": 0.00583766121417284,
"learning_rate": 2.5219655060201757e-06,
"loss": 0.0127,
"step": 10750
},
{
"epoch": 6.150341685649202,
"grad_norm": 0.026862677186727524,
"learning_rate": 2.440611780019525e-06,
"loss": 0.004,
"step": 10800
},
{
"epoch": 6.1788154897494305,
"grad_norm": 0.010042566806077957,
"learning_rate": 2.359258054018874e-06,
"loss": 0.0033,
"step": 10850
},
{
"epoch": 6.207289293849659,
"grad_norm": 0.8956929445266724,
"learning_rate": 2.2779043280182233e-06,
"loss": 0.0031,
"step": 10900
},
{
"epoch": 6.235763097949886,
"grad_norm": 0.009118441492319107,
"learning_rate": 2.1965506020175726e-06,
"loss": 0.0039,
"step": 10950
},
{
"epoch": 6.264236902050114,
"grad_norm": 0.22793921828269958,
"learning_rate": 2.115196876016922e-06,
"loss": 0.0013,
"step": 11000
},
{
"epoch": 6.292710706150341,
"grad_norm": 0.015608682297170162,
"learning_rate": 2.033843150016271e-06,
"loss": 0.0021,
"step": 11050
},
{
"epoch": 6.3211845102505695,
"grad_norm": 0.004031027667224407,
"learning_rate": 1.95248942401562e-06,
"loss": 0.0011,
"step": 11100
},
{
"epoch": 6.349658314350798,
"grad_norm": 0.008949857205152512,
"learning_rate": 1.8711356980149693e-06,
"loss": 0.0049,
"step": 11150
},
{
"epoch": 6.378132118451025,
"grad_norm": 0.018670039251446724,
"learning_rate": 1.7897819720143183e-06,
"loss": 0.003,
"step": 11200
},
{
"epoch": 6.406605922551253,
"grad_norm": 0.032393742352724075,
"learning_rate": 1.7084282460136675e-06,
"loss": 0.0051,
"step": 11250
},
{
"epoch": 6.43507972665148,
"grad_norm": 0.11851054430007935,
"learning_rate": 1.6270745200130167e-06,
"loss": 0.0046,
"step": 11300
},
{
"epoch": 6.4635535307517085,
"grad_norm": 0.09247086197137833,
"learning_rate": 1.5457207940123657e-06,
"loss": 0.0049,
"step": 11350
},
{
"epoch": 6.492027334851937,
"grad_norm": 0.002555207349359989,
"learning_rate": 1.464367068011715e-06,
"loss": 0.0026,
"step": 11400
},
{
"epoch": 6.520501138952164,
"grad_norm": 3.7294840812683105,
"learning_rate": 1.3830133420110642e-06,
"loss": 0.0014,
"step": 11450
},
{
"epoch": 6.548974943052392,
"grad_norm": 0.00703430688008666,
"learning_rate": 1.3016596160104134e-06,
"loss": 0.0035,
"step": 11500
},
{
"epoch": 6.577448747152619,
"grad_norm": 12.061240196228027,
"learning_rate": 1.2203058900097624e-06,
"loss": 0.0078,
"step": 11550
},
{
"epoch": 6.605922551252847,
"grad_norm": 0.011964640580117702,
"learning_rate": 1.1389521640091117e-06,
"loss": 0.0024,
"step": 11600
},
{
"epoch": 6.634396355353076,
"grad_norm": 0.014603933319449425,
"learning_rate": 1.057598438008461e-06,
"loss": 0.0072,
"step": 11650
},
{
"epoch": 6.662870159453303,
"grad_norm": 0.026642296463251114,
"learning_rate": 9.7624471200781e-07,
"loss": 0.0007,
"step": 11700
},
{
"epoch": 6.691343963553531,
"grad_norm": 3.3646230697631836,
"learning_rate": 8.948909860071591e-07,
"loss": 0.0046,
"step": 11750
},
{
"epoch": 6.719817767653758,
"grad_norm": 0.027053840458393097,
"learning_rate": 8.135372600065084e-07,
"loss": 0.0031,
"step": 11800
},
{
"epoch": 6.748291571753986,
"grad_norm": 2.6146697998046875,
"learning_rate": 7.321835340058575e-07,
"loss": 0.003,
"step": 11850
},
{
"epoch": 6.776765375854215,
"grad_norm": 8.964512825012207,
"learning_rate": 6.508298080052067e-07,
"loss": 0.0069,
"step": 11900
},
{
"epoch": 6.805239179954442,
"grad_norm": 0.3086203932762146,
"learning_rate": 5.694760820045558e-07,
"loss": 0.0035,
"step": 11950
},
{
"epoch": 6.83371298405467,
"grad_norm": 0.018831729888916016,
"learning_rate": 4.88122356003905e-07,
"loss": 0.0033,
"step": 12000
},
{
"epoch": 6.862186788154897,
"grad_norm": 0.0018483272287994623,
"learning_rate": 4.067686300032542e-07,
"loss": 0.0022,
"step": 12050
},
{
"epoch": 6.890660592255125,
"grad_norm": 0.003242627950385213,
"learning_rate": 3.2541490400260336e-07,
"loss": 0.0043,
"step": 12100
},
{
"epoch": 6.9191343963553535,
"grad_norm": 0.002707740291953087,
"learning_rate": 2.440611780019525e-07,
"loss": 0.0017,
"step": 12150
},
{
"epoch": 6.947608200455581,
"grad_norm": 0.21824024617671967,
"learning_rate": 1.6270745200130168e-07,
"loss": 0.0069,
"step": 12200
},
{
"epoch": 6.976082004555809,
"grad_norm": 0.008130647242069244,
"learning_rate": 8.135372600065084e-08,
"loss": 0.0047,
"step": 12250
},
{
"epoch": 7.0,
"eval_f1": 0.9563834240267894,
"eval_loss": 0.052909377962350845,
"eval_precision": 0.9515242378810594,
"eval_recall": 0.9612924941097274,
"eval_runtime": 3.7426,
"eval_samples_per_second": 868.392,
"eval_steps_per_second": 108.749,
"step": 12292
}
],
"logging_steps": 50,
"max_steps": 12292,
"num_input_tokens_seen": 0,
"num_train_epochs": 7,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2418024281348142.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
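
A minimal sketch of how this trainer state might be inspected with Python's standard json module (the local file name "trainer_state.json" is assumed for illustration; the keys used below — best_model_checkpoint, best_metric, log_history, and the eval_* fields — all appear in the JSON above):

import json

# Minimal sketch: read the trainer state and print the per-epoch
# evaluation metrics recorded in "log_history".
# The file path is an assumption for illustration.
with open("trainer_state.json", encoding="utf-8") as f:
    state = json.load(f)

print("best checkpoint:", state["best_model_checkpoint"])
print("best eval_f1:   ", state["best_metric"])

for entry in state["log_history"]:
    # Eval entries carry eval_* keys; per-step training entries only
    # have loss, learning_rate, and grad_norm.
    if "eval_f1" in entry:
        print(f"epoch {entry['epoch']:.0f}: "
              f"f1={entry['eval_f1']:.4f}  "
              f"precision={entry['eval_precision']:.4f}  "
              f"recall={entry['eval_recall']:.4f}  "
              f"eval_loss={entry['eval_loss']:.4f}")

For this file, the loop would print seven eval rows (one per epoch), and best_metric (0.9563834240267894) matches the epoch-7 eval_f1, which is why best_model_checkpoint points at checkpoint-12292.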