{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.10739294134205989,
"eval_steps": 500,
"global_step": 3052,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0010908195221506739,
"grad_norm": 27.24474334716797,
"learning_rate": 1.0157273918741808e-06,
"loss": 9.0396,
"step": 31
},
{
"epoch": 0.0021816390443013477,
"grad_norm": 16.773351669311523,
"learning_rate": 2.0314547837483616e-06,
"loss": 7.9471,
"step": 62
},
{
"epoch": 0.0032724585664520214,
"grad_norm": 15.084428787231445,
"learning_rate": 3.0471821756225426e-06,
"loss": 6.5868,
"step": 93
},
{
"epoch": 0.0043632780886026954,
"grad_norm": 6.7851786613464355,
"learning_rate": 4.062909567496723e-06,
"loss": 5.3744,
"step": 124
},
{
"epoch": 0.0054540976107533695,
"grad_norm": 7.081949234008789,
"learning_rate": 5.078636959370905e-06,
"loss": 4.6378,
"step": 155
},
{
"epoch": 0.006544917132904043,
"grad_norm": 8.945405960083008,
"learning_rate": 6.094364351245085e-06,
"loss": 4.2292,
"step": 186
},
{
"epoch": 0.007635736655054717,
"grad_norm": 15.078816413879395,
"learning_rate": 7.110091743119267e-06,
"loss": 3.9767,
"step": 217
},
{
"epoch": 0.008726556177205391,
"grad_norm": 8.799291610717773,
"learning_rate": 8.125819134993446e-06,
"loss": 3.765,
"step": 248
},
{
"epoch": 0.009817375699356064,
"grad_norm": 9.741459846496582,
"learning_rate": 9.141546526867629e-06,
"loss": 3.6496,
"step": 279
},
{
"epoch": 0.010908195221506739,
"grad_norm": 9.403132438659668,
"learning_rate": 1.015727391874181e-05,
"loss": 3.4936,
"step": 310
},
{
"epoch": 0.011999014743657412,
"grad_norm": 8.479109764099121,
"learning_rate": 1.117300131061599e-05,
"loss": 3.3879,
"step": 341
},
{
"epoch": 0.013089834265808085,
"grad_norm": 4.988598823547363,
"learning_rate": 1.218872870249017e-05,
"loss": 3.3147,
"step": 372
},
{
"epoch": 0.01418065378795876,
"grad_norm": 5.518200874328613,
"learning_rate": 1.3204456094364351e-05,
"loss": 3.2022,
"step": 403
},
{
"epoch": 0.015271473310109434,
"grad_norm": 5.685606002807617,
"learning_rate": 1.4220183486238533e-05,
"loss": 3.1393,
"step": 434
},
{
"epoch": 0.01636229283226011,
"grad_norm": 5.6373491287231445,
"learning_rate": 1.5235910878112714e-05,
"loss": 3.05,
"step": 465
},
{
"epoch": 0.017453112354410782,
"grad_norm": 5.174935340881348,
"learning_rate": 1.6251638269986893e-05,
"loss": 2.992,
"step": 496
},
{
"epoch": 0.018543931876561455,
"grad_norm": 5.266006946563721,
"learning_rate": 1.7267365661861077e-05,
"loss": 2.9558,
"step": 527
},
{
"epoch": 0.019634751398712128,
"grad_norm": 4.111412048339844,
"learning_rate": 1.8283093053735257e-05,
"loss": 2.9089,
"step": 558
},
{
"epoch": 0.0207255709208628,
"grad_norm": 4.149659156799316,
"learning_rate": 1.9298820445609438e-05,
"loss": 2.879,
"step": 589
},
{
"epoch": 0.021816390443013478,
"grad_norm": 4.711772441864014,
"learning_rate": 2.031454783748362e-05,
"loss": 2.8057,
"step": 620
},
{
"epoch": 0.02290720996516415,
"grad_norm": 4.372698783874512,
"learning_rate": 2.13302752293578e-05,
"loss": 2.7954,
"step": 651
},
{
"epoch": 0.023998029487314824,
"grad_norm": 3.9052813053131104,
"learning_rate": 2.234600262123198e-05,
"loss": 2.7286,
"step": 682
},
{
"epoch": 0.025088849009465498,
"grad_norm": 3.544926404953003,
"learning_rate": 2.336173001310616e-05,
"loss": 2.6875,
"step": 713
},
{
"epoch": 0.02617966853161617,
"grad_norm": 4.380484104156494,
"learning_rate": 2.437745740498034e-05,
"loss": 2.6703,
"step": 744
},
{
"epoch": 0.027270488053766848,
"grad_norm": 4.843706130981445,
"learning_rate": 2.5393184796854525e-05,
"loss": 2.6387,
"step": 775
},
{
"epoch": 0.02836130757591752,
"grad_norm": 3.555110454559326,
"learning_rate": 2.6408912188728702e-05,
"loss": 2.5996,
"step": 806
},
{
"epoch": 0.029452127098068194,
"grad_norm": 3.3113982677459717,
"learning_rate": 2.7424639580602886e-05,
"loss": 2.5823,
"step": 837
},
{
"epoch": 0.030542946620218867,
"grad_norm": 3.4552953243255615,
"learning_rate": 2.8440366972477066e-05,
"loss": 2.5643,
"step": 868
},
{
"epoch": 0.03163376614236954,
"grad_norm": 2.9786770343780518,
"learning_rate": 2.9456094364351244e-05,
"loss": 2.5426,
"step": 899
},
{
"epoch": 0.03272458566452022,
"grad_norm": 3.227999210357666,
"learning_rate": 3.0471821756225428e-05,
"loss": 2.4834,
"step": 930
},
{
"epoch": 0.03381540518667089,
"grad_norm": 3.0225250720977783,
"learning_rate": 3.148754914809961e-05,
"loss": 2.4755,
"step": 961
},
{
"epoch": 0.034906224708821564,
"grad_norm": 2.6292648315429688,
"learning_rate": 3.2503276539973785e-05,
"loss": 2.4393,
"step": 992
},
{
"epoch": 0.03599704423097224,
"grad_norm": 2.767667055130005,
"learning_rate": 3.351900393184797e-05,
"loss": 2.4124,
"step": 1023
},
{
"epoch": 0.03708786375312291,
"grad_norm": 2.6588900089263916,
"learning_rate": 3.453473132372215e-05,
"loss": 2.3551,
"step": 1054
},
{
"epoch": 0.03817868327527359,
"grad_norm": 2.7497403621673584,
"learning_rate": 3.555045871559633e-05,
"loss": 2.3576,
"step": 1085
},
{
"epoch": 0.039269502797424256,
"grad_norm": 2.5148332118988037,
"learning_rate": 3.6566186107470514e-05,
"loss": 2.3078,
"step": 1116
},
{
"epoch": 0.04036032231957493,
"grad_norm": 3.0025594234466553,
"learning_rate": 3.7581913499344695e-05,
"loss": 2.303,
"step": 1147
},
{
"epoch": 0.0414511418417256,
"grad_norm": 2.6871118545532227,
"learning_rate": 3.8597640891218876e-05,
"loss": 2.3062,
"step": 1178
},
{
"epoch": 0.04254196136387628,
"grad_norm": 10.630929946899414,
"learning_rate": 3.9613368283093056e-05,
"loss": 2.307,
"step": 1209
},
{
"epoch": 0.043632780886026956,
"grad_norm": 2.571220874786377,
"learning_rate": 4.062909567496724e-05,
"loss": 2.2837,
"step": 1240
},
{
"epoch": 0.044723600408177626,
"grad_norm": 3.5510289669036865,
"learning_rate": 4.164482306684142e-05,
"loss": 2.2797,
"step": 1271
},
{
"epoch": 0.0458144199303283,
"grad_norm": 2.794853925704956,
"learning_rate": 4.26605504587156e-05,
"loss": 2.2797,
"step": 1302
},
{
"epoch": 0.04690523945247897,
"grad_norm": 6.132640838623047,
"learning_rate": 4.367627785058978e-05,
"loss": 2.2504,
"step": 1333
},
{
"epoch": 0.04799605897462965,
"grad_norm": 6.17495584487915,
"learning_rate": 4.469200524246396e-05,
"loss": 2.2414,
"step": 1364
},
{
"epoch": 0.049086878496780326,
"grad_norm": 2.3518564701080322,
"learning_rate": 4.570773263433814e-05,
"loss": 2.2198,
"step": 1395
},
{
"epoch": 0.050177698018930995,
"grad_norm": 2.2073466777801514,
"learning_rate": 4.672346002621232e-05,
"loss": 2.2057,
"step": 1426
},
{
"epoch": 0.05126851754108167,
"grad_norm": 2.018749237060547,
"learning_rate": 4.77391874180865e-05,
"loss": 2.1965,
"step": 1457
},
{
"epoch": 0.05235933706323234,
"grad_norm": 2.7125244140625,
"learning_rate": 4.875491480996068e-05,
"loss": 2.19,
"step": 1488
},
{
"epoch": 0.05345015658538302,
"grad_norm": 1.9725641012191772,
"learning_rate": 4.977064220183487e-05,
"loss": 2.1669,
"step": 1519
},
{
"epoch": 0.054540976107533695,
"grad_norm": 2.0198206901550293,
"learning_rate": 4.9999915451558777e-05,
"loss": 2.1486,
"step": 1550
},
{
"epoch": 0.055631795629684365,
"grad_norm": 2.068424940109253,
"learning_rate": 4.999955597496219e-05,
"loss": 2.1476,
"step": 1581
},
{
"epoch": 0.05672261515183504,
"grad_norm": 1.8509161472320557,
"learning_rate": 4.9998914381774255e-05,
"loss": 2.1129,
"step": 1612
},
{
"epoch": 0.05781343467398571,
"grad_norm": 1.805915355682373,
"learning_rate": 4.999799067923527e-05,
"loss": 2.1014,
"step": 1643
},
{
"epoch": 0.05890425419613639,
"grad_norm": 1.9447969198226929,
"learning_rate": 4.999678487776908e-05,
"loss": 2.1067,
"step": 1674
},
{
"epoch": 0.059995073718287065,
"grad_norm": 2.131700277328491,
"learning_rate": 4.9995296990983006e-05,
"loss": 2.0817,
"step": 1705
},
{
"epoch": 0.061085893240437734,
"grad_norm": 1.9113484621047974,
"learning_rate": 4.999352703566763e-05,
"loss": 2.067,
"step": 1736
},
{
"epoch": 0.06217671276258841,
"grad_norm": 1.8802495002746582,
"learning_rate": 4.999147503179668e-05,
"loss": 2.0617,
"step": 1767
},
{
"epoch": 0.06326753228473908,
"grad_norm": 1.6704238653182983,
"learning_rate": 4.998914100252672e-05,
"loss": 2.0671,
"step": 1798
},
{
"epoch": 0.06435835180688976,
"grad_norm": 4.303380966186523,
"learning_rate": 4.998652497419696e-05,
"loss": 2.0436,
"step": 1829
},
{
"epoch": 0.06544917132904043,
"grad_norm": 1.8054529428482056,
"learning_rate": 4.9983626976328927e-05,
"loss": 2.0499,
"step": 1860
},
{
"epoch": 0.06653999085119111,
"grad_norm": 1.663872241973877,
"learning_rate": 4.998044704162613e-05,
"loss": 2.0227,
"step": 1891
},
{
"epoch": 0.06763081037334177,
"grad_norm": 10.130581855773926,
"learning_rate": 4.9976985205973705e-05,
"loss": 2.0022,
"step": 1922
},
{
"epoch": 0.06872162989549245,
"grad_norm": 3.7727627754211426,
"learning_rate": 4.997324150843799e-05,
"loss": 2.0172,
"step": 1953
},
{
"epoch": 0.06981244941764313,
"grad_norm": 1.8669508695602417,
"learning_rate": 4.99692159912661e-05,
"loss": 2.0214,
"step": 1984
},
{
"epoch": 0.0709032689397938,
"grad_norm": 17.45224952697754,
"learning_rate": 4.996490869988546e-05,
"loss": 1.9959,
"step": 2015
},
{
"epoch": 0.07199408846194448,
"grad_norm": 1.7007793188095093,
"learning_rate": 4.996031968290326e-05,
"loss": 2.0139,
"step": 2046
},
{
"epoch": 0.07308490798409514,
"grad_norm": 1.6912076473236084,
"learning_rate": 4.995544899210594e-05,
"loss": 1.9963,
"step": 2077
},
{
"epoch": 0.07417572750624582,
"grad_norm": 1.6827666759490967,
"learning_rate": 4.9950296682458583e-05,
"loss": 1.973,
"step": 2108
},
{
"epoch": 0.0752665470283965,
"grad_norm": 1.767269253730774,
"learning_rate": 4.994486281210429e-05,
"loss": 1.9949,
"step": 2139
},
{
"epoch": 0.07635736655054717,
"grad_norm": 7.507472038269043,
"learning_rate": 4.9939147442363566e-05,
"loss": 1.9837,
"step": 2170
},
{
"epoch": 0.07744818607269784,
"grad_norm": 1.6840234994888306,
"learning_rate": 4.9933150637733574e-05,
"loss": 1.9789,
"step": 2201
},
{
"epoch": 0.07853900559484851,
"grad_norm": 1.6442159414291382,
"learning_rate": 4.992687246588743e-05,
"loss": 1.9436,
"step": 2232
},
{
"epoch": 0.07962982511699919,
"grad_norm": 1.546859622001648,
"learning_rate": 4.992031299767347e-05,
"loss": 1.9341,
"step": 2263
},
{
"epoch": 0.08072064463914987,
"grad_norm": 1.4932371377944946,
"learning_rate": 4.9913472307114386e-05,
"loss": 1.9583,
"step": 2294
},
{
"epoch": 0.08181146416130054,
"grad_norm": 1.4531840085983276,
"learning_rate": 4.9906350471406446e-05,
"loss": 1.9414,
"step": 2325
},
{
"epoch": 0.0829022836834512,
"grad_norm": 1.5285487174987793,
"learning_rate": 4.989894757091861e-05,
"loss": 1.9502,
"step": 2356
},
{
"epoch": 0.08399310320560188,
"grad_norm": 1.4159945249557495,
"learning_rate": 4.989126368919158e-05,
"loss": 1.9299,
"step": 2387
},
{
"epoch": 0.08508392272775256,
"grad_norm": 1.4361426830291748,
"learning_rate": 4.988329891293693e-05,
"loss": 1.9266,
"step": 2418
},
{
"epoch": 0.08617474224990324,
"grad_norm": 1.4310742616653442,
"learning_rate": 4.987505333203608e-05,
"loss": 1.9116,
"step": 2449
},
{
"epoch": 0.08726556177205391,
"grad_norm": 1.3780311346054077,
"learning_rate": 4.9866527039539276e-05,
"loss": 1.9189,
"step": 2480
},
{
"epoch": 0.08835638129420458,
"grad_norm": 1.3917787075042725,
"learning_rate": 4.9857720131664594e-05,
"loss": 1.9107,
"step": 2511
},
{
"epoch": 0.08944720081635525,
"grad_norm": 1.3963998556137085,
"learning_rate": 4.9848632707796773e-05,
"loss": 1.899,
"step": 2542
},
{
"epoch": 0.09053802033850593,
"grad_norm": 1.3573299646377563,
"learning_rate": 4.9839264870486155e-05,
"loss": 1.9084,
"step": 2573
},
{
"epoch": 0.0916288398606566,
"grad_norm": 1.362552523612976,
"learning_rate": 4.9829616725447526e-05,
"loss": 1.8954,
"step": 2604
},
{
"epoch": 0.09271965938280728,
"grad_norm": 1.3538340330123901,
"learning_rate": 4.981968838155888e-05,
"loss": 1.9035,
"step": 2635
},
{
"epoch": 0.09381047890495794,
"grad_norm": 1.407291054725647,
"learning_rate": 4.980947995086024e-05,
"loss": 1.8778,
"step": 2666
},
{
"epoch": 0.09490129842710862,
"grad_norm": 1.3554202318191528,
"learning_rate": 4.979899154855234e-05,
"loss": 1.8783,
"step": 2697
},
{
"epoch": 0.0959921179492593,
"grad_norm": 1.3513182401657104,
"learning_rate": 4.9788223292995386e-05,
"loss": 1.8857,
"step": 2728
},
{
"epoch": 0.09708293747140997,
"grad_norm": 1.345706582069397,
"learning_rate": 4.977717530570768e-05,
"loss": 1.8882,
"step": 2759
},
{
"epoch": 0.09817375699356065,
"grad_norm": 1.3450254201889038,
"learning_rate": 4.976584771136425e-05,
"loss": 1.8746,
"step": 2790
},
{
"epoch": 0.09926457651571131,
"grad_norm": 1.7820961475372314,
"learning_rate": 4.975424063779547e-05,
"loss": 1.8557,
"step": 2821
},
{
"epoch": 0.10035539603786199,
"grad_norm": 1.3493785858154297,
"learning_rate": 4.974235421598557e-05,
"loss": 1.8683,
"step": 2852
},
{
"epoch": 0.10144621556001267,
"grad_norm": 1.3066306114196777,
"learning_rate": 4.973018858007122e-05,
"loss": 1.8777,
"step": 2883
},
{
"epoch": 0.10253703508216334,
"grad_norm": 1.2991000413894653,
"learning_rate": 4.9717743867339963e-05,
"loss": 1.8605,
"step": 2914
},
{
"epoch": 0.10362785460431402,
"grad_norm": 1.3748496770858765,
"learning_rate": 4.9705020218228695e-05,
"loss": 1.8683,
"step": 2945
},
{
"epoch": 0.10471867412646468,
"grad_norm": 1.3229252099990845,
"learning_rate": 4.969201777632205e-05,
"loss": 1.858,
"step": 2976
},
{
"epoch": 0.10580949364861536,
"grad_norm": 1.2874318361282349,
"learning_rate": 4.9678736688350846e-05,
"loss": 1.8435,
"step": 3007
},
{
"epoch": 0.10690031317076604,
"grad_norm": 1.3341971635818481,
"learning_rate": 4.966517710419033e-05,
"loss": 1.8663,
"step": 3038
}
],
"logging_steps": 31,
"max_steps": 30517,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 3052,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.131685419530584e+18,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}