{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.03932698421021584,
"eval_steps": 500,
"global_step": 1500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00018352592631434058,
"grad_norm": 6.3117218017578125,
"learning_rate": 7.000000000000001e-06,
"loss": 1.3791,
"step": 7
},
{
"epoch": 0.00036705185262868117,
"grad_norm": 2.600785255432129,
"learning_rate": 1.4000000000000001e-05,
"loss": 0.2643,
"step": 14
},
{
"epoch": 0.0005505777789430217,
"grad_norm": 0.2835376560688019,
"learning_rate": 2.1e-05,
"loss": 0.0565,
"step": 21
},
{
"epoch": 0.0007341037052573623,
"grad_norm": 0.10635749995708466,
"learning_rate": 2.8000000000000003e-05,
"loss": 0.0706,
"step": 28
},
{
"epoch": 0.0009176296315717029,
"grad_norm": 1.4374091625213623,
"learning_rate": 3.5e-05,
"loss": 0.0657,
"step": 35
},
{
"epoch": 0.0011011555578860435,
"grad_norm": 2.8197357654571533,
"learning_rate": 4.2e-05,
"loss": 0.0532,
"step": 42
},
{
"epoch": 0.0012846814842003842,
"grad_norm": 3.916447877883911,
"learning_rate": 4.9e-05,
"loss": 0.0308,
"step": 49
},
{
"epoch": 0.0014682074105147247,
"grad_norm": 6.559009552001953,
"learning_rate": 5.6000000000000006e-05,
"loss": 0.0369,
"step": 56
},
{
"epoch": 0.0016517333368290654,
"grad_norm": 1.6039685010910034,
"learning_rate": 6.3e-05,
"loss": 0.0236,
"step": 63
},
{
"epoch": 0.0018352592631434058,
"grad_norm": 10.057470321655273,
"learning_rate": 7e-05,
"loss": 0.064,
"step": 70
},
{
"epoch": 0.0020187851894577463,
"grad_norm": 26.96637535095215,
"learning_rate": 7.7e-05,
"loss": 0.0906,
"step": 77
},
{
"epoch": 0.002202311115772087,
"grad_norm": 1.050362467765808,
"learning_rate": 8.4e-05,
"loss": 0.0707,
"step": 84
},
{
"epoch": 0.0023858370420864277,
"grad_norm": 4.492347240447998,
"learning_rate": 9.1e-05,
"loss": 0.0583,
"step": 91
},
{
"epoch": 0.0025693629684007684,
"grad_norm": 6.358117580413818,
"learning_rate": 9.8e-05,
"loss": 0.067,
"step": 98
},
{
"epoch": 0.0027528888947151086,
"grad_norm": 5.182433605194092,
"learning_rate": 9.999685283773504e-05,
"loss": 0.2084,
"step": 105
},
{
"epoch": 0.0029364148210294493,
"grad_norm": 5.367171764373779,
"learning_rate": 9.998187325055106e-05,
"loss": 0.1108,
"step": 112
},
{
"epoch": 0.00311994074734379,
"grad_norm": 1.7939116954803467,
"learning_rate": 9.995456138403733e-05,
"loss": 0.0369,
"step": 119
},
{
"epoch": 0.0033034666736581307,
"grad_norm": 0.6074697375297546,
"learning_rate": 9.991492397698826e-05,
"loss": 0.064,
"step": 126
},
{
"epoch": 0.003486992599972471,
"grad_norm": 2.814542770385742,
"learning_rate": 9.986297080934089e-05,
"loss": 0.1008,
"step": 133
},
{
"epoch": 0.0036705185262868117,
"grad_norm": 5.787035942077637,
"learning_rate": 9.979871469976196e-05,
"loss": 0.0465,
"step": 140
},
{
"epoch": 0.0038540444526011523,
"grad_norm": 7.020626068115234,
"learning_rate": 9.972217150248503e-05,
"loss": 0.1523,
"step": 147
},
{
"epoch": 0.004037570378915493,
"grad_norm": 27.655929565429688,
"learning_rate": 9.963336010339868e-05,
"loss": 0.0449,
"step": 154
},
{
"epoch": 0.004221096305229833,
"grad_norm": 43.63124465942383,
"learning_rate": 9.953230241538674e-05,
"loss": 0.0855,
"step": 161
},
{
"epoch": 0.004404622231544174,
"grad_norm": 7.2519211769104,
"learning_rate": 9.941902337292155e-05,
"loss": 0.0665,
"step": 168
},
{
"epoch": 0.004588148157858515,
"grad_norm": 4.373692512512207,
"learning_rate": 9.92935509259118e-05,
"loss": 0.1223,
"step": 175
},
{
"epoch": 0.004771674084172855,
"grad_norm": 3.619549512863159,
"learning_rate": 9.915591603280631e-05,
"loss": 0.0416,
"step": 182
},
{
"epoch": 0.004955200010487196,
"grad_norm": 2.198378324508667,
"learning_rate": 9.900615265295552e-05,
"loss": 0.1072,
"step": 189
},
{
"epoch": 0.005138725936801537,
"grad_norm": 8.324142456054688,
"learning_rate": 9.884429773823239e-05,
"loss": 0.0479,
"step": 196
},
{
"epoch": 0.005322251863115877,
"grad_norm": 8.378353118896484,
"learning_rate": 9.867039122391527e-05,
"loss": 0.0778,
"step": 203
},
{
"epoch": 0.005505777789430217,
"grad_norm": 1.1520366668701172,
"learning_rate": 9.848447601883435e-05,
"loss": 0.0138,
"step": 210
},
{
"epoch": 0.005689303715744558,
"grad_norm": 11.729530334472656,
"learning_rate": 9.828659799478456e-05,
"loss": 0.0723,
"step": 217
},
{
"epoch": 0.005872829642058899,
"grad_norm": 42.59210205078125,
"learning_rate": 9.807680597520746e-05,
"loss": 0.2053,
"step": 224
},
{
"epoch": 0.006056355568373239,
"grad_norm": 5.65015172958374,
"learning_rate": 9.785515172314463e-05,
"loss": 0.0846,
"step": 231
},
{
"epoch": 0.00623988149468758,
"grad_norm": 5.274237155914307,
"learning_rate": 9.762168992846614e-05,
"loss": 0.0548,
"step": 238
},
{
"epoch": 0.006423407421001921,
"grad_norm": 4.7061333656311035,
"learning_rate": 9.737647819437645e-05,
"loss": 0.021,
"step": 245
},
{
"epoch": 0.006606933347316261,
"grad_norm": 0.0789986327290535,
"learning_rate": 9.711957702320175e-05,
"loss": 0.0621,
"step": 252
},
{
"epoch": 0.006790459273630601,
"grad_norm": 26.16635513305664,
"learning_rate": 9.685104980146193e-05,
"loss": 0.07,
"step": 259
},
{
"epoch": 0.006973985199944942,
"grad_norm": 9.659036636352539,
"learning_rate": 9.657096278423093e-05,
"loss": 0.0952,
"step": 266
},
{
"epoch": 0.007157511126259283,
"grad_norm": 8.012511253356934,
"learning_rate": 9.627938507878917e-05,
"loss": 0.0412,
"step": 273
},
{
"epoch": 0.007341037052573623,
"grad_norm": 1.9756983518600464,
"learning_rate": 9.597638862757255e-05,
"loss": 0.0387,
"step": 280
},
{
"epoch": 0.007524562978887964,
"grad_norm": 13.931029319763184,
"learning_rate": 9.566204819042152e-05,
"loss": 0.2204,
"step": 287
},
{
"epoch": 0.007708088905202305,
"grad_norm": 9.124067306518555,
"learning_rate": 9.533644132613541e-05,
"loss": 0.0479,
"step": 294
},
{
"epoch": 0.007891614831516645,
"grad_norm": 8.203433990478516,
"learning_rate": 9.49996483733358e-05,
"loss": 0.038,
"step": 301
},
{
"epoch": 0.008075140757830985,
"grad_norm": 14.49701976776123,
"learning_rate": 9.465175243064428e-05,
"loss": 0.1173,
"step": 308
},
{
"epoch": 0.008258666684145327,
"grad_norm": 12.1497163772583,
"learning_rate": 9.4292839336179e-05,
"loss": 0.5881,
"step": 315
},
{
"epoch": 0.008442192610459667,
"grad_norm": 1.9857338666915894,
"learning_rate": 9.39229976463755e-05,
"loss": 0.0714,
"step": 322
},
{
"epoch": 0.008625718536774008,
"grad_norm": 6.0610151290893555,
"learning_rate": 9.354231861413668e-05,
"loss": 0.0567,
"step": 329
},
{
"epoch": 0.008809244463088348,
"grad_norm": 9.057708740234375,
"learning_rate": 9.315089616631752e-05,
"loss": 0.073,
"step": 336
},
{
"epoch": 0.008992770389402688,
"grad_norm": 3.3271257877349854,
"learning_rate": 9.274882688055005e-05,
"loss": 0.034,
"step": 343
},
{
"epoch": 0.00917629631571703,
"grad_norm": 3.928396701812744,
"learning_rate": 9.233620996141421e-05,
"loss": 0.0393,
"step": 350
},
{
"epoch": 0.00935982224203137,
"grad_norm": 15.419633865356445,
"learning_rate": 9.191314721596072e-05,
"loss": 0.1113,
"step": 357
},
{
"epoch": 0.00954334816834571,
"grad_norm": 13.88370418548584,
"learning_rate": 9.147974302859157e-05,
"loss": 0.1967,
"step": 364
},
{
"epoch": 0.00972687409466005,
"grad_norm": 52.43412780761719,
"learning_rate": 9.103610433530483e-05,
"loss": 0.412,
"step": 371
},
{
"epoch": 0.009910400020974392,
"grad_norm": 17.925010681152344,
"learning_rate": 9.058234059730976e-05,
"loss": 0.0833,
"step": 378
},
{
"epoch": 0.010093925947288732,
"grad_norm": 11.497563362121582,
"learning_rate": 9.01185637740189e-05,
"loss": 0.1545,
"step": 385
},
{
"epoch": 0.010277451873603074,
"grad_norm": 1.4290711879730225,
"learning_rate": 8.964488829542377e-05,
"loss": 0.0297,
"step": 392
},
{
"epoch": 0.010460977799917413,
"grad_norm": 0.6557618975639343,
"learning_rate": 8.916143103386093e-05,
"loss": 0.0612,
"step": 399
},
{
"epoch": 0.010644503726231753,
"grad_norm": 15.241708755493164,
"learning_rate": 8.866831127517557e-05,
"loss": 0.0615,
"step": 406
},
{
"epoch": 0.010828029652546095,
"grad_norm": 0.8321524858474731,
"learning_rate": 8.81656506892894e-05,
"loss": 0.0363,
"step": 413
},
{
"epoch": 0.011011555578860435,
"grad_norm": 7.561135292053223,
"learning_rate": 8.765357330018056e-05,
"loss": 0.0542,
"step": 420
},
{
"epoch": 0.011195081505174776,
"grad_norm": 0.6955423355102539,
"learning_rate": 8.71322054552824e-05,
"loss": 0.0222,
"step": 427
},
{
"epoch": 0.011378607431489116,
"grad_norm": 3.8866684436798096,
"learning_rate": 8.660167579430927e-05,
"loss": 0.0324,
"step": 434
},
{
"epoch": 0.011562133357803457,
"grad_norm": 13.728404998779297,
"learning_rate": 8.606211521751652e-05,
"loss": 0.0835,
"step": 441
},
{
"epoch": 0.011745659284117797,
"grad_norm": 18.460779190063477,
"learning_rate": 8.551365685340285e-05,
"loss": 0.0668,
"step": 448
},
{
"epoch": 0.011929185210432139,
"grad_norm": 11.145697593688965,
"learning_rate": 8.495643602586287e-05,
"loss": 0.2785,
"step": 455
},
{
"epoch": 0.012112711136746479,
"grad_norm": 5.782137870788574,
"learning_rate": 8.439059022079789e-05,
"loss": 0.0965,
"step": 462
},
{
"epoch": 0.012296237063060819,
"grad_norm": 4.491423606872559,
"learning_rate": 8.381625905219339e-05,
"loss": 0.0161,
"step": 469
},
{
"epoch": 0.01247976298937516,
"grad_norm": 2.3955559730529785,
"learning_rate": 8.32335842276713e-05,
"loss": 0.0453,
"step": 476
},
{
"epoch": 0.0126632889156895,
"grad_norm": 5.535737991333008,
"learning_rate": 8.264270951352581e-05,
"loss": 0.0626,
"step": 483
},
{
"epoch": 0.012846814842003841,
"grad_norm": 16.16122817993164,
"learning_rate": 8.20437806992512e-05,
"loss": 0.0823,
"step": 490
},
{
"epoch": 0.013030340768318181,
"grad_norm": 7.036065101623535,
"learning_rate": 8.143694556157046e-05,
"loss": 0.0595,
"step": 497
},
{
"epoch": 0.013213866694632523,
"grad_norm": 12.481003761291504,
"learning_rate": 8.082235382797349e-05,
"loss": 0.046,
"step": 504
},
{
"epoch": 0.013397392620946863,
"grad_norm": 12.709206581115723,
"learning_rate": 8.020015713977427e-05,
"loss": 0.0443,
"step": 511
},
{
"epoch": 0.013580918547261202,
"grad_norm": 8.762887001037598,
"learning_rate": 7.957050901469545e-05,
"loss": 0.0859,
"step": 518
},
{
"epoch": 0.013764444473575544,
"grad_norm": 4.909962177276611,
"learning_rate": 7.89335648089903e-05,
"loss": 0.0304,
"step": 525
},
{
"epoch": 0.013947970399889884,
"grad_norm": 53.53133773803711,
"learning_rate": 7.828948167911074e-05,
"loss": 0.0384,
"step": 532
},
{
"epoch": 0.014131496326204225,
"grad_norm": 7.125665664672852,
"learning_rate": 7.763841854293145e-05,
"loss": 0.0371,
"step": 539
},
{
"epoch": 0.014315022252518565,
"grad_norm": 5.672843933105469,
"learning_rate": 7.698053604053922e-05,
"loss": 0.073,
"step": 546
},
{
"epoch": 0.014498548178832907,
"grad_norm": 4.972446918487549,
"learning_rate": 7.631599649459744e-05,
"loss": 0.0681,
"step": 553
},
{
"epoch": 0.014682074105147247,
"grad_norm": 40.443214416503906,
"learning_rate": 7.564496387029532e-05,
"loss": 0.0528,
"step": 560
},
{
"epoch": 0.014865600031461588,
"grad_norm": 5.652285099029541,
"learning_rate": 7.496760373489202e-05,
"loss": 0.0849,
"step": 567
},
{
"epoch": 0.015049125957775928,
"grad_norm": 4.100184917449951,
"learning_rate": 7.428408321686541e-05,
"loss": 0.0595,
"step": 574
},
{
"epoch": 0.015232651884090268,
"grad_norm": 5.924082279205322,
"learning_rate": 7.35945709646756e-05,
"loss": 0.0402,
"step": 581
},
{
"epoch": 0.01541617781040461,
"grad_norm": 3.973080635070801,
"learning_rate": 7.289923710515339e-05,
"loss": 0.0631,
"step": 588
},
{
"epoch": 0.01559970373671895,
"grad_norm": 6.043672561645508,
"learning_rate": 7.219825320152411e-05,
"loss": 0.0612,
"step": 595
},
{
"epoch": 0.01578322966303329,
"grad_norm": 23.230714797973633,
"learning_rate": 7.149179221107694e-05,
"loss": 0.0552,
"step": 602
},
{
"epoch": 0.015966755589347632,
"grad_norm": 11.104944229125977,
"learning_rate": 7.078002844249032e-05,
"loss": 0.0252,
"step": 609
},
{
"epoch": 0.01615028151566197,
"grad_norm": 31.511911392211914,
"learning_rate": 7.006313751282372e-05,
"loss": 0.0974,
"step": 616
},
{
"epoch": 0.016333807441976312,
"grad_norm": 1.653994083404541,
"learning_rate": 6.934129630418701e-05,
"loss": 0.034,
"step": 623
},
{
"epoch": 0.016517333368290654,
"grad_norm": 5.44677209854126,
"learning_rate": 6.861468292009727e-05,
"loss": 0.1175,
"step": 630
},
{
"epoch": 0.01670085929460499,
"grad_norm": 17.435009002685547,
"learning_rate": 6.788347664153447e-05,
"loss": 0.0717,
"step": 637
},
{
"epoch": 0.016884385220919333,
"grad_norm": 25.111541748046875,
"learning_rate": 6.714785788270658e-05,
"loss": 0.0931,
"step": 644
},
{
"epoch": 0.017067911147233675,
"grad_norm": 16.484365463256836,
"learning_rate": 6.640800814653503e-05,
"loss": 0.0241,
"step": 651
},
{
"epoch": 0.017251437073548016,
"grad_norm": 4.067661285400391,
"learning_rate": 6.566410997987163e-05,
"loss": 0.0516,
"step": 658
},
{
"epoch": 0.017434962999862354,
"grad_norm": 1.4263770580291748,
"learning_rate": 6.49163469284578e-05,
"loss": 0.0188,
"step": 665
},
{
"epoch": 0.017618488926176696,
"grad_norm": 0.4355856478214264,
"learning_rate": 6.416490349163748e-05,
"loss": 0.0704,
"step": 672
},
{
"epoch": 0.017802014852491038,
"grad_norm": 3.417959213256836,
"learning_rate": 6.340996507683458e-05,
"loss": 0.0902,
"step": 679
},
{
"epoch": 0.017985540778805376,
"grad_norm": 0.19045788049697876,
"learning_rate": 6.265171795380659e-05,
"loss": 0.0745,
"step": 686
},
{
"epoch": 0.018169066705119717,
"grad_norm": 52.73567199707031,
"learning_rate": 6.189034920868522e-05,
"loss": 0.0481,
"step": 693
},
{
"epoch": 0.01835259263143406,
"grad_norm": 10.482780456542969,
"learning_rate": 6.112604669781572e-05,
"loss": 0.0798,
"step": 700
},
{
"epoch": 0.0185361185577484,
"grad_norm": 0.6400662660598755,
"learning_rate": 6.0358999001406156e-05,
"loss": 0.0277,
"step": 707
},
{
"epoch": 0.01871964448406274,
"grad_norm": 16.048450469970703,
"learning_rate": 5.9589395376998e-05,
"loss": 0.0459,
"step": 714
},
{
"epoch": 0.01890317041037708,
"grad_norm": 7.567919731140137,
"learning_rate": 5.8817425712769794e-05,
"loss": 0.0304,
"step": 721
},
{
"epoch": 0.01908669633669142,
"grad_norm": 2.94071626663208,
"learning_rate": 5.804328048068492e-05,
"loss": 0.0598,
"step": 728
},
{
"epoch": 0.019270222263005763,
"grad_norm": 1.3117618560791016,
"learning_rate": 5.7267150689495644e-05,
"loss": 0.0166,
"step": 735
},
{
"epoch": 0.0194537481893201,
"grad_norm": 16.543682098388672,
"learning_rate": 5.648922783761443e-05,
"loss": 0.0087,
"step": 742
},
{
"epoch": 0.019637274115634443,
"grad_norm": 21.542856216430664,
"learning_rate": 5.570970386586469e-05,
"loss": 0.0414,
"step": 749
},
{
"epoch": 0.019820800041948784,
"grad_norm": 4.565967082977295,
"learning_rate": 5.492877111012218e-05,
"loss": 0.023,
"step": 756
},
{
"epoch": 0.020004325968263122,
"grad_norm": 17.27544593811035,
"learning_rate": 5.414662225385903e-05,
"loss": 0.0472,
"step": 763
},
{
"epoch": 0.020187851894577464,
"grad_norm": 7.022396087646484,
"learning_rate": 5.336345028060199e-05,
"loss": 0.0379,
"step": 770
},
{
"epoch": 0.020371377820891805,
"grad_norm": 10.918366432189941,
"learning_rate": 5.257944842631658e-05,
"loss": 0.0228,
"step": 777
},
{
"epoch": 0.020554903747206147,
"grad_norm": 10.15198802947998,
"learning_rate": 5.179481013172912e-05,
"loss": 0.0631,
"step": 784
},
{
"epoch": 0.020738429673520485,
"grad_norm": 1.512853741645813,
"learning_rate": 5.100972899459796e-05,
"loss": 0.0047,
"step": 791
},
{
"epoch": 0.020921955599834827,
"grad_norm": 5.792972564697266,
"learning_rate": 5.022439872194629e-05,
"loss": 0.024,
"step": 798
},
{
"epoch": 0.021105481526149168,
"grad_norm": 1.3428311347961426,
"learning_rate": 4.943901308226771e-05,
"loss": 0.0241,
"step": 805
},
{
"epoch": 0.021289007452463506,
"grad_norm": 0.24871955811977386,
"learning_rate": 4.865376585771687e-05,
"loss": 0.0408,
"step": 812
},
{
"epoch": 0.021472533378777848,
"grad_norm": 1.912901759147644,
"learning_rate": 4.7868850796296495e-05,
"loss": 0.0273,
"step": 819
},
{
"epoch": 0.02165605930509219,
"grad_norm": 2.7074570655822754,
"learning_rate": 4.708446156405307e-05,
"loss": 0.043,
"step": 826
},
{
"epoch": 0.02183958523140653,
"grad_norm": 2.9339606761932373,
"learning_rate": 4.630079169729257e-05,
"loss": 0.0389,
"step": 833
},
{
"epoch": 0.02202311115772087,
"grad_norm": 9.8698091506958,
"learning_rate": 4.551803455482833e-05,
"loss": 0.0166,
"step": 840
},
{
"epoch": 0.02220663708403521,
"grad_norm": 1.908019781112671,
"learning_rate": 4.473638327027259e-05,
"loss": 0.0115,
"step": 847
},
{
"epoch": 0.022390163010349552,
"grad_norm": 4.707207679748535,
"learning_rate": 4.395603070438373e-05,
"loss": 0.0185,
"step": 854
},
{
"epoch": 0.02257368893666389,
"grad_norm": 6.587421417236328,
"learning_rate": 4.31771693974807e-05,
"loss": 0.0469,
"step": 861
},
{
"epoch": 0.022757214862978232,
"grad_norm": 2.0711984634399414,
"learning_rate": 4.239999152193664e-05,
"loss": 0.0543,
"step": 868
},
{
"epoch": 0.022940740789292573,
"grad_norm": 275.3908996582031,
"learning_rate": 4.162468883476319e-05,
"loss": 0.1184,
"step": 875
},
{
"epoch": 0.023124266715606915,
"grad_norm": 0.8165872693061829,
"learning_rate": 4.085145263029726e-05,
"loss": 0.0206,
"step": 882
},
{
"epoch": 0.023307792641921253,
"grad_norm": 0.032993383705616,
"learning_rate": 4.008047369300218e-05,
"loss": 0.0115,
"step": 889
},
{
"epoch": 0.023491318568235595,
"grad_norm": 1.4026904106140137,
"learning_rate": 3.9311942250394276e-05,
"loss": 0.0053,
"step": 896
},
{
"epoch": 0.023674844494549936,
"grad_norm": 1.8072174787521362,
"learning_rate": 3.8546047926107256e-05,
"loss": 0.0033,
"step": 903
},
{
"epoch": 0.023858370420864278,
"grad_norm": 0.23341313004493713,
"learning_rate": 3.778297969310529e-05,
"loss": 0.0495,
"step": 910
},
{
"epoch": 0.024041896347178616,
"grad_norm": 0.19453687965869904,
"learning_rate": 3.7022925827056884e-05,
"loss": 0.0435,
"step": 917
},
{
"epoch": 0.024225422273492957,
"grad_norm": 12.686004638671875,
"learning_rate": 3.62660738598805e-05,
"loss": 0.0385,
"step": 924
},
{
"epoch": 0.0244089481998073,
"grad_norm": 0.022196639329195023,
"learning_rate": 3.551261053347404e-05,
"loss": 0.0305,
"step": 931
},
{
"epoch": 0.024592474126121637,
"grad_norm": 2.3748865127563477,
"learning_rate": 3.4762721753638995e-05,
"loss": 0.0169,
"step": 938
},
{
"epoch": 0.02477600005243598,
"grad_norm": 3.813014268875122,
"learning_rate": 3.401659254421094e-05,
"loss": 0.0054,
"step": 945
},
{
"epoch": 0.02495952597875032,
"grad_norm": 1.4644575119018555,
"learning_rate": 3.3274407001407735e-05,
"loss": 0.0152,
"step": 952
},
{
"epoch": 0.02514305190506466,
"grad_norm": 16.26466941833496,
"learning_rate": 3.2536348248406534e-05,
"loss": 0.0186,
"step": 959
},
{
"epoch": 0.025326577831379,
"grad_norm": 1.4943578243255615,
"learning_rate": 3.1802598390160784e-05,
"loss": 0.0614,
"step": 966
},
{
"epoch": 0.02551010375769334,
"grad_norm": 0.06556755304336548,
"learning_rate": 3.107333846846872e-05,
"loss": 0.0132,
"step": 973
},
{
"epoch": 0.025693629684007683,
"grad_norm": 0.04227546602487564,
"learning_rate": 3.0348748417303823e-05,
"loss": 0.0114,
"step": 980
},
{
"epoch": 0.02587715561032202,
"grad_norm": 7.547623157501221,
"learning_rate": 2.9629007018418985e-05,
"loss": 0.0064,
"step": 987
},
{
"epoch": 0.026060681536636363,
"grad_norm": 0.793245255947113,
"learning_rate": 2.8914291857234636e-05,
"loss": 0.0206,
"step": 994
},
{
"epoch": 0.026244207462950704,
"grad_norm": 0.18259727954864502,
"learning_rate": 2.8204779279022276e-05,
"loss": 0.0022,
"step": 1001
},
{
"epoch": 0.026427733389265046,
"grad_norm": 11.862234115600586,
"learning_rate": 2.7500644345393943e-05,
"loss": 0.0177,
"step": 1008
},
{
"epoch": 0.026611259315579384,
"grad_norm": 2.59283185005188,
"learning_rate": 2.68020607911083e-05,
"loss": 0.0278,
"step": 1015
},
{
"epoch": 0.026794785241893725,
"grad_norm": 0.04772788658738136,
"learning_rate": 2.610920098120424e-05,
"loss": 0.0181,
"step": 1022
},
{
"epoch": 0.026978311168208067,
"grad_norm": 0.016560537740588188,
"learning_rate": 2.5422235868472345e-05,
"loss": 0.0098,
"step": 1029
},
{
"epoch": 0.027161837094522405,
"grad_norm": 0.38349756598472595,
"learning_rate": 2.4741334951274947e-05,
"loss": 0.0251,
"step": 1036
},
{
"epoch": 0.027345363020836747,
"grad_norm": 0.31784552335739136,
"learning_rate": 2.40666662317248e-05,
"loss": 0.08,
"step": 1043
},
{
"epoch": 0.027528888947151088,
"grad_norm": 0.3511944115161896,
"learning_rate": 2.3398396174233178e-05,
"loss": 0.0017,
"step": 1050
},
{
"epoch": 0.02771241487346543,
"grad_norm": 0.11994984745979309,
"learning_rate": 2.2736689664437217e-05,
"loss": 0.0158,
"step": 1057
},
{
"epoch": 0.027895940799779768,
"grad_norm": 0.1353398561477661,
"learning_rate": 2.2081709968516866e-05,
"loss": 0.0318,
"step": 1064
},
{
"epoch": 0.02807946672609411,
"grad_norm": 0.10267901420593262,
"learning_rate": 2.1433618692911467e-05,
"loss": 0.03,
"step": 1071
},
{
"epoch": 0.02826299265240845,
"grad_norm": 0.787115216255188,
"learning_rate": 2.0792575744445653e-05,
"loss": 0.009,
"step": 1078
},
{
"epoch": 0.02844651857872279,
"grad_norm": 6.143826484680176,
"learning_rate": 2.015873929087482e-05,
"loss": 0.0432,
"step": 1085
},
{
"epoch": 0.02863004450503713,
"grad_norm": 0.05623776093125343,
"learning_rate": 1.95322657218596e-05,
"loss": 0.0229,
"step": 1092
},
{
"epoch": 0.028813570431351472,
"grad_norm": 1.2254972457885742,
"learning_rate": 1.8913309610379015e-05,
"loss": 0.0572,
"step": 1099
},
{
"epoch": 0.028997096357665814,
"grad_norm": 0.01922564022243023,
"learning_rate": 1.8302023674591935e-05,
"loss": 0.0145,
"step": 1106
},
{
"epoch": 0.02918062228398015,
"grad_norm": 0.03139118850231171,
"learning_rate": 1.7698558740156135e-05,
"loss": 0.0281,
"step": 1113
},
{
"epoch": 0.029364148210294493,
"grad_norm": 8.748254776000977,
"learning_rate": 1.7103063703014372e-05,
"loss": 0.0191,
"step": 1120
},
{
"epoch": 0.029547674136608835,
"grad_norm": 2.5039148330688477,
"learning_rate": 1.6515685492656467e-05,
"loss": 0.0017,
"step": 1127
},
{
"epoch": 0.029731200062923176,
"grad_norm": 10.268917083740234,
"learning_rate": 1.59365690358667e-05,
"loss": 0.0144,
"step": 1134
},
{
"epoch": 0.029914725989237514,
"grad_norm": 3.4766223430633545,
"learning_rate": 1.5365857220965275e-05,
"loss": 0.0035,
"step": 1141
},
{
"epoch": 0.030098251915551856,
"grad_norm": 0.5243438482284546,
"learning_rate": 1.4803690862552755e-05,
"loss": 0.0042,
"step": 1148
},
{
"epoch": 0.030281777841866198,
"grad_norm": 0.26499995589256287,
"learning_rate": 1.4250208666766235e-05,
"loss": 0.0028,
"step": 1155
},
{
"epoch": 0.030465303768180536,
"grad_norm": 4.1601338386535645,
"learning_rate": 1.3705547197055584e-05,
"loss": 0.0495,
"step": 1162
},
{
"epoch": 0.030648829694494877,
"grad_norm": 0.10166924446821213,
"learning_rate": 1.3169840840488501e-05,
"loss": 0.0124,
"step": 1169
},
{
"epoch": 0.03083235562080922,
"grad_norm": 0.010529532097280025,
"learning_rate": 1.2643221774592518e-05,
"loss": 0.023,
"step": 1176
},
{
"epoch": 0.03101588154712356,
"grad_norm": 8.22623062133789,
"learning_rate": 1.2125819934742188e-05,
"loss": 0.0436,
"step": 1183
},
{
"epoch": 0.0311994074734379,
"grad_norm": 1.1787302494049072,
"learning_rate": 1.1617762982099446e-05,
"loss": 0.0118,
"step": 1190
},
{
"epoch": 0.03138293339975224,
"grad_norm": 0.2951812744140625,
"learning_rate": 1.1119176272115128e-05,
"loss": 0.0233,
"step": 1197
},
{
"epoch": 0.03156645932606658,
"grad_norm": 0.042958952486515045,
"learning_rate": 1.0630182823599399e-05,
"loss": 0.0024,
"step": 1204
},
{
"epoch": 0.03174998525238092,
"grad_norm": 0.051287971436977386,
"learning_rate": 1.0150903288368741e-05,
"loss": 0.0034,
"step": 1211
},
{
"epoch": 0.031933511178695265,
"grad_norm": 0.15109604597091675,
"learning_rate": 9.681455921476839e-06,
"loss": 0.0016,
"step": 1218
},
{
"epoch": 0.0321170371050096,
"grad_norm": 4.658985137939453,
"learning_rate": 9.221956552036992e-06,
"loss": 0.0049,
"step": 1225
},
{
"epoch": 0.03230056303132394,
"grad_norm": 5.163771629333496,
"learning_rate": 8.772518554642973e-06,
"loss": 0.0078,
"step": 1232
},
{
"epoch": 0.03248408895763828,
"grad_norm": 0.658769428730011,
"learning_rate": 8.333252821395526e-06,
"loss": 0.001,
"step": 1239
},
{
"epoch": 0.032667614883952624,
"grad_norm": 0.03406643494963646,
"learning_rate": 7.904267734541498e-06,
"loss": 0.0025,
"step": 1246
},
{
"epoch": 0.032851140810266966,
"grad_norm": 0.048391830176115036,
"learning_rate": 7.485669139732004e-06,
"loss": 0.0075,
"step": 1253
},
{
"epoch": 0.03303466673658131,
"grad_norm": 0.02164422534406185,
"learning_rate": 7.077560319906695e-06,
"loss": 0.0101,
"step": 1260
},
{
"epoch": 0.03321819266289565,
"grad_norm": 0.0323006734251976,
"learning_rate": 6.680041969810203e-06,
"loss": 0.0212,
"step": 1267
},
{
"epoch": 0.03340171858920998,
"grad_norm": 0.01731325499713421,
"learning_rate": 6.293212171147206e-06,
"loss": 0.0005,
"step": 1274
},
{
"epoch": 0.033585244515524325,
"grad_norm": 0.0469672717154026,
"learning_rate": 5.917166368382277e-06,
"loss": 0.0113,
"step": 1281
},
{
"epoch": 0.033768770441838666,
"grad_norm": 0.5670210719108582,
"learning_rate": 5.5519973451903405e-06,
"loss": 0.0065,
"step": 1288
},
{
"epoch": 0.03395229636815301,
"grad_norm": 0.6679732799530029,
"learning_rate": 5.197795201563743e-06,
"loss": 0.0009,
"step": 1295
},
{
"epoch": 0.03413582229446735,
"grad_norm": 0.7266259789466858,
"learning_rate": 4.8546473315813856e-06,
"loss": 0.0004,
"step": 1302
},
{
"epoch": 0.03431934822078169,
"grad_norm": 0.007858018390834332,
"learning_rate": 4.522638401845547e-06,
"loss": 0.0277,
"step": 1309
},
{
"epoch": 0.03450287414709603,
"grad_norm": 4.2354326248168945,
"learning_rate": 4.2018503305916775e-06,
"loss": 0.0052,
"step": 1316
},
{
"epoch": 0.03468640007341037,
"grad_norm": 36.58195495605469,
"learning_rate": 3.892362267476313e-06,
"loss": 0.0192,
"step": 1323
},
{
"epoch": 0.03486992599972471,
"grad_norm": 0.05499963089823723,
"learning_rate": 3.5942505740480582e-06,
"loss": 0.0264,
"step": 1330
},
{
"epoch": 0.03505345192603905,
"grad_norm": 0.2299686074256897,
"learning_rate": 3.3075888049065196e-06,
"loss": 0.0099,
"step": 1337
},
{
"epoch": 0.03523697785235339,
"grad_norm": 0.014163389801979065,
"learning_rate": 3.03244768955383e-06,
"loss": 0.0014,
"step": 1344
},
{
"epoch": 0.035420503778667733,
"grad_norm": 0.02343047223985195,
"learning_rate": 2.7688951149431595e-06,
"loss": 0.0061,
"step": 1351
},
{
"epoch": 0.035604029704982075,
"grad_norm": 0.3476681709289551,
"learning_rate": 2.5169961087286974e-06,
"loss": 0.002,
"step": 1358
},
{
"epoch": 0.03578755563129642,
"grad_norm": 0.07447517663240433,
"learning_rate": 2.276812823220964e-06,
"loss": 0.0029,
"step": 1365
},
{
"epoch": 0.03597108155761075,
"grad_norm": 0.008758554235100746,
"learning_rate": 2.048404520051722e-06,
"loss": 0.0067,
"step": 1372
},
{
"epoch": 0.03615460748392509,
"grad_norm": 0.46787765622138977,
"learning_rate": 1.8318275555520237e-06,
"loss": 0.0005,
"step": 1379
},
{
"epoch": 0.036338133410239434,
"grad_norm": 0.024509649723768234,
"learning_rate": 1.6271353668471655e-06,
"loss": 0.0015,
"step": 1386
},
{
"epoch": 0.036521659336553776,
"grad_norm": 0.01940271630883217,
"learning_rate": 1.4343784586718311e-06,
"loss": 0.0006,
"step": 1393
},
{
"epoch": 0.03670518526286812,
"grad_norm": 0.1267419457435608,
"learning_rate": 1.2536043909088191e-06,
"loss": 0.0031,
"step": 1400
},
{
"epoch": 0.03688871118918246,
"grad_norm": 1.5990060567855835,
"learning_rate": 1.0848577668543802e-06,
"loss": 0.0277,
"step": 1407
},
{
"epoch": 0.0370722371154968,
"grad_norm": 7.815530300140381,
"learning_rate": 9.281802222129765e-07,
"loss": 0.0045,
"step": 1414
},
{
"epoch": 0.03725576304181114,
"grad_norm": 0.020791860297322273,
"learning_rate": 7.836104148243484e-07,
"loss": 0.0009,
"step": 1421
},
{
"epoch": 0.03743928896812548,
"grad_norm": 0.010349948890507221,
"learning_rate": 6.511840151252169e-07,
"loss": 0.002,
"step": 1428
},
{
"epoch": 0.03762281489443982,
"grad_norm": 0.030976630747318268,
"learning_rate": 5.309336973481683e-07,
"loss": 0.0002,
"step": 1435
},
{
"epoch": 0.03780634082075416,
"grad_norm": 0.06936094164848328,
"learning_rate": 4.228891314597694e-07,
"loss": 0.0003,
"step": 1442
},
{
"epoch": 0.0379898667470685,
"grad_norm": 0.06425853073596954,
"learning_rate": 3.2707697583995167e-07,
"loss": 0.0004,
"step": 1449
},
{
"epoch": 0.03817339267338284,
"grad_norm": 0.005400571506470442,
"learning_rate": 2.4352087070443895e-07,
"loss": 0.0066,
"step": 1456
},
{
"epoch": 0.038356918599697185,
"grad_norm": 0.039011601358652115,
"learning_rate": 1.7224143227190236e-07,
"loss": 0.0081,
"step": 1463
},
{
"epoch": 0.038540444526011526,
"grad_norm": 0.20835164189338684,
"learning_rate": 1.132562476771959e-07,
"loss": 0.0003,
"step": 1470
},
{
"epoch": 0.03872397045232586,
"grad_norm": 1.4877870082855225,
"learning_rate": 6.657987063200533e-08,
"loss": 0.1775,
"step": 1477
},
{
"epoch": 0.0389074963786402,
"grad_norm": 0.1349838227033615,
"learning_rate": 3.2223817833931805e-08,
"loss": 0.0009,
"step": 1484
},
{
"epoch": 0.039091022304954544,
"grad_norm": 4.346598148345947,
"learning_rate": 1.019656612492592e-08,
"loss": 0.0038,
"step": 1491
},
{
"epoch": 0.039274548231268885,
"grad_norm": 0.2060699611902237,
"learning_rate": 5.035503997385949e-10,
"loss": 0.0016,
"step": 1498
}
],
"logging_steps": 7,
"max_steps": 1500,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.546484091060224e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}