zephyr-7b-sft-qlora / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9995385325334564,
"eval_steps": 500,
"global_step": 1083,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0009229349330872173,
"grad_norm": 0.3153176009654999,
"learning_rate": 1.8348623853211011e-06,
"loss": 1.16,
"step": 1
},
{
"epoch": 0.0046146746654360865,
"grad_norm": 0.30069586634635925,
"learning_rate": 9.174311926605506e-06,
"loss": 1.157,
"step": 5
},
{
"epoch": 0.009229349330872173,
"grad_norm": 0.24939967691898346,
"learning_rate": 1.834862385321101e-05,
"loss": 1.1423,
"step": 10
},
{
"epoch": 0.01384402399630826,
"grad_norm": 0.1986856758594513,
"learning_rate": 2.7522935779816515e-05,
"loss": 1.1082,
"step": 15
},
{
"epoch": 0.018458698661744346,
"grad_norm": 0.16805031895637512,
"learning_rate": 3.669724770642202e-05,
"loss": 1.0915,
"step": 20
},
{
"epoch": 0.023073373327180433,
"grad_norm": 0.16181856393814087,
"learning_rate": 4.587155963302753e-05,
"loss": 1.0639,
"step": 25
},
{
"epoch": 0.02768804799261652,
"grad_norm": 0.13395144045352936,
"learning_rate": 5.504587155963303e-05,
"loss": 1.021,
"step": 30
},
{
"epoch": 0.032302722658052604,
"grad_norm": 0.09093035757541656,
"learning_rate": 6.422018348623854e-05,
"loss": 1.0403,
"step": 35
},
{
"epoch": 0.03691739732348869,
"grad_norm": 0.08925778418779373,
"learning_rate": 7.339449541284404e-05,
"loss": 1.0398,
"step": 40
},
{
"epoch": 0.04153207198892478,
"grad_norm": 0.10212849825620651,
"learning_rate": 8.256880733944955e-05,
"loss": 1.0122,
"step": 45
},
{
"epoch": 0.046146746654360866,
"grad_norm": 0.0883544310927391,
"learning_rate": 9.174311926605506e-05,
"loss": 1.003,
"step": 50
},
{
"epoch": 0.050761421319796954,
"grad_norm": 0.09017772227525711,
"learning_rate": 0.00010091743119266055,
"loss": 0.9996,
"step": 55
},
{
"epoch": 0.05537609598523304,
"grad_norm": 0.08947195112705231,
"learning_rate": 0.00011009174311926606,
"loss": 1.0092,
"step": 60
},
{
"epoch": 0.05999077065066913,
"grad_norm": 0.09361358731985092,
"learning_rate": 0.00011926605504587157,
"loss": 1.0053,
"step": 65
},
{
"epoch": 0.06460544531610521,
"grad_norm": 0.08942877501249313,
"learning_rate": 0.00012844036697247707,
"loss": 0.979,
"step": 70
},
{
"epoch": 0.0692201199815413,
"grad_norm": 0.08816999942064285,
"learning_rate": 0.00013761467889908258,
"loss": 0.9817,
"step": 75
},
{
"epoch": 0.07383479464697738,
"grad_norm": 0.10292733460664749,
"learning_rate": 0.0001467889908256881,
"loss": 0.972,
"step": 80
},
{
"epoch": 0.07844946931241348,
"grad_norm": 0.08368176966905594,
"learning_rate": 0.0001559633027522936,
"loss": 0.9869,
"step": 85
},
{
"epoch": 0.08306414397784956,
"grad_norm": 0.08516489714384079,
"learning_rate": 0.0001651376146788991,
"loss": 0.9841,
"step": 90
},
{
"epoch": 0.08767881864328565,
"grad_norm": 0.08280258625745773,
"learning_rate": 0.00017431192660550458,
"loss": 0.9859,
"step": 95
},
{
"epoch": 0.09229349330872173,
"grad_norm": 0.08506108820438385,
"learning_rate": 0.00018348623853211012,
"loss": 0.9913,
"step": 100
},
{
"epoch": 0.09690816797415783,
"grad_norm": 0.0873042568564415,
"learning_rate": 0.0001926605504587156,
"loss": 1.014,
"step": 105
},
{
"epoch": 0.10152284263959391,
"grad_norm": 0.09446421265602112,
"learning_rate": 0.00019999947982262415,
"loss": 0.9959,
"step": 110
},
{
"epoch": 0.10613751730503,
"grad_norm": 0.0792991891503334,
"learning_rate": 0.00019998127418269004,
"loss": 0.9976,
"step": 115
},
{
"epoch": 0.11075219197046608,
"grad_norm": 0.08645275980234146,
"learning_rate": 0.00019993706508539968,
"loss": 0.9774,
"step": 120
},
{
"epoch": 0.11536686663590216,
"grad_norm": 0.07994881272315979,
"learning_rate": 0.0001998668640288,
"loss": 0.9962,
"step": 125
},
{
"epoch": 0.11998154130133826,
"grad_norm": 0.07281699776649475,
"learning_rate": 0.0001997706892710117,
"loss": 0.9682,
"step": 130
},
{
"epoch": 0.12459621596677434,
"grad_norm": 0.0806204155087471,
"learning_rate": 0.00019964856582548092,
"loss": 0.9973,
"step": 135
},
{
"epoch": 0.12921089063221042,
"grad_norm": 0.08152524381875992,
"learning_rate": 0.00019950052545447352,
"loss": 0.9926,
"step": 140
},
{
"epoch": 0.13382556529764653,
"grad_norm": 0.07916297018527985,
"learning_rate": 0.0001993266066608142,
"loss": 0.9681,
"step": 145
},
{
"epoch": 0.1384402399630826,
"grad_norm": 0.07692616432905197,
"learning_rate": 0.00019912685467787257,
"loss": 0.9552,
"step": 150
},
{
"epoch": 0.1430549146285187,
"grad_norm": 0.07867322117090225,
"learning_rate": 0.00019890132145779885,
"loss": 1.0056,
"step": 155
},
{
"epoch": 0.14766958929395477,
"grad_norm": 0.07720212638378143,
"learning_rate": 0.0001986500656580118,
"loss": 0.9603,
"step": 160
},
{
"epoch": 0.15228426395939088,
"grad_norm": 0.0785161629319191,
"learning_rate": 0.00019837315262594306,
"loss": 0.9693,
"step": 165
},
{
"epoch": 0.15689893862482696,
"grad_norm": 0.0887812152504921,
"learning_rate": 0.00019807065438204118,
"loss": 0.9713,
"step": 170
},
{
"epoch": 0.16151361329026304,
"grad_norm": 0.07461399585008621,
"learning_rate": 0.00019774264960104057,
"loss": 0.9561,
"step": 175
},
{
"epoch": 0.16612828795569912,
"grad_norm": 0.07251787185668945,
"learning_rate": 0.00019738922359149926,
"loss": 0.9701,
"step": 180
},
{
"epoch": 0.1707429626211352,
"grad_norm": 0.07772507518529892,
"learning_rate": 0.00019701046827361177,
"loss": 0.9775,
"step": 185
},
{
"epoch": 0.1753576372865713,
"grad_norm": 0.07318438589572906,
"learning_rate": 0.00019660648215530206,
"loss": 0.9585,
"step": 190
},
{
"epoch": 0.17997231195200739,
"grad_norm": 0.07861252874135971,
"learning_rate": 0.00019617737030660338,
"loss": 0.9642,
"step": 195
},
{
"epoch": 0.18458698661744347,
"grad_norm": 0.07000173628330231,
"learning_rate": 0.0001957232443323312,
"loss": 0.9679,
"step": 200
},
{
"epoch": 0.18920166128287955,
"grad_norm": 0.07377786934375763,
"learning_rate": 0.00019524422234305677,
"loss": 0.9579,
"step": 205
},
{
"epoch": 0.19381633594831565,
"grad_norm": 0.07430287450551987,
"learning_rate": 0.0001947404289243885,
"loss": 0.9385,
"step": 210
},
{
"epoch": 0.19843101061375173,
"grad_norm": 0.07597630470991135,
"learning_rate": 0.0001942119951045692,
"loss": 0.9522,
"step": 215
},
{
"epoch": 0.20304568527918782,
"grad_norm": 0.07650153338909149,
"learning_rate": 0.00019365905832039815,
"loss": 0.9766,
"step": 220
},
{
"epoch": 0.2076603599446239,
"grad_norm": 0.07343017309904099,
"learning_rate": 0.00019308176238148564,
"loss": 0.979,
"step": 225
},
{
"epoch": 0.21227503461006,
"grad_norm": 0.0811387151479721,
"learning_rate": 0.0001924802574328509,
"loss": 0.9575,
"step": 230
},
{
"epoch": 0.21688970927549608,
"grad_norm": 0.0797664150595665,
"learning_rate": 0.00019185469991587166,
"loss": 0.9746,
"step": 235
},
{
"epoch": 0.22150438394093216,
"grad_norm": 0.07318708300590515,
"learning_rate": 0.00019120525252759647,
"loss": 0.9659,
"step": 240
},
{
"epoch": 0.22611905860636825,
"grad_norm": 0.07425623387098312,
"learning_rate": 0.00019053208417842978,
"loss": 0.9644,
"step": 245
},
{
"epoch": 0.23073373327180433,
"grad_norm": 0.07410068064928055,
"learning_rate": 0.0001898353699482014,
"loss": 0.9652,
"step": 250
},
{
"epoch": 0.23534840793724043,
"grad_norm": 0.07423646003007889,
"learning_rate": 0.0001891152910406309,
"loss": 0.9691,
"step": 255
},
{
"epoch": 0.23996308260267651,
"grad_norm": 0.06945409625768661,
"learning_rate": 0.00018837203473619978,
"loss": 0.9739,
"step": 260
},
{
"epoch": 0.2445777572681126,
"grad_norm": 0.0697169378399849,
"learning_rate": 0.0001876057943434428,
"loss": 0.9772,
"step": 265
},
{
"epoch": 0.24919243193354867,
"grad_norm": 0.07200105488300323,
"learning_rate": 0.00018681676914867175,
"loss": 0.9742,
"step": 270
},
{
"epoch": 0.25380710659898476,
"grad_norm": 0.0744839757680893,
"learning_rate": 0.0001860051643641443,
"loss": 0.9613,
"step": 275
},
{
"epoch": 0.25842178126442084,
"grad_norm": 0.08401075750589371,
"learning_rate": 0.0001851711910746919,
"loss": 0.9721,
"step": 280
},
{
"epoch": 0.26303645592985697,
"grad_norm": 0.06904800236225128,
"learning_rate": 0.00018431506618282,
"loss": 0.9572,
"step": 285
},
{
"epoch": 0.26765113059529305,
"grad_norm": 0.07013043761253357,
"learning_rate": 0.0001834370123522954,
"loss": 0.9706,
"step": 290
},
{
"epoch": 0.27226580526072913,
"grad_norm": 0.07055062055587769,
"learning_rate": 0.00018253725795023504,
"loss": 0.9612,
"step": 295
},
{
"epoch": 0.2768804799261652,
"grad_norm": 0.07045809924602509,
"learning_rate": 0.0001816160369877117,
"loss": 0.9707,
"step": 300
},
{
"epoch": 0.2814951545916013,
"grad_norm": 0.06949017941951752,
"learning_rate": 0.00018067358905889146,
"loss": 0.9405,
"step": 305
},
{
"epoch": 0.2861098292570374,
"grad_norm": 0.06768112629652023,
"learning_rate": 0.00017971015927871942,
"loss": 0.97,
"step": 310
},
{
"epoch": 0.29072450392247345,
"grad_norm": 0.06964079290628433,
"learning_rate": 0.0001787259982191692,
"loss": 0.9581,
"step": 315
},
{
"epoch": 0.29533917858790953,
"grad_norm": 0.07223650813102722,
"learning_rate": 0.00017772136184407365,
"loss": 0.9611,
"step": 320
},
{
"epoch": 0.2999538532533456,
"grad_norm": 0.06840556859970093,
"learning_rate": 0.00017669651144255265,
"loss": 0.9631,
"step": 325
},
{
"epoch": 0.30456852791878175,
"grad_norm": 0.07011925429105759,
"learning_rate": 0.00017565171356105627,
"loss": 0.9817,
"step": 330
},
{
"epoch": 0.30918320258421783,
"grad_norm": 0.06768331676721573,
"learning_rate": 0.00017458723993404065,
"loss": 0.9796,
"step": 335
},
{
"epoch": 0.3137978772496539,
"grad_norm": 0.06805837899446487,
"learning_rate": 0.00017350336741329413,
"loss": 0.9683,
"step": 340
},
{
"epoch": 0.31841255191509,
"grad_norm": 0.07038763910531998,
"learning_rate": 0.00017240037789593307,
"loss": 0.9762,
"step": 345
},
{
"epoch": 0.3230272265805261,
"grad_norm": 0.0689588338136673,
"learning_rate": 0.0001712785582510848,
"loss": 0.9537,
"step": 350
},
{
"epoch": 0.32764190124596215,
"grad_norm": 0.06688909232616425,
"learning_rate": 0.00017013820024527798,
"loss": 0.9643,
"step": 355
},
{
"epoch": 0.33225657591139823,
"grad_norm": 0.073676697909832,
"learning_rate": 0.00016897960046655886,
"loss": 0.9611,
"step": 360
},
{
"epoch": 0.3368712505768343,
"grad_norm": 0.07165560126304626,
"learning_rate": 0.00016780306024735382,
"loss": 0.957,
"step": 365
},
{
"epoch": 0.3414859252422704,
"grad_norm": 0.07094935327768326,
"learning_rate": 0.00016660888558609773,
"loss": 0.9854,
"step": 370
},
{
"epoch": 0.34610059990770653,
"grad_norm": 0.06721330434083939,
"learning_rate": 0.00016539738706764894,
"loss": 0.9648,
"step": 375
},
{
"epoch": 0.3507152745731426,
"grad_norm": 0.0699782595038414,
"learning_rate": 0.00016416887978251135,
"loss": 0.9587,
"step": 380
},
{
"epoch": 0.3553299492385787,
"grad_norm": 0.06915416568517685,
"learning_rate": 0.00016292368324488462,
"loss": 0.9395,
"step": 385
},
{
"epoch": 0.35994462390401477,
"grad_norm": 0.06952769309282303,
"learning_rate": 0.00016166212130956382,
"loss": 0.9476,
"step": 390
},
{
"epoch": 0.36455929856945085,
"grad_norm": 0.06895878911018372,
"learning_rate": 0.00016038452208771037,
"loss": 0.9725,
"step": 395
},
{
"epoch": 0.36917397323488693,
"grad_norm": 0.06924083083868027,
"learning_rate": 0.00015909121786151568,
"loss": 0.9541,
"step": 400
},
{
"epoch": 0.373788647900323,
"grad_norm": 0.06652365624904633,
"learning_rate": 0.00015778254499778006,
"loss": 0.9592,
"step": 405
},
{
"epoch": 0.3784033225657591,
"grad_norm": 0.06965645402669907,
"learning_rate": 0.00015645884386042958,
"loss": 0.9484,
"step": 410
},
{
"epoch": 0.3830179972311952,
"grad_norm": 0.0711289793252945,
"learning_rate": 0.00015512045872199276,
"loss": 0.9395,
"step": 415
},
{
"epoch": 0.3876326718966313,
"grad_norm": 0.0690288171172142,
"learning_rate": 0.00015376773767406142,
"loss": 0.983,
"step": 420
},
{
"epoch": 0.3922473465620674,
"grad_norm": 0.0725955069065094,
"learning_rate": 0.00015240103253675756,
"loss": 0.956,
"step": 425
},
{
"epoch": 0.39686202122750347,
"grad_norm": 0.07096763700246811,
"learning_rate": 0.00015102069876723098,
"loss": 0.9638,
"step": 430
},
{
"epoch": 0.40147669589293955,
"grad_norm": 0.06781638413667679,
"learning_rate": 0.00014962709536721087,
"loss": 0.9413,
"step": 435
},
{
"epoch": 0.40609137055837563,
"grad_norm": 0.06911034137010574,
"learning_rate": 0.00014822058478963532,
"loss": 0.9576,
"step": 440
},
{
"epoch": 0.4107060452238117,
"grad_norm": 0.07025574147701263,
"learning_rate": 0.00014680153284438345,
"loss": 0.961,
"step": 445
},
{
"epoch": 0.4153207198892478,
"grad_norm": 0.07122451812028885,
"learning_rate": 0.00014537030860313442,
"loss": 0.9678,
"step": 450
},
{
"epoch": 0.41993539455468387,
"grad_norm": 0.06810062378644943,
"learning_rate": 0.000143927284303378,
"loss": 0.9497,
"step": 455
},
{
"epoch": 0.42455006922012,
"grad_norm": 0.06889387220144272,
"learning_rate": 0.00014247283525160178,
"loss": 0.9461,
"step": 460
},
{
"epoch": 0.4291647438855561,
"grad_norm": 0.06921575218439102,
"learning_rate": 0.00014100733972568038,
"loss": 0.9625,
"step": 465
},
{
"epoch": 0.43377941855099217,
"grad_norm": 0.06750404834747314,
"learning_rate": 0.00013953117887649153,
"loss": 0.9678,
"step": 470
},
{
"epoch": 0.43839409321642825,
"grad_norm": 0.07006347179412842,
"learning_rate": 0.00013804473662878519,
"loss": 0.9646,
"step": 475
},
{
"epoch": 0.44300876788186433,
"grad_norm": 0.06842590123414993,
"learning_rate": 0.00013654839958133117,
"loss": 0.956,
"step": 480
},
{
"epoch": 0.4476234425473004,
"grad_norm": 0.07282765209674835,
"learning_rate": 0.0001350425569063712,
"loss": 0.9787,
"step": 485
},
{
"epoch": 0.4522381172127365,
"grad_norm": 0.06680766493082047,
"learning_rate": 0.00013352760024840175,
"loss": 0.9544,
"step": 490
},
{
"epoch": 0.45685279187817257,
"grad_norm": 0.0665644034743309,
"learning_rate": 0.00013200392362231383,
"loss": 0.9397,
"step": 495
},
{
"epoch": 0.46146746654360865,
"grad_norm": 0.06751461327075958,
"learning_rate": 0.00013047192331091636,
"loss": 0.974,
"step": 500
},
{
"epoch": 0.4660821412090448,
"grad_norm": 0.06943210959434509,
"learning_rate": 0.00012893199776186956,
"loss": 0.9487,
"step": 505
},
{
"epoch": 0.47069681587448087,
"grad_norm": 0.06830769777297974,
"learning_rate": 0.0001273845474840555,
"loss": 0.9614,
"step": 510
},
{
"epoch": 0.47531149053991695,
"grad_norm": 0.06783465296030045,
"learning_rate": 0.0001258299749434123,
"loss": 0.9716,
"step": 515
},
{
"epoch": 0.47992616520535303,
"grad_norm": 0.07035640627145767,
"learning_rate": 0.00012426868445825954,
"loss": 0.9639,
"step": 520
},
{
"epoch": 0.4845408398707891,
"grad_norm": 0.06989864259958267,
"learning_rate": 0.00012270108209414186,
"loss": 0.9483,
"step": 525
},
{
"epoch": 0.4891555145362252,
"grad_norm": 0.06680671125650406,
"learning_rate": 0.00012112757555821797,
"loss": 0.9596,
"step": 530
},
{
"epoch": 0.49377018920166127,
"grad_norm": 0.07067760825157166,
"learning_rate": 0.00011954857409322302,
"loss": 0.9449,
"step": 535
},
{
"epoch": 0.49838486386709735,
"grad_norm": 0.06822020560503006,
"learning_rate": 0.00011796448837103129,
"loss": 0.9558,
"step": 540
},
{
"epoch": 0.5029995385325334,
"grad_norm": 0.06769659370183945,
"learning_rate": 0.00011637573038584729,
"loss": 0.9458,
"step": 545
},
{
"epoch": 0.5076142131979695,
"grad_norm": 0.06936723738908768,
"learning_rate": 0.00011478271334705302,
"loss": 0.9504,
"step": 550
},
{
"epoch": 0.5122288878634056,
"grad_norm": 0.07083772867918015,
"learning_rate": 0.00011318585157173913,
"loss": 0.9628,
"step": 555
},
{
"epoch": 0.5168435625288417,
"grad_norm": 0.07207299768924713,
"learning_rate": 0.0001115855603769479,
"loss": 0.9492,
"step": 560
},
{
"epoch": 0.5214582371942778,
"grad_norm": 0.06986602395772934,
"learning_rate": 0.00010998225597165628,
"loss": 0.9507,
"step": 565
},
{
"epoch": 0.5260729118597139,
"grad_norm": 0.07052252441644669,
"learning_rate": 0.00010837635534852686,
"loss": 0.9497,
"step": 570
},
{
"epoch": 0.53068758652515,
"grad_norm": 0.07066074013710022,
"learning_rate": 0.00010676827617545511,
"loss": 0.9506,
"step": 575
},
{
"epoch": 0.5353022611905861,
"grad_norm": 0.07176294177770615,
"learning_rate": 0.00010515843668694085,
"loss": 0.9501,
"step": 580
},
{
"epoch": 0.5399169358560222,
"grad_norm": 0.07183931022882462,
"learning_rate": 0.00010354725557531257,
"loss": 0.9514,
"step": 585
},
{
"epoch": 0.5445316105214583,
"grad_norm": 0.06932860612869263,
"learning_rate": 0.00010193515188183245,
"loss": 0.9452,
"step": 590
},
{
"epoch": 0.5491462851868943,
"grad_norm": 0.06957747042179108,
"learning_rate": 0.0001003225448877108,
"loss": 0.9649,
"step": 595
},
{
"epoch": 0.5537609598523304,
"grad_norm": 0.06706763803958893,
"learning_rate": 9.870985400505804e-05,
"loss": 0.9468,
"step": 600
},
{
"epoch": 0.5583756345177665,
"grad_norm": 0.06936302781105042,
"learning_rate": 9.709749866780248e-05,
"loss": 0.9569,
"step": 605
},
{
"epoch": 0.5629903091832026,
"grad_norm": 0.06822703033685684,
"learning_rate": 9.548589822260281e-05,
"loss": 0.9672,
"step": 610
},
{
"epoch": 0.5676049838486387,
"grad_norm": 0.06881389766931534,
"learning_rate": 9.387547181978291e-05,
"loss": 0.9493,
"step": 615
},
{
"epoch": 0.5722196585140747,
"grad_norm": 0.06797238439321518,
"learning_rate": 9.226663830431777e-05,
"loss": 0.9658,
"step": 620
},
{
"epoch": 0.5768343331795108,
"grad_norm": 0.06790480017662048,
"learning_rate": 9.065981610689914e-05,
"loss": 0.9506,
"step": 625
},
{
"epoch": 0.5814490078449469,
"grad_norm": 0.06801264733076096,
"learning_rate": 8.905542313510846e-05,
"loss": 0.9552,
"step": 630
},
{
"epoch": 0.586063682510383,
"grad_norm": 0.07247908413410187,
"learning_rate": 8.745387666472637e-05,
"loss": 0.9632,
"step": 635
},
{
"epoch": 0.5906783571758191,
"grad_norm": 0.06877297908067703,
"learning_rate": 8.58555932312059e-05,
"loss": 0.9652,
"step": 640
},
{
"epoch": 0.5952930318412551,
"grad_norm": 0.06959784030914307,
"learning_rate": 8.426098852133892e-05,
"loss": 0.9436,
"step": 645
},
{
"epoch": 0.5999077065066912,
"grad_norm": 0.06700550764799118,
"learning_rate": 8.267047726514278e-05,
"loss": 0.9644,
"step": 650
},
{
"epoch": 0.6045223811721273,
"grad_norm": 0.06979133933782578,
"learning_rate": 8.108447312799587e-05,
"loss": 0.9603,
"step": 655
},
{
"epoch": 0.6091370558375635,
"grad_norm": 0.06916554272174835,
"learning_rate": 7.950338860305048e-05,
"loss": 0.9479,
"step": 660
},
{
"epoch": 0.6137517305029996,
"grad_norm": 0.0692073404788971,
"learning_rate": 7.792763490394984e-05,
"loss": 0.9583,
"step": 665
},
{
"epoch": 0.6183664051684357,
"grad_norm": 0.06973334401845932,
"learning_rate": 7.635762185787868e-05,
"loss": 0.9598,
"step": 670
},
{
"epoch": 0.6229810798338717,
"grad_norm": 0.069077268242836,
"learning_rate": 7.479375779897379e-05,
"loss": 0.9508,
"step": 675
},
{
"epoch": 0.6275957544993078,
"grad_norm": 0.06755080074071884,
"learning_rate": 7.323644946212331e-05,
"loss": 0.9538,
"step": 680
},
{
"epoch": 0.6322104291647439,
"grad_norm": 0.06810387223958969,
"learning_rate": 7.168610187718164e-05,
"loss": 0.9563,
"step": 685
},
{
"epoch": 0.63682510383018,
"grad_norm": 0.06826294958591461,
"learning_rate": 7.014311826362804e-05,
"loss": 0.951,
"step": 690
},
{
"epoch": 0.6414397784956161,
"grad_norm": 0.07014641910791397,
"learning_rate": 6.8607899925696e-05,
"loss": 0.9669,
"step": 695
},
{
"epoch": 0.6460544531610521,
"grad_norm": 0.0688079446554184,
"learning_rate": 6.708084614800064e-05,
"loss": 0.9361,
"step": 700
},
{
"epoch": 0.6506691278264882,
"grad_norm": 0.07005127519369125,
"learning_rate": 6.556235409169154e-05,
"loss": 0.9337,
"step": 705
},
{
"epoch": 0.6552838024919243,
"grad_norm": 0.06724046170711517,
"learning_rate": 6.405281869115768e-05,
"loss": 0.9404,
"step": 710
},
{
"epoch": 0.6598984771573604,
"grad_norm": 0.07361240684986115,
"learning_rate": 6.255263255131172e-05,
"loss": 0.9527,
"step": 715
},
{
"epoch": 0.6645131518227965,
"grad_norm": 0.06800534576177597,
"learning_rate": 6.106218584547991e-05,
"loss": 0.9531,
"step": 720
},
{
"epoch": 0.6691278264882325,
"grad_norm": 0.0687314122915268,
"learning_rate": 5.9581866213924656e-05,
"loss": 0.9421,
"step": 725
},
{
"epoch": 0.6737425011536686,
"grad_norm": 0.06845971941947937,
"learning_rate": 5.8112058663025706e-05,
"loss": 0.9455,
"step": 730
},
{
"epoch": 0.6783571758191047,
"grad_norm": 0.06654047966003418,
"learning_rate": 5.665314546514633e-05,
"loss": 0.9514,
"step": 735
},
{
"epoch": 0.6829718504845408,
"grad_norm": 0.069442018866539,
"learning_rate": 5.520550605921091e-05,
"loss": 0.9537,
"step": 740
},
{
"epoch": 0.687586525149977,
"grad_norm": 0.0674603134393692,
"learning_rate": 5.376951695201894e-05,
"loss": 0.9536,
"step": 745
},
{
"epoch": 0.6922011998154131,
"grad_norm": 0.06752391904592514,
"learning_rate": 5.234555162032221e-05,
"loss": 0.9477,
"step": 750
},
{
"epoch": 0.6968158744808491,
"grad_norm": 0.07034114748239517,
"learning_rate": 5.093398041368942e-05,
"loss": 0.9637,
"step": 755
},
{
"epoch": 0.7014305491462852,
"grad_norm": 0.06883241981267929,
"learning_rate": 4.953517045818473e-05,
"loss": 0.9556,
"step": 760
},
{
"epoch": 0.7060452238117213,
"grad_norm": 0.06869496405124664,
"learning_rate": 4.81494855608843e-05,
"loss": 0.9581,
"step": 765
},
{
"epoch": 0.7106598984771574,
"grad_norm": 0.07270540297031403,
"learning_rate": 4.677728611525605e-05,
"loss": 0.9503,
"step": 770
},
{
"epoch": 0.7152745731425935,
"grad_norm": 0.06813713908195496,
"learning_rate": 4.541892900742757e-05,
"loss": 0.9428,
"step": 775
},
{
"epoch": 0.7198892478080295,
"grad_norm": 0.06918053328990936,
"learning_rate": 4.407476752336576e-05,
"loss": 0.9433,
"step": 780
},
{
"epoch": 0.7245039224734656,
"grad_norm": 0.06645094603300095,
"learning_rate": 4.274515125699332e-05,
"loss": 0.9444,
"step": 785
},
{
"epoch": 0.7291185971389017,
"grad_norm": 0.06784369051456451,
"learning_rate": 4.1430426019264924e-05,
"loss": 0.9558,
"step": 790
},
{
"epoch": 0.7337332718043378,
"grad_norm": 0.0678802952170372,
"learning_rate": 4.0130933748227885e-05,
"loss": 0.938,
"step": 795
},
{
"epoch": 0.7383479464697739,
"grad_norm": 0.06705697625875473,
"learning_rate": 3.884701242008949e-05,
"loss": 0.9309,
"step": 800
},
{
"epoch": 0.7429626211352099,
"grad_norm": 0.06846518814563751,
"learning_rate": 3.757899596131529e-05,
"loss": 0.9602,
"step": 805
},
{
"epoch": 0.747577295800646,
"grad_norm": 0.07015591859817505,
"learning_rate": 3.632721416178029e-05,
"loss": 0.9831,
"step": 810
},
{
"epoch": 0.7521919704660821,
"grad_norm": 0.06770645827054977,
"learning_rate": 3.509199258899603e-05,
"loss": 0.9638,
"step": 815
},
{
"epoch": 0.7568066451315182,
"grad_norm": 0.07250072807073593,
"learning_rate": 3.387365250343615e-05,
"loss": 0.9607,
"step": 820
},
{
"epoch": 0.7614213197969543,
"grad_norm": 0.06818177551031113,
"learning_rate": 3.267251077498169e-05,
"loss": 0.9445,
"step": 825
},
{
"epoch": 0.7660359944623903,
"grad_norm": 0.06822264194488525,
"learning_rate": 3.148887980050872e-05,
"loss": 0.9521,
"step": 830
},
{
"epoch": 0.7706506691278265,
"grad_norm": 0.06892835348844528,
"learning_rate": 3.0323067422638908e-05,
"loss": 0.9624,
"step": 835
},
{
"epoch": 0.7752653437932626,
"grad_norm": 0.06881006807088852,
"learning_rate": 2.9175376849675073e-05,
"loss": 0.9773,
"step": 840
},
{
"epoch": 0.7798800184586987,
"grad_norm": 0.06809905171394348,
"learning_rate": 2.8046106576741605e-05,
"loss": 0.9462,
"step": 845
},
{
"epoch": 0.7844946931241348,
"grad_norm": 0.06759600341320038,
"learning_rate": 2.6935550308150847e-05,
"loss": 0.9488,
"step": 850
},
{
"epoch": 0.7891093677895709,
"grad_norm": 0.06885316222906113,
"learning_rate": 2.5843996881015676e-05,
"loss": 0.9526,
"step": 855
},
{
"epoch": 0.7937240424550069,
"grad_norm": 0.06799903512001038,
"learning_rate": 2.4771730190127618e-05,
"loss": 0.9446,
"step": 860
},
{
"epoch": 0.798338717120443,
"grad_norm": 0.06789611279964447,
"learning_rate": 2.3719029114120716e-05,
"loss": 0.949,
"step": 865
},
{
"epoch": 0.8029533917858791,
"grad_norm": 0.06740868836641312,
"learning_rate": 2.268616744293973e-05,
"loss": 0.9531,
"step": 870
},
{
"epoch": 0.8075680664513152,
"grad_norm": 0.06866241991519928,
"learning_rate": 2.1673413806632102e-05,
"loss": 0.9534,
"step": 875
},
{
"epoch": 0.8121827411167513,
"grad_norm": 0.06787573546171188,
"learning_rate": 2.068103160548156e-05,
"loss": 0.9473,
"step": 880
},
{
"epoch": 0.8167974157821873,
"grad_norm": 0.07063360512256622,
"learning_rate": 1.9709278941502363e-05,
"loss": 0.9486,
"step": 885
},
{
"epoch": 0.8214120904476234,
"grad_norm": 0.07002748548984528,
"learning_rate": 1.8758408551311047e-05,
"loss": 0.943,
"step": 890
},
{
"epoch": 0.8260267651130595,
"grad_norm": 0.07106012105941772,
"learning_rate": 1.7828667740394044e-05,
"loss": 0.9662,
"step": 895
},
{
"epoch": 0.8306414397784956,
"grad_norm": 0.06852705031633377,
"learning_rate": 1.692029831878753e-05,
"loss": 0.9505,
"step": 900
},
{
"epoch": 0.8352561144439317,
"grad_norm": 0.07118076831102371,
"learning_rate": 1.6033536538186778e-05,
"loss": 0.9553,
"step": 905
},
{
"epoch": 0.8398707891093677,
"grad_norm": 0.06763019412755966,
"learning_rate": 1.5168613030500923e-05,
"loss": 0.9445,
"step": 910
},
{
"epoch": 0.8444854637748038,
"grad_norm": 0.06864377856254578,
"learning_rate": 1.4325752747869626e-05,
"loss": 0.9553,
"step": 915
},
{
"epoch": 0.84910013844024,
"grad_norm": 0.06862477213144302,
"learning_rate": 1.3505174904156593e-05,
"loss": 0.948,
"step": 920
},
{
"epoch": 0.8537148131056761,
"grad_norm": 0.06800781190395355,
"learning_rate": 1.2707092917935914e-05,
"loss": 0.9604,
"step": 925
},
{
"epoch": 0.8583294877711122,
"grad_norm": 0.06934888660907745,
"learning_rate": 1.1931714356985257e-05,
"loss": 0.9487,
"step": 930
},
{
"epoch": 0.8629441624365483,
"grad_norm": 0.06795363873243332,
"learning_rate": 1.1179240884301156e-05,
"loss": 0.94,
"step": 935
},
{
"epoch": 0.8675588371019843,
"grad_norm": 0.06862738728523254,
"learning_rate": 1.0449868205649649e-05,
"loss": 0.9314,
"step": 940
},
{
"epoch": 0.8721735117674204,
"grad_norm": 0.06817808747291565,
"learning_rate": 9.74378601866669e-06,
"loss": 0.9552,
"step": 945
},
{
"epoch": 0.8767881864328565,
"grad_norm": 0.06724893301725388,
"learning_rate": 9.061177963520751e-06,
"loss": 0.9624,
"step": 950
},
{
"epoch": 0.8814028610982926,
"grad_norm": 0.06659146398305893,
"learning_rate": 8.402221575151238e-06,
"loss": 0.9275,
"step": 955
},
{
"epoch": 0.8860175357637287,
"grad_norm": 0.06741462647914886,
"learning_rate": 7.767088237094577e-06,
"loss": 0.9576,
"step": 960
},
{
"epoch": 0.8906322104291647,
"grad_norm": 0.06838846951723099,
"learning_rate": 7.155943136910193e-06,
"loss": 0.9583,
"step": 965
},
{
"epoch": 0.8952468850946008,
"grad_norm": 0.06980287283658981,
"learning_rate": 6.5689452232180485e-06,
"loss": 0.9594,
"step": 970
},
{
"epoch": 0.8998615597600369,
"grad_norm": 0.0681188777089119,
"learning_rate": 6.00624716435868e-06,
"loss": 0.9361,
"step": 975
},
{
"epoch": 0.904476234425473,
"grad_norm": 0.06910879909992218,
"learning_rate": 5.467995308686813e-06,
"loss": 0.9675,
"step": 980
},
{
"epoch": 0.9090909090909091,
"grad_norm": 0.07297144830226898,
"learning_rate": 4.954329646508505e-06,
"loss": 0.9525,
"step": 985
},
{
"epoch": 0.9137055837563451,
"grad_norm": 0.06912152469158173,
"learning_rate": 4.465383773672127e-06,
"loss": 0.9597,
"step": 990
},
{
"epoch": 0.9183202584217812,
"grad_norm": 0.06780077517032623,
"learning_rate": 4.001284856822174e-06,
"loss": 0.9572,
"step": 995
},
{
"epoch": 0.9229349330872173,
"grad_norm": 0.0673639103770256,
"learning_rate": 3.562153600325491e-06,
"loss": 0.9394,
"step": 1000
},
{
"epoch": 0.9275496077526535,
"grad_norm": 0.06862404197454453,
"learning_rate": 3.1481042148779672e-06,
"loss": 0.9654,
"step": 1005
},
{
"epoch": 0.9321642824180896,
"grad_norm": 0.06896745413541794,
"learning_rate": 2.7592443878003195e-06,
"loss": 0.963,
"step": 1010
},
{
"epoch": 0.9367789570835257,
"grad_norm": 0.06962341070175171,
"learning_rate": 2.395675255030383e-06,
"loss": 0.9473,
"step": 1015
},
{
"epoch": 0.9413936317489617,
"grad_norm": 0.06696717441082001,
"learning_rate": 2.0574913748193647e-06,
"loss": 0.9493,
"step": 1020
},
{
"epoch": 0.9460083064143978,
"grad_norm": 0.06831862032413483,
"learning_rate": 1.7447807031388264e-06,
"loss": 0.9504,
"step": 1025
},
{
"epoch": 0.9506229810798339,
"grad_norm": 0.07115928083658218,
"learning_rate": 1.457624570804772e-06,
"loss": 0.943,
"step": 1030
},
{
"epoch": 0.95523765574527,
"grad_norm": 0.06887007504701614,
"learning_rate": 1.196097662324902e-06,
"loss": 0.9679,
"step": 1035
},
{
"epoch": 0.9598523304107061,
"grad_norm": 0.06956353038549423,
"learning_rate": 9.602679964744288e-07,
"loss": 0.9413,
"step": 1040
},
{
"epoch": 0.9644670050761421,
"grad_norm": 0.06954739987850189,
"learning_rate": 7.501969086054717e-07,
"loss": 0.9497,
"step": 1045
},
{
"epoch": 0.9690816797415782,
"grad_norm": 0.06847439706325531,
"learning_rate": 5.659390346948179e-07,
"loss": 0.9641,
"step": 1050
},
{
"epoch": 0.9736963544070143,
"grad_norm": 0.06772764027118683,
"learning_rate": 4.075422971340115e-07,
"loss": 0.9616,
"step": 1055
},
{
"epoch": 0.9783110290724504,
"grad_norm": 0.06802436709403992,
"learning_rate": 2.7504789226548977e-07,
"loss": 0.953,
"step": 1060
},
{
"epoch": 0.9829257037378865,
"grad_norm": 0.06744276732206345,
"learning_rate": 1.6849027966816532e-07,
"loss": 0.9489,
"step": 1065
},
{
"epoch": 0.9875403784033225,
"grad_norm": 0.06742815673351288,
"learning_rate": 8.789717319505065e-08,
"loss": 0.9593,
"step": 1070
},
{
"epoch": 0.9921550530687586,
"grad_norm": 0.07070456445217133,
"learning_rate": 3.328953376530164e-08,
"loss": 0.9528,
"step": 1075
},
{
"epoch": 0.9967697277341947,
"grad_norm": 0.06779249012470245,
"learning_rate": 4.6815639127006925e-09,
"loss": 0.9468,
"step": 1080
},
{
"epoch": 0.9995385325334564,
"eval_loss": 0.9562498331069946,
"eval_runtime": 12105.6627,
"eval_samples_per_second": 1.268,
"eval_steps_per_second": 1.268,
"step": 1083
},
{
"epoch": 0.9995385325334564,
"step": 1083,
"total_flos": 1.2183828414859313e+19,
"train_loss": 0.9649847933019847,
"train_runtime": 330634.4933,
"train_samples_per_second": 0.419,
"train_steps_per_second": 0.003
}
],
"logging_steps": 5,
"max_steps": 1083,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.2183828414859313e+19,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
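
The `log_history` list above holds one record every `logging_steps` (5) training steps, plus a final eval record and a train summary. A minimal sketch of how such a state file could be parsed to recover the loss curve follows; the local path `trainer_state.json` is an assumption for illustration, not something recorded in the state itself.

```python
# Minimal sketch: load this trainer_state.json and extract the training-loss curve.
# The filename below is assumed; adjust it to wherever the file was downloaded.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Per-step training records carry "loss"; the eval record uses "eval_loss" and the
# final summary uses "train_loss", so filtering on "loss" keeps only training points.
train_logs = [entry for entry in state["log_history"] if "loss" in entry]

steps = [entry["step"] for entry in train_logs]
losses = [entry["loss"] for entry in train_logs]

print(f"logged training points: {len(train_logs)}")
print(f"loss at step {steps[0]}: {losses[0]}")
print(f"loss at step {steps[-1]}: {losses[-1]}")
```

Under these assumptions, the script prints roughly 1.16 at step 1 and about 0.95 at step 1080, matching the gradual decline visible in the records above.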