zephyr-7b-sft-qlora / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9997127262280954,
"eval_steps": 500,
"global_step": 870,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 0.322265625,
"learning_rate": 2.2988505747126437e-06,
"loss": 1.1344,
"step": 1
},
{
"epoch": 0.01,
"grad_norm": 0.318359375,
"learning_rate": 1.1494252873563218e-05,
"loss": 1.1669,
"step": 5
},
{
"epoch": 0.01,
"grad_norm": 0.255859375,
"learning_rate": 2.2988505747126437e-05,
"loss": 1.1315,
"step": 10
},
{
"epoch": 0.02,
"grad_norm": 0.2265625,
"learning_rate": 3.4482758620689657e-05,
"loss": 1.0651,
"step": 15
},
{
"epoch": 0.02,
"grad_norm": 0.2080078125,
"learning_rate": 4.597701149425287e-05,
"loss": 1.0782,
"step": 20
},
{
"epoch": 0.03,
"grad_norm": 0.1494140625,
"learning_rate": 5.747126436781609e-05,
"loss": 1.0631,
"step": 25
},
{
"epoch": 0.03,
"grad_norm": 0.142578125,
"learning_rate": 6.896551724137931e-05,
"loss": 1.0328,
"step": 30
},
{
"epoch": 0.04,
"grad_norm": 0.13671875,
"learning_rate": 8.045977011494253e-05,
"loss": 0.999,
"step": 35
},
{
"epoch": 0.05,
"grad_norm": 0.1376953125,
"learning_rate": 9.195402298850575e-05,
"loss": 1.0058,
"step": 40
},
{
"epoch": 0.05,
"grad_norm": 0.1455078125,
"learning_rate": 0.00010344827586206898,
"loss": 1.0379,
"step": 45
},
{
"epoch": 0.06,
"grad_norm": 0.1435546875,
"learning_rate": 0.00011494252873563218,
"loss": 0.9973,
"step": 50
},
{
"epoch": 0.06,
"grad_norm": 0.14453125,
"learning_rate": 0.0001264367816091954,
"loss": 0.9921,
"step": 55
},
{
"epoch": 0.07,
"grad_norm": 0.1376953125,
"learning_rate": 0.00013793103448275863,
"loss": 0.973,
"step": 60
},
{
"epoch": 0.07,
"grad_norm": 0.1376953125,
"learning_rate": 0.00014942528735632183,
"loss": 0.9459,
"step": 65
},
{
"epoch": 0.08,
"grad_norm": 0.142578125,
"learning_rate": 0.00016091954022988506,
"loss": 0.9482,
"step": 70
},
{
"epoch": 0.09,
"grad_norm": 0.1298828125,
"learning_rate": 0.00017241379310344826,
"loss": 1.0018,
"step": 75
},
{
"epoch": 0.09,
"grad_norm": 0.1318359375,
"learning_rate": 0.0001839080459770115,
"loss": 0.9946,
"step": 80
},
{
"epoch": 0.1,
"grad_norm": 0.13671875,
"learning_rate": 0.00019540229885057472,
"loss": 1.0059,
"step": 85
},
{
"epoch": 0.1,
"grad_norm": 0.138671875,
"learning_rate": 0.00019999275591576766,
"loss": 1.0132,
"step": 90
},
{
"epoch": 0.11,
"grad_norm": 0.130859375,
"learning_rate": 0.00019994849031285415,
"loss": 0.9993,
"step": 95
},
{
"epoch": 0.11,
"grad_norm": 0.1171875,
"learning_rate": 0.00019986400139094236,
"loss": 0.9768,
"step": 100
},
{
"epoch": 0.12,
"grad_norm": 0.12060546875,
"learning_rate": 0.000199739323151795,
"loss": 0.9577,
"step": 105
},
{
"epoch": 0.13,
"grad_norm": 0.12255859375,
"learning_rate": 0.00019957450577098322,
"loss": 0.9614,
"step": 110
},
{
"epoch": 0.13,
"grad_norm": 0.130859375,
"learning_rate": 0.00019936961557769385,
"loss": 1.0079,
"step": 115
},
{
"epoch": 0.14,
"grad_norm": 0.11328125,
"learning_rate": 0.00019912473502803582,
"loss": 0.9538,
"step": 120
},
{
"epoch": 0.14,
"grad_norm": 0.115234375,
"learning_rate": 0.0001988399626718565,
"loss": 0.9984,
"step": 125
},
{
"epoch": 0.15,
"grad_norm": 0.125,
"learning_rate": 0.00019851541311308123,
"loss": 0.9909,
"step": 130
},
{
"epoch": 0.16,
"grad_norm": 0.1181640625,
"learning_rate": 0.00019815121696359212,
"loss": 1.0142,
"step": 135
},
{
"epoch": 0.16,
"grad_norm": 0.11572265625,
"learning_rate": 0.00019774752079066452,
"loss": 0.9513,
"step": 140
},
{
"epoch": 0.17,
"grad_norm": 0.12060546875,
"learning_rate": 0.00019730448705798239,
"loss": 0.9907,
"step": 145
},
{
"epoch": 0.17,
"grad_norm": 0.1162109375,
"learning_rate": 0.00019682229406025635,
"loss": 0.9987,
"step": 150
},
{
"epoch": 0.18,
"grad_norm": 0.11083984375,
"learning_rate": 0.00019630113585147063,
"loss": 0.9661,
"step": 155
},
{
"epoch": 0.18,
"grad_norm": 0.11767578125,
"learning_rate": 0.00019574122216678799,
"loss": 0.9911,
"step": 160
},
{
"epoch": 0.19,
"grad_norm": 0.11376953125,
"learning_rate": 0.0001951427783381437,
"loss": 0.9763,
"step": 165
},
{
"epoch": 0.2,
"grad_norm": 0.12890625,
"learning_rate": 0.0001945060452035629,
"loss": 0.9727,
"step": 170
},
{
"epoch": 0.2,
"grad_norm": 0.1171875,
"learning_rate": 0.0001938312790102376,
"loss": 0.9485,
"step": 175
},
{
"epoch": 0.21,
"grad_norm": 0.109375,
"learning_rate": 0.00019311875131140246,
"loss": 0.987,
"step": 180
},
{
"epoch": 0.21,
"grad_norm": 0.11279296875,
"learning_rate": 0.00019236874885705075,
"loss": 0.9369,
"step": 185
},
{
"epoch": 0.22,
"grad_norm": 0.1181640625,
"learning_rate": 0.0001915815734785346,
"loss": 0.962,
"step": 190
},
{
"epoch": 0.22,
"grad_norm": 0.11328125,
"learning_rate": 0.00019075754196709572,
"loss": 1.0007,
"step": 195
},
{
"epoch": 0.23,
"grad_norm": 0.1083984375,
"learning_rate": 0.0001898969859463759,
"loss": 0.9192,
"step": 200
},
{
"epoch": 0.24,
"grad_norm": 0.12890625,
"learning_rate": 0.00018900025173895822,
"loss": 0.9357,
"step": 205
},
{
"epoch": 0.24,
"grad_norm": 0.11279296875,
"learning_rate": 0.00018806770022699278,
"loss": 0.9545,
"step": 210
},
{
"epoch": 0.25,
"grad_norm": 0.119140625,
"learning_rate": 0.00018709970670696308,
"loss": 0.9533,
"step": 215
},
{
"epoch": 0.25,
"grad_norm": 0.11083984375,
"learning_rate": 0.00018609666073865158,
"loss": 1.0024,
"step": 220
},
{
"epoch": 0.26,
"grad_norm": 0.11083984375,
"learning_rate": 0.00018505896598836508,
"loss": 0.986,
"step": 225
},
{
"epoch": 0.26,
"grad_norm": 0.1181640625,
"learning_rate": 0.00018398704006648302,
"loss": 0.9542,
"step": 230
},
{
"epoch": 0.27,
"grad_norm": 0.115234375,
"learning_rate": 0.00018288131435939412,
"loss": 1.0122,
"step": 235
},
{
"epoch": 0.28,
"grad_norm": 0.1142578125,
"learning_rate": 0.00018174223385588917,
"loss": 0.9677,
"step": 240
},
{
"epoch": 0.28,
"grad_norm": 0.119140625,
"learning_rate": 0.0001805702569680794,
"loss": 0.9935,
"step": 245
},
{
"epoch": 0.29,
"grad_norm": 0.11962890625,
"learning_rate": 0.00017936585534691291,
"loss": 0.9992,
"step": 250
},
{
"epoch": 0.29,
"grad_norm": 0.11376953125,
"learning_rate": 0.00017812951369236316,
"loss": 0.979,
"step": 255
},
{
"epoch": 0.3,
"grad_norm": 0.119140625,
"learning_rate": 0.00017686172955836633,
"loss": 0.9367,
"step": 260
},
{
"epoch": 0.3,
"grad_norm": 0.1162109375,
"learning_rate": 0.00017556301315258517,
"loss": 0.9548,
"step": 265
},
{
"epoch": 0.31,
"grad_norm": 0.115234375,
"learning_rate": 0.000174233887131081,
"loss": 1.0021,
"step": 270
},
{
"epoch": 0.32,
"grad_norm": 0.11083984375,
"learning_rate": 0.00017287488638797563,
"loss": 0.9767,
"step": 275
},
{
"epoch": 0.32,
"grad_norm": 0.11083984375,
"learning_rate": 0.00017148655784018829,
"loss": 0.9757,
"step": 280
},
{
"epoch": 0.33,
"grad_norm": 0.11083984375,
"learning_rate": 0.00017006946020733425,
"loss": 0.9611,
"step": 285
},
{
"epoch": 0.33,
"grad_norm": 0.1181640625,
"learning_rate": 0.0001686241637868734,
"loss": 0.9773,
"step": 290
},
{
"epoch": 0.34,
"grad_norm": 0.11083984375,
"learning_rate": 0.00016715125022459922,
"loss": 0.9613,
"step": 295
},
{
"epoch": 0.34,
"grad_norm": 0.11181640625,
"learning_rate": 0.00016565131228056133,
"loss": 0.9993,
"step": 300
},
{
"epoch": 0.35,
"grad_norm": 0.1279296875,
"learning_rate": 0.0001641249535905147,
"loss": 1.0112,
"step": 305
},
{
"epoch": 0.36,
"grad_norm": 0.1103515625,
"learning_rate": 0.00016257278842299197,
"loss": 0.969,
"step": 310
},
{
"epoch": 0.36,
"grad_norm": 0.1064453125,
"learning_rate": 0.0001609954414320973,
"loss": 0.9559,
"step": 315
},
{
"epoch": 0.37,
"grad_norm": 0.111328125,
"learning_rate": 0.00015939354740612,
"loss": 0.9527,
"step": 320
},
{
"epoch": 0.37,
"grad_norm": 0.11376953125,
"learning_rate": 0.0001577677510120701,
"loss": 1.0005,
"step": 325
},
{
"epoch": 0.38,
"grad_norm": 0.1123046875,
"learning_rate": 0.00015611870653623825,
"loss": 0.9512,
"step": 330
},
{
"epoch": 0.38,
"grad_norm": 0.11279296875,
"learning_rate": 0.00015444707762088443,
"loss": 0.9896,
"step": 335
},
{
"epoch": 0.39,
"grad_norm": 0.12158203125,
"learning_rate": 0.00015275353699716155,
"loss": 0.9514,
"step": 340
},
{
"epoch": 0.4,
"grad_norm": 0.1103515625,
"learning_rate": 0.00015103876621438086,
"loss": 0.9326,
"step": 345
},
{
"epoch": 0.4,
"grad_norm": 0.111328125,
"learning_rate": 0.00014930345536572924,
"loss": 0.9457,
"step": 350
},
{
"epoch": 0.41,
"grad_norm": 0.10986328125,
"learning_rate": 0.00014754830281054777,
"loss": 0.9748,
"step": 355
},
{
"epoch": 0.41,
"grad_norm": 0.11279296875,
"learning_rate": 0.00014577401489328335,
"loss": 0.9776,
"step": 360
},
{
"epoch": 0.42,
"grad_norm": 0.10888671875,
"learning_rate": 0.00014398130565922742,
"loss": 0.9827,
"step": 365
},
{
"epoch": 0.43,
"grad_norm": 0.1337890625,
"learning_rate": 0.0001421708965671551,
"loss": 0.9688,
"step": 370
},
{
"epoch": 0.43,
"grad_norm": 0.11376953125,
"learning_rate": 0.00014034351619898088,
"loss": 0.9832,
"step": 375
},
{
"epoch": 0.44,
"grad_norm": 0.1162109375,
"learning_rate": 0.0001384998999665479,
"loss": 0.9901,
"step": 380
},
{
"epoch": 0.44,
"grad_norm": 0.11083984375,
"learning_rate": 0.00013664078981566843,
"loss": 0.9898,
"step": 385
},
{
"epoch": 0.45,
"grad_norm": 0.11083984375,
"learning_rate": 0.00013476693392753476,
"loss": 0.9549,
"step": 390
},
{
"epoch": 0.45,
"grad_norm": 0.1162109375,
"learning_rate": 0.0001328790864176209,
"loss": 0.9486,
"step": 395
},
{
"epoch": 0.46,
"grad_norm": 0.10986328125,
"learning_rate": 0.00013097800703219586,
"loss": 0.9776,
"step": 400
},
{
"epoch": 0.47,
"grad_norm": 0.11083984375,
"learning_rate": 0.0001290644608425711,
"loss": 0.9457,
"step": 405
},
{
"epoch": 0.47,
"grad_norm": 0.10791015625,
"learning_rate": 0.0001271392179372048,
"loss": 0.9693,
"step": 410
},
{
"epoch": 0.48,
"grad_norm": 0.11376953125,
"learning_rate": 0.00012520305311178716,
"loss": 0.9658,
"step": 415
},
{
"epoch": 0.48,
"grad_norm": 0.1103515625,
"learning_rate": 0.00012325674555743106,
"loss": 1.0126,
"step": 420
},
{
"epoch": 0.49,
"grad_norm": 0.109375,
"learning_rate": 0.0001213010785470943,
"loss": 0.9329,
"step": 425
},
{
"epoch": 0.49,
"grad_norm": 0.107421875,
"learning_rate": 0.00011933683912035856,
"loss": 0.9311,
"step": 430
},
{
"epoch": 0.5,
"grad_norm": 0.115234375,
"learning_rate": 0.00011736481776669306,
"loss": 0.9496,
"step": 435
},
{
"epoch": 0.51,
"grad_norm": 0.11474609375,
"learning_rate": 0.00011538580810732938,
"loss": 0.9682,
"step": 440
},
{
"epoch": 0.51,
"grad_norm": 0.1259765625,
"learning_rate": 0.00011340060657587623,
"loss": 0.9753,
"step": 445
},
{
"epoch": 0.52,
"grad_norm": 0.11083984375,
"learning_rate": 0.00011141001209780249,
"loss": 0.9755,
"step": 450
},
{
"epoch": 0.52,
"grad_norm": 0.11181640625,
"learning_rate": 0.00010941482576891689,
"loss": 0.9706,
"step": 455
},
{
"epoch": 0.53,
"grad_norm": 0.11328125,
"learning_rate": 0.00010741585053297494,
"loss": 0.9557,
"step": 460
},
{
"epoch": 0.53,
"grad_norm": 0.111328125,
"learning_rate": 0.00010541389085854176,
"loss": 0.9588,
"step": 465
},
{
"epoch": 0.54,
"grad_norm": 0.10693359375,
"learning_rate": 0.00010340975241524132,
"loss": 0.9213,
"step": 470
},
{
"epoch": 0.55,
"grad_norm": 0.11865234375,
"learning_rate": 0.00010140424174952232,
"loss": 0.9914,
"step": 475
},
{
"epoch": 0.55,
"grad_norm": 0.1201171875,
"learning_rate": 9.939816596007146e-05,
"loss": 0.9455,
"step": 480
},
{
"epoch": 0.56,
"grad_norm": 0.10986328125,
"learning_rate": 9.739233237300402e-05,
"loss": 0.9367,
"step": 485
},
{
"epoch": 0.56,
"grad_norm": 0.11083984375,
"learning_rate": 9.538754821696323e-05,
"loss": 0.9763,
"step": 490
},
{
"epoch": 0.57,
"grad_norm": 0.111328125,
"learning_rate": 9.338462029825886e-05,
"loss": 0.9507,
"step": 495
},
{
"epoch": 0.57,
"grad_norm": 0.11572265625,
"learning_rate": 9.138435467617548e-05,
"loss": 0.992,
"step": 500
},
{
"epoch": 0.58,
"grad_norm": 0.11181640625,
"learning_rate": 8.938755633858186e-05,
"loss": 0.9587,
"step": 505
},
{
"epoch": 0.59,
"grad_norm": 0.1123046875,
"learning_rate": 8.739502887797107e-05,
"loss": 0.9864,
"step": 510
},
{
"epoch": 0.59,
"grad_norm": 0.1123046875,
"learning_rate": 8.540757416806236e-05,
"loss": 0.976,
"step": 515
},
{
"epoch": 0.6,
"grad_norm": 0.10693359375,
"learning_rate": 8.342599204109472e-05,
"loss": 0.948,
"step": 520
},
{
"epoch": 0.6,
"grad_norm": 0.1123046875,
"learning_rate": 8.145107996594206e-05,
"loss": 0.9962,
"step": 525
},
{
"epoch": 0.61,
"grad_norm": 0.11279296875,
"learning_rate": 7.948363272717926e-05,
"loss": 0.9859,
"step": 530
},
{
"epoch": 0.61,
"grad_norm": 0.11669921875,
"learning_rate": 7.752444210522898e-05,
"loss": 1.0142,
"step": 535
},
{
"epoch": 0.62,
"grad_norm": 0.11328125,
"learning_rate": 7.55742965577169e-05,
"loss": 0.954,
"step": 540
},
{
"epoch": 0.63,
"grad_norm": 0.111328125,
"learning_rate": 7.363398090216459e-05,
"loss": 0.9726,
"step": 545
},
{
"epoch": 0.63,
"grad_norm": 0.11474609375,
"learning_rate": 7.170427600014712e-05,
"loss": 0.9598,
"step": 550
},
{
"epoch": 0.64,
"grad_norm": 0.1181640625,
"learning_rate": 6.978595844304271e-05,
"loss": 0.9773,
"step": 555
},
{
"epoch": 0.64,
"grad_norm": 0.11083984375,
"learning_rate": 6.787980023950108e-05,
"loss": 0.964,
"step": 560
},
{
"epoch": 0.65,
"grad_norm": 0.12255859375,
"learning_rate": 6.598656850475562e-05,
"loss": 0.9626,
"step": 565
},
{
"epoch": 0.65,
"grad_norm": 0.11083984375,
"learning_rate": 6.410702515190543e-05,
"loss": 0.9722,
"step": 570
},
{
"epoch": 0.66,
"grad_norm": 0.10888671875,
"learning_rate": 6.22419265852906e-05,
"loss": 0.9332,
"step": 575
},
{
"epoch": 0.67,
"grad_norm": 0.1083984375,
"learning_rate": 6.039202339608432e-05,
"loss": 0.95,
"step": 580
},
{
"epoch": 0.67,
"grad_norm": 0.11083984375,
"learning_rate": 5.8558060060224817e-05,
"loss": 0.9757,
"step": 585
},
{
"epoch": 0.68,
"grad_norm": 0.11279296875,
"learning_rate": 5.6740774638807935e-05,
"loss": 0.9489,
"step": 590
},
{
"epoch": 0.68,
"grad_norm": 0.11669921875,
"learning_rate": 5.494089848106156e-05,
"loss": 0.9606,
"step": 595
},
{
"epoch": 0.69,
"grad_norm": 0.111328125,
"learning_rate": 5.3159155930021e-05,
"loss": 0.9461,
"step": 600
},
{
"epoch": 0.7,
"grad_norm": 0.109375,
"learning_rate": 5.13962640310241e-05,
"loss": 0.9324,
"step": 605
},
{
"epoch": 0.7,
"grad_norm": 0.109375,
"learning_rate": 4.9652932243143146e-05,
"loss": 0.9302,
"step": 610
},
{
"epoch": 0.71,
"grad_norm": 0.1083984375,
"learning_rate": 4.792986215366976e-05,
"loss": 0.9496,
"step": 615
},
{
"epoch": 0.71,
"grad_norm": 0.1123046875,
"learning_rate": 4.62277471957679e-05,
"loss": 0.9185,
"step": 620
},
{
"epoch": 0.72,
"grad_norm": 0.1123046875,
"learning_rate": 4.454727236940814e-05,
"loss": 0.9632,
"step": 625
},
{
"epoch": 0.72,
"grad_norm": 0.11083984375,
"learning_rate": 4.288911396569599e-05,
"loss": 0.9515,
"step": 630
},
{
"epoch": 0.73,
"grad_norm": 0.10986328125,
"learning_rate": 4.1253939294705004e-05,
"loss": 0.9341,
"step": 635
},
{
"epoch": 0.74,
"grad_norm": 0.1103515625,
"learning_rate": 3.964240641692416e-05,
"loss": 0.9914,
"step": 640
},
{
"epoch": 0.74,
"grad_norm": 0.1123046875,
"learning_rate": 3.80551638784277e-05,
"loss": 0.9792,
"step": 645
},
{
"epoch": 0.75,
"grad_norm": 0.1220703125,
"learning_rate": 3.649285044987397e-05,
"loss": 0.9405,
"step": 650
},
{
"epoch": 0.75,
"grad_norm": 0.10986328125,
"learning_rate": 3.495609486943814e-05,
"loss": 0.9875,
"step": 655
},
{
"epoch": 0.76,
"grad_norm": 0.109375,
"learning_rate": 3.3445515589782574e-05,
"loss": 0.933,
"step": 660
},
{
"epoch": 0.76,
"grad_norm": 0.111328125,
"learning_rate": 3.1961720529166436e-05,
"loss": 0.946,
"step": 665
},
{
"epoch": 0.77,
"grad_norm": 0.11376953125,
"learning_rate": 3.05053068267948e-05,
"loss": 0.9533,
"step": 670
},
{
"epoch": 0.78,
"grad_norm": 0.111328125,
"learning_rate": 2.9076860602505564e-05,
"loss": 0.9527,
"step": 675
},
{
"epoch": 0.78,
"grad_norm": 0.107421875,
"learning_rate": 2.7676956720891235e-05,
"loss": 0.9565,
"step": 680
},
{
"epoch": 0.79,
"grad_norm": 0.109375,
"learning_rate": 2.6306158559950023e-05,
"loss": 0.9447,
"step": 685
},
{
"epoch": 0.79,
"grad_norm": 0.11083984375,
"learning_rate": 2.496501778435977e-05,
"loss": 0.9301,
"step": 690
},
{
"epoch": 0.8,
"grad_norm": 0.13671875,
"learning_rate": 2.3654074123465752e-05,
"loss": 0.9523,
"step": 695
},
{
"epoch": 0.8,
"grad_norm": 0.1123046875,
"learning_rate": 2.2373855154071732e-05,
"loss": 0.9705,
"step": 700
},
{
"epoch": 0.81,
"grad_norm": 0.11083984375,
"learning_rate": 2.1124876088121692e-05,
"loss": 0.9743,
"step": 705
},
{
"epoch": 0.82,
"grad_norm": 0.11083984375,
"learning_rate": 1.990763956535777e-05,
"loss": 0.9519,
"step": 710
},
{
"epoch": 0.82,
"grad_norm": 0.10986328125,
"learning_rate": 1.8722635451037497e-05,
"loss": 0.9858,
"step": 715
},
{
"epoch": 0.83,
"grad_norm": 0.10791015625,
"learning_rate": 1.757034063879235e-05,
"loss": 0.9419,
"step": 720
},
{
"epoch": 0.83,
"grad_norm": 0.115234375,
"learning_rate": 1.6451218858706374e-05,
"loss": 0.9403,
"step": 725
},
{
"epoch": 0.84,
"grad_norm": 0.10546875,
"learning_rate": 1.5365720490692426e-05,
"loss": 0.9526,
"step": 730
},
{
"epoch": 0.84,
"grad_norm": 0.109375,
"learning_rate": 1.4314282383241096e-05,
"loss": 0.9508,
"step": 735
},
{
"epoch": 0.85,
"grad_norm": 0.10888671875,
"learning_rate": 1.3297327677615124e-05,
"loss": 0.979,
"step": 740
},
{
"epoch": 0.86,
"grad_norm": 0.11279296875,
"learning_rate": 1.2315265637560357e-05,
"loss": 0.9523,
"step": 745
},
{
"epoch": 0.86,
"grad_norm": 0.1103515625,
"learning_rate": 1.136849148460125e-05,
"loss": 0.959,
"step": 750
},
{
"epoch": 0.87,
"grad_norm": 0.1103515625,
"learning_rate": 1.045738623898791e-05,
"loss": 0.9758,
"step": 755
},
{
"epoch": 0.87,
"grad_norm": 0.11328125,
"learning_rate": 9.582316566357996e-06,
"loss": 0.9481,
"step": 760
},
{
"epoch": 0.88,
"grad_norm": 0.11083984375,
"learning_rate": 8.74363463017569e-06,
"loss": 0.9695,
"step": 765
},
{
"epoch": 0.88,
"grad_norm": 0.10693359375,
"learning_rate": 7.94167795000682e-06,
"loss": 0.964,
"step": 770
},
{
"epoch": 0.89,
"grad_norm": 0.11181640625,
"learning_rate": 7.176769265687389e-06,
"loss": 0.953,
"step": 775
},
{
"epoch": 0.9,
"grad_norm": 0.10791015625,
"learning_rate": 6.4492164074399065e-06,
"loss": 0.9455,
"step": 780
},
{
"epoch": 0.9,
"grad_norm": 0.1064453125,
"learning_rate": 5.7593121719900835e-06,
"loss": 0.966,
"step": 785
},
{
"epoch": 0.91,
"grad_norm": 0.11083984375,
"learning_rate": 5.10733420473366e-06,
"loss": 0.9649,
"step": 790
},
{
"epoch": 0.91,
"grad_norm": 0.1083984375,
"learning_rate": 4.493544888000467e-06,
"loss": 0.9341,
"step": 795
},
{
"epoch": 0.92,
"grad_norm": 0.11279296875,
"learning_rate": 3.918191235461333e-06,
"loss": 0.9724,
"step": 800
},
{
"epoch": 0.93,
"grad_norm": 0.1123046875,
"learning_rate": 3.381504792719714e-06,
"loss": 0.9757,
"step": 805
},
{
"epoch": 0.93,
"grad_norm": 0.1103515625,
"learning_rate": 2.8837015441283586e-06,
"loss": 0.9693,
"step": 810
},
{
"epoch": 0.94,
"grad_norm": 0.111328125,
"learning_rate": 2.4249818258684664e-06,
"loss": 0.9392,
"step": 815
},
{
"epoch": 0.94,
"grad_norm": 0.1123046875,
"learning_rate": 2.0055302453262924e-06,
"loss": 0.9504,
"step": 820
},
{
"epoch": 0.95,
"grad_norm": 0.107421875,
"learning_rate": 1.6255156067997323e-06,
"loss": 0.9688,
"step": 825
},
{
"epoch": 0.95,
"grad_norm": 0.10791015625,
"learning_rate": 1.285090843564485e-06,
"loss": 0.9293,
"step": 830
},
{
"epoch": 0.96,
"grad_norm": 0.1103515625,
"learning_rate": 9.843929563276733e-07,
"loss": 0.9534,
"step": 835
},
{
"epoch": 0.97,
"grad_norm": 0.107421875,
"learning_rate": 7.235429580931152e-07,
"loss": 0.9132,
"step": 840
},
{
"epoch": 0.97,
"grad_norm": 0.111328125,
"learning_rate": 5.026458254608457e-07,
"loss": 0.9614,
"step": 845
},
{
"epoch": 0.98,
"grad_norm": 0.11865234375,
"learning_rate": 3.21790456380211e-07,
"loss": 0.9838,
"step": 850
},
{
"epoch": 0.98,
"grad_norm": 0.1103515625,
"learning_rate": 1.8104963437381993e-07,
"loss": 0.9555,
"step": 855
},
{
"epoch": 0.99,
"grad_norm": 0.10791015625,
"learning_rate": 8.047999924645222e-08,
"loss": 0.9572,
"step": 860
},
{
"epoch": 0.99,
"grad_norm": 0.1103515625,
"learning_rate": 2.012202429091392e-08,
"loss": 0.9562,
"step": 865
},
{
"epoch": 1.0,
"grad_norm": 0.11279296875,
"learning_rate": 0.0,
"loss": 0.98,
"step": 870
},
{
"epoch": 1.0,
"eval_loss": 0.9584344029426575,
"eval_runtime": 2489.4137,
"eval_samples_per_second": 6.199,
"eval_steps_per_second": 0.775,
"step": 870
},
{
"epoch": 1.0,
"step": 870,
"total_flos": 2.446890748289745e+18,
"train_loss": 0.9710182381772446,
"train_runtime": 17696.8906,
"train_samples_per_second": 1.574,
"train_steps_per_second": 0.049
}
],
"logging_steps": 5,
"max_steps": 870,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 2.446890748289745e+18,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
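
A minimal sketch of how such a state file might be consumed: the entries in `log_history` that carry a `loss` key are the per-logging-step training records (one every `logging_steps` optimizer steps), while the last two entries hold the evaluation metrics and the run summary. The local filename `trainer_state.json` used below is an assumption for illustration, not part of the log.

```python
import json

# Load a trainer_state.json like the one above (hypothetical local path).
with open("trainer_state.json") as f:
    state = json.load(f)

# Per-step training records carry "loss"; the eval entry uses "eval_loss"
# and the final summary uses "train_loss", so filtering on "loss" keeps
# only the logged training steps.
train_log = [entry for entry in state["log_history"] if "loss" in entry]

steps = [entry["step"] for entry in train_log]
losses = [entry["loss"] for entry in train_log]

print(f"{len(train_log)} logged steps (every {state['logging_steps']} of {state['max_steps']})")
print(f"train loss {losses[0]:.4f} @ step {steps[0]} -> {losses[-1]:.4f} @ step {steps[-1]}")
print(f"final eval_loss: {state['log_history'][-2]['eval_loss']}")
```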