modern_ancientpoem_encoder / trainer_state.json
{
"best_global_step": null,
"best_metric": 0.8121369481086731,
"best_model_checkpoint": null,
"epoch": 5.915813424345847,
"eval_steps": 200,
"global_step": 10400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02844141069397042,
"grad_norm": 8.889737129211426,
"learning_rate": 9.099526066350711e-07,
"loss": 4.4241,
"step": 50
},
{
"epoch": 0.05688282138794084,
"grad_norm": 7.543558120727539,
"learning_rate": 1.8578199052132703e-06,
"loss": 3.4415,
"step": 100
},
{
"epoch": 0.08532423208191127,
"grad_norm": 7.774235725402832,
"learning_rate": 2.8056872037914696e-06,
"loss": 2.6725,
"step": 150
},
{
"epoch": 0.11376564277588168,
"grad_norm": 7.825632572174072,
"learning_rate": 3.7535545023696683e-06,
"loss": 2.4137,
"step": 200
},
{
"epoch": 0.11376564277588168,
"eval_loss": 2.2685751914978027,
"eval_runtime": 29.7449,
"eval_samples_per_second": 840.481,
"eval_steps_per_second": 6.589,
"step": 200
},
{
"epoch": 0.1422070534698521,
"grad_norm": 8.4616060256958,
"learning_rate": 4.701421800947868e-06,
"loss": 2.2701,
"step": 250
},
{
"epoch": 0.17064846416382254,
"grad_norm": 7.439651966094971,
"learning_rate": 5.6492890995260666e-06,
"loss": 2.1523,
"step": 300
},
{
"epoch": 0.19908987485779295,
"grad_norm": 8.319734573364258,
"learning_rate": 6.597156398104266e-06,
"loss": 2.0805,
"step": 350
},
{
"epoch": 0.22753128555176336,
"grad_norm": 7.824019432067871,
"learning_rate": 7.545023696682466e-06,
"loss": 2.0513,
"step": 400
},
{
"epoch": 0.22753128555176336,
"eval_loss": 1.9506336450576782,
"eval_runtime": 28.6984,
"eval_samples_per_second": 871.127,
"eval_steps_per_second": 6.83,
"step": 400
},
{
"epoch": 0.25597269624573377,
"grad_norm": 8.402134895324707,
"learning_rate": 8.492890995260664e-06,
"loss": 2.0048,
"step": 450
},
{
"epoch": 0.2844141069397042,
"grad_norm": 7.345431327819824,
"learning_rate": 9.440758293838863e-06,
"loss": 1.9552,
"step": 500
},
{
"epoch": 0.31285551763367464,
"grad_norm": 8.147149085998535,
"learning_rate": 1.0388625592417063e-05,
"loss": 1.8778,
"step": 550
},
{
"epoch": 0.3412969283276451,
"grad_norm": 7.802554130554199,
"learning_rate": 1.133649289099526e-05,
"loss": 1.8549,
"step": 600
},
{
"epoch": 0.3412969283276451,
"eval_loss": 1.7629565000534058,
"eval_runtime": 33.6232,
"eval_samples_per_second": 743.534,
"eval_steps_per_second": 5.829,
"step": 600
},
{
"epoch": 0.36973833902161546,
"grad_norm": 7.983552932739258,
"learning_rate": 1.228436018957346e-05,
"loss": 1.822,
"step": 650
},
{
"epoch": 0.3981797497155859,
"grad_norm": 8.035250663757324,
"learning_rate": 1.323222748815166e-05,
"loss": 1.8128,
"step": 700
},
{
"epoch": 0.42662116040955633,
"grad_norm": 8.409351348876953,
"learning_rate": 1.4180094786729858e-05,
"loss": 1.7742,
"step": 750
},
{
"epoch": 0.4550625711035267,
"grad_norm": 7.7319183349609375,
"learning_rate": 1.5127962085308059e-05,
"loss": 1.7076,
"step": 800
},
{
"epoch": 0.4550625711035267,
"eval_loss": 1.6330854892730713,
"eval_runtime": 33.0226,
"eval_samples_per_second": 757.058,
"eval_steps_per_second": 5.935,
"step": 800
},
{
"epoch": 0.48350398179749715,
"grad_norm": 7.466287136077881,
"learning_rate": 1.6075829383886257e-05,
"loss": 1.6919,
"step": 850
},
{
"epoch": 0.5119453924914675,
"grad_norm": 7.655446529388428,
"learning_rate": 1.7023696682464458e-05,
"loss": 1.64,
"step": 900
},
{
"epoch": 0.540386803185438,
"grad_norm": 8.173416137695312,
"learning_rate": 1.7971563981042655e-05,
"loss": 1.6291,
"step": 950
},
{
"epoch": 0.5688282138794084,
"grad_norm": 7.376980781555176,
"learning_rate": 1.8919431279620855e-05,
"loss": 1.5881,
"step": 1000
},
{
"epoch": 0.5688282138794084,
"eval_loss": 1.5367897748947144,
"eval_runtime": 32.9799,
"eval_samples_per_second": 758.038,
"eval_steps_per_second": 5.943,
"step": 1000
},
{
"epoch": 0.5972696245733788,
"grad_norm": 7.863293170928955,
"learning_rate": 1.9867298578199055e-05,
"loss": 1.6018,
"step": 1050
},
{
"epoch": 0.6257110352673493,
"grad_norm": 7.6200385093688965,
"learning_rate": 1.9909406931423158e-05,
"loss": 1.5664,
"step": 1100
},
{
"epoch": 0.6541524459613197,
"grad_norm": 8.286286354064941,
"learning_rate": 1.9804066154008218e-05,
"loss": 1.5545,
"step": 1150
},
{
"epoch": 0.6825938566552902,
"grad_norm": 7.845026969909668,
"learning_rate": 1.969872537659328e-05,
"loss": 1.5292,
"step": 1200
},
{
"epoch": 0.6825938566552902,
"eval_loss": 1.4531670808792114,
"eval_runtime": 29.4807,
"eval_samples_per_second": 848.011,
"eval_steps_per_second": 6.648,
"step": 1200
},
{
"epoch": 0.7110352673492605,
"grad_norm": 7.120193004608154,
"learning_rate": 1.9593384599178345e-05,
"loss": 1.5166,
"step": 1250
},
{
"epoch": 0.7394766780432309,
"grad_norm": 7.721842288970947,
"learning_rate": 1.9488043821763408e-05,
"loss": 1.517,
"step": 1300
},
{
"epoch": 0.7679180887372014,
"grad_norm": 7.104468822479248,
"learning_rate": 1.938270304434847e-05,
"loss": 1.4639,
"step": 1350
},
{
"epoch": 0.7963594994311718,
"grad_norm": 7.570240020751953,
"learning_rate": 1.927736226693353e-05,
"loss": 1.4729,
"step": 1400
},
{
"epoch": 0.7963594994311718,
"eval_loss": 1.368685245513916,
"eval_runtime": 28.6992,
"eval_samples_per_second": 871.103,
"eval_steps_per_second": 6.829,
"step": 1400
},
{
"epoch": 0.8248009101251422,
"grad_norm": 7.745856761932373,
"learning_rate": 1.9172021489518595e-05,
"loss": 1.4501,
"step": 1450
},
{
"epoch": 0.8532423208191127,
"grad_norm": 7.175948619842529,
"learning_rate": 1.906668071210366e-05,
"loss": 1.3932,
"step": 1500
},
{
"epoch": 0.8816837315130831,
"grad_norm": 8.291092872619629,
"learning_rate": 1.8961339934688722e-05,
"loss": 1.4063,
"step": 1550
},
{
"epoch": 0.9101251422070534,
"grad_norm": 7.994405269622803,
"learning_rate": 1.8855999157273782e-05,
"loss": 1.3825,
"step": 1600
},
{
"epoch": 0.9101251422070534,
"eval_loss": 1.300325632095337,
"eval_runtime": 28.6638,
"eval_samples_per_second": 872.179,
"eval_steps_per_second": 6.838,
"step": 1600
},
{
"epoch": 0.9385665529010239,
"grad_norm": 8.009012222290039,
"learning_rate": 1.8750658379858845e-05,
"loss": 1.3647,
"step": 1650
},
{
"epoch": 0.9670079635949943,
"grad_norm": 8.436450004577637,
"learning_rate": 1.864531760244391e-05,
"loss": 1.3431,
"step": 1700
},
{
"epoch": 0.9954493742889647,
"grad_norm": 7.547204971313477,
"learning_rate": 1.8539976825028972e-05,
"loss": 1.3417,
"step": 1750
},
{
"epoch": 1.023890784982935,
"grad_norm": 6.637471675872803,
"learning_rate": 1.8434636047614032e-05,
"loss": 1.0839,
"step": 1800
},
{
"epoch": 1.023890784982935,
"eval_loss": 1.2430765628814697,
"eval_runtime": 28.6828,
"eval_samples_per_second": 871.603,
"eval_steps_per_second": 6.833,
"step": 1800
},
{
"epoch": 1.0523321956769056,
"grad_norm": 7.198896408081055,
"learning_rate": 1.8329295270199096e-05,
"loss": 1.0801,
"step": 1850
},
{
"epoch": 1.080773606370876,
"grad_norm": 7.391284942626953,
"learning_rate": 1.8223954492784156e-05,
"loss": 1.0577,
"step": 1900
},
{
"epoch": 1.1092150170648465,
"grad_norm": 6.571183681488037,
"learning_rate": 1.811861371536922e-05,
"loss": 1.0159,
"step": 1950
},
{
"epoch": 1.1376564277588168,
"grad_norm": 7.20968770980835,
"learning_rate": 1.8013272937954283e-05,
"loss": 1.0239,
"step": 2000
},
{
"epoch": 1.1376564277588168,
"eval_loss": 1.213191270828247,
"eval_runtime": 28.5325,
"eval_samples_per_second": 876.195,
"eval_steps_per_second": 6.869,
"step": 2000
},
{
"epoch": 1.1660978384527874,
"grad_norm": 6.97741174697876,
"learning_rate": 1.7907932160539346e-05,
"loss": 1.0335,
"step": 2050
},
{
"epoch": 1.1945392491467577,
"grad_norm": 7.157691478729248,
"learning_rate": 1.7802591383124406e-05,
"loss": 1.0117,
"step": 2100
},
{
"epoch": 1.222980659840728,
"grad_norm": 7.168184280395508,
"learning_rate": 1.769725060570947e-05,
"loss": 1.0343,
"step": 2150
},
{
"epoch": 1.2514220705346986,
"grad_norm": 7.099086284637451,
"learning_rate": 1.7591909828294533e-05,
"loss": 1.0193,
"step": 2200
},
{
"epoch": 1.2514220705346986,
"eval_loss": 1.1807738542556763,
"eval_runtime": 28.5908,
"eval_samples_per_second": 874.407,
"eval_steps_per_second": 6.855,
"step": 2200
},
{
"epoch": 1.2798634812286689,
"grad_norm": 7.232935905456543,
"learning_rate": 1.7486569050879597e-05,
"loss": 1.0235,
"step": 2250
},
{
"epoch": 1.3083048919226394,
"grad_norm": 6.775105953216553,
"learning_rate": 1.738122827346466e-05,
"loss": 0.9949,
"step": 2300
},
{
"epoch": 1.3367463026166098,
"grad_norm": 6.916153430938721,
"learning_rate": 1.727588749604972e-05,
"loss": 1.0058,
"step": 2350
},
{
"epoch": 1.36518771331058,
"grad_norm": 6.561580181121826,
"learning_rate": 1.7170546718634784e-05,
"loss": 1.0039,
"step": 2400
},
{
"epoch": 1.36518771331058,
"eval_loss": 1.1427565813064575,
"eval_runtime": 28.6907,
"eval_samples_per_second": 871.363,
"eval_steps_per_second": 6.831,
"step": 2400
},
{
"epoch": 1.3936291240045506,
"grad_norm": 6.508544921875,
"learning_rate": 1.7065205941219847e-05,
"loss": 1.0164,
"step": 2450
},
{
"epoch": 1.4220705346985212,
"grad_norm": 7.889155387878418,
"learning_rate": 1.695986516380491e-05,
"loss": 0.9934,
"step": 2500
},
{
"epoch": 1.4505119453924915,
"grad_norm": 7.1703782081604,
"learning_rate": 1.685452438638997e-05,
"loss": 0.9777,
"step": 2550
},
{
"epoch": 1.4789533560864618,
"grad_norm": 7.198650360107422,
"learning_rate": 1.6749183608975034e-05,
"loss": 0.9753,
"step": 2600
},
{
"epoch": 1.4789533560864618,
"eval_loss": 1.1101032495498657,
"eval_runtime": 28.9361,
"eval_samples_per_second": 863.971,
"eval_steps_per_second": 6.774,
"step": 2600
},
{
"epoch": 1.5073947667804322,
"grad_norm": 7.485228061676025,
"learning_rate": 1.6643842831560098e-05,
"loss": 0.9621,
"step": 2650
},
{
"epoch": 1.5358361774744027,
"grad_norm": 6.426005840301514,
"learning_rate": 1.653850205414516e-05,
"loss": 0.9756,
"step": 2700
},
{
"epoch": 1.5642775881683733,
"grad_norm": 6.803189277648926,
"learning_rate": 1.643316127673022e-05,
"loss": 0.9725,
"step": 2750
},
{
"epoch": 1.5927189988623436,
"grad_norm": 7.307713508605957,
"learning_rate": 1.6327820499315285e-05,
"loss": 0.9649,
"step": 2800
},
{
"epoch": 1.5927189988623436,
"eval_loss": 1.0812790393829346,
"eval_runtime": 28.8811,
"eval_samples_per_second": 865.619,
"eval_steps_per_second": 6.786,
"step": 2800
},
{
"epoch": 1.621160409556314,
"grad_norm": 6.56484317779541,
"learning_rate": 1.6222479721900348e-05,
"loss": 0.9652,
"step": 2850
},
{
"epoch": 1.6496018202502845,
"grad_norm": 6.714264392852783,
"learning_rate": 1.6117138944485412e-05,
"loss": 0.9861,
"step": 2900
},
{
"epoch": 1.6780432309442548,
"grad_norm": 6.9539642333984375,
"learning_rate": 1.6011798167070475e-05,
"loss": 0.916,
"step": 2950
},
{
"epoch": 1.7064846416382253,
"grad_norm": 6.552751541137695,
"learning_rate": 1.5906457389655535e-05,
"loss": 0.9417,
"step": 3000
},
{
"epoch": 1.7064846416382253,
"eval_loss": 1.0522855520248413,
"eval_runtime": 28.864,
"eval_samples_per_second": 866.132,
"eval_steps_per_second": 6.79,
"step": 3000
},
{
"epoch": 1.7349260523321957,
"grad_norm": 6.961670875549316,
"learning_rate": 1.58011166122406e-05,
"loss": 0.9599,
"step": 3050
},
{
"epoch": 1.763367463026166,
"grad_norm": 7.874273300170898,
"learning_rate": 1.5695775834825662e-05,
"loss": 0.9275,
"step": 3100
},
{
"epoch": 1.7918088737201365,
"grad_norm": 5.82428503036499,
"learning_rate": 1.5590435057410726e-05,
"loss": 0.9247,
"step": 3150
},
{
"epoch": 1.820250284414107,
"grad_norm": 6.425380706787109,
"learning_rate": 1.5485094279995786e-05,
"loss": 0.9417,
"step": 3200
},
{
"epoch": 1.820250284414107,
"eval_loss": 1.0305691957473755,
"eval_runtime": 28.6406,
"eval_samples_per_second": 872.888,
"eval_steps_per_second": 6.843,
"step": 3200
},
{
"epoch": 1.8486916951080774,
"grad_norm": 6.136819362640381,
"learning_rate": 1.537975350258085e-05,
"loss": 0.9275,
"step": 3250
},
{
"epoch": 1.8771331058020477,
"grad_norm": 6.463824272155762,
"learning_rate": 1.5274412725165913e-05,
"loss": 0.9431,
"step": 3300
},
{
"epoch": 1.905574516496018,
"grad_norm": 6.83174467086792,
"learning_rate": 1.5169071947750974e-05,
"loss": 0.9147,
"step": 3350
},
{
"epoch": 1.9340159271899886,
"grad_norm": 7.504420280456543,
"learning_rate": 1.5063731170336038e-05,
"loss": 0.8957,
"step": 3400
},
{
"epoch": 1.9340159271899886,
"eval_loss": 1.0050827264785767,
"eval_runtime": 28.9461,
"eval_samples_per_second": 863.675,
"eval_steps_per_second": 6.771,
"step": 3400
},
{
"epoch": 1.9624573378839592,
"grad_norm": 7.271299839019775,
"learning_rate": 1.49583903929211e-05,
"loss": 0.9169,
"step": 3450
},
{
"epoch": 1.9908987485779295,
"grad_norm": 6.796669960021973,
"learning_rate": 1.4853049615506163e-05,
"loss": 0.9079,
"step": 3500
},
{
"epoch": 2.0193401592719,
"grad_norm": 5.5628180503845215,
"learning_rate": 1.4747708838091227e-05,
"loss": 0.7057,
"step": 3550
},
{
"epoch": 2.04778156996587,
"grad_norm": 5.777904987335205,
"learning_rate": 1.4642368060676288e-05,
"loss": 0.6037,
"step": 3600
},
{
"epoch": 2.04778156996587,
"eval_loss": 0.9944195747375488,
"eval_runtime": 28.8677,
"eval_samples_per_second": 866.019,
"eval_steps_per_second": 6.79,
"step": 3600
},
{
"epoch": 2.076222980659841,
"grad_norm": 5.112311363220215,
"learning_rate": 1.4537027283261352e-05,
"loss": 0.5888,
"step": 3650
},
{
"epoch": 2.1046643913538112,
"grad_norm": 6.392485618591309,
"learning_rate": 1.4431686505846414e-05,
"loss": 0.6134,
"step": 3700
},
{
"epoch": 2.1331058020477816,
"grad_norm": 6.09423303604126,
"learning_rate": 1.4326345728431477e-05,
"loss": 0.6209,
"step": 3750
},
{
"epoch": 2.161547212741752,
"grad_norm": 6.144412040710449,
"learning_rate": 1.4221004951016539e-05,
"loss": 0.6163,
"step": 3800
},
{
"epoch": 2.161547212741752,
"eval_loss": 0.9836474061012268,
"eval_runtime": 28.9354,
"eval_samples_per_second": 863.993,
"eval_steps_per_second": 6.774,
"step": 3800
},
{
"epoch": 2.189988623435722,
"grad_norm": 5.410032272338867,
"learning_rate": 1.4115664173601602e-05,
"loss": 0.6271,
"step": 3850
},
{
"epoch": 2.218430034129693,
"grad_norm": 5.688889980316162,
"learning_rate": 1.4010323396186664e-05,
"loss": 0.629,
"step": 3900
},
{
"epoch": 2.2468714448236633,
"grad_norm": 5.400741100311279,
"learning_rate": 1.3904982618771728e-05,
"loss": 0.6041,
"step": 3950
},
{
"epoch": 2.2753128555176336,
"grad_norm": 6.409387111663818,
"learning_rate": 1.379964184135679e-05,
"loss": 0.622,
"step": 4000
},
{
"epoch": 2.2753128555176336,
"eval_loss": 0.9791940450668335,
"eval_runtime": 29.3397,
"eval_samples_per_second": 852.088,
"eval_steps_per_second": 6.68,
"step": 4000
},
{
"epoch": 2.303754266211604,
"grad_norm": 5.827444076538086,
"learning_rate": 1.3694301063941853e-05,
"loss": 0.6175,
"step": 4050
},
{
"epoch": 2.3321956769055747,
"grad_norm": 6.436943054199219,
"learning_rate": 1.3588960286526916e-05,
"loss": 0.627,
"step": 4100
},
{
"epoch": 2.360637087599545,
"grad_norm": 5.842226028442383,
"learning_rate": 1.3483619509111978e-05,
"loss": 0.6339,
"step": 4150
},
{
"epoch": 2.3890784982935154,
"grad_norm": 6.457271575927734,
"learning_rate": 1.3378278731697042e-05,
"loss": 0.6325,
"step": 4200
},
{
"epoch": 2.3890784982935154,
"eval_loss": 0.9643296003341675,
"eval_runtime": 28.9755,
"eval_samples_per_second": 862.799,
"eval_steps_per_second": 6.764,
"step": 4200
},
{
"epoch": 2.4175199089874857,
"grad_norm": 6.070743560791016,
"learning_rate": 1.3272937954282103e-05,
"loss": 0.6044,
"step": 4250
},
{
"epoch": 2.445961319681456,
"grad_norm": 6.5427565574646,
"learning_rate": 1.3167597176867167e-05,
"loss": 0.6124,
"step": 4300
},
{
"epoch": 2.474402730375427,
"grad_norm": 5.342416286468506,
"learning_rate": 1.3062256399452229e-05,
"loss": 0.6326,
"step": 4350
},
{
"epoch": 2.502844141069397,
"grad_norm": 5.6298041343688965,
"learning_rate": 1.2956915622037292e-05,
"loss": 0.6349,
"step": 4400
},
{
"epoch": 2.502844141069397,
"eval_loss": 0.9462358355522156,
"eval_runtime": 29.0573,
"eval_samples_per_second": 860.369,
"eval_steps_per_second": 6.745,
"step": 4400
},
{
"epoch": 2.5312855517633674,
"grad_norm": 5.618624210357666,
"learning_rate": 1.2851574844622354e-05,
"loss": 0.6286,
"step": 4450
},
{
"epoch": 2.5597269624573378,
"grad_norm": 5.629756927490234,
"learning_rate": 1.2746234067207417e-05,
"loss": 0.6325,
"step": 4500
},
{
"epoch": 2.5881683731513085,
"grad_norm": 5.6407318115234375,
"learning_rate": 1.2640893289792479e-05,
"loss": 0.6399,
"step": 4550
},
{
"epoch": 2.616609783845279,
"grad_norm": 6.080498695373535,
"learning_rate": 1.2535552512377542e-05,
"loss": 0.6184,
"step": 4600
},
{
"epoch": 2.616609783845279,
"eval_loss": 0.9317007064819336,
"eval_runtime": 29.0538,
"eval_samples_per_second": 860.472,
"eval_steps_per_second": 6.746,
"step": 4600
},
{
"epoch": 2.645051194539249,
"grad_norm": 6.4962239265441895,
"learning_rate": 1.2430211734962604e-05,
"loss": 0.6292,
"step": 4650
},
{
"epoch": 2.6734926052332195,
"grad_norm": 6.621969223022461,
"learning_rate": 1.2324870957547668e-05,
"loss": 0.6017,
"step": 4700
},
{
"epoch": 2.70193401592719,
"grad_norm": 5.2126054763793945,
"learning_rate": 1.2219530180132731e-05,
"loss": 0.6305,
"step": 4750
},
{
"epoch": 2.73037542662116,
"grad_norm": 6.410334587097168,
"learning_rate": 1.2114189402717793e-05,
"loss": 0.6152,
"step": 4800
},
{
"epoch": 2.73037542662116,
"eval_loss": 0.9212636947631836,
"eval_runtime": 29.0224,
"eval_samples_per_second": 861.404,
"eval_steps_per_second": 6.753,
"step": 4800
},
{
"epoch": 2.758816837315131,
"grad_norm": 6.005552291870117,
"learning_rate": 1.2008848625302856e-05,
"loss": 0.5972,
"step": 4850
},
{
"epoch": 2.7872582480091013,
"grad_norm": 6.479732990264893,
"learning_rate": 1.1903507847887918e-05,
"loss": 0.6048,
"step": 4900
},
{
"epoch": 2.8156996587030716,
"grad_norm": 6.2526397705078125,
"learning_rate": 1.1798167070472982e-05,
"loss": 0.6096,
"step": 4950
},
{
"epoch": 2.8441410693970424,
"grad_norm": 6.823054313659668,
"learning_rate": 1.1692826293058043e-05,
"loss": 0.6156,
"step": 5000
},
{
"epoch": 2.8441410693970424,
"eval_loss": 0.9072502851486206,
"eval_runtime": 29.0918,
"eval_samples_per_second": 859.348,
"eval_steps_per_second": 6.737,
"step": 5000
},
{
"epoch": 2.8725824800910127,
"grad_norm": 5.63970422744751,
"learning_rate": 1.1587485515643107e-05,
"loss": 0.5942,
"step": 5050
},
{
"epoch": 2.901023890784983,
"grad_norm": 5.7269182205200195,
"learning_rate": 1.1482144738228169e-05,
"loss": 0.592,
"step": 5100
},
{
"epoch": 2.9294653014789533,
"grad_norm": 6.235472202301025,
"learning_rate": 1.1376803960813232e-05,
"loss": 0.6088,
"step": 5150
},
{
"epoch": 2.9579067121729237,
"grad_norm": 6.49041748046875,
"learning_rate": 1.1271463183398294e-05,
"loss": 0.5941,
"step": 5200
},
{
"epoch": 2.9579067121729237,
"eval_loss": 0.8950417041778564,
"eval_runtime": 29.0632,
"eval_samples_per_second": 860.195,
"eval_steps_per_second": 6.744,
"step": 5200
},
{
"epoch": 2.986348122866894,
"grad_norm": 6.089723587036133,
"learning_rate": 1.1166122405983357e-05,
"loss": 0.6161,
"step": 5250
},
{
"epoch": 3.0147895335608648,
"grad_norm": 4.977637767791748,
"learning_rate": 1.1060781628568419e-05,
"loss": 0.5021,
"step": 5300
},
{
"epoch": 3.043230944254835,
"grad_norm": 5.729337215423584,
"learning_rate": 1.0955440851153483e-05,
"loss": 0.4116,
"step": 5350
},
{
"epoch": 3.0716723549488054,
"grad_norm": 4.303124904632568,
"learning_rate": 1.0850100073738546e-05,
"loss": 0.3936,
"step": 5400
},
{
"epoch": 3.0716723549488054,
"eval_loss": 0.9009103775024414,
"eval_runtime": 28.839,
"eval_samples_per_second": 866.881,
"eval_steps_per_second": 6.796,
"step": 5400
},
{
"epoch": 3.1001137656427757,
"grad_norm": 5.400048732757568,
"learning_rate": 1.0744759296323608e-05,
"loss": 0.4193,
"step": 5450
},
{
"epoch": 3.1285551763367465,
"grad_norm": 6.018354415893555,
"learning_rate": 1.0639418518908671e-05,
"loss": 0.422,
"step": 5500
},
{
"epoch": 3.156996587030717,
"grad_norm": 5.685466766357422,
"learning_rate": 1.0534077741493733e-05,
"loss": 0.432,
"step": 5550
},
{
"epoch": 3.185437997724687,
"grad_norm": 5.172823905944824,
"learning_rate": 1.0428736964078797e-05,
"loss": 0.4281,
"step": 5600
},
{
"epoch": 3.185437997724687,
"eval_loss": 0.8985010981559753,
"eval_runtime": 28.8596,
"eval_samples_per_second": 866.262,
"eval_steps_per_second": 6.791,
"step": 5600
},
{
"epoch": 3.2138794084186575,
"grad_norm": 4.836643218994141,
"learning_rate": 1.0323396186663858e-05,
"loss": 0.4091,
"step": 5650
},
{
"epoch": 3.242320819112628,
"grad_norm": 5.528740406036377,
"learning_rate": 1.0218055409248922e-05,
"loss": 0.4305,
"step": 5700
},
{
"epoch": 3.2707622298065986,
"grad_norm": 4.45158576965332,
"learning_rate": 1.0112714631833984e-05,
"loss": 0.4203,
"step": 5750
},
{
"epoch": 3.299203640500569,
"grad_norm": 6.183067798614502,
"learning_rate": 1.0007373854419047e-05,
"loss": 0.4193,
"step": 5800
},
{
"epoch": 3.299203640500569,
"eval_loss": 0.8869061470031738,
"eval_runtime": 28.6962,
"eval_samples_per_second": 871.197,
"eval_steps_per_second": 6.83,
"step": 5800
},
{
"epoch": 3.3276450511945392,
"grad_norm": 5.19403600692749,
"learning_rate": 9.902033077004109e-06,
"loss": 0.4238,
"step": 5850
},
{
"epoch": 3.3560864618885096,
"grad_norm": 5.304056644439697,
"learning_rate": 9.796692299589172e-06,
"loss": 0.4274,
"step": 5900
},
{
"epoch": 3.3845278725824803,
"grad_norm": 4.698873519897461,
"learning_rate": 9.691351522174236e-06,
"loss": 0.4124,
"step": 5950
},
{
"epoch": 3.4129692832764507,
"grad_norm": 5.627292156219482,
"learning_rate": 9.586010744759297e-06,
"loss": 0.4241,
"step": 6000
},
{
"epoch": 3.4129692832764507,
"eval_loss": 0.8842443823814392,
"eval_runtime": 28.6817,
"eval_samples_per_second": 871.636,
"eval_steps_per_second": 6.834,
"step": 6000
},
{
"epoch": 3.441410693970421,
"grad_norm": 6.473363876342773,
"learning_rate": 9.480669967344361e-06,
"loss": 0.427,
"step": 6050
},
{
"epoch": 3.4698521046643913,
"grad_norm": 4.9653801918029785,
"learning_rate": 9.375329189929423e-06,
"loss": 0.4275,
"step": 6100
},
{
"epoch": 3.4982935153583616,
"grad_norm": 4.9852294921875,
"learning_rate": 9.269988412514486e-06,
"loss": 0.4152,
"step": 6150
},
{
"epoch": 3.526734926052332,
"grad_norm": 5.868428707122803,
"learning_rate": 9.164647635099548e-06,
"loss": 0.4247,
"step": 6200
},
{
"epoch": 3.526734926052332,
"eval_loss": 0.8732792139053345,
"eval_runtime": 28.8814,
"eval_samples_per_second": 865.608,
"eval_steps_per_second": 6.786,
"step": 6200
},
{
"epoch": 3.5551763367463027,
"grad_norm": 5.333588600158691,
"learning_rate": 9.05930685768461e-06,
"loss": 0.4111,
"step": 6250
},
{
"epoch": 3.583617747440273,
"grad_norm": 5.569532871246338,
"learning_rate": 8.953966080269673e-06,
"loss": 0.4396,
"step": 6300
},
{
"epoch": 3.6120591581342434,
"grad_norm": 5.38419771194458,
"learning_rate": 8.848625302854735e-06,
"loss": 0.4122,
"step": 6350
},
{
"epoch": 3.640500568828214,
"grad_norm": 5.328497409820557,
"learning_rate": 8.743284525439798e-06,
"loss": 0.4252,
"step": 6400
},
{
"epoch": 3.640500568828214,
"eval_loss": 0.8656958937644958,
"eval_runtime": 28.751,
"eval_samples_per_second": 869.534,
"eval_steps_per_second": 6.817,
"step": 6400
},
{
"epoch": 3.6689419795221845,
"grad_norm": 5.675217151641846,
"learning_rate": 8.63794374802486e-06,
"loss": 0.4167,
"step": 6450
},
{
"epoch": 3.697383390216155,
"grad_norm": 5.26973295211792,
"learning_rate": 8.532602970609924e-06,
"loss": 0.4282,
"step": 6500
},
{
"epoch": 3.725824800910125,
"grad_norm": 5.991490840911865,
"learning_rate": 8.427262193194985e-06,
"loss": 0.411,
"step": 6550
},
{
"epoch": 3.7542662116040955,
"grad_norm": 5.413957118988037,
"learning_rate": 8.321921415780049e-06,
"loss": 0.4273,
"step": 6600
},
{
"epoch": 3.7542662116040955,
"eval_loss": 0.8539847135543823,
"eval_runtime": 28.8669,
"eval_samples_per_second": 866.045,
"eval_steps_per_second": 6.79,
"step": 6600
},
{
"epoch": 3.782707622298066,
"grad_norm": 5.672956466674805,
"learning_rate": 8.21658063836511e-06,
"loss": 0.4327,
"step": 6650
},
{
"epoch": 3.8111490329920366,
"grad_norm": 6.0553059577941895,
"learning_rate": 8.111239860950174e-06,
"loss": 0.431,
"step": 6700
},
{
"epoch": 3.839590443686007,
"grad_norm": 6.111351013183594,
"learning_rate": 8.005899083535238e-06,
"loss": 0.4347,
"step": 6750
},
{
"epoch": 3.868031854379977,
"grad_norm": 6.185035705566406,
"learning_rate": 7.9005583061203e-06,
"loss": 0.4264,
"step": 6800
},
{
"epoch": 3.868031854379977,
"eval_loss": 0.8523036241531372,
"eval_runtime": 28.7415,
"eval_samples_per_second": 869.823,
"eval_steps_per_second": 6.819,
"step": 6800
},
{
"epoch": 3.8964732650739475,
"grad_norm": 4.952618598937988,
"learning_rate": 7.795217528705363e-06,
"loss": 0.4213,
"step": 6850
},
{
"epoch": 3.9249146757679183,
"grad_norm": 5.168086528778076,
"learning_rate": 7.689876751290425e-06,
"loss": 0.4285,
"step": 6900
},
{
"epoch": 3.9533560864618886,
"grad_norm": 5.6217732429504395,
"learning_rate": 7.584535973875487e-06,
"loss": 0.4138,
"step": 6950
},
{
"epoch": 3.981797497155859,
"grad_norm": 4.983550548553467,
"learning_rate": 7.47919519646055e-06,
"loss": 0.4051,
"step": 7000
},
{
"epoch": 3.981797497155859,
"eval_loss": 0.8406953811645508,
"eval_runtime": 28.8132,
"eval_samples_per_second": 867.659,
"eval_steps_per_second": 6.802,
"step": 7000
},
{
"epoch": 4.010238907849829,
"grad_norm": 3.829274892807007,
"learning_rate": 7.373854419045613e-06,
"loss": 0.3779,
"step": 7050
},
{
"epoch": 4.0386803185438,
"grad_norm": 4.154295921325684,
"learning_rate": 7.268513641630676e-06,
"loss": 0.2957,
"step": 7100
},
{
"epoch": 4.06712172923777,
"grad_norm": 5.0097222328186035,
"learning_rate": 7.1631728642157386e-06,
"loss": 0.2939,
"step": 7150
},
{
"epoch": 4.09556313993174,
"grad_norm": 5.015048027038574,
"learning_rate": 7.057832086800801e-06,
"loss": 0.3065,
"step": 7200
},
{
"epoch": 4.09556313993174,
"eval_loss": 0.8590184450149536,
"eval_runtime": 28.7607,
"eval_samples_per_second": 869.241,
"eval_steps_per_second": 6.815,
"step": 7200
},
{
"epoch": 4.1240045506257115,
"grad_norm": 4.9901018142700195,
"learning_rate": 6.952491309385864e-06,
"loss": 0.3081,
"step": 7250
},
{
"epoch": 4.152445961319682,
"grad_norm": 4.8424391746521,
"learning_rate": 6.847150531970926e-06,
"loss": 0.3043,
"step": 7300
},
{
"epoch": 4.180887372013652,
"grad_norm": 5.147951602935791,
"learning_rate": 6.741809754555989e-06,
"loss": 0.3176,
"step": 7350
},
{
"epoch": 4.2093287827076225,
"grad_norm": 4.292293548583984,
"learning_rate": 6.636468977141052e-06,
"loss": 0.3067,
"step": 7400
},
{
"epoch": 4.2093287827076225,
"eval_loss": 0.848746657371521,
"eval_runtime": 29.0524,
"eval_samples_per_second": 860.514,
"eval_steps_per_second": 6.746,
"step": 7400
},
{
"epoch": 4.237770193401593,
"grad_norm": 4.796692848205566,
"learning_rate": 6.531128199726114e-06,
"loss": 0.299,
"step": 7450
},
{
"epoch": 4.266211604095563,
"grad_norm": 5.196813583374023,
"learning_rate": 6.425787422311177e-06,
"loss": 0.3106,
"step": 7500
},
{
"epoch": 4.294653014789533,
"grad_norm": 4.551479816436768,
"learning_rate": 6.3204466448962395e-06,
"loss": 0.3062,
"step": 7550
},
{
"epoch": 4.323094425483504,
"grad_norm": 4.6921257972717285,
"learning_rate": 6.215105867481302e-06,
"loss": 0.3153,
"step": 7600
},
{
"epoch": 4.323094425483504,
"eval_loss": 0.8497870564460754,
"eval_runtime": 29.0027,
"eval_samples_per_second": 861.988,
"eval_steps_per_second": 6.758,
"step": 7600
},
{
"epoch": 4.351535836177474,
"grad_norm": 4.535303592681885,
"learning_rate": 6.109765090066366e-06,
"loss": 0.3206,
"step": 7650
},
{
"epoch": 4.379977246871444,
"grad_norm": 5.174567222595215,
"learning_rate": 6.004424312651428e-06,
"loss": 0.3202,
"step": 7700
},
{
"epoch": 4.408418657565416,
"grad_norm": 4.402812480926514,
"learning_rate": 5.899083535236491e-06,
"loss": 0.3167,
"step": 7750
},
{
"epoch": 4.436860068259386,
"grad_norm": 4.917297840118408,
"learning_rate": 5.7937427578215534e-06,
"loss": 0.3044,
"step": 7800
},
{
"epoch": 4.436860068259386,
"eval_loss": 0.8426228165626526,
"eval_runtime": 29.2233,
"eval_samples_per_second": 855.482,
"eval_steps_per_second": 6.707,
"step": 7800
},
{
"epoch": 4.465301478953356,
"grad_norm": 5.476150989532471,
"learning_rate": 5.688401980406616e-06,
"loss": 0.3015,
"step": 7850
},
{
"epoch": 4.493742889647327,
"grad_norm": 5.594091415405273,
"learning_rate": 5.583061202991679e-06,
"loss": 0.3157,
"step": 7900
},
{
"epoch": 4.522184300341297,
"grad_norm": 4.798509120941162,
"learning_rate": 5.477720425576741e-06,
"loss": 0.3109,
"step": 7950
},
{
"epoch": 4.550625711035267,
"grad_norm": 4.705766201019287,
"learning_rate": 5.372379648161804e-06,
"loss": 0.3164,
"step": 8000
},
{
"epoch": 4.550625711035267,
"eval_loss": 0.8384647369384766,
"eval_runtime": 29.0223,
"eval_samples_per_second": 861.406,
"eval_steps_per_second": 6.753,
"step": 8000
},
{
"epoch": 4.579067121729238,
"grad_norm": 5.214234352111816,
"learning_rate": 5.269145686295165e-06,
"loss": 0.2996,
"step": 8050
},
{
"epoch": 4.607508532423208,
"grad_norm": 3.9629294872283936,
"learning_rate": 5.163804908880228e-06,
"loss": 0.3247,
"step": 8100
},
{
"epoch": 4.635949943117178,
"grad_norm": 5.35923957824707,
"learning_rate": 5.058464131465291e-06,
"loss": 0.3093,
"step": 8150
},
{
"epoch": 4.664391353811149,
"grad_norm": 4.924727916717529,
"learning_rate": 4.9531233540503534e-06,
"loss": 0.3017,
"step": 8200
},
{
"epoch": 4.664391353811149,
"eval_loss": 0.8293972611427307,
"eval_runtime": 29.0332,
"eval_samples_per_second": 861.084,
"eval_steps_per_second": 6.751,
"step": 8200
},
{
"epoch": 4.69283276450512,
"grad_norm": 4.929891586303711,
"learning_rate": 4.847782576635416e-06,
"loss": 0.3075,
"step": 8250
},
{
"epoch": 4.72127417519909,
"grad_norm": 4.345849514007568,
"learning_rate": 4.742441799220479e-06,
"loss": 0.3006,
"step": 8300
},
{
"epoch": 4.74971558589306,
"grad_norm": 4.58878231048584,
"learning_rate": 4.637101021805541e-06,
"loss": 0.3134,
"step": 8350
},
{
"epoch": 4.778156996587031,
"grad_norm": 5.448882579803467,
"learning_rate": 4.531760244390604e-06,
"loss": 0.3111,
"step": 8400
},
{
"epoch": 4.778156996587031,
"eval_loss": 0.8249350786209106,
"eval_runtime": 29.1624,
"eval_samples_per_second": 857.269,
"eval_steps_per_second": 6.721,
"step": 8400
},
{
"epoch": 4.806598407281001,
"grad_norm": 4.381404399871826,
"learning_rate": 4.4264194669756665e-06,
"loss": 0.3165,
"step": 8450
},
{
"epoch": 4.835039817974971,
"grad_norm": 4.86619234085083,
"learning_rate": 4.321078689560729e-06,
"loss": 0.3071,
"step": 8500
},
{
"epoch": 4.863481228668942,
"grad_norm": 5.313292503356934,
"learning_rate": 4.215737912145792e-06,
"loss": 0.3017,
"step": 8550
},
{
"epoch": 4.891922639362912,
"grad_norm": 4.802574157714844,
"learning_rate": 4.110397134730854e-06,
"loss": 0.3092,
"step": 8600
},
{
"epoch": 4.891922639362912,
"eval_loss": 0.8224520087242126,
"eval_runtime": 29.0511,
"eval_samples_per_second": 860.551,
"eval_steps_per_second": 6.747,
"step": 8600
},
{
"epoch": 4.920364050056882,
"grad_norm": 5.428598880767822,
"learning_rate": 4.005056357315917e-06,
"loss": 0.3,
"step": 8650
},
{
"epoch": 4.948805460750854,
"grad_norm": 5.6783528327941895,
"learning_rate": 3.8997155799009805e-06,
"loss": 0.2999,
"step": 8700
},
{
"epoch": 4.977246871444824,
"grad_norm": 5.2957940101623535,
"learning_rate": 3.7943748024860427e-06,
"loss": 0.3116,
"step": 8750
},
{
"epoch": 5.005688282138794,
"grad_norm": 4.1276631355285645,
"learning_rate": 3.6890340250711053e-06,
"loss": 0.3046,
"step": 8800
},
{
"epoch": 5.005688282138794,
"eval_loss": 0.8173409700393677,
"eval_runtime": 28.9634,
"eval_samples_per_second": 863.157,
"eval_steps_per_second": 6.767,
"step": 8800
},
{
"epoch": 5.034129692832765,
"grad_norm": 4.093660354614258,
"learning_rate": 3.5836932476561683e-06,
"loss": 0.2501,
"step": 8850
},
{
"epoch": 5.062571103526735,
"grad_norm": 5.549435615539551,
"learning_rate": 3.478352470241231e-06,
"loss": 0.2443,
"step": 8900
},
{
"epoch": 5.091012514220705,
"grad_norm": 4.558211803436279,
"learning_rate": 3.3730116928262936e-06,
"loss": 0.2338,
"step": 8950
},
{
"epoch": 5.1194539249146755,
"grad_norm": 3.450760841369629,
"learning_rate": 3.267670915411356e-06,
"loss": 0.2382,
"step": 9000
},
{
"epoch": 5.1194539249146755,
"eval_loss": 0.8248207569122314,
"eval_runtime": 29.0514,
"eval_samples_per_second": 860.545,
"eval_steps_per_second": 6.747,
"step": 9000
},
{
"epoch": 5.147895335608646,
"grad_norm": 4.0541205406188965,
"learning_rate": 3.162330137996419e-06,
"loss": 0.2524,
"step": 9050
},
{
"epoch": 5.176336746302616,
"grad_norm": 4.376137733459473,
"learning_rate": 3.0569893605814814e-06,
"loss": 0.2427,
"step": 9100
},
{
"epoch": 5.204778156996587,
"grad_norm": 4.169808864593506,
"learning_rate": 2.951648583166544e-06,
"loss": 0.2512,
"step": 9150
},
{
"epoch": 5.233219567690558,
"grad_norm": 4.089740753173828,
"learning_rate": 2.846307805751607e-06,
"loss": 0.2377,
"step": 9200
},
{
"epoch": 5.233219567690558,
"eval_loss": 0.8218184113502502,
"eval_runtime": 28.9027,
"eval_samples_per_second": 864.97,
"eval_steps_per_second": 6.781,
"step": 9200
},
{
"epoch": 5.261660978384528,
"grad_norm": 4.028066635131836,
"learning_rate": 2.7409670283366697e-06,
"loss": 0.2458,
"step": 9250
},
{
"epoch": 5.290102389078498,
"grad_norm": 5.62259578704834,
"learning_rate": 2.635626250921732e-06,
"loss": 0.2515,
"step": 9300
},
{
"epoch": 5.318543799772469,
"grad_norm": 4.931870937347412,
"learning_rate": 2.5302854735067945e-06,
"loss": 0.2453,
"step": 9350
},
{
"epoch": 5.346985210466439,
"grad_norm": 4.307934284210205,
"learning_rate": 2.4249446960918575e-06,
"loss": 0.244,
"step": 9400
},
{
"epoch": 5.346985210466439,
"eval_loss": 0.8225930333137512,
"eval_runtime": 28.8011,
"eval_samples_per_second": 868.022,
"eval_steps_per_second": 6.805,
"step": 9400
},
{
"epoch": 5.375426621160409,
"grad_norm": 3.650233030319214,
"learning_rate": 2.31960391867692e-06,
"loss": 0.2389,
"step": 9450
},
{
"epoch": 5.40386803185438,
"grad_norm": 4.171177864074707,
"learning_rate": 2.2142631412619828e-06,
"loss": 0.253,
"step": 9500
},
{
"epoch": 5.43230944254835,
"grad_norm": 5.055683135986328,
"learning_rate": 2.1089223638470454e-06,
"loss": 0.2509,
"step": 9550
},
{
"epoch": 5.460750853242321,
"grad_norm": 4.621593952178955,
"learning_rate": 2.003581586432108e-06,
"loss": 0.2492,
"step": 9600
},
{
"epoch": 5.460750853242321,
"eval_loss": 0.8198309540748596,
"eval_runtime": 28.7042,
"eval_samples_per_second": 870.954,
"eval_steps_per_second": 6.828,
"step": 9600
},
{
"epoch": 5.489192263936292,
"grad_norm": 5.461741924285889,
"learning_rate": 1.8982408090171708e-06,
"loss": 0.2379,
"step": 9650
},
{
"epoch": 5.517633674630262,
"grad_norm": 4.083144664764404,
"learning_rate": 1.7929000316022333e-06,
"loss": 0.247,
"step": 9700
},
{
"epoch": 5.546075085324232,
"grad_norm": 4.508319854736328,
"learning_rate": 1.6875592541872959e-06,
"loss": 0.2419,
"step": 9750
},
{
"epoch": 5.5745164960182025,
"grad_norm": 4.420298099517822,
"learning_rate": 1.5822184767723587e-06,
"loss": 0.244,
"step": 9800
},
{
"epoch": 5.5745164960182025,
"eval_loss": 0.8149560689926147,
"eval_runtime": 28.7025,
"eval_samples_per_second": 871.004,
"eval_steps_per_second": 6.829,
"step": 9800
},
{
"epoch": 5.602957906712173,
"grad_norm": 4.702558517456055,
"learning_rate": 1.4768776993574213e-06,
"loss": 0.2498,
"step": 9850
},
{
"epoch": 5.631399317406143,
"grad_norm": 3.864471912384033,
"learning_rate": 1.371536921942484e-06,
"loss": 0.2381,
"step": 9900
},
{
"epoch": 5.6598407281001135,
"grad_norm": 4.41420316696167,
"learning_rate": 1.2661961445275468e-06,
"loss": 0.2425,
"step": 9950
},
{
"epoch": 5.688282138794084,
"grad_norm": 4.402945041656494,
"learning_rate": 1.1608553671126094e-06,
"loss": 0.2451,
"step": 10000
},
{
"epoch": 5.688282138794084,
"eval_loss": 0.8147642016410828,
"eval_runtime": 28.751,
"eval_samples_per_second": 869.534,
"eval_steps_per_second": 6.817,
"step": 10000
},
{
"epoch": 5.716723549488055,
"grad_norm": 4.66687536239624,
"learning_rate": 1.055514589697672e-06,
"loss": 0.2468,
"step": 10050
},
{
"epoch": 5.745164960182025,
"grad_norm": 4.6121649742126465,
"learning_rate": 9.501738122827347e-07,
"loss": 0.2404,
"step": 10100
},
{
"epoch": 5.773606370875996,
"grad_norm": 4.210214614868164,
"learning_rate": 8.469398504160961e-07,
"loss": 0.2397,
"step": 10150
},
{
"epoch": 5.802047781569966,
"grad_norm": 4.265695095062256,
"learning_rate": 7.415990730011588e-07,
"loss": 0.2417,
"step": 10200
},
{
"epoch": 5.802047781569966,
"eval_loss": 0.8124446868896484,
"eval_runtime": 28.7474,
"eval_samples_per_second": 869.643,
"eval_steps_per_second": 6.818,
"step": 10200
},
{
"epoch": 5.830489192263936,
"grad_norm": 4.166738033294678,
"learning_rate": 6.362582955862215e-07,
"loss": 0.2446,
"step": 10250
},
{
"epoch": 5.858930602957907,
"grad_norm": 4.40815544128418,
"learning_rate": 5.309175181712841e-07,
"loss": 0.2443,
"step": 10300
},
{
"epoch": 5.887372013651877,
"grad_norm": 3.757612466812134,
"learning_rate": 4.255767407563468e-07,
"loss": 0.2465,
"step": 10350
},
{
"epoch": 5.915813424345847,
"grad_norm": 5.059196472167969,
"learning_rate": 3.202359633414095e-07,
"loss": 0.2472,
"step": 10400
},
{
"epoch": 5.915813424345847,
"eval_loss": 0.8121369481086731,
"eval_runtime": 28.8178,
"eval_samples_per_second": 867.521,
"eval_steps_per_second": 6.801,
"step": 10400
}
],
"logging_steps": 50,
"max_steps": 10548,
"num_input_tokens_seen": 0,
"num_train_epochs": 6,
"save_steps": 200,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 128,
"trial_name": null,
"trial_params": null
}
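
The state above can be inspected programmatically. A minimal Python sketch, assuming the file has been downloaded locally as trainer_state.json (the local filename is an assumption for illustration, not part of the checkpoint layout); it only uses the standard-library json module and the keys present in the file:

# Minimal sketch: load trainer_state.json and summarize the loss curves.
# Assumes the file shown above is saved locally as "trainer_state.json".
import json

with open("trainer_state.json", encoding="utf-8") as f:
    state = json.load(f)

# log_history mixes two record types: training logs (carry "loss")
# and evaluation logs (carry "eval_loss"), both keyed by "step".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"trained for {state['epoch']:.2f} epochs, {state['global_step']} steps")
print(f"final train loss: {train_logs[-1]['loss']:.4f} at step {train_logs[-1]['step']}")
print(f"best eval loss:   {state['best_metric']:.4f} "
      f"(last eval: {eval_logs[-1]['eval_loss']:.4f} at step {eval_logs[-1]['step']})")

For this file the sketch reports roughly 5.92 epochs over 10400 steps, a final training loss of 0.2472, and a best (and final) eval_loss of 0.8121.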