{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9998447060285119,
  "eval_steps": 500,
  "global_step": 503,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.019877628350467434,
      "grad_norm": 49.489271990521864,
      "learning_rate": 1.730769230769231e-05,
      "loss": 18.2674,
      "num_tokens": 35435900.0,
      "step": 10
    },
    {
      "epoch": 0.03975525670093487,
      "grad_norm": 94.13839060488317,
      "learning_rate": 3.653846153846154e-05,
      "loss": 6.0352,
      "num_tokens": 70986000.0,
      "step": 20
    },
    {
      "epoch": 0.0596328850514023,
      "grad_norm": 36.22989263913226,
      "learning_rate": 4.999560818649105e-05,
      "loss": 2.257,
      "num_tokens": 106406261.0,
      "step": 30
    },
    {
      "epoch": 0.07951051340186974,
      "grad_norm": 12.381662510455556,
      "learning_rate": 4.99175791880267e-05,
      "loss": 1.6806,
      "num_tokens": 142007549.0,
      "step": 40
    },
    {
      "epoch": 0.09938814175233718,
      "grad_norm": 6.078747336589003,
      "learning_rate": 4.9742343823849924e-05,
      "loss": 1.6283,
      "num_tokens": 177749958.0,
      "step": 50
    },
    {
      "epoch": 0.1192657701028046,
      "grad_norm": 45.316378371707856,
      "learning_rate": 4.947066194375331e-05,
      "loss": 1.405,
      "num_tokens": 213223619.0,
      "step": 60
    },
    {
      "epoch": 0.13914339845327203,
      "grad_norm": 4.457640477421176,
      "learning_rate": 4.910371160572305e-05,
      "loss": 1.4731,
      "num_tokens": 248699816.0,
      "step": 70
    },
    {
      "epoch": 0.15902102680373947,
      "grad_norm": 3.8571852421472106,
      "learning_rate": 4.864308396768294e-05,
      "loss": 1.1962,
      "num_tokens": 284242373.0,
      "step": 80
    },
    {
      "epoch": 0.17889865515420691,
      "grad_norm": 2.922271088762262,
      "learning_rate": 4.8090776387967915e-05,
      "loss": 1.1482,
      "num_tokens": 319801798.0,
      "step": 90
    },
    {
      "epoch": 0.19877628350467436,
      "grad_norm": 3.07929680688637,
      "learning_rate": 4.7449183764444924e-05,
      "loss": 1.1573,
      "num_tokens": 355227417.0,
      "step": 100
    },
    {
      "epoch": 0.2186539118551418,
      "grad_norm": 2.986142143054458,
      "learning_rate": 4.672108814983597e-05,
      "loss": 1.1133,
      "num_tokens": 35532164.0,
      "step": 110
    },
    {
      "epoch": 0.2385315402056092,
      "grad_norm": 2.347646077232914,
      "learning_rate": 4.590964668827306e-05,
      "loss": 1.1624,
      "num_tokens": 71080907.0,
      "step": 120
    },
    {
      "epoch": 0.25840916855607665,
      "grad_norm": 3.077510825746025,
      "learning_rate": 4.501837792539419e-05,
      "loss": 1.0658,
      "num_tokens": 106602450.0,
      "step": 130
    },
    {
      "epoch": 0.27828679690654406,
      "grad_norm": 1.7347828183409042,
      "learning_rate": 4.4051146551342136e-05,
      "loss": 1.0483,
      "num_tokens": 142169486.0,
      "step": 140
    },
    {
      "epoch": 0.29816442525701153,
      "grad_norm": 2.802057431831608,
      "learning_rate": 4.301214664282277e-05,
      "loss": 1.0333,
      "num_tokens": 177675215.0,
      "step": 150
    },
    {
      "epoch": 0.31804205360747895,
      "grad_norm": 2.268188963759059,
      "learning_rate": 4.190588347688833e-05,
      "loss": 1.0238,
      "num_tokens": 213190309.0,
      "step": 160
    },
    {
      "epoch": 0.3379196819579464,
      "grad_norm": 1.6440471046115712,
      "learning_rate": 4.073715399530405e-05,
      "loss": 0.9957,
      "num_tokens": 248802333.0,
      "step": 170
    },
    {
      "epoch": 0.35779731030841383,
      "grad_norm": 1.3717651355528624,
      "learning_rate": 3.9511026004207795e-05,
      "loss": 0.9574,
      "num_tokens": 284367601.0,
      "step": 180
    },
    {
      "epoch": 0.37767493865888124,
      "grad_norm": 1.6711348270758677,
      "learning_rate": 3.823281619925673e-05,
      "loss": 0.9611,
      "num_tokens": 319996464.0,
      "step": 190
    },
    {
      "epoch": 0.3975525670093487,
      "grad_norm": 1.8188583401200056,
      "learning_rate": 3.69080671115473e-05,
      "loss": 0.9318,
      "num_tokens": 355585930.0,
      "step": 200
    },
    {
      "epoch": 0.4174301953598161,
      "grad_norm": 1.6314158389343252,
      "learning_rate": 3.554252307427526e-05,
      "loss": 0.9428,
      "num_tokens": 391200376.0,
      "step": 210
    },
    {
      "epoch": 0.4373078237102836,
      "grad_norm": 1.406291241468206,
      "learning_rate": 3.414210531434781e-05,
      "loss": 0.934,
      "num_tokens": 426715690.0,
      "step": 220
    },
    {
      "epoch": 0.457185452060751,
      "grad_norm": 1.5568298850166404,
      "learning_rate": 3.271288627695541e-05,
      "loss": 0.8906,
      "num_tokens": 462193838.0,
      "step": 230
    },
    {
      "epoch": 0.4770630804112184,
      "grad_norm": 1.6374375121178215,
      "learning_rate": 3.126106329443562e-05,
      "loss": 0.8869,
      "num_tokens": 497775413.0,
      "step": 240
    },
    {
      "epoch": 0.4969407087616859,
      "grad_norm": 1.4278396281710222,
      "learning_rate": 2.9792931713605558e-05,
      "loss": 0.8715,
      "num_tokens": 533288808.0,
      "step": 250
    },
    {
      "epoch": 0.5168183371121533,
      "grad_norm": 1.3315422073102914,
      "learning_rate": 2.831485759808712e-05,
      "loss": 0.8535,
      "num_tokens": 568733244.0,
      "step": 260
    },
    {
      "epoch": 0.5366959654626208,
      "grad_norm": 1.4292589501021156,
      "learning_rate": 2.683325012399241e-05,
      "loss": 0.8555,
      "num_tokens": 604337940.0,
      "step": 270
    },
    {
      "epoch": 0.5565735938130881,
      "grad_norm": 1.3191036037659456,
      "learning_rate": 2.5354533788666173e-05,
      "loss": 0.8258,
      "num_tokens": 639897455.0,
      "step": 280
    },
    {
      "epoch": 0.5764512221635556,
      "grad_norm": 1.2135834698476922,
      "learning_rate": 2.388512055299283e-05,
      "loss": 0.8152,
      "num_tokens": 675560731.0,
      "step": 290
    },
    {
      "epoch": 0.5963288505140231,
      "grad_norm": 1.1849646162622842,
      "learning_rate": 2.2431382038063615e-05,
      "loss": 0.8033,
      "num_tokens": 710895149.0,
      "step": 300
    },
    {
      "epoch": 0.6162064788644904,
      "grad_norm": 1.0526968604787708,
      "learning_rate": 2.0999621896763955e-05,
      "loss": 0.7823,
      "num_tokens": 746372135.0,
      "step": 310
    },
    {
      "epoch": 0.6360841072149579,
      "grad_norm": 1.0814389110808431,
      "learning_rate": 1.959604848008217e-05,
      "loss": 0.7653,
      "num_tokens": 781964693.0,
      "step": 320
    },
    {
      "epoch": 0.6559617355654254,
      "grad_norm": 1.1579079388621223,
      "learning_rate": 1.8226747916663184e-05,
      "loss": 0.7535,
      "num_tokens": 817483499.0,
      "step": 330
    },
    {
      "epoch": 0.6758393639158928,
      "grad_norm": 1.0764253018415963,
      "learning_rate": 1.6897657722338715e-05,
      "loss": 0.7417,
      "num_tokens": 853175363.0,
      "step": 340
    },
    {
      "epoch": 0.6957169922663602,
      "grad_norm": 1.0645270530767825,
      "learning_rate": 1.561454105406746e-05,
      "loss": 0.7284,
      "num_tokens": 888676759.0,
      "step": 350
    },
    {
      "epoch": 0.7155946206168277,
      "grad_norm": 1.0991349699273438,
      "learning_rate": 1.4382961719924382e-05,
      "loss": 0.7102,
      "num_tokens": 924126537.0,
      "step": 360
    },
    {
      "epoch": 0.7354722489672951,
      "grad_norm": 1.096361260557103,
      "learning_rate": 1.3208260053500118e-05,
      "loss": 0.7016,
      "num_tokens": 959627625.0,
      "step": 370
    },
    {
      "epoch": 0.7553498773177625,
      "grad_norm": 1.2001309900905168,
      "learning_rate": 1.2095529757323018e-05,
      "loss": 0.6735,
      "num_tokens": 995276803.0,
      "step": 380
    },
    {
      "epoch": 0.77522750566823,
      "grad_norm": 1.0243664466225824,
      "learning_rate": 1.104959581571463e-05,
      "loss": 0.6649,
      "num_tokens": 1030735793.0,
      "step": 390
    },
    {
      "epoch": 0.7951051340186974,
      "grad_norm": 1.4526878564718964,
      "learning_rate": 1.0074993572852243e-05,
      "loss": 0.6713,
      "num_tokens": 1066374499.0,
      "step": 400
    },
    {
      "epoch": 0.8149827623691648,
      "grad_norm": 1.050605567010305,
      "learning_rate": 9.175949066759368e-06,
      "loss": 0.6419,
      "num_tokens": 1101855303.0,
      "step": 410
    },
    {
      "epoch": 0.8348603907196323,
      "grad_norm": 1.125778801254232,
      "learning_rate": 8.356360704499399e-06,
      "loss": 0.635,
      "num_tokens": 1137354673.0,
      "step": 420
    },
    {
      "epoch": 0.8547380190700997,
      "grad_norm": 1.1832572011409386,
      "learning_rate": 7.619782358031821e-06,
      "loss": 0.6267,
      "num_tokens": 1172860263.0,
      "step": 430
    },
    {
      "epoch": 0.8746156474205672,
      "grad_norm": 1.1054714685819218,
      "learning_rate": 6.969407954030057e-06,
      "loss": 0.6116,
      "num_tokens": 1208508537.0,
      "step": 440
    },
    {
      "epoch": 0.8944932757710345,
      "grad_norm": 1.089754156750933,
      "learning_rate": 6.40805762448228e-06,
      "loss": 0.6051,
      "num_tokens": 1244052723.0,
      "step": 450
    },
    {
      "epoch": 0.914370904121502,
      "grad_norm": 1.0612130068358916,
      "learning_rate": 5.938165478128298e-06,
      "loss": 0.591,
      "num_tokens": 1279456435.0,
      "step": 460
    },
    {
      "epoch": 0.9342485324719695,
      "grad_norm": 1.1570486650093461,
      "learning_rate": 5.5617690457577016e-06,
      "loss": 0.5808,
      "num_tokens": 1314958319.0,
      "step": 470
    },
    {
      "epoch": 0.9541261608224368,
      "grad_norm": 1.4354838420805704,
      "learning_rate": 5.280500445136247e-06,
      "loss": 0.5754,
      "num_tokens": 1350578878.0,
      "step": 480
    },
    {
      "epoch": 0.9740037891729043,
      "grad_norm": 1.3310227594903838,
      "learning_rate": 5.095579303870732e-06,
      "loss": 0.5693,
      "num_tokens": 1386062688.0,
      "step": 490
    },
    {
      "epoch": 0.9938814175233718,
      "grad_norm": 1.0908684710720695,
      "learning_rate": 5.007807470900112e-06,
      "loss": 0.5606,
      "num_tokens": 1421565738.0,
      "step": 500
    },
    {
      "epoch": 0.9998447060285119,
      "num_tokens": 1432289557.0,
      "step": 503,
      "total_flos": 1.1360204825667672e+20,
      "train_loss": 0.6426609048312512,
      "train_runtime": 119967.1502,
      "train_samples_per_second": 2.147,
      "train_steps_per_second": 0.004
    }
  ],
  "logging_steps": 10,
  "max_steps": 503,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.1360204825667672e+20,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}