|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.05494128150539111,
  "eval_steps": 25,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0005494128150539112,
      "grad_norm": 0.09600650519132614,
      "learning_rate": 1e-05,
      "loss": 10.8347,
      "step": 1
    },
    {
      "epoch": 0.0005494128150539112,
      "eval_loss": 10.83652400970459,
      "eval_runtime": 25.3592,
      "eval_samples_per_second": 120.903,
      "eval_steps_per_second": 60.451,
      "step": 1
    },
    {
      "epoch": 0.0010988256301078224,
      "grad_norm": 0.11173133552074432,
      "learning_rate": 2e-05,
      "loss": 10.8339,
      "step": 2
    },
    {
      "epoch": 0.0016482384451617334,
      "grad_norm": 0.1001177430152893,
      "learning_rate": 3e-05,
      "loss": 10.8356,
      "step": 3
    },
    {
      "epoch": 0.0021976512602156447,
      "grad_norm": 0.1040872111916542,
      "learning_rate": 4e-05,
      "loss": 10.8343,
      "step": 4
    },
    {
      "epoch": 0.002747064075269556,
      "grad_norm": 0.09792021661996841,
      "learning_rate": 5e-05,
      "loss": 10.8357,
      "step": 5
    },
    {
      "epoch": 0.003296476890323467,
      "grad_norm": 0.09441577643156052,
      "learning_rate": 6e-05,
      "loss": 10.835,
      "step": 6
    },
    {
      "epoch": 0.003845889705377378,
      "grad_norm": 0.11089451611042023,
      "learning_rate": 7e-05,
      "loss": 10.8338,
      "step": 7
    },
    {
      "epoch": 0.0043953025204312894,
      "grad_norm": 0.09310828894376755,
      "learning_rate": 8e-05,
      "loss": 10.8346,
      "step": 8
    },
    {
      "epoch": 0.0049447153354852,
      "grad_norm": 0.09884480386972427,
      "learning_rate": 9e-05,
      "loss": 10.835,
      "step": 9
    },
    {
      "epoch": 0.005494128150539112,
      "grad_norm": 0.12582162022590637,
      "learning_rate": 0.0001,
      "loss": 10.8333,
      "step": 10
    },
    {
      "epoch": 0.006043540965593022,
      "grad_norm": 0.11061394959688187,
      "learning_rate": 9.99695413509548e-05,
      "loss": 10.8362,
      "step": 11
    },
    {
      "epoch": 0.006592953780646934,
      "grad_norm": 0.09800900518894196,
      "learning_rate": 9.987820251299122e-05,
      "loss": 10.8297,
      "step": 12
    },
    {
      "epoch": 0.007142366595700844,
      "grad_norm": 0.11028425395488739,
      "learning_rate": 9.972609476841367e-05,
      "loss": 10.8357,
      "step": 13
    },
    {
      "epoch": 0.007691779410754756,
      "grad_norm": 0.11087824404239655,
      "learning_rate": 9.951340343707852e-05,
      "loss": 10.8324,
      "step": 14
    },
    {
      "epoch": 0.008241192225808667,
      "grad_norm": 0.1042320504784584,
      "learning_rate": 9.924038765061042e-05,
      "loss": 10.8347,
      "step": 15
    },
    {
      "epoch": 0.008790605040862579,
      "grad_norm": 0.10207958519458771,
      "learning_rate": 9.890738003669029e-05,
      "loss": 10.8362,
      "step": 16
    },
    {
      "epoch": 0.00934001785591649,
      "grad_norm": 0.10438362509012222,
      "learning_rate": 9.851478631379982e-05,
      "loss": 10.8327,
      "step": 17
    },
    {
      "epoch": 0.0098894306709704,
      "grad_norm": 0.1137293353676796,
      "learning_rate": 9.806308479691595e-05,
      "loss": 10.832,
      "step": 18
    },
    {
      "epoch": 0.01043884348602431,
      "grad_norm": 0.10011179000139236,
      "learning_rate": 9.755282581475769e-05,
      "loss": 10.8308,
      "step": 19
    },
    {
      "epoch": 0.010988256301078223,
      "grad_norm": 0.11841128021478653,
      "learning_rate": 9.698463103929542e-05,
      "loss": 10.8312,
      "step": 20
    },
    {
      "epoch": 0.011537669116132134,
      "grad_norm": 0.12272407114505768,
      "learning_rate": 9.635919272833938e-05,
      "loss": 10.8299,
      "step": 21
    },
    {
      "epoch": 0.012087081931186044,
      "grad_norm": 0.12626005709171295,
      "learning_rate": 9.567727288213005e-05,
      "loss": 10.8311,
      "step": 22
    },
    {
      "epoch": 0.012636494746239957,
      "grad_norm": 0.11208302527666092,
      "learning_rate": 9.493970231495835e-05,
      "loss": 10.8293,
      "step": 23
    },
    {
      "epoch": 0.013185907561293867,
      "grad_norm": 0.11252301931381226,
      "learning_rate": 9.414737964294636e-05,
      "loss": 10.8326,
      "step": 24
    },
    {
      "epoch": 0.013735320376347778,
      "grad_norm": 0.12755857408046722,
      "learning_rate": 9.330127018922194e-05,
      "loss": 10.828,
      "step": 25
    },
    {
      "epoch": 0.013735320376347778,
      "eval_loss": 10.828333854675293,
      "eval_runtime": 14.0013,
      "eval_samples_per_second": 218.98,
      "eval_steps_per_second": 109.49,
      "step": 25
    },
    {
      "epoch": 0.014284733191401689,
      "grad_norm": 0.10949414223432541,
      "learning_rate": 9.24024048078213e-05,
      "loss": 10.8323,
      "step": 26
    },
    {
      "epoch": 0.014834146006455601,
      "grad_norm": 0.12477957457304001,
      "learning_rate": 9.145187862775209e-05,
      "loss": 10.826,
      "step": 27
    },
    {
      "epoch": 0.015383558821509512,
      "grad_norm": 0.1223072037100792,
      "learning_rate": 9.045084971874738e-05,
      "loss": 10.8298,
      "step": 28
    },
    {
      "epoch": 0.015932971636563424,
      "grad_norm": 0.11999625712633133,
      "learning_rate": 8.940053768033609e-05,
      "loss": 10.824,
      "step": 29
    },
    {
      "epoch": 0.016482384451617333,
      "grad_norm": 0.12540574371814728,
      "learning_rate": 8.83022221559489e-05,
      "loss": 10.8277,
      "step": 30
    },
    {
      "epoch": 0.017031797266671245,
      "grad_norm": 0.13100536167621613,
      "learning_rate": 8.715724127386972e-05,
      "loss": 10.8219,
      "step": 31
    },
    {
      "epoch": 0.017581210081725158,
      "grad_norm": 0.128668412566185,
      "learning_rate": 8.596699001693255e-05,
      "loss": 10.8246,
      "step": 32
    },
    {
      "epoch": 0.018130622896779067,
      "grad_norm": 0.11359424144029617,
      "learning_rate": 8.473291852294987e-05,
      "loss": 10.8271,
      "step": 33
    },
    {
      "epoch": 0.01868003571183298,
      "grad_norm": 0.12260546535253525,
      "learning_rate": 8.345653031794292e-05,
      "loss": 10.8207,
      "step": 34
    },
    {
      "epoch": 0.019229448526886888,
      "grad_norm": 0.13337105512619019,
      "learning_rate": 8.213938048432697e-05,
      "loss": 10.8189,
      "step": 35
    },
    {
      "epoch": 0.0197788613419408,
      "grad_norm": 0.1408015340566635,
      "learning_rate": 8.07830737662829e-05,
      "loss": 10.8277,
      "step": 36
    },
    {
      "epoch": 0.020328274156994713,
      "grad_norm": 0.12261249870061874,
      "learning_rate": 7.938926261462366e-05,
      "loss": 10.8242,
      "step": 37
    },
    {
      "epoch": 0.02087768697204862,
      "grad_norm": 0.12526747584342957,
      "learning_rate": 7.795964517353735e-05,
      "loss": 10.8273,
      "step": 38
    },
    {
      "epoch": 0.021427099787102534,
      "grad_norm": 0.12910766899585724,
      "learning_rate": 7.649596321166024e-05,
      "loss": 10.824,
      "step": 39
    },
    {
      "epoch": 0.021976512602156446,
      "grad_norm": 0.1519177109003067,
      "learning_rate": 7.500000000000001e-05,
      "loss": 10.8164,
      "step": 40
    },
    {
      "epoch": 0.022525925417210355,
      "grad_norm": 0.14746390283107758,
      "learning_rate": 7.347357813929454e-05,
      "loss": 10.8221,
      "step": 41
    },
    {
      "epoch": 0.023075338232264268,
      "grad_norm": 0.12614132463932037,
      "learning_rate": 7.191855733945387e-05,
      "loss": 10.8211,
      "step": 42
    },
    {
      "epoch": 0.02362475104731818,
      "grad_norm": 0.1671351045370102,
      "learning_rate": 7.033683215379002e-05,
      "loss": 10.8163,
      "step": 43
    },
    {
      "epoch": 0.02417416386237209,
      "grad_norm": 0.12974338233470917,
      "learning_rate": 6.873032967079561e-05,
      "loss": 10.8187,
      "step": 44
    },
    {
      "epoch": 0.024723576677426,
      "grad_norm": 0.1563933938741684,
      "learning_rate": 6.710100716628344e-05,
      "loss": 10.8121,
      "step": 45
    },
    {
      "epoch": 0.025272989492479914,
      "grad_norm": 0.15365718305110931,
      "learning_rate": 6.545084971874738e-05,
      "loss": 10.8147,
      "step": 46
    },
    {
      "epoch": 0.025822402307533823,
      "grad_norm": 0.17594094574451447,
      "learning_rate": 6.378186779084995e-05,
      "loss": 10.8136,
      "step": 47
    },
    {
      "epoch": 0.026371815122587735,
      "grad_norm": 0.15102382004261017,
      "learning_rate": 6.209609477998338e-05,
      "loss": 10.8187,
      "step": 48
    },
    {
      "epoch": 0.026921227937641644,
      "grad_norm": 0.15570496022701263,
      "learning_rate": 6.0395584540887963e-05,
      "loss": 10.8184,
      "step": 49
    },
    {
      "epoch": 0.027470640752695556,
      "grad_norm": 0.17091411352157593,
      "learning_rate": 5.868240888334653e-05,
      "loss": 10.8095,
      "step": 50
    },
    {
      "epoch": 0.027470640752695556,
      "eval_loss": 10.815298080444336,
      "eval_runtime": 13.8507,
      "eval_samples_per_second": 221.361,
      "eval_steps_per_second": 110.681,
      "step": 50
    },
    {
      "epoch": 0.02802005356774947,
      "grad_norm": 0.16749072074890137,
      "learning_rate": 5.695865504800327e-05,
      "loss": 10.8153,
      "step": 51
    },
    {
      "epoch": 0.028569466382803377,
      "grad_norm": 0.20695950090885162,
      "learning_rate": 5.522642316338268e-05,
      "loss": 10.8049,
      "step": 52
    },
    {
      "epoch": 0.02911887919785729,
      "grad_norm": 0.17667506635189056,
      "learning_rate": 5.348782368720626e-05,
      "loss": 10.8138,
      "step": 53
    },
    {
      "epoch": 0.029668292012911202,
      "grad_norm": 0.1662524938583374,
      "learning_rate": 5.174497483512506e-05,
      "loss": 10.8134,
      "step": 54
    },
    {
      "epoch": 0.03021770482796511,
      "grad_norm": 0.17990642786026,
      "learning_rate": 5e-05,
      "loss": 10.8107,
      "step": 55
    },
    {
      "epoch": 0.030767117643019024,
      "grad_norm": 0.1645970642566681,
      "learning_rate": 4.825502516487497e-05,
      "loss": 10.8116,
      "step": 56
    },
    {
      "epoch": 0.031316530458072936,
      "grad_norm": 0.16238118708133698,
      "learning_rate": 4.6512176312793736e-05,
      "loss": 10.8175,
      "step": 57
    },
    {
      "epoch": 0.03186594327312685,
      "grad_norm": 0.17180070281028748,
      "learning_rate": 4.477357683661734e-05,
      "loss": 10.8095,
      "step": 58
    },
    {
      "epoch": 0.032415356088180754,
      "grad_norm": 0.19333012402057648,
      "learning_rate": 4.3041344951996746e-05,
      "loss": 10.8041,
      "step": 59
    },
    {
      "epoch": 0.032964768903234666,
      "grad_norm": 0.1807578206062317,
      "learning_rate": 4.131759111665349e-05,
      "loss": 10.81,
      "step": 60
    },
    {
      "epoch": 0.03351418171828858,
      "grad_norm": 0.1692892611026764,
      "learning_rate": 3.960441545911204e-05,
      "loss": 10.8077,
      "step": 61
    },
    {
      "epoch": 0.03406359453334249,
      "grad_norm": 0.17745496332645416,
      "learning_rate": 3.790390522001662e-05,
      "loss": 10.8102,
      "step": 62
    },
    {
      "epoch": 0.0346130073483964,
      "grad_norm": 0.1852971315383911,
      "learning_rate": 3.6218132209150045e-05,
      "loss": 10.8103,
      "step": 63
    },
    {
      "epoch": 0.035162420163450316,
      "grad_norm": 0.19535671174526215,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 10.8071,
      "step": 64
    },
    {
      "epoch": 0.03571183297850422,
      "grad_norm": 0.19186407327651978,
      "learning_rate": 3.289899283371657e-05,
      "loss": 10.8055,
      "step": 65
    },
    {
      "epoch": 0.03626124579355813,
      "grad_norm": 0.18476377427577972,
      "learning_rate": 3.12696703292044e-05,
      "loss": 10.8102,
      "step": 66
    },
    {
      "epoch": 0.036810658608612046,
      "grad_norm": 0.19189932942390442,
      "learning_rate": 2.9663167846209998e-05,
      "loss": 10.8072,
      "step": 67
    },
    {
      "epoch": 0.03736007142366596,
      "grad_norm": 0.20658567547798157,
      "learning_rate": 2.8081442660546125e-05,
      "loss": 10.8069,
      "step": 68
    },
    {
      "epoch": 0.03790948423871987,
      "grad_norm": 0.17630618810653687,
      "learning_rate": 2.6526421860705473e-05,
      "loss": 10.8047,
      "step": 69
    },
    {
      "epoch": 0.038458897053773776,
      "grad_norm": 0.18739624321460724,
      "learning_rate": 2.500000000000001e-05,
      "loss": 10.8088,
      "step": 70
    },
    {
      "epoch": 0.03900830986882769,
      "grad_norm": 0.20612762868404388,
      "learning_rate": 2.350403678833976e-05,
      "loss": 10.804,
      "step": 71
    },
    {
      "epoch": 0.0395577226838816,
      "grad_norm": 0.17694811522960663,
      "learning_rate": 2.2040354826462668e-05,
      "loss": 10.8049,
      "step": 72
    },
    {
      "epoch": 0.04010713549893551,
      "grad_norm": 0.2062751203775406,
      "learning_rate": 2.061073738537635e-05,
      "loss": 10.8003,
      "step": 73
    },
    {
      "epoch": 0.040656548313989425,
      "grad_norm": 0.2096233367919922,
      "learning_rate": 1.9216926233717085e-05,
      "loss": 10.7996,
      "step": 74
    },
    {
      "epoch": 0.04120596112904334,
      "grad_norm": 0.1980363428592682,
      "learning_rate": 1.7860619515673033e-05,
      "loss": 10.8074,
      "step": 75
    },
    {
      "epoch": 0.04120596112904334,
      "eval_loss": 10.805519104003906,
      "eval_runtime": 13.9152,
      "eval_samples_per_second": 220.335,
      "eval_steps_per_second": 110.168,
      "step": 75
    },
    {
      "epoch": 0.04175537394409724,
      "grad_norm": 0.18881097435951233,
      "learning_rate": 1.6543469682057106e-05,
      "loss": 10.805,
      "step": 76
    },
    {
      "epoch": 0.042304786759151156,
      "grad_norm": 0.208832249045372,
      "learning_rate": 1.526708147705013e-05,
      "loss": 10.8029,
      "step": 77
    },
    {
      "epoch": 0.04285419957420507,
      "grad_norm": 0.19329778850078583,
      "learning_rate": 1.4033009983067452e-05,
      "loss": 10.802,
      "step": 78
    },
    {
      "epoch": 0.04340361238925898,
      "grad_norm": 0.20163513720035553,
      "learning_rate": 1.2842758726130283e-05,
      "loss": 10.8032,
      "step": 79
    },
    {
      "epoch": 0.04395302520431289,
      "grad_norm": 0.19688774645328522,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 10.8037,
      "step": 80
    },
    {
      "epoch": 0.044502438019366805,
      "grad_norm": 0.19582399725914001,
      "learning_rate": 1.0599462319663905e-05,
      "loss": 10.8032,
      "step": 81
    },
    {
      "epoch": 0.04505185083442071,
      "grad_norm": 0.16219070553779602,
      "learning_rate": 9.549150281252633e-06,
      "loss": 10.8132,
      "step": 82
    },
    {
      "epoch": 0.04560126364947462,
      "grad_norm": 0.21864815056324005,
      "learning_rate": 8.548121372247918e-06,
      "loss": 10.8021,
      "step": 83
    },
    {
      "epoch": 0.046150676464528535,
      "grad_norm": 0.1872103065252304,
      "learning_rate": 7.597595192178702e-06,
      "loss": 10.8065,
      "step": 84
    },
    {
      "epoch": 0.04670008927958245,
      "grad_norm": 0.21468770503997803,
      "learning_rate": 6.698729810778065e-06,
      "loss": 10.8071,
      "step": 85
    },
    {
      "epoch": 0.04724950209463636,
      "grad_norm": 0.2009747177362442,
      "learning_rate": 5.852620357053651e-06,
      "loss": 10.8053,
      "step": 86
    },
    {
      "epoch": 0.047798914909690265,
      "grad_norm": 0.20177090167999268,
      "learning_rate": 5.060297685041659e-06,
      "loss": 10.8031,
      "step": 87
    },
    {
      "epoch": 0.04834832772474418,
      "grad_norm": 0.18578961491584778,
      "learning_rate": 4.322727117869951e-06,
      "loss": 10.8091,
      "step": 88
    },
    {
      "epoch": 0.04889774053979809,
      "grad_norm": 0.20493243634700775,
      "learning_rate": 3.6408072716606346e-06,
      "loss": 10.7979,
      "step": 89
    },
    {
      "epoch": 0.049447153354852,
      "grad_norm": 0.19400815665721893,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 10.8031,
      "step": 90
    },
    {
      "epoch": 0.049996566169905915,
      "grad_norm": 0.20439405739307404,
      "learning_rate": 2.4471741852423237e-06,
      "loss": 10.8008,
      "step": 91
    },
    {
      "epoch": 0.05054597898495983,
      "grad_norm": 0.21862730383872986,
      "learning_rate": 1.9369152030840556e-06,
      "loss": 10.8059,
      "step": 92
    },
    {
      "epoch": 0.05109539180001373,
      "grad_norm": 0.1954014152288437,
      "learning_rate": 1.4852136862001764e-06,
      "loss": 10.8072,
      "step": 93
    },
    {
      "epoch": 0.051644804615067645,
      "grad_norm": 0.19296152889728546,
      "learning_rate": 1.0926199633097157e-06,
      "loss": 10.8057,
      "step": 94
    },
    {
      "epoch": 0.05219421743012156,
      "grad_norm": 0.20052501559257507,
      "learning_rate": 7.596123493895991e-07,
      "loss": 10.7977,
      "step": 95
    },
    {
      "epoch": 0.05274363024517547,
      "grad_norm": 0.20131036639213562,
      "learning_rate": 4.865965629214819e-07,
      "loss": 10.8016,
      "step": 96
    },
    {
      "epoch": 0.05329304306022938,
      "grad_norm": 0.20154723525047302,
      "learning_rate": 2.7390523158633554e-07,
      "loss": 10.802,
      "step": 97
    },
    {
      "epoch": 0.05384245587528329,
      "grad_norm": 0.171908438205719,
      "learning_rate": 1.2179748700879012e-07,
      "loss": 10.8097,
      "step": 98
    },
    {
      "epoch": 0.0543918686903372,
      "grad_norm": 0.18649089336395264,
      "learning_rate": 3.04586490452119e-08,
      "loss": 10.8051,
      "step": 99
    },
    {
      "epoch": 0.05494128150539111,
      "grad_norm": 0.21302233636379242,
      "learning_rate": 0.0,
      "loss": 10.8014,
      "step": 100
    },
    {
      "epoch": 0.05494128150539111,
      "eval_loss": 10.80360221862793,
      "eval_runtime": 13.7784,
      "eval_samples_per_second": 222.522,
      "eval_steps_per_second": 111.261,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 57297658970112.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
|
|