{
  "best_metric": 0.07105293869972229,
  "best_model_checkpoint": "saves/psy-course-doc/Qwen2.5-7B-Instruct/train/fold8/checkpoint-120",
  "epoch": 4.938271604938271,
  "eval_steps": 10,
  "global_step": 125,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03950617283950617,
      "grad_norm": 0.1868298202753067,
      "learning_rate": 7.692307692307694e-06,
      "loss": 0.2309,
      "step": 1
    },
    {
      "epoch": 0.07901234567901234,
      "grad_norm": 0.18279753625392914,
      "learning_rate": 1.5384615384615387e-05,
      "loss": 0.2478,
      "step": 2
    },
    {
      "epoch": 0.11851851851851852,
      "grad_norm": 0.21671271324157715,
      "learning_rate": 2.307692307692308e-05,
      "loss": 0.2279,
      "step": 3
    },
    {
      "epoch": 0.1580246913580247,
      "grad_norm": 0.18672101199626923,
      "learning_rate": 3.0769230769230774e-05,
      "loss": 0.262,
      "step": 4
    },
    {
      "epoch": 0.19753086419753085,
      "grad_norm": 0.16858762502670288,
      "learning_rate": 3.846153846153846e-05,
      "loss": 0.2781,
      "step": 5
    },
    {
      "epoch": 0.23703703703703705,
      "grad_norm": 0.22064264118671417,
      "learning_rate": 4.615384615384616e-05,
      "loss": 0.1929,
      "step": 6
    },
    {
      "epoch": 0.2765432098765432,
      "grad_norm": 0.17838694155216217,
      "learning_rate": 5.384615384615385e-05,
      "loss": 0.2836,
      "step": 7
    },
    {
      "epoch": 0.3160493827160494,
      "grad_norm": 0.18181541562080383,
      "learning_rate": 6.153846153846155e-05,
      "loss": 0.2072,
      "step": 8
    },
    {
      "epoch": 0.35555555555555557,
      "grad_norm": 0.16831634938716888,
      "learning_rate": 6.923076923076924e-05,
      "loss": 0.2313,
      "step": 9
    },
    {
      "epoch": 0.3950617283950617,
      "grad_norm": 0.17103101313114166,
      "learning_rate": 7.692307692307693e-05,
      "loss": 0.1678,
      "step": 10
    },
    {
      "epoch": 0.3950617283950617,
      "eval_loss": 0.18528540432453156,
      "eval_runtime": 21.7653,
      "eval_samples_per_second": 2.068,
      "eval_steps_per_second": 2.068,
      "step": 10
    },
    {
      "epoch": 0.4345679012345679,
      "grad_norm": 0.17013779282569885,
      "learning_rate": 8.461538461538461e-05,
      "loss": 0.2071,
      "step": 11
    },
    {
      "epoch": 0.4740740740740741,
      "grad_norm": 0.1327929049730301,
      "learning_rate": 9.230769230769232e-05,
      "loss": 0.1534,
      "step": 12
    },
    {
      "epoch": 0.5135802469135803,
      "grad_norm": 0.10475656390190125,
      "learning_rate": 0.0001,
      "loss": 0.1688,
      "step": 13
    },
    {
      "epoch": 0.5530864197530864,
      "grad_norm": 0.09205988794565201,
      "learning_rate": 9.998033131915266e-05,
      "loss": 0.1518,
      "step": 14
    },
    {
      "epoch": 0.5925925925925926,
      "grad_norm": 0.09766896814107895,
      "learning_rate": 9.992134075089084e-05,
      "loss": 0.1421,
      "step": 15
    },
    {
      "epoch": 0.6320987654320988,
      "grad_norm": 0.11214997619390488,
      "learning_rate": 9.982307470588098e-05,
      "loss": 0.1588,
      "step": 16
    },
    {
      "epoch": 0.671604938271605,
      "grad_norm": 0.09874926507472992,
      "learning_rate": 9.968561049466214e-05,
      "loss": 0.1752,
      "step": 17
    },
    {
      "epoch": 0.7111111111111111,
      "grad_norm": 0.11309217661619186,
      "learning_rate": 9.950905626682228e-05,
      "loss": 0.1958,
      "step": 18
    },
    {
      "epoch": 0.7506172839506173,
      "grad_norm": 0.09698560833930969,
      "learning_rate": 9.92935509259118e-05,
      "loss": 0.1521,
      "step": 19
    },
    {
      "epoch": 0.7901234567901234,
      "grad_norm": 0.09516795724630356,
      "learning_rate": 9.903926402016153e-05,
      "loss": 0.2057,
      "step": 20
    },
    {
      "epoch": 0.7901234567901234,
      "eval_loss": 0.12000219523906708,
      "eval_runtime": 21.7681,
      "eval_samples_per_second": 2.067,
      "eval_steps_per_second": 2.067,
      "step": 20
    },
    {
      "epoch": 0.8296296296296296,
      "grad_norm": 0.11490920931100845,
      "learning_rate": 9.874639560909117e-05,
      "loss": 0.1248,
      "step": 21
    },
    {
      "epoch": 0.8691358024691358,
      "grad_norm": 0.09830869734287262,
      "learning_rate": 9.841517610611309e-05,
      "loss": 0.1291,
      "step": 22
    },
    {
      "epoch": 0.908641975308642,
      "grad_norm": 0.09417690336704254,
      "learning_rate": 9.804586609725499e-05,
      "loss": 0.1249,
      "step": 23
    },
    {
      "epoch": 0.9481481481481482,
      "grad_norm": 0.10335355997085571,
      "learning_rate": 9.763875613614482e-05,
      "loss": 0.1216,
      "step": 24
    },
    {
      "epoch": 0.9876543209876543,
      "grad_norm": 0.08114749938249588,
      "learning_rate": 9.719416651541839e-05,
      "loss": 0.1275,
      "step": 25
    },
    {
      "epoch": 1.0271604938271606,
      "grad_norm": 0.18091371655464172,
      "learning_rate": 9.671244701472999e-05,
      "loss": 0.271,
      "step": 26
    },
    {
      "epoch": 1.0666666666666667,
      "grad_norm": 0.06726603955030441,
      "learning_rate": 9.619397662556435e-05,
      "loss": 0.0833,
      "step": 27
    },
    {
      "epoch": 1.106172839506173,
      "grad_norm": 0.08096066862344742,
      "learning_rate": 9.563916325306594e-05,
      "loss": 0.1421,
      "step": 28
    },
    {
      "epoch": 1.145679012345679,
      "grad_norm": 0.07401004433631897,
      "learning_rate": 9.504844339512095e-05,
      "loss": 0.1404,
      "step": 29
    },
    {
      "epoch": 1.1851851851851851,
      "grad_norm": 0.0746835395693779,
      "learning_rate": 9.442228179894362e-05,
      "loss": 0.0673,
      "step": 30
    },
    {
      "epoch": 1.1851851851851851,
      "eval_loss": 0.09145325422286987,
      "eval_runtime": 21.7763,
      "eval_samples_per_second": 2.066,
      "eval_steps_per_second": 2.066,
      "step": 30
    },
    {
      "epoch": 1.2246913580246914,
      "grad_norm": 0.06728620827198029,
      "learning_rate": 9.376117109543769e-05,
      "loss": 0.0526,
      "step": 31
    },
    {
      "epoch": 1.2641975308641975,
      "grad_norm": 0.07975295931100845,
      "learning_rate": 9.306563141162046e-05,
      "loss": 0.1593,
      "step": 32
    },
    {
      "epoch": 1.3037037037037038,
      "grad_norm": 0.05279506370425224,
      "learning_rate": 9.233620996141421e-05,
      "loss": 0.0579,
      "step": 33
    },
    {
      "epoch": 1.34320987654321,
      "grad_norm": 0.07161711901426315,
      "learning_rate": 9.157348061512727e-05,
      "loss": 0.1076,
      "step": 34
    },
    {
      "epoch": 1.382716049382716,
      "grad_norm": 0.07799088209867477,
      "learning_rate": 9.077804344796302e-05,
      "loss": 0.1208,
      "step": 35
    },
    {
      "epoch": 1.4222222222222223,
      "grad_norm": 0.08278154581785202,
      "learning_rate": 8.995052426791247e-05,
      "loss": 0.1284,
      "step": 36
    },
    {
      "epoch": 1.4617283950617284,
      "grad_norm": 0.08736036717891693,
      "learning_rate": 8.90915741234015e-05,
      "loss": 0.105,
      "step": 37
    },
    {
      "epoch": 1.5012345679012347,
      "grad_norm": 0.09331925958395004,
      "learning_rate": 8.820186879108038e-05,
      "loss": 0.0793,
      "step": 38
    },
    {
      "epoch": 1.5407407407407407,
      "grad_norm": 0.08145543932914734,
      "learning_rate": 8.728210824415827e-05,
      "loss": 0.1009,
      "step": 39
    },
    {
      "epoch": 1.5802469135802468,
      "grad_norm": 0.07455835491418839,
      "learning_rate": 8.633301610170135e-05,
      "loss": 0.0982,
      "step": 40
    },
    {
      "epoch": 1.5802469135802468,
      "eval_loss": 0.08214997500181198,
      "eval_runtime": 21.7939,
      "eval_samples_per_second": 2.065,
      "eval_steps_per_second": 2.065,
      "step": 40
    },
    {
      "epoch": 1.6197530864197531,
      "grad_norm": 0.08338115364313126,
      "learning_rate": 8.535533905932738e-05,
      "loss": 0.1039,
      "step": 41
    },
    {
      "epoch": 1.6592592592592592,
      "grad_norm": 0.06493010371923447,
      "learning_rate": 8.434984630174509e-05,
      "loss": 0.0918,
      "step": 42
    },
    {
      "epoch": 1.6987654320987655,
      "grad_norm": 0.06883358955383301,
      "learning_rate": 8.33173288976002e-05,
      "loss": 0.1034,
      "step": 43
    },
    {
      "epoch": 1.7382716049382716,
      "grad_norm": 0.0772809311747551,
      "learning_rate": 8.225859917710439e-05,
      "loss": 0.127,
      "step": 44
    },
    {
      "epoch": 1.7777777777777777,
      "grad_norm": 0.06465648114681244,
      "learning_rate": 8.117449009293668e-05,
      "loss": 0.0396,
      "step": 45
    },
    {
      "epoch": 1.817283950617284,
      "grad_norm": 0.05760206654667854,
      "learning_rate": 8.006585456492029e-05,
      "loss": 0.0637,
      "step": 46
    },
    {
      "epoch": 1.8567901234567903,
      "grad_norm": 0.08240170031785965,
      "learning_rate": 7.89335648089903e-05,
      "loss": 0.1835,
      "step": 47
    },
    {
      "epoch": 1.8962962962962964,
      "grad_norm": 0.06131649389863014,
      "learning_rate": 7.777851165098012e-05,
      "loss": 0.0865,
      "step": 48
    },
    {
      "epoch": 1.9358024691358025,
      "grad_norm": 0.05616225302219391,
      "learning_rate": 7.660160382576683e-05,
      "loss": 0.0581,
      "step": 49
    },
    {
      "epoch": 1.9753086419753085,
      "grad_norm": 0.06130716949701309,
      "learning_rate": 7.540376726232648e-05,
      "loss": 0.0891,
      "step": 50
    },
    {
      "epoch": 1.9753086419753085,
      "eval_loss": 0.0771004855632782,
      "eval_runtime": 21.7453,
      "eval_samples_per_second": 2.069,
      "eval_steps_per_second": 2.069,
      "step": 50
    },
    {
      "epoch": 2.0148148148148146,
      "grad_norm": 0.1203281581401825,
      "learning_rate": 7.4185944355262e-05,
      "loss": 0.1239,
      "step": 51
    },
    {
      "epoch": 2.054320987654321,
      "grad_norm": 0.05960683524608612,
      "learning_rate": 7.294909322337689e-05,
      "loss": 0.0792,
      "step": 52
    },
    {
      "epoch": 2.093827160493827,
      "grad_norm": 0.0512566901743412,
      "learning_rate": 7.169418695587791e-05,
      "loss": 0.0634,
      "step": 53
    },
    {
      "epoch": 2.1333333333333333,
      "grad_norm": 0.06991548091173172,
      "learning_rate": 7.042221284679982e-05,
      "loss": 0.1574,
      "step": 54
    },
    {
      "epoch": 2.1728395061728394,
      "grad_norm": 0.06921303272247314,
      "learning_rate": 6.91341716182545e-05,
      "loss": 0.1217,
      "step": 55
    },
    {
      "epoch": 2.212345679012346,
      "grad_norm": 0.07973436266183853,
      "learning_rate": 6.783107663311565e-05,
      "loss": 0.1404,
      "step": 56
    },
    {
      "epoch": 2.251851851851852,
      "grad_norm": 0.06023266166448593,
      "learning_rate": 6.651395309775837e-05,
      "loss": 0.1042,
      "step": 57
    },
    {
      "epoch": 2.291358024691358,
      "grad_norm": 0.06274905800819397,
      "learning_rate": 6.518383725548074e-05,
      "loss": 0.099,
      "step": 58
    },
    {
      "epoch": 2.330864197530864,
      "grad_norm": 0.06198497861623764,
      "learning_rate": 6.384177557124247e-05,
      "loss": 0.0401,
      "step": 59
    },
    {
      "epoch": 2.3703703703703702,
      "grad_norm": 0.061861295253038406,
      "learning_rate": 6.248882390836135e-05,
      "loss": 0.0763,
      "step": 60
    },
    {
      "epoch": 2.3703703703703702,
      "eval_loss": 0.07488537579774857,
      "eval_runtime": 21.7372,
      "eval_samples_per_second": 2.07,
      "eval_steps_per_second": 2.07,
      "step": 60
    },
    {
      "epoch": 2.4098765432098768,
      "grad_norm": 0.06693781167268753,
      "learning_rate": 6.112604669781572e-05,
      "loss": 0.0948,
      "step": 61
    },
    {
      "epoch": 2.449382716049383,
      "grad_norm": 0.04852902889251709,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 0.0565,
      "step": 62
    },
    {
      "epoch": 2.488888888888889,
      "grad_norm": 0.0796937495470047,
      "learning_rate": 5.837531116523682e-05,
      "loss": 0.0929,
      "step": 63
    },
    {
      "epoch": 2.528395061728395,
      "grad_norm": 0.0650988221168518,
      "learning_rate": 5.698951697677498e-05,
      "loss": 0.0439,
      "step": 64
    },
    {
      "epoch": 2.567901234567901,
      "grad_norm": 0.07308197021484375,
      "learning_rate": 5.559822380516539e-05,
      "loss": 0.0784,
      "step": 65
    },
    {
      "epoch": 2.6074074074074076,
      "grad_norm": 0.06614898145198822,
      "learning_rate": 5.420252624646238e-05,
      "loss": 0.062,
      "step": 66
    },
    {
      "epoch": 2.6469135802469137,
      "grad_norm": 0.06226956099271774,
      "learning_rate": 5.2803522361859594e-05,
      "loss": 0.1083,
      "step": 67
    },
    {
      "epoch": 2.68641975308642,
      "grad_norm": 0.06937666982412338,
      "learning_rate": 5.140231281379345e-05,
      "loss": 0.0661,
      "step": 68
    },
    {
      "epoch": 2.725925925925926,
      "grad_norm": 0.07096434384584427,
      "learning_rate": 5e-05,
      "loss": 0.0883,
      "step": 69
    },
    {
      "epoch": 2.765432098765432,
      "grad_norm": 0.07018528878688812,
      "learning_rate": 4.859768718620656e-05,
      "loss": 0.0509,
      "step": 70
    },
    {
      "epoch": 2.765432098765432,
      "eval_loss": 0.07292327284812927,
      "eval_runtime": 21.7309,
      "eval_samples_per_second": 2.071,
      "eval_steps_per_second": 2.071,
      "step": 70
    },
    {
      "epoch": 2.8049382716049385,
      "grad_norm": 0.04997608810663223,
      "learning_rate": 4.7196477638140404e-05,
      "loss": 0.0549,
      "step": 71
    },
    {
      "epoch": 2.8444444444444446,
      "grad_norm": 0.0775761529803276,
      "learning_rate": 4.579747375353763e-05,
      "loss": 0.1306,
      "step": 72
    },
    {
      "epoch": 2.8839506172839506,
      "grad_norm": 0.05562743544578552,
      "learning_rate": 4.4401776194834613e-05,
      "loss": 0.072,
      "step": 73
    },
    {
      "epoch": 2.9234567901234567,
      "grad_norm": 0.07207901775836945,
      "learning_rate": 4.3010483023225045e-05,
      "loss": 0.0578,
      "step": 74
    },
    {
      "epoch": 2.962962962962963,
      "grad_norm": 0.05793574079871178,
      "learning_rate": 4.162468883476319e-05,
      "loss": 0.0531,
      "step": 75
    },
    {
      "epoch": 3.0024691358024693,
      "grad_norm": 0.15450118482112885,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 0.2015,
      "step": 76
    },
    {
      "epoch": 3.0419753086419754,
      "grad_norm": 0.06680703908205032,
      "learning_rate": 3.887395330218429e-05,
      "loss": 0.1108,
      "step": 77
    },
    {
      "epoch": 3.0814814814814815,
      "grad_norm": 0.07221418619155884,
      "learning_rate": 3.7511176091638653e-05,
      "loss": 0.0599,
      "step": 78
    },
    {
      "epoch": 3.1209876543209876,
      "grad_norm": 0.08308330923318863,
      "learning_rate": 3.6158224428757535e-05,
      "loss": 0.0825,
      "step": 79
    },
    {
      "epoch": 3.1604938271604937,
      "grad_norm": 0.08082571625709534,
      "learning_rate": 3.4816162744519263e-05,
      "loss": 0.0463,
      "step": 80
    },
    {
      "epoch": 3.1604938271604937,
      "eval_loss": 0.07214194536209106,
      "eval_runtime": 21.7346,
      "eval_samples_per_second": 2.07,
      "eval_steps_per_second": 2.07,
      "step": 80
    },
    {
      "epoch": 3.2,
      "grad_norm": 0.07321686297655106,
      "learning_rate": 3.3486046902241664e-05,
      "loss": 0.0983,
      "step": 81
    },
    {
      "epoch": 3.2395061728395063,
      "grad_norm": 0.05445065349340439,
      "learning_rate": 3.216892336688435e-05,
      "loss": 0.0576,
      "step": 82
    },
    {
      "epoch": 3.2790123456790123,
      "grad_norm": 0.08311997354030609,
      "learning_rate": 3.086582838174551e-05,
      "loss": 0.0711,
      "step": 83
    },
    {
      "epoch": 3.3185185185185184,
      "grad_norm": 0.06639759987592697,
      "learning_rate": 2.9577787153200197e-05,
      "loss": 0.0702,
      "step": 84
    },
    {
      "epoch": 3.3580246913580245,
      "grad_norm": 0.07754135876893997,
      "learning_rate": 2.8305813044122097e-05,
      "loss": 0.1092,
      "step": 85
    },
    {
      "epoch": 3.397530864197531,
      "grad_norm": 0.05395815521478653,
      "learning_rate": 2.705090677662311e-05,
      "loss": 0.0483,
      "step": 86
    },
    {
      "epoch": 3.437037037037037,
      "grad_norm": 0.05955814570188522,
      "learning_rate": 2.581405564473801e-05,
      "loss": 0.0425,
      "step": 87
    },
    {
      "epoch": 3.476543209876543,
      "grad_norm": 0.06142286956310272,
      "learning_rate": 2.459623273767354e-05,
      "loss": 0.1308,
      "step": 88
    },
    {
      "epoch": 3.5160493827160493,
      "grad_norm": 0.05392776429653168,
      "learning_rate": 2.3398396174233178e-05,
      "loss": 0.0543,
      "step": 89
    },
    {
      "epoch": 3.5555555555555554,
      "grad_norm": 0.07074186205863953,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 0.0305,
      "step": 90
    },
    {
      "epoch": 3.5555555555555554,
      "eval_loss": 0.07150960713624954,
      "eval_runtime": 21.753,
      "eval_samples_per_second": 2.069,
      "eval_steps_per_second": 2.069,
      "step": 90
    },
    {
      "epoch": 3.595061728395062,
      "grad_norm": 0.07016747444868088,
      "learning_rate": 2.1066435191009715e-05,
      "loss": 0.0483,
      "step": 91
    },
    {
      "epoch": 3.634567901234568,
      "grad_norm": 0.06681767851114273,
      "learning_rate": 1.9934145435079702e-05,
      "loss": 0.0769,
      "step": 92
    },
    {
      "epoch": 3.674074074074074,
      "grad_norm": 0.06709405034780502,
      "learning_rate": 1.8825509907063327e-05,
      "loss": 0.0661,
      "step": 93
    },
    {
      "epoch": 3.71358024691358,
      "grad_norm": 0.0665164664387703,
      "learning_rate": 1.774140082289563e-05,
      "loss": 0.112,
      "step": 94
    },
    {
      "epoch": 3.753086419753086,
      "grad_norm": 0.0771573930978775,
      "learning_rate": 1.6682671102399805e-05,
      "loss": 0.0793,
      "step": 95
    },
    {
      "epoch": 3.7925925925925927,
      "grad_norm": 0.062010809779167175,
      "learning_rate": 1.5650153698254916e-05,
      "loss": 0.0254,
      "step": 96
    },
    {
      "epoch": 3.832098765432099,
      "grad_norm": 0.07360262423753738,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 0.0921,
      "step": 97
    },
    {
      "epoch": 3.871604938271605,
      "grad_norm": 0.07366364449262619,
      "learning_rate": 1.3666983898298657e-05,
      "loss": 0.1451,
      "step": 98
    },
    {
      "epoch": 3.911111111111111,
      "grad_norm": 0.07421759516000748,
      "learning_rate": 1.2717891755841722e-05,
      "loss": 0.1039,
      "step": 99
    },
    {
      "epoch": 3.950617283950617,
      "grad_norm": 0.05935444310307503,
      "learning_rate": 1.1798131208919627e-05,
      "loss": 0.0505,
      "step": 100
    },
    {
      "epoch": 3.950617283950617,
      "eval_loss": 0.07133489102125168,
      "eval_runtime": 21.7373,
      "eval_samples_per_second": 2.07,
      "eval_steps_per_second": 2.07,
      "step": 100
    },
    {
      "epoch": 3.9901234567901236,
      "grad_norm": 0.09624148905277252,
      "learning_rate": 1.090842587659851e-05,
      "loss": 0.144,
      "step": 101
    },
    {
      "epoch": 4.029629629629629,
      "grad_norm": 0.12751835584640503,
      "learning_rate": 1.004947573208756e-05,
      "loss": 0.1655,
      "step": 102
    },
    {
      "epoch": 4.069135802469136,
      "grad_norm": 0.07455002516508102,
      "learning_rate": 9.221956552036992e-06,
      "loss": 0.1134,
      "step": 103
    },
    {
      "epoch": 4.108641975308642,
      "grad_norm": 0.06696426123380661,
      "learning_rate": 8.426519384872733e-06,
      "loss": 0.0481,
      "step": 104
    },
    {
      "epoch": 4.148148148148148,
      "grad_norm": 0.06412757933139801,
      "learning_rate": 7.663790038585793e-06,
      "loss": 0.0808,
      "step": 105
    },
    {
      "epoch": 4.187654320987654,
      "grad_norm": 0.06346351653337479,
      "learning_rate": 6.934368588379553e-06,
      "loss": 0.0488,
      "step": 106
    },
    {
      "epoch": 4.22716049382716,
      "grad_norm": 0.06537764519453049,
      "learning_rate": 6.238828904562316e-06,
      "loss": 0.0194,
      "step": 107
    },
    {
      "epoch": 4.266666666666667,
      "grad_norm": 0.056524328887462616,
      "learning_rate": 5.577718201056392e-06,
      "loss": 0.0253,
      "step": 108
    },
    {
      "epoch": 4.306172839506173,
      "grad_norm": 0.062042903155088425,
      "learning_rate": 4.951556604879048e-06,
      "loss": 0.1144,
      "step": 109
    },
    {
      "epoch": 4.345679012345679,
      "grad_norm": 0.059958767145872116,
      "learning_rate": 4.360836746934055e-06,
      "loss": 0.0645,
      "step": 110
    },
    {
      "epoch": 4.345679012345679,
      "eval_loss": 0.07106943428516388,
      "eval_runtime": 21.7532,
      "eval_samples_per_second": 2.069,
      "eval_steps_per_second": 2.069,
      "step": 110
    },
    {
      "epoch": 4.385185185185185,
      "grad_norm": 0.060797806829214096,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 0.094,
      "step": 111
    },
    {
      "epoch": 4.424691358024692,
      "grad_norm": 0.056455742567777634,
      "learning_rate": 3.2875529852700147e-06,
      "loss": 0.0744,
      "step": 112
    },
    {
      "epoch": 4.4641975308641975,
      "grad_norm": 0.06785272806882858,
      "learning_rate": 2.8058334845816213e-06,
      "loss": 0.0933,
      "step": 113
    },
    {
      "epoch": 4.503703703703704,
      "grad_norm": 0.06774047017097473,
      "learning_rate": 2.361243863855184e-06,
      "loss": 0.0541,
      "step": 114
    },
    {
      "epoch": 4.54320987654321,
      "grad_norm": 0.05584665387868881,
      "learning_rate": 1.9541339027450256e-06,
      "loss": 0.0748,
      "step": 115
    },
    {
      "epoch": 4.582716049382716,
      "grad_norm": 0.07345107942819595,
      "learning_rate": 1.584823893886933e-06,
      "loss": 0.0781,
      "step": 116
    },
    {
      "epoch": 4.622222222222222,
      "grad_norm": 0.0609910674393177,
      "learning_rate": 1.2536043909088191e-06,
      "loss": 0.1058,
      "step": 117
    },
    {
      "epoch": 4.661728395061728,
      "grad_norm": 0.08062499016523361,
      "learning_rate": 9.607359798384785e-07,
      "loss": 0.1542,
      "step": 118
    },
    {
      "epoch": 4.701234567901235,
      "grad_norm": 0.06287279725074768,
      "learning_rate": 7.064490740882057e-07,
      "loss": 0.0482,
      "step": 119
    },
    {
      "epoch": 4.7407407407407405,
      "grad_norm": 0.07574119418859482,
      "learning_rate": 4.909437331777179e-07,
      "loss": 0.0858,
      "step": 120
    },
    {
      "epoch": 4.7407407407407405,
      "eval_loss": 0.07105293869972229,
      "eval_runtime": 21.7278,
      "eval_samples_per_second": 2.071,
      "eval_steps_per_second": 2.071,
      "step": 120
    },
    {
      "epoch": 4.780246913580247,
      "grad_norm": 0.07911939918994904,
      "learning_rate": 3.143895053378698e-07,
      "loss": 0.0174,
      "step": 121
    },
    {
      "epoch": 4.8197530864197535,
      "grad_norm": 0.06001194566488266,
      "learning_rate": 1.7692529411904578e-07,
      "loss": 0.0623,
      "step": 122
    },
    {
      "epoch": 4.859259259259259,
      "grad_norm": 0.054641127586364746,
      "learning_rate": 7.865924910916977e-08,
      "loss": 0.0797,
      "step": 123
    },
    {
      "epoch": 4.898765432098766,
      "grad_norm": 0.07878218591213226,
      "learning_rate": 1.9668680847356735e-08,
      "loss": 0.0763,
      "step": 124
    },
    {
      "epoch": 4.938271604938271,
      "grad_norm": 0.06516701728105545,
      "learning_rate": 0.0,
      "loss": 0.1182,
      "step": 125
    },
    {
      "epoch": 4.938271604938271,
      "step": 125,
      "total_flos": 1.4521433822274355e+17,
      "train_loss": 0.10799009439349175,
      "train_runtime": 3470.8125,
      "train_samples_per_second": 0.583,
      "train_steps_per_second": 0.036
    }
  ],
  "logging_steps": 1,
  "max_steps": 125,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 10,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.4521433822274355e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}