{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.9820359281437125,
  "eval_steps": 500,
  "global_step": 166,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.011976047904191617,
      "grad_norm": 2.5615657136716083,
      "learning_rate": 1e-06,
      "loss": 2.3086,
      "step": 1
    },
    {
      "epoch": 0.023952095808383235,
      "grad_norm": 2.5773761069534853,
      "learning_rate": 1e-06,
      "loss": 2.3322,
      "step": 2
    },
    {
      "epoch": 0.03592814371257485,
      "grad_norm": 2.5489911065299875,
      "learning_rate": 1e-06,
      "loss": 2.3189,
      "step": 3
    },
    {
      "epoch": 0.04790419161676647,
      "grad_norm": 2.5641980239668305,
      "learning_rate": 1e-06,
      "loss": 2.3221,
      "step": 4
    },
    {
      "epoch": 0.059880239520958084,
      "grad_norm": 2.495418092335473,
      "learning_rate": 1e-06,
      "loss": 2.3203,
      "step": 5
    },
    {
      "epoch": 0.0718562874251497,
      "grad_norm": 2.5165101811100494,
      "learning_rate": 1e-06,
      "loss": 2.3189,
      "step": 6
    },
    {
      "epoch": 0.08383233532934131,
      "grad_norm": 2.504162196802967,
      "learning_rate": 1e-06,
      "loss": 2.314,
      "step": 7
    },
    {
      "epoch": 0.09580838323353294,
      "grad_norm": 2.4814364171413796,
      "learning_rate": 1e-06,
      "loss": 2.3104,
      "step": 8
    },
    {
      "epoch": 0.10778443113772455,
      "grad_norm": 2.3774878160101096,
      "learning_rate": 1e-06,
      "loss": 2.3067,
      "step": 9
    },
    {
      "epoch": 0.11976047904191617,
      "grad_norm": 2.3475602586392705,
      "learning_rate": 1e-06,
      "loss": 2.3168,
      "step": 10
    },
    {
      "epoch": 0.1317365269461078,
      "grad_norm": 2.333993760427704,
      "learning_rate": 1e-06,
      "loss": 2.322,
      "step": 11
    },
    {
      "epoch": 0.1437125748502994,
      "grad_norm": 2.316543273634108,
      "learning_rate": 1e-06,
      "loss": 2.321,
      "step": 12
    },
    {
      "epoch": 0.15568862275449102,
      "grad_norm": 2.302388927121058,
      "learning_rate": 1e-06,
      "loss": 2.312,
      "step": 13
    },
    {
      "epoch": 0.16766467065868262,
      "grad_norm": 2.2987978050220024,
      "learning_rate": 1e-06,
      "loss": 2.303,
      "step": 14
    },
    {
      "epoch": 0.17964071856287425,
      "grad_norm": 2.2639488675683603,
      "learning_rate": 1e-06,
      "loss": 2.3105,
      "step": 15
    },
    {
      "epoch": 0.19161676646706588,
      "grad_norm": 2.261718181341385,
      "learning_rate": 1e-06,
      "loss": 2.2994,
      "step": 16
    },
    {
      "epoch": 0.20359281437125748,
      "grad_norm": 1.8660446500221628,
      "learning_rate": 1e-06,
      "loss": 2.3085,
      "step": 17
    },
    {
      "epoch": 0.2155688622754491,
      "grad_norm": 1.8596074219892773,
      "learning_rate": 1e-06,
      "loss": 2.3006,
      "step": 18
    },
    {
      "epoch": 0.2275449101796407,
      "grad_norm": 1.8643371283543244,
      "learning_rate": 1e-06,
      "loss": 2.2872,
      "step": 19
    },
    {
      "epoch": 0.23952095808383234,
      "grad_norm": 1.8534509057908637,
      "learning_rate": 1e-06,
      "loss": 2.3008,
      "step": 20
    },
    {
      "epoch": 0.25149700598802394,
      "grad_norm": 1.8138301856421146,
      "learning_rate": 1e-06,
      "loss": 2.2948,
      "step": 21
    },
    {
      "epoch": 0.2634730538922156,
      "grad_norm": 1.81412136269699,
      "learning_rate": 1e-06,
      "loss": 2.2899,
      "step": 22
    },
    {
      "epoch": 0.2754491017964072,
      "grad_norm": 1.821596307651106,
      "learning_rate": 1e-06,
      "loss": 2.2967,
      "step": 23
    },
    {
      "epoch": 0.2874251497005988,
      "grad_norm": 1.8118117090950443,
      "learning_rate": 1e-06,
      "loss": 2.2969,
      "step": 24
    },
    {
      "epoch": 0.2994011976047904,
      "grad_norm": 1.780362304046831,
      "learning_rate": 1e-06,
      "loss": 2.3023,
      "step": 25
    },
    {
      "epoch": 0.31137724550898205,
      "grad_norm": 1.747692085337293,
      "learning_rate": 1e-06,
      "loss": 2.2952,
      "step": 26
    },
    {
      "epoch": 0.32335329341317365,
      "grad_norm": 1.7383178165500712,
      "learning_rate": 1e-06,
      "loss": 2.286,
      "step": 27
    },
    {
      "epoch": 0.33532934131736525,
      "grad_norm": 1.7694584322479294,
      "learning_rate": 1e-06,
      "loss": 2.2964,
      "step": 28
    },
    {
      "epoch": 0.3473053892215569,
      "grad_norm": 1.7307827415862227,
      "learning_rate": 1e-06,
      "loss": 2.2871,
      "step": 29
    },
    {
      "epoch": 0.3592814371257485,
      "grad_norm": 1.7756077058372248,
      "learning_rate": 1e-06,
      "loss": 2.3018,
      "step": 30
    },
    {
      "epoch": 0.3712574850299401,
      "grad_norm": 1.6569877610122026,
      "learning_rate": 1e-06,
      "loss": 2.2784,
      "step": 31
    },
    {
      "epoch": 0.38323353293413176,
      "grad_norm": 1.4051537618421257,
      "learning_rate": 1e-06,
      "loss": 2.2764,
      "step": 32
    },
    {
      "epoch": 0.39520958083832336,
      "grad_norm": 1.2745401979535236,
      "learning_rate": 1e-06,
      "loss": 2.2706,
      "step": 33
    },
    {
      "epoch": 0.40718562874251496,
      "grad_norm": 1.2299996626422867,
      "learning_rate": 1e-06,
      "loss": 2.2593,
      "step": 34
    },
    {
      "epoch": 0.41916167664670656,
      "grad_norm": 1.2580507659520745,
      "learning_rate": 1e-06,
      "loss": 2.2705,
      "step": 35
    },
    {
      "epoch": 0.4311377245508982,
      "grad_norm": 1.2481843435811928,
      "learning_rate": 1e-06,
      "loss": 2.2708,
      "step": 36
    },
    {
      "epoch": 0.4431137724550898,
      "grad_norm": 1.2626123054128822,
      "learning_rate": 1e-06,
      "loss": 2.2636,
      "step": 37
    },
    {
      "epoch": 0.4550898203592814,
      "grad_norm": 1.2419949588465158,
      "learning_rate": 1e-06,
      "loss": 2.2593,
      "step": 38
    },
    {
      "epoch": 0.46706586826347307,
      "grad_norm": 1.244572241926842,
      "learning_rate": 1e-06,
      "loss": 2.2531,
      "step": 39
    },
    {
      "epoch": 0.47904191616766467,
      "grad_norm": 1.2345334641611154,
      "learning_rate": 1e-06,
      "loss": 2.2651,
      "step": 40
    },
    {
      "epoch": 0.49101796407185627,
      "grad_norm": 1.2179094002836452,
      "learning_rate": 1e-06,
      "loss": 2.2721,
      "step": 41
    },
    {
      "epoch": 0.5029940119760479,
      "grad_norm": 1.2027541000732906,
      "learning_rate": 1e-06,
      "loss": 2.2568,
      "step": 42
    },
    {
      "epoch": 0.5149700598802395,
      "grad_norm": 1.209440540437922,
      "learning_rate": 1e-06,
      "loss": 2.2504,
      "step": 43
    },
    {
      "epoch": 0.5269461077844312,
      "grad_norm": 1.2156193632106038,
      "learning_rate": 1e-06,
      "loss": 2.2611,
      "step": 44
    },
    {
      "epoch": 0.5389221556886228,
      "grad_norm": 1.1809317447027605,
      "learning_rate": 1e-06,
      "loss": 2.2637,
      "step": 45
    },
    {
      "epoch": 0.5508982035928144,
      "grad_norm": 1.1826529774139363,
      "learning_rate": 1e-06,
      "loss": 2.2536,
      "step": 46
    },
    {
      "epoch": 0.562874251497006,
      "grad_norm": 1.173882364883566,
      "learning_rate": 1e-06,
      "loss": 2.2543,
      "step": 47
    },
    {
      "epoch": 0.5748502994011976,
      "grad_norm": 1.1473458364746005,
      "learning_rate": 1e-06,
      "loss": 2.252,
      "step": 48
    },
    {
      "epoch": 0.5868263473053892,
      "grad_norm": 1.1551199243421189,
      "learning_rate": 1e-06,
      "loss": 2.2604,
      "step": 49
    },
    {
      "epoch": 0.5988023952095808,
      "grad_norm": 1.1520053185511139,
      "learning_rate": 1e-06,
      "loss": 2.2571,
      "step": 50
    },
    {
      "epoch": 0.6107784431137725,
      "grad_norm": 1.1237813312947265,
      "learning_rate": 1e-06,
      "loss": 2.247,
      "step": 51
    },
    {
      "epoch": 0.6227544910179641,
      "grad_norm": 1.1322867564382475,
      "learning_rate": 1e-06,
      "loss": 2.2607,
      "step": 52
    },
    {
      "epoch": 0.6347305389221557,
      "grad_norm": 1.1241702784408114,
      "learning_rate": 1e-06,
      "loss": 2.2461,
      "step": 53
    },
    {
      "epoch": 0.6467065868263473,
      "grad_norm": 1.1030714149741052,
      "learning_rate": 1e-06,
      "loss": 2.2569,
      "step": 54
    },
    {
      "epoch": 0.6586826347305389,
      "grad_norm": 1.1147423461510102,
      "learning_rate": 1e-06,
      "loss": 2.2419,
      "step": 55
    },
    {
      "epoch": 0.6706586826347305,
      "grad_norm": 1.0729801001524597,
      "learning_rate": 1e-06,
      "loss": 2.2511,
      "step": 56
    },
    {
      "epoch": 0.6826347305389222,
      "grad_norm": 1.0438039117359488,
      "learning_rate": 1e-06,
      "loss": 2.2388,
      "step": 57
    },
    {
      "epoch": 0.6946107784431138,
      "grad_norm": 0.9763927603089774,
      "learning_rate": 1e-06,
      "loss": 2.2307,
      "step": 58
    },
    {
      "epoch": 0.7065868263473054,
      "grad_norm": 0.9164962634656145,
      "learning_rate": 1e-06,
      "loss": 2.227,
      "step": 59
    },
    {
      "epoch": 0.718562874251497,
      "grad_norm": 0.848311700014838,
      "learning_rate": 1e-06,
      "loss": 2.2153,
      "step": 60
    },
    {
      "epoch": 0.7305389221556886,
      "grad_norm": 0.7776204108137212,
      "learning_rate": 1e-06,
      "loss": 2.2227,
      "step": 61
    },
    {
      "epoch": 0.7425149700598802,
      "grad_norm": 0.7319953234131772,
      "learning_rate": 1e-06,
      "loss": 2.2182,
      "step": 62
    },
    {
      "epoch": 0.7544910179640718,
      "grad_norm": 0.6909482211546656,
      "learning_rate": 1e-06,
      "loss": 2.2145,
      "step": 63
    },
    {
      "epoch": 0.7664670658682635,
      "grad_norm": 0.6738176870137997,
      "learning_rate": 1e-06,
      "loss": 2.2083,
      "step": 64
    },
    {
      "epoch": 0.7784431137724551,
      "grad_norm": 0.6464473261385877,
      "learning_rate": 1e-06,
      "loss": 2.2032,
      "step": 65
    },
    {
      "epoch": 0.7904191616766467,
      "grad_norm": 0.626337164367754,
      "learning_rate": 1e-06,
      "loss": 2.2189,
      "step": 66
    },
    {
      "epoch": 0.8023952095808383,
      "grad_norm": 0.6207173763836306,
      "learning_rate": 1e-06,
      "loss": 2.2093,
      "step": 67
    },
    {
      "epoch": 0.8143712574850299,
      "grad_norm": 0.6168240564884324,
      "learning_rate": 1e-06,
      "loss": 2.2067,
      "step": 68
    },
    {
      "epoch": 0.8263473053892215,
      "grad_norm": 0.6148534111975588,
      "learning_rate": 1e-06,
      "loss": 2.2146,
      "step": 69
    },
    {
      "epoch": 0.8383233532934131,
      "grad_norm": 0.611544755836136,
      "learning_rate": 1e-06,
      "loss": 2.2055,
      "step": 70
    },
    {
      "epoch": 0.8502994011976048,
      "grad_norm": 0.611863129737121,
      "learning_rate": 1e-06,
      "loss": 2.2144,
      "step": 71
    },
    {
      "epoch": 0.8622754491017964,
      "grad_norm": 0.6028090268614178,
      "learning_rate": 1e-06,
      "loss": 2.209,
      "step": 72
    },
    {
      "epoch": 0.874251497005988,
      "grad_norm": 0.6091038720653699,
      "learning_rate": 1e-06,
      "loss": 2.2048,
      "step": 73
    },
    {
      "epoch": 0.8862275449101796,
      "grad_norm": 0.5937914977596926,
      "learning_rate": 1e-06,
      "loss": 2.2108,
      "step": 74
    },
    {
      "epoch": 0.8982035928143712,
      "grad_norm": 0.595528112324968,
      "learning_rate": 1e-06,
      "loss": 2.2022,
      "step": 75
    },
    {
      "epoch": 0.9101796407185628,
      "grad_norm": 0.5883119960964677,
      "learning_rate": 1e-06,
      "loss": 2.2084,
      "step": 76
    },
    {
      "epoch": 0.9221556886227545,
      "grad_norm": 0.5773396259001046,
      "learning_rate": 1e-06,
      "loss": 2.2036,
      "step": 77
    },
    {
      "epoch": 0.9341317365269461,
      "grad_norm": 0.5734709642037124,
      "learning_rate": 1e-06,
      "loss": 2.2052,
      "step": 78
    },
    {
      "epoch": 0.9461077844311377,
      "grad_norm": 0.5591056938189867,
      "learning_rate": 1e-06,
      "loss": 2.2089,
      "step": 79
    },
    {
      "epoch": 0.9580838323353293,
      "grad_norm": 0.546274882638652,
      "learning_rate": 1e-06,
      "loss": 2.1942,
      "step": 80
    },
    {
      "epoch": 0.9700598802395209,
      "grad_norm": 0.5480518878076608,
      "learning_rate": 1e-06,
      "loss": 2.2069,
      "step": 81
    },
    {
      "epoch": 0.9820359281437125,
      "grad_norm": 0.5463287286169074,
      "learning_rate": 1e-06,
      "loss": 2.1974,
      "step": 82
    },
    {
      "epoch": 0.9940119760479041,
      "grad_norm": 0.533820388108904,
      "learning_rate": 1e-06,
      "loss": 2.199,
      "step": 83
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.533820388108904,
      "learning_rate": 1e-06,
      "loss": 2.2047,
      "step": 84
    },
    {
      "epoch": 1.0119760479041917,
      "grad_norm": 0.8062312296444792,
      "learning_rate": 1e-06,
      "loss": 2.1951,
      "step": 85
    },
    {
      "epoch": 1.0239520958083832,
      "grad_norm": 0.5284897388941314,
      "learning_rate": 1e-06,
      "loss": 2.193,
      "step": 86
    },
    {
      "epoch": 1.035928143712575,
      "grad_norm": 0.5229421018335979,
      "learning_rate": 1e-06,
      "loss": 2.1932,
      "step": 87
    },
    {
      "epoch": 1.0479041916167664,
      "grad_norm": 0.5133887468703301,
      "learning_rate": 1e-06,
      "loss": 2.1943,
      "step": 88
    },
    {
      "epoch": 1.0598802395209581,
      "grad_norm": 0.5171326387183323,
      "learning_rate": 1e-06,
      "loss": 2.2006,
      "step": 89
    },
    {
      "epoch": 1.0718562874251496,
      "grad_norm": 0.51794405572636,
      "learning_rate": 1e-06,
      "loss": 2.1955,
      "step": 90
    },
    {
      "epoch": 1.0838323353293413,
      "grad_norm": 0.499854985040916,
      "learning_rate": 1e-06,
      "loss": 2.2038,
      "step": 91
    },
    {
      "epoch": 1.095808383233533,
      "grad_norm": 0.507043821717436,
      "learning_rate": 1e-06,
      "loss": 2.1898,
      "step": 92
    },
    {
      "epoch": 1.1077844311377245,
      "grad_norm": 0.49951015907518004,
      "learning_rate": 1e-06,
      "loss": 2.1974,
      "step": 93
    },
    {
      "epoch": 1.1197604790419162,
      "grad_norm": 0.48701630934015266,
      "learning_rate": 1e-06,
      "loss": 2.19,
      "step": 94
    },
    {
      "epoch": 1.1317365269461077,
      "grad_norm": 0.5023244374647904,
      "learning_rate": 1e-06,
      "loss": 2.1901,
      "step": 95
    },
    {
      "epoch": 1.1437125748502994,
      "grad_norm": 0.4893947097377309,
      "learning_rate": 1e-06,
      "loss": 2.1959,
      "step": 96
    },
    {
      "epoch": 1.1556886227544911,
      "grad_norm": 0.4921551531091861,
      "learning_rate": 1e-06,
      "loss": 2.1856,
      "step": 97
    },
    {
      "epoch": 1.1676646706586826,
      "grad_norm": 0.4775299084079388,
      "learning_rate": 1e-06,
      "loss": 2.1946,
      "step": 98
    },
    {
      "epoch": 1.1796407185628743,
      "grad_norm": 0.47443238852305514,
      "learning_rate": 1e-06,
      "loss": 2.1829,
      "step": 99
    },
    {
      "epoch": 1.1916167664670658,
      "grad_norm": 0.4738686464339643,
      "learning_rate": 1e-06,
      "loss": 2.1882,
      "step": 100
    },
    {
      "epoch": 1.2035928143712575,
      "grad_norm": 0.47247955693487775,
      "learning_rate": 1e-06,
      "loss": 2.1789,
      "step": 101
    },
    {
      "epoch": 1.215568862275449,
      "grad_norm": 0.48664618836275864,
      "learning_rate": 1e-06,
      "loss": 2.1901,
      "step": 102
    },
    {
      "epoch": 1.2275449101796407,
      "grad_norm": 0.624671707718351,
      "learning_rate": 1e-06,
      "loss": 2.1786,
      "step": 103
    },
    {
      "epoch": 1.2395209580838324,
      "grad_norm": 0.47270583079574985,
      "learning_rate": 1e-06,
      "loss": 2.1815,
      "step": 104
    },
    {
      "epoch": 1.251497005988024,
      "grad_norm": 0.4850659145115984,
      "learning_rate": 1e-06,
      "loss": 2.1766,
      "step": 105
    },
    {
      "epoch": 1.2634730538922156,
      "grad_norm": 0.4774338329878164,
      "learning_rate": 1e-06,
      "loss": 2.1798,
      "step": 106
    },
    {
      "epoch": 1.2754491017964071,
      "grad_norm": 0.4703506888310317,
      "learning_rate": 1e-06,
      "loss": 2.1784,
      "step": 107
    },
    {
      "epoch": 1.2874251497005988,
      "grad_norm": 0.4518841904928292,
      "learning_rate": 1e-06,
      "loss": 2.1739,
      "step": 108
    },
    {
      "epoch": 1.2994011976047903,
      "grad_norm": 0.43979454708832033,
      "learning_rate": 1e-06,
      "loss": 2.1723,
      "step": 109
    },
    {
      "epoch": 1.311377245508982,
      "grad_norm": 0.42560783774660166,
      "learning_rate": 1e-06,
      "loss": 2.1654,
      "step": 110
    },
    {
      "epoch": 1.3233532934131738,
      "grad_norm": 0.4266096810118197,
      "learning_rate": 1e-06,
      "loss": 2.1714,
      "step": 111
    },
    {
      "epoch": 1.3353293413173652,
      "grad_norm": 0.42819974246838616,
      "learning_rate": 1e-06,
      "loss": 2.172,
      "step": 112
    },
    {
      "epoch": 1.347305389221557,
      "grad_norm": 0.4339005043697042,
      "learning_rate": 1e-06,
      "loss": 2.1701,
      "step": 113
    },
    {
      "epoch": 1.3592814371257484,
      "grad_norm": 0.41711410890047346,
      "learning_rate": 1e-06,
      "loss": 2.1516,
      "step": 114
    },
    {
      "epoch": 1.3712574850299402,
      "grad_norm": 0.4161851963765622,
      "learning_rate": 1e-06,
      "loss": 2.1695,
      "step": 115
    },
    {
      "epoch": 1.3832335329341316,
      "grad_norm": 0.42232760575717626,
      "learning_rate": 1e-06,
      "loss": 2.1619,
      "step": 116
    },
    {
      "epoch": 1.3952095808383234,
      "grad_norm": 0.4108856947054096,
      "learning_rate": 1e-06,
      "loss": 2.1709,
      "step": 117
    },
    {
      "epoch": 1.407185628742515,
      "grad_norm": 0.4063084876440514,
      "learning_rate": 1e-06,
      "loss": 2.1659,
      "step": 118
    },
    {
      "epoch": 1.4191616766467066,
      "grad_norm": 0.39591659891238995,
      "learning_rate": 1e-06,
      "loss": 2.1682,
      "step": 119
    },
    {
      "epoch": 1.4311377245508983,
      "grad_norm": 0.3858440857290615,
      "learning_rate": 1e-06,
      "loss": 2.1664,
      "step": 120
    },
    {
      "epoch": 1.4431137724550898,
      "grad_norm": 0.39406408386479896,
      "learning_rate": 1e-06,
      "loss": 2.1545,
      "step": 121
    },
    {
      "epoch": 1.4550898203592815,
      "grad_norm": 0.3822517420137481,
      "learning_rate": 1e-06,
      "loss": 2.1645,
      "step": 122
    },
    {
      "epoch": 1.467065868263473,
      "grad_norm": 0.37432990540151057,
      "learning_rate": 1e-06,
      "loss": 2.1533,
      "step": 123
    },
    {
      "epoch": 1.4790419161676647,
      "grad_norm": 0.35952783539679156,
      "learning_rate": 1e-06,
      "loss": 2.1602,
      "step": 124
    },
    {
      "epoch": 1.4910179640718564,
      "grad_norm": 0.349483918037425,
      "learning_rate": 1e-06,
      "loss": 2.1629,
      "step": 125
    },
    {
      "epoch": 1.5029940119760479,
      "grad_norm": 0.3490604286181681,
      "learning_rate": 1e-06,
      "loss": 2.1552,
      "step": 126
    },
    {
      "epoch": 1.5149700598802394,
      "grad_norm": 0.33963339160823175,
      "learning_rate": 1e-06,
      "loss": 2.153,
      "step": 127
    },
    {
      "epoch": 1.5269461077844313,
      "grad_norm": 0.33753223564447027,
      "learning_rate": 1e-06,
      "loss": 2.1568,
      "step": 128
    },
    {
      "epoch": 1.5389221556886228,
      "grad_norm": 0.3277900338924429,
      "learning_rate": 1e-06,
      "loss": 2.1392,
      "step": 129
    },
    {
      "epoch": 1.5508982035928143,
      "grad_norm": 0.32271967472163604,
      "learning_rate": 1e-06,
      "loss": 2.1585,
      "step": 130
    },
    {
      "epoch": 1.562874251497006,
      "grad_norm": 0.32956474924020723,
      "learning_rate": 1e-06,
      "loss": 2.1505,
      "step": 131
    },
    {
      "epoch": 1.5748502994011977,
      "grad_norm": 0.32470897469456195,
      "learning_rate": 1e-06,
      "loss": 2.1517,
      "step": 132
    },
    {
      "epoch": 1.5868263473053892,
      "grad_norm": 0.31917230445024986,
      "learning_rate": 1e-06,
      "loss": 2.1443,
      "step": 133
    },
    {
      "epoch": 1.5988023952095807,
      "grad_norm": 0.31698015491909903,
      "learning_rate": 1e-06,
      "loss": 2.1335,
      "step": 134
    },
    {
      "epoch": 1.6107784431137726,
      "grad_norm": 0.31923463116678646,
      "learning_rate": 1e-06,
      "loss": 2.1442,
      "step": 135
    },
    {
      "epoch": 1.622754491017964,
      "grad_norm": 0.31790389854102113,
      "learning_rate": 1e-06,
      "loss": 2.1452,
      "step": 136
    },
    {
      "epoch": 1.6347305389221556,
      "grad_norm": 0.31099968418112267,
      "learning_rate": 1e-06,
      "loss": 2.1399,
      "step": 137
    },
    {
      "epoch": 1.6467065868263473,
      "grad_norm": 0.30776386838369674,
      "learning_rate": 1e-06,
      "loss": 2.1349,
      "step": 138
    },
    {
      "epoch": 1.658682634730539,
      "grad_norm": 0.3168082274335414,
      "learning_rate": 1e-06,
      "loss": 2.142,
      "step": 139
    },
    {
      "epoch": 1.6706586826347305,
      "grad_norm": 0.3052089279296624,
      "learning_rate": 1e-06,
      "loss": 2.1574,
      "step": 140
    },
    {
      "epoch": 1.6826347305389222,
      "grad_norm": 0.3070848321728449,
      "learning_rate": 1e-06,
      "loss": 2.1463,
      "step": 141
    },
    {
      "epoch": 1.694610778443114,
      "grad_norm": 0.3085315997419007,
      "learning_rate": 1e-06,
      "loss": 2.1392,
      "step": 142
    },
    {
      "epoch": 1.7065868263473054,
      "grad_norm": 0.31067743980701024,
      "learning_rate": 1e-06,
      "loss": 2.1402,
      "step": 143
    },
    {
      "epoch": 1.718562874251497,
      "grad_norm": 0.2989320995928163,
      "learning_rate": 1e-06,
      "loss": 2.1445,
      "step": 144
    },
    {
      "epoch": 1.7305389221556886,
      "grad_norm": 0.2982955647213182,
      "learning_rate": 1e-06,
      "loss": 2.1431,
      "step": 145
    },
    {
      "epoch": 1.7425149700598803,
      "grad_norm": 0.29925296385799993,
      "learning_rate": 1e-06,
      "loss": 2.1283,
      "step": 146
    },
    {
      "epoch": 1.7544910179640718,
      "grad_norm": 0.3015654043734243,
      "learning_rate": 1e-06,
      "loss": 2.1357,
      "step": 147
    },
    {
      "epoch": 1.7664670658682635,
      "grad_norm": 0.30091222118288874,
      "learning_rate": 1e-06,
      "loss": 2.1317,
      "step": 148
    },
    {
      "epoch": 1.7784431137724552,
      "grad_norm": 0.30288018639543524,
      "learning_rate": 1e-06,
      "loss": 2.1289,
      "step": 149
    },
    {
      "epoch": 1.7904191616766467,
      "grad_norm": 0.2940090172709131,
      "learning_rate": 1e-06,
      "loss": 2.135,
      "step": 150
    },
    {
      "epoch": 1.8023952095808382,
      "grad_norm": 0.2943497577009154,
      "learning_rate": 1e-06,
      "loss": 2.141,
      "step": 151
    },
    {
      "epoch": 1.81437125748503,
      "grad_norm": 0.28667775972376125,
      "learning_rate": 1e-06,
      "loss": 2.1407,
      "step": 152
    },
    {
      "epoch": 1.8263473053892216,
      "grad_norm": 0.2927289770957084,
      "learning_rate": 1e-06,
      "loss": 2.1293,
      "step": 153
    },
    {
      "epoch": 1.8383233532934131,
      "grad_norm": 0.2958951815085863,
      "learning_rate": 1e-06,
      "loss": 2.1282,
      "step": 154
    },
    {
      "epoch": 1.8502994011976048,
      "grad_norm": 0.2965499758116299,
      "learning_rate": 1e-06,
      "loss": 2.1287,
      "step": 155
    },
    {
      "epoch": 1.8622754491017965,
      "grad_norm": 0.2920320753533295,
      "learning_rate": 1e-06,
      "loss": 2.1318,
      "step": 156
    },
    {
      "epoch": 1.874251497005988,
      "grad_norm": 0.28603541435698676,
      "learning_rate": 1e-06,
      "loss": 2.1395,
      "step": 157
    },
    {
      "epoch": 1.8862275449101795,
      "grad_norm": 0.2824672679392089,
      "learning_rate": 1e-06,
      "loss": 2.1222,
      "step": 158
    },
    {
      "epoch": 1.8982035928143712,
      "grad_norm": 0.2859965178914711,
      "learning_rate": 1e-06,
      "loss": 2.1325,
      "step": 159
    },
    {
      "epoch": 1.910179640718563,
      "grad_norm": 0.29491238806087866,
      "learning_rate": 1e-06,
      "loss": 2.1308,
      "step": 160
    },
    {
      "epoch": 1.9221556886227544,
      "grad_norm": 0.28099144782715074,
      "learning_rate": 1e-06,
      "loss": 2.125,
      "step": 161
    },
    {
      "epoch": 1.9341317365269461,
      "grad_norm": 0.2847751193649672,
      "learning_rate": 1e-06,
      "loss": 2.1375,
      "step": 162
    },
    {
      "epoch": 1.9461077844311379,
      "grad_norm": 0.28542718365406394,
      "learning_rate": 1e-06,
      "loss": 2.13,
      "step": 163
    },
    {
      "epoch": 1.9580838323353293,
      "grad_norm": 0.28194319978617144,
      "learning_rate": 1e-06,
      "loss": 2.1362,
      "step": 164
    },
    {
      "epoch": 1.9700598802395208,
      "grad_norm": 0.28496688347361787,
      "learning_rate": 1e-06,
      "loss": 2.1214,
      "step": 165
    },
    {
      "epoch": 1.9820359281437125,
      "grad_norm": 0.28383383257004224,
      "learning_rate": 1e-06,
      "loss": 2.1383,
      "step": 166
    },
    {
      "epoch": 1.9820359281437125,
      "step": 166,
      "total_flos": 58170361774080.0,
      "train_loss": 2.2099866565451562,
      "train_runtime": 2944.772,
      "train_samples_per_second": 11.335,
      "train_steps_per_second": 0.056
    }
  ],
  "logging_steps": 1,
  "max_steps": 166,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 58170361774080.0,
  "train_batch_size": 10,
  "trial_name": null,
  "trial_params": null
}