|
{
  "best_metric": 0.25296375155448914,
  "best_model_checkpoint": "miner_id_24/checkpoint-100",
  "epoch": 0.013832693571255663,
  "eval_steps": 50,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00013832693571255662,
      "grad_norm": 0.5414836406707764,
      "learning_rate": 1.5000000000000002e-07,
      "loss": 0.2862,
      "step": 1
    },
    {
      "epoch": 0.00013832693571255662,
      "eval_loss": 0.32557761669158936,
      "eval_runtime": 864.4397,
      "eval_samples_per_second": 14.085,
      "eval_steps_per_second": 1.761,
      "step": 1
    },
    {
      "epoch": 0.00027665387142511324,
      "grad_norm": 0.5004603266716003,
      "learning_rate": 3.0000000000000004e-07,
      "loss": 0.3187,
      "step": 2
    },
    {
      "epoch": 0.00041498080713766987,
      "grad_norm": 0.6289068460464478,
      "learning_rate": 4.5e-07,
      "loss": 0.3039,
      "step": 3
    },
    {
      "epoch": 0.0005533077428502265,
      "grad_norm": 0.5262965559959412,
      "learning_rate": 6.000000000000001e-07,
      "loss": 0.3065,
      "step": 4
    },
    {
      "epoch": 0.0006916346785627831,
      "grad_norm": 0.6534644365310669,
      "learning_rate": 7.5e-07,
      "loss": 0.3509,
      "step": 5
    },
    {
      "epoch": 0.0008299616142753397,
      "grad_norm": 0.7118496894836426,
      "learning_rate": 9e-07,
      "loss": 0.3991,
      "step": 6
    },
    {
      "epoch": 0.0009682885499878964,
      "grad_norm": 0.4656999707221985,
      "learning_rate": 1.0500000000000001e-06,
      "loss": 0.2937,
      "step": 7
    },
    {
      "epoch": 0.001106615485700453,
      "grad_norm": 0.451833575963974,
      "learning_rate": 1.2000000000000002e-06,
      "loss": 0.2822,
      "step": 8
    },
    {
      "epoch": 0.0012449424214130097,
      "grad_norm": 0.5716612935066223,
      "learning_rate": 1.35e-06,
      "loss": 0.3687,
      "step": 9
    },
    {
      "epoch": 0.0013832693571255662,
      "grad_norm": 0.5744236707687378,
      "learning_rate": 1.5e-06,
      "loss": 0.3069,
      "step": 10
    },
    {
      "epoch": 0.001521596292838123,
      "grad_norm": 0.6165915727615356,
      "learning_rate": 1.65e-06,
      "loss": 0.4473,
      "step": 11
    },
    {
      "epoch": 0.0016599232285506795,
      "grad_norm": 0.5897412300109863,
      "learning_rate": 1.8e-06,
      "loss": 0.304,
      "step": 12
    },
    {
      "epoch": 0.0017982501642632362,
      "grad_norm": 0.5174123048782349,
      "learning_rate": 1.95e-06,
      "loss": 0.3018,
      "step": 13
    },
    {
      "epoch": 0.0019365770999757927,
      "grad_norm": 0.5476775765419006,
      "learning_rate": 2.1000000000000002e-06,
      "loss": 0.411,
      "step": 14
    },
    {
      "epoch": 0.0020749040356883492,
      "grad_norm": 0.5217826962471008,
      "learning_rate": 2.25e-06,
      "loss": 0.2919,
      "step": 15
    },
    {
      "epoch": 0.002213230971400906,
      "grad_norm": 0.450664758682251,
      "learning_rate": 2.4000000000000003e-06,
      "loss": 0.292,
      "step": 16
    },
    {
      "epoch": 0.0023515579071134627,
      "grad_norm": 0.47869348526000977,
      "learning_rate": 2.55e-06,
      "loss": 0.3593,
      "step": 17
    },
    {
      "epoch": 0.0024898848428260194,
      "grad_norm": 0.5078862905502319,
      "learning_rate": 2.7e-06,
      "loss": 0.3426,
      "step": 18
    },
    {
      "epoch": 0.0026282117785385757,
      "grad_norm": 0.5059868693351746,
      "learning_rate": 2.8500000000000002e-06,
      "loss": 0.302,
      "step": 19
    },
    {
      "epoch": 0.0027665387142511324,
      "grad_norm": 0.38030219078063965,
      "learning_rate": 3e-06,
      "loss": 0.2697,
      "step": 20
    },
    {
      "epoch": 0.002904865649963689,
      "grad_norm": 0.5299117565155029,
      "learning_rate": 3.15e-06,
      "loss": 0.309,
      "step": 21
    },
    {
      "epoch": 0.003043192585676246,
      "grad_norm": 0.4995821416378021,
      "learning_rate": 3.3e-06,
      "loss": 0.3418,
      "step": 22
    },
    {
      "epoch": 0.0031815195213888026,
      "grad_norm": 0.4045211970806122,
      "learning_rate": 3.4500000000000004e-06,
      "loss": 0.3161,
      "step": 23
    },
    {
      "epoch": 0.003319846457101359,
      "grad_norm": 0.4190620183944702,
      "learning_rate": 3.6e-06,
      "loss": 0.3402,
      "step": 24
    },
    {
      "epoch": 0.0034581733928139157,
      "grad_norm": 0.40234580636024475,
      "learning_rate": 3.75e-06,
      "loss": 0.3907,
      "step": 25
    },
    {
      "epoch": 0.0035965003285264724,
      "grad_norm": 0.4151726961135864,
      "learning_rate": 3.9e-06,
      "loss": 0.3512,
      "step": 26
    },
    {
      "epoch": 0.003734827264239029,
      "grad_norm": 0.4049225449562073,
      "learning_rate": 4.05e-06,
      "loss": 0.316,
      "step": 27
    },
    {
      "epoch": 0.0038731541999515854,
      "grad_norm": 0.4368723928928375,
      "learning_rate": 4.2000000000000004e-06,
      "loss": 0.2541,
      "step": 28
    },
    {
      "epoch": 0.004011481135664142,
      "grad_norm": 0.44128212332725525,
      "learning_rate": 4.35e-06,
      "loss": 0.3171,
      "step": 29
    },
    {
      "epoch": 0.0041498080713766984,
      "grad_norm": 0.32956743240356445,
      "learning_rate": 4.5e-06,
      "loss": 0.3075,
      "step": 30
    },
    {
      "epoch": 0.004288135007089256,
      "grad_norm": 0.3135199248790741,
      "learning_rate": 4.65e-06,
      "loss": 0.3135,
      "step": 31
    },
    {
      "epoch": 0.004426461942801812,
      "grad_norm": 0.263225793838501,
      "learning_rate": 4.800000000000001e-06,
      "loss": 0.2816,
      "step": 32
    },
    {
      "epoch": 0.004564788878514369,
      "grad_norm": 0.2696998417377472,
      "learning_rate": 4.95e-06,
      "loss": 0.2978,
      "step": 33
    },
    {
      "epoch": 0.004703115814226925,
      "grad_norm": 0.22326156497001648,
      "learning_rate": 5.1e-06,
      "loss": 0.2716,
      "step": 34
    },
    {
      "epoch": 0.004841442749939482,
      "grad_norm": 0.2183511108160019,
      "learning_rate": 5.25e-06,
      "loss": 0.267,
      "step": 35
    },
    {
      "epoch": 0.004979769685652039,
      "grad_norm": 0.23443461954593658,
      "learning_rate": 5.4e-06,
      "loss": 0.306,
      "step": 36
    },
    {
      "epoch": 0.005118096621364595,
      "grad_norm": 0.2592571973800659,
      "learning_rate": 5.55e-06,
      "loss": 0.3548,
      "step": 37
    },
    {
      "epoch": 0.005256423557077151,
      "grad_norm": 0.2267267256975174,
      "learning_rate": 5.7000000000000005e-06,
      "loss": 0.3722,
      "step": 38
    },
    {
      "epoch": 0.005394750492789709,
      "grad_norm": 0.20199447870254517,
      "learning_rate": 5.850000000000001e-06,
      "loss": 0.2761,
      "step": 39
    },
    {
      "epoch": 0.005533077428502265,
      "grad_norm": 0.20377960801124573,
      "learning_rate": 6e-06,
      "loss": 0.2585,
      "step": 40
    },
    {
      "epoch": 0.005671404364214822,
      "grad_norm": 0.19677956402301788,
      "learning_rate": 6.1499999999999996e-06,
      "loss": 0.3644,
      "step": 41
    },
    {
      "epoch": 0.005809731299927378,
      "grad_norm": 0.18353554606437683,
      "learning_rate": 6.3e-06,
      "loss": 0.3253,
      "step": 42
    },
    {
      "epoch": 0.005948058235639935,
      "grad_norm": 0.18772590160369873,
      "learning_rate": 6.45e-06,
      "loss": 0.2842,
      "step": 43
    },
    {
      "epoch": 0.006086385171352492,
      "grad_norm": 0.18426308035850525,
      "learning_rate": 6.6e-06,
      "loss": 0.3357,
      "step": 44
    },
    {
      "epoch": 0.006224712107065048,
      "grad_norm": 0.18350034952163696,
      "learning_rate": 6.750000000000001e-06,
      "loss": 0.2412,
      "step": 45
    },
    {
      "epoch": 0.006363039042777605,
      "grad_norm": 0.18429340422153473,
      "learning_rate": 6.900000000000001e-06,
      "loss": 0.2598,
      "step": 46
    },
    {
      "epoch": 0.0065013659784901616,
      "grad_norm": 0.16520313918590546,
      "learning_rate": 7.049999999999999e-06,
      "loss": 0.2797,
      "step": 47
    },
    {
      "epoch": 0.006639692914202718,
      "grad_norm": 0.1929628700017929,
      "learning_rate": 7.2e-06,
      "loss": 0.3005,
      "step": 48
    },
    {
      "epoch": 0.006778019849915275,
      "grad_norm": 0.1845887005329132,
      "learning_rate": 7.35e-06,
      "loss": 0.2614,
      "step": 49
    },
    {
      "epoch": 0.006916346785627831,
      "grad_norm": 0.1492261290550232,
      "learning_rate": 7.5e-06,
      "loss": 0.2698,
      "step": 50
    },
    {
      "epoch": 0.006916346785627831,
      "eval_loss": 0.2843204140663147,
      "eval_runtime": 886.4909,
      "eval_samples_per_second": 13.735,
      "eval_steps_per_second": 1.717,
      "step": 50
    },
    {
      "epoch": 0.007054673721340388,
      "grad_norm": 0.17172540724277496,
      "learning_rate": 7.65e-06,
      "loss": 0.2737,
      "step": 51
    },
    {
      "epoch": 0.007193000657052945,
      "grad_norm": 0.18842163681983948,
      "learning_rate": 7.8e-06,
      "loss": 0.2943,
      "step": 52
    },
    {
      "epoch": 0.007331327592765501,
      "grad_norm": 0.18067596852779388,
      "learning_rate": 7.95e-06,
      "loss": 0.308,
      "step": 53
    },
    {
      "epoch": 0.007469654528478058,
      "grad_norm": 0.16696469485759735,
      "learning_rate": 8.1e-06,
      "loss": 0.2641,
      "step": 54
    },
    {
      "epoch": 0.0076079814641906145,
      "grad_norm": 0.15525111556053162,
      "learning_rate": 8.25e-06,
      "loss": 0.2417,
      "step": 55
    },
    {
      "epoch": 0.007746308399903171,
      "grad_norm": 0.17325535416603088,
      "learning_rate": 8.400000000000001e-06,
      "loss": 0.3275,
      "step": 56
    },
    {
      "epoch": 0.007884635335615727,
      "grad_norm": 0.1737009733915329,
      "learning_rate": 8.55e-06,
      "loss": 0.3544,
      "step": 57
    },
    {
      "epoch": 0.008022962271328284,
      "grad_norm": 0.16440212726593018,
      "learning_rate": 8.7e-06,
      "loss": 0.3049,
      "step": 58
    },
    {
      "epoch": 0.008161289207040841,
      "grad_norm": 0.15149055421352386,
      "learning_rate": 8.85e-06,
      "loss": 0.2669,
      "step": 59
    },
    {
      "epoch": 0.008299616142753397,
      "grad_norm": 0.15021531283855438,
      "learning_rate": 9e-06,
      "loss": 0.2852,
      "step": 60
    },
    {
      "epoch": 0.008437943078465954,
      "grad_norm": 0.14351071417331696,
      "learning_rate": 9.15e-06,
      "loss": 0.2018,
      "step": 61
    },
    {
      "epoch": 0.008576270014178511,
      "grad_norm": 0.12986423075199127,
      "learning_rate": 9.3e-06,
      "loss": 0.2416,
      "step": 62
    },
    {
      "epoch": 0.008714596949891068,
      "grad_norm": 0.1482655256986618,
      "learning_rate": 9.450000000000001e-06,
      "loss": 0.2492,
      "step": 63
    },
    {
      "epoch": 0.008852923885603624,
      "grad_norm": 0.14514705538749695,
      "learning_rate": 9.600000000000001e-06,
      "loss": 0.2925,
      "step": 64
    },
    {
      "epoch": 0.008991250821316181,
      "grad_norm": 0.15891727805137634,
      "learning_rate": 9.75e-06,
      "loss": 0.234,
      "step": 65
    },
    {
      "epoch": 0.009129577757028738,
      "grad_norm": 0.12783700227737427,
      "learning_rate": 9.9e-06,
      "loss": 0.2144,
      "step": 66
    },
    {
      "epoch": 0.009267904692741294,
      "grad_norm": 0.11698262393474579,
      "learning_rate": 1.005e-05,
      "loss": 0.2107,
      "step": 67
    },
    {
      "epoch": 0.00940623162845385,
      "grad_norm": 0.14814496040344238,
      "learning_rate": 1.02e-05,
      "loss": 0.296,
      "step": 68
    },
    {
      "epoch": 0.009544558564166408,
      "grad_norm": 0.1301187127828598,
      "learning_rate": 1.035e-05,
      "loss": 0.1869,
      "step": 69
    },
    {
      "epoch": 0.009682885499878963,
      "grad_norm": 0.17036621272563934,
      "learning_rate": 1.05e-05,
      "loss": 0.3304,
      "step": 70
    },
    {
      "epoch": 0.00982121243559152,
      "grad_norm": 0.12910433113574982,
      "learning_rate": 1.065e-05,
      "loss": 0.2495,
      "step": 71
    },
    {
      "epoch": 0.009959539371304078,
      "grad_norm": 0.14236624538898468,
      "learning_rate": 1.08e-05,
      "loss": 0.2275,
      "step": 72
    },
    {
      "epoch": 0.010097866307016633,
      "grad_norm": 0.15861648321151733,
      "learning_rate": 1.095e-05,
      "loss": 0.3031,
      "step": 73
    },
    {
      "epoch": 0.01023619324272919,
      "grad_norm": 0.1290421336889267,
      "learning_rate": 1.11e-05,
      "loss": 0.2598,
      "step": 74
    },
    {
      "epoch": 0.010374520178441747,
      "grad_norm": 0.13748377561569214,
      "learning_rate": 1.125e-05,
      "loss": 0.2692,
      "step": 75
    },
    {
      "epoch": 0.010512847114154303,
      "grad_norm": 0.1323186159133911,
      "learning_rate": 1.1400000000000001e-05,
      "loss": 0.2333,
      "step": 76
    },
    {
      "epoch": 0.01065117404986686,
      "grad_norm": 0.14683866500854492,
      "learning_rate": 1.1550000000000001e-05,
      "loss": 0.2783,
      "step": 77
    },
    {
      "epoch": 0.010789500985579417,
      "grad_norm": 0.14012585580348969,
      "learning_rate": 1.1700000000000001e-05,
      "loss": 0.3069,
      "step": 78
    },
    {
      "epoch": 0.010927827921291974,
      "grad_norm": 0.14110729098320007,
      "learning_rate": 1.185e-05,
      "loss": 0.3,
      "step": 79
    },
    {
      "epoch": 0.01106615485700453,
      "grad_norm": 0.15559019148349762,
      "learning_rate": 1.2e-05,
      "loss": 0.3066,
      "step": 80
    },
    {
      "epoch": 0.011204481792717087,
      "grad_norm": 0.11998707801103592,
      "learning_rate": 1.215e-05,
      "loss": 0.2358,
      "step": 81
    },
    {
      "epoch": 0.011342808728429644,
      "grad_norm": 0.1280561089515686,
      "learning_rate": 1.2299999999999999e-05,
      "loss": 0.2721,
      "step": 82
    },
    {
      "epoch": 0.0114811356641422,
      "grad_norm": 0.14386573433876038,
      "learning_rate": 1.245e-05,
      "loss": 0.2976,
      "step": 83
    },
    {
      "epoch": 0.011619462599854757,
      "grad_norm": 0.1161927729845047,
      "learning_rate": 1.26e-05,
      "loss": 0.2059,
      "step": 84
    },
    {
      "epoch": 0.011757789535567314,
      "grad_norm": 0.1201709434390068,
      "learning_rate": 1.275e-05,
      "loss": 0.2446,
      "step": 85
    },
    {
      "epoch": 0.01189611647127987,
      "grad_norm": 0.1458764672279358,
      "learning_rate": 1.29e-05,
      "loss": 0.2376,
      "step": 86
    },
    {
      "epoch": 0.012034443406992426,
      "grad_norm": 0.13553838431835175,
      "learning_rate": 1.305e-05,
      "loss": 0.2875,
      "step": 87
    },
    {
      "epoch": 0.012172770342704984,
      "grad_norm": 0.13249175250530243,
      "learning_rate": 1.32e-05,
      "loss": 0.2974,
      "step": 88
    },
    {
      "epoch": 0.012311097278417539,
      "grad_norm": 0.15065519511699677,
      "learning_rate": 1.3350000000000001e-05,
      "loss": 0.2945,
      "step": 89
    },
    {
      "epoch": 0.012449424214130096,
      "grad_norm": 0.1529104858636856,
      "learning_rate": 1.3500000000000001e-05,
      "loss": 0.2972,
      "step": 90
    },
    {
      "epoch": 0.012587751149842653,
      "grad_norm": 0.1217237114906311,
      "learning_rate": 1.3650000000000001e-05,
      "loss": 0.2322,
      "step": 91
    },
    {
      "epoch": 0.01272607808555521,
      "grad_norm": 0.13060763478279114,
      "learning_rate": 1.3800000000000002e-05,
      "loss": 0.2931,
      "step": 92
    },
    {
      "epoch": 0.012864405021267766,
      "grad_norm": 0.1278468370437622,
      "learning_rate": 1.395e-05,
      "loss": 0.245,
      "step": 93
    },
    {
      "epoch": 0.013002731956980323,
      "grad_norm": 0.11934634298086166,
      "learning_rate": 1.4099999999999999e-05,
      "loss": 0.2238,
      "step": 94
    },
    {
      "epoch": 0.01314105889269288,
      "grad_norm": 0.13119642436504364,
      "learning_rate": 1.4249999999999999e-05,
      "loss": 0.2394,
      "step": 95
    },
    {
      "epoch": 0.013279385828405436,
      "grad_norm": 0.13287314772605896,
      "learning_rate": 1.44e-05,
      "loss": 0.2134,
      "step": 96
    },
    {
      "epoch": 0.013417712764117993,
      "grad_norm": 0.13096174597740173,
      "learning_rate": 1.455e-05,
      "loss": 0.2449,
      "step": 97
    },
    {
      "epoch": 0.01355603969983055,
      "grad_norm": 0.1295308768749237,
      "learning_rate": 1.47e-05,
      "loss": 0.2517,
      "step": 98
    },
    {
      "epoch": 0.013694366635543105,
      "grad_norm": 0.1313454955816269,
      "learning_rate": 1.485e-05,
      "loss": 0.2596,
      "step": 99
    },
    {
      "epoch": 0.013832693571255663,
      "grad_norm": 0.12004859000444412,
      "learning_rate": 1.5e-05,
      "loss": 0.2258,
      "step": 100
    },
    {
      "epoch": 0.013832693571255663,
      "eval_loss": 0.25296375155448914,
      "eval_runtime": 873.1576,
      "eval_samples_per_second": 13.945,
      "eval_steps_per_second": 1.743,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 2,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.0372935956955136e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
|
|