{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.999983800508664,
  "eval_steps": 500,
  "global_step": 46297,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 0.0025,
      "loss": 6.1249,
      "step": 500
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.005,
      "loss": 4.8254,
      "step": 1000
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.004944808706978387,
      "loss": 4.2925,
      "step": 1500
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.004889617413956775,
      "loss": 4.0454,
      "step": 2000
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.004834426120935161,
      "loss": 3.906,
      "step": 2500
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.004779234827913549,
      "loss": 3.8162,
      "step": 3000
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.004724043534891935,
      "loss": 3.7444,
      "step": 3500
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.004668852241870323,
      "loss": 3.696,
      "step": 4000
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00461366094884871,
      "loss": 3.651,
      "step": 4500
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.0045584696558270965,
      "loss": 3.6204,
      "step": 5000
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.004503278362805484,
      "loss": 3.5844,
      "step": 5500
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.004448087069783871,
      "loss": 3.5632,
      "step": 6000
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.004392895776762258,
      "loss": 3.5396,
      "step": 6500
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.004337704483740645,
      "loss": 3.5185,
      "step": 7000
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.0042825131907190325,
      "loss": 3.4975,
      "step": 7500
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.004227321897697419,
      "loss": 3.4855,
      "step": 8000
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.004172130604675806,
      "loss": 3.4744,
      "step": 8500
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.004116939311654194,
      "loss": 3.4505,
      "step": 9000
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00406174801863258,
      "loss": 3.4417,
      "step": 9500
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.004006556725610968,
      "loss": 3.4298,
      "step": 10000
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.003951365432589355,
      "loss": 3.4162,
      "step": 10500
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.003896174139567742,
      "loss": 3.4018,
      "step": 11000
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.003840982846546129,
      "loss": 3.3938,
      "step": 11500
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.0037857915535245163,
      "loss": 3.3894,
      "step": 12000
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.0037306002605029033,
      "loss": 3.3755,
      "step": 12500
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.00367540896748129,
      "loss": 3.3671,
      "step": 13000
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.003620217674459677,
      "loss": 3.3617,
      "step": 13500
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.0035650263814380645,
      "loss": 3.356,
      "step": 14000
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.0035098350884164515,
      "loss": 3.3497,
      "step": 14500
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.0034546437953948384,
      "loss": 3.3382,
      "step": 15000
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.0033994525023732258,
      "loss": 3.3315,
      "step": 15500
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.0033442612093516127,
      "loss": 3.3179,
      "step": 16000
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.0032890699163299997,
      "loss": 3.317,
      "step": 16500
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.0032338786233083866,
      "loss": 3.3162,
      "step": 17000
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.003178687330286774,
      "loss": 3.3065,
      "step": 17500
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.003123496037265161,
      "loss": 3.3011,
      "step": 18000
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.003068304744243548,
      "loss": 3.2966,
      "step": 18500
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.0030131134512219357,
      "loss": 3.2864,
      "step": 19000
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.0029579221582003226,
      "loss": 3.2818,
      "step": 19500
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.002902730865178709,
      "loss": 3.2774,
      "step": 20000
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.002847539572157097,
      "loss": 3.268,
      "step": 20500
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.002792348279135484,
      "loss": 3.2678,
      "step": 21000
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.002737156986113871,
      "loss": 3.2658,
      "step": 21500
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.0026819656930922578,
      "loss": 3.2574,
      "step": 22000
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.002626774400070645,
      "loss": 3.251,
      "step": 22500
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.002571583107049032,
      "loss": 3.2492,
      "step": 23000
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.002516391814027419,
      "loss": 3.2418,
      "step": 23500
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.002461200521005806,
      "loss": 3.2417,
      "step": 24000
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.0024060092279841933,
      "loss": 3.2284,
      "step": 24500
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.0023508179349625803,
      "loss": 3.2319,
      "step": 25000
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.0022956266419409672,
      "loss": 3.2256,
      "step": 25500
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.0022404353489193546,
      "loss": 3.2174,
      "step": 26000
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.0021852440558977415,
      "loss": 3.2171,
      "step": 26500
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.002130052762876129,
      "loss": 3.2085,
      "step": 27000
    },
    {
      "epoch": 0.59,
      "learning_rate": 0.002074861469854516,
      "loss": 3.2026,
      "step": 27500
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.002019670176832903,
      "loss": 3.1985,
      "step": 28000
    },
    {
      "epoch": 0.62,
      "learning_rate": 0.00196447888381129,
      "loss": 3.1999,
      "step": 28500
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.001909287590789677,
      "loss": 3.1934,
      "step": 29000
    },
    {
      "epoch": 0.64,
      "learning_rate": 0.0018540962977680643,
      "loss": 3.1862,
      "step": 29500
    },
    {
      "epoch": 0.65,
      "learning_rate": 0.0017989050047464512,
      "loss": 3.181,
      "step": 30000
    },
    {
      "epoch": 0.66,
      "learning_rate": 0.0017437137117248384,
      "loss": 3.1773,
      "step": 30500
    },
    {
      "epoch": 0.67,
      "learning_rate": 0.0016885224187032255,
      "loss": 3.1727,
      "step": 31000
    },
    {
      "epoch": 0.68,
      "learning_rate": 0.0016333311256816125,
      "loss": 3.1697,
      "step": 31500
    },
    {
      "epoch": 0.69,
      "learning_rate": 0.0015781398326599996,
      "loss": 3.1615,
      "step": 32000
    },
    {
      "epoch": 0.7,
      "learning_rate": 0.0015229485396383866,
      "loss": 3.1629,
      "step": 32500
    },
    {
      "epoch": 0.71,
      "learning_rate": 0.0014677572466167737,
      "loss": 3.1587,
      "step": 33000
    },
    {
      "epoch": 0.72,
      "learning_rate": 0.001412565953595161,
      "loss": 3.1499,
      "step": 33500
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.0013573746605735479,
      "loss": 3.147,
      "step": 34000
    },
    {
      "epoch": 0.75,
      "learning_rate": 0.001302183367551935,
      "loss": 3.1447,
      "step": 34500
    },
    {
      "epoch": 0.76,
      "learning_rate": 0.0012469920745303222,
      "loss": 3.1374,
      "step": 35000
    },
    {
      "epoch": 0.77,
      "learning_rate": 0.0011918007815087093,
      "loss": 3.1397,
      "step": 35500
    },
    {
      "epoch": 0.78,
      "learning_rate": 0.0011366094884870963,
      "loss": 3.1281,
      "step": 36000
    },
    {
      "epoch": 0.79,
      "learning_rate": 0.0010814181954654834,
      "loss": 3.1262,
      "step": 36500
    },
    {
      "epoch": 0.8,
      "learning_rate": 0.0010262269024438704,
      "loss": 3.1198,
      "step": 37000
    },
    {
      "epoch": 0.81,
      "learning_rate": 0.0009710356094222576,
      "loss": 3.1126,
      "step": 37500
    },
    {
      "epoch": 0.82,
      "learning_rate": 0.0009158443164006447,
      "loss": 3.1091,
      "step": 38000
    },
    {
      "epoch": 0.83,
      "learning_rate": 0.0008606530233790317,
      "loss": 3.1086,
      "step": 38500
    },
    {
      "epoch": 0.84,
      "learning_rate": 0.0008054617303574188,
      "loss": 3.1048,
      "step": 39000
    },
    {
      "epoch": 0.85,
      "learning_rate": 0.0007502704373358058,
      "loss": 3.0977,
      "step": 39500
    },
    {
      "epoch": 0.86,
      "learning_rate": 0.0006950791443141929,
      "loss": 3.0983,
      "step": 40000
    },
    {
      "epoch": 0.87,
      "learning_rate": 0.0006398878512925802,
      "loss": 3.0903,
      "step": 40500
    },
    {
      "epoch": 0.89,
      "learning_rate": 0.0005846965582709672,
      "loss": 3.0851,
      "step": 41000
    },
    {
      "epoch": 0.9,
      "learning_rate": 0.0005295052652493543,
      "loss": 3.0847,
      "step": 41500
    },
    {
      "epoch": 0.91,
      "learning_rate": 0.0004743139722277413,
      "loss": 3.0722,
      "step": 42000
    },
    {
      "epoch": 0.92,
      "learning_rate": 0.0004191226792061285,
      "loss": 3.0718,
      "step": 42500
    },
    {
      "epoch": 0.93,
      "learning_rate": 0.00036393138618451553,
      "loss": 3.0705,
      "step": 43000
    },
    {
      "epoch": 0.94,
      "learning_rate": 0.00030874009316290263,
      "loss": 3.0653,
      "step": 43500
    },
    {
      "epoch": 0.95,
      "learning_rate": 0.00025354880014128974,
      "loss": 3.064,
      "step": 44000
    },
    {
      "epoch": 0.96,
      "learning_rate": 0.0001983575071196768,
      "loss": 3.0514,
      "step": 44500
    },
    {
      "epoch": 0.97,
      "learning_rate": 0.0001431662140980639,
      "loss": 3.0493,
      "step": 45000
    },
    {
      "epoch": 0.98,
      "learning_rate": 8.797492107645099e-05,
      "loss": 3.0482,
      "step": 45500
    },
    {
      "epoch": 0.99,
      "learning_rate": 3.278362805483807e-05,
      "loss": 3.051,
      "step": 46000
    },
    {
      "epoch": 1.0,
      "step": 46297,
      "total_flos": 1.5484207870676828e+18,
      "train_loss": 3.3410520185483805,
      "train_runtime": 176422.7402,
      "train_samples_per_second": 16.795,
      "train_steps_per_second": 0.262
    }
  ],
  "logging_steps": 500,
  "max_steps": 46297,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 5000,
  "total_flos": 1.5484207870676828e+18,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}