{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 1000,
  "global_step": 19113,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.026160205096007954,
      "grad_norm": 2.7528529167175293,
      "learning_rate": 4.869198974519961e-05,
      "loss": 4.403,
      "step": 500
    },
    {
      "epoch": 0.05232041019201591,
      "grad_norm": 2.5874102115631104,
      "learning_rate": 4.7383979490399206e-05,
      "loss": 2.92,
      "step": 1000
    },
    {
      "epoch": 0.05232041019201591,
      "eval_accuracy": 0.4470485610347474,
      "eval_loss": 2.4565796852111816,
      "eval_runtime": 53.659,
      "eval_samples_per_second": 114.631,
      "eval_steps_per_second": 3.597,
      "step": 1000
    },
    {
      "epoch": 0.07848061528802386,
      "grad_norm": 2.103119134902954,
      "learning_rate": 4.607596923559881e-05,
      "loss": 2.2955,
      "step": 1500
    },
    {
      "epoch": 0.10464082038403182,
      "grad_norm": 2.2540347576141357,
      "learning_rate": 4.476795898079841e-05,
      "loss": 1.9818,
      "step": 2000
    },
    {
      "epoch": 0.10464082038403182,
      "eval_accuracy": 0.5672200738882789,
      "eval_loss": 1.8089067935943604,
      "eval_runtime": 53.7114,
      "eval_samples_per_second": 114.52,
      "eval_steps_per_second": 3.593,
      "step": 2000
    },
    {
      "epoch": 0.13080102548003977,
      "grad_norm": 1.4521691799163818,
      "learning_rate": 4.345994872599801e-05,
      "loss": 1.83,
      "step": 2500
    },
    {
      "epoch": 0.15696123057604772,
      "grad_norm": 1.3930741548538208,
      "learning_rate": 4.2151938471197614e-05,
      "loss": 1.7321,
      "step": 3000
    },
    {
      "epoch": 0.15696123057604772,
      "eval_accuracy": 0.6020537553359386,
      "eval_loss": 1.613312005996704,
      "eval_runtime": 53.6022,
      "eval_samples_per_second": 114.753,
      "eval_steps_per_second": 3.601,
      "step": 3000
    },
    {
      "epoch": 0.18312143567205566,
      "grad_norm": 1.4211032390594482,
      "learning_rate": 4.084392821639722e-05,
      "loss": 1.6624,
      "step": 3500
    },
    {
      "epoch": 0.20928164076806363,
      "grad_norm": 1.275313138961792,
      "learning_rate": 3.953591796159682e-05,
      "loss": 1.6137,
      "step": 4000
    },
    {
      "epoch": 0.20928164076806363,
      "eval_accuracy": 0.6195058763065014,
      "eval_loss": 1.5170681476593018,
      "eval_runtime": 53.3002,
      "eval_samples_per_second": 115.403,
      "eval_steps_per_second": 3.621,
      "step": 4000
    },
    {
      "epoch": 0.23544184586407158,
      "grad_norm": 1.2738176584243774,
      "learning_rate": 3.8227907706796424e-05,
      "loss": 1.5721,
      "step": 4500
    },
    {
      "epoch": 0.26160205096007955,
      "grad_norm": 1.2026641368865967,
      "learning_rate": 3.691989745199603e-05,
      "loss": 1.5353,
      "step": 5000
    },
    {
      "epoch": 0.26160205096007955,
      "eval_accuracy": 0.6311744206133264,
      "eval_loss": 1.4516141414642334,
      "eval_runtime": 53.8002,
      "eval_samples_per_second": 114.33,
      "eval_steps_per_second": 3.587,
      "step": 5000
    },
    {
      "epoch": 0.28776225605608746,
      "grad_norm": 1.1667968034744263,
      "learning_rate": 3.561188719719563e-05,
      "loss": 1.5091,
      "step": 5500
    },
    {
      "epoch": 0.31392246115209543,
      "grad_norm": 1.4095014333724976,
      "learning_rate": 3.430387694239523e-05,
      "loss": 1.4845,
      "step": 6000
    },
    {
      "epoch": 0.31392246115209543,
      "eval_accuracy": 0.6400102153795495,
      "eval_loss": 1.4056029319763184,
      "eval_runtime": 53.6423,
      "eval_samples_per_second": 114.667,
      "eval_steps_per_second": 3.598,
      "step": 6000
    },
    {
      "epoch": 0.3400826662481034,
      "grad_norm": 1.2620683908462524,
      "learning_rate": 3.299586668759483e-05,
      "loss": 1.4606,
      "step": 6500
    },
    {
      "epoch": 0.3662428713441113,
      "grad_norm": 1.2207545042037964,
      "learning_rate": 3.168785643279443e-05,
      "loss": 1.4443,
      "step": 7000
    },
    {
      "epoch": 0.3662428713441113,
      "eval_accuracy": 0.646570593151532,
      "eval_loss": 1.371826171875,
      "eval_runtime": 53.6024,
      "eval_samples_per_second": 114.752,
      "eval_steps_per_second": 3.601,
      "step": 7000
    },
    {
      "epoch": 0.3924030764401193,
      "grad_norm": 1.1445685625076294,
      "learning_rate": 3.0379846177994036e-05,
      "loss": 1.4255,
      "step": 7500
    },
    {
      "epoch": 0.41856328153612726,
      "grad_norm": 1.1666637659072876,
      "learning_rate": 2.907183592319364e-05,
      "loss": 1.4118,
      "step": 8000
    },
    {
      "epoch": 0.41856328153612726,
      "eval_accuracy": 0.6524860734404423,
      "eval_loss": 1.3419677019119263,
      "eval_runtime": 53.4631,
      "eval_samples_per_second": 115.051,
      "eval_steps_per_second": 3.61,
      "step": 8000
    },
    {
      "epoch": 0.4447234866321352,
      "grad_norm": 1.2419095039367676,
      "learning_rate": 2.776382566839324e-05,
      "loss": 1.4015,
      "step": 8500
    },
    {
      "epoch": 0.47088369172814315,
      "grad_norm": 1.1277480125427246,
      "learning_rate": 2.6455815413592845e-05,
      "loss": 1.3878,
      "step": 9000
    },
    {
      "epoch": 0.47088369172814315,
      "eval_accuracy": 0.6569193066064805,
      "eval_loss": 1.3188564777374268,
      "eval_runtime": 53.2918,
      "eval_samples_per_second": 115.421,
      "eval_steps_per_second": 3.622,
      "step": 9000
    },
    {
      "epoch": 0.4970438968241511,
      "grad_norm": 1.0969843864440918,
      "learning_rate": 2.5147805158792447e-05,
      "loss": 1.3773,
      "step": 9500
    },
    {
      "epoch": 0.5232041019201591,
      "grad_norm": 1.1453758478164673,
      "learning_rate": 2.383979490399205e-05,
      "loss": 1.3661,
      "step": 10000
    },
    {
      "epoch": 0.5232041019201591,
      "eval_accuracy": 0.6608493989564993,
      "eval_loss": 1.2987806797027588,
      "eval_runtime": 53.4371,
      "eval_samples_per_second": 115.107,
      "eval_steps_per_second": 3.612,
      "step": 10000
    },
    {
      "epoch": 0.549364307016167,
      "grad_norm": 1.1494508981704712,
      "learning_rate": 2.253178464919165e-05,
      "loss": 1.3563,
      "step": 10500
    },
    {
      "epoch": 0.5755245121121749,
      "grad_norm": 1.1540991067886353,
      "learning_rate": 2.1223774394391253e-05,
      "loss": 1.3485,
      "step": 11000
    },
    {
      "epoch": 0.5755245121121749,
      "eval_accuracy": 0.6638841358556485,
      "eval_loss": 1.2833938598632812,
      "eval_runtime": 53.55,
      "eval_samples_per_second": 114.865,
      "eval_steps_per_second": 3.604,
      "step": 11000
    },
    {
      "epoch": 0.601684717208183,
      "grad_norm": 1.1493678092956543,
      "learning_rate": 1.9915764139590855e-05,
      "loss": 1.3393,
      "step": 11500
    },
    {
      "epoch": 0.6278449223041909,
      "grad_norm": 1.1292744874954224,
      "learning_rate": 1.8607753884790457e-05,
      "loss": 1.3326,
      "step": 12000
    },
    {
      "epoch": 0.6278449223041909,
      "eval_accuracy": 0.6669099732330993,
      "eval_loss": 1.2675042152404785,
      "eval_runtime": 54.1653,
      "eval_samples_per_second": 113.56,
      "eval_steps_per_second": 3.563,
      "step": 12000
    },
    {
      "epoch": 0.6540051274001988,
      "grad_norm": 1.1094636917114258,
      "learning_rate": 1.729974362999006e-05,
      "loss": 1.3249,
      "step": 12500
    },
    {
      "epoch": 0.6801653324962068,
      "grad_norm": 1.1196491718292236,
      "learning_rate": 1.5991733375189664e-05,
      "loss": 1.319,
      "step": 13000
    },
    {
      "epoch": 0.6801653324962068,
      "eval_accuracy": 0.6694385498356529,
      "eval_loss": 1.2554900646209717,
      "eval_runtime": 53.6683,
      "eval_samples_per_second": 114.611,
      "eval_steps_per_second": 3.596,
      "step": 13000
    },
    {
      "epoch": 0.7063255375922147,
      "grad_norm": 1.1508065462112427,
      "learning_rate": 1.4683723120389265e-05,
      "loss": 1.3118,
      "step": 13500
    },
    {
      "epoch": 0.7324857426882226,
      "grad_norm": 1.0860583782196045,
      "learning_rate": 1.3375712865588865e-05,
      "loss": 1.3068,
      "step": 14000
    },
    {
      "epoch": 0.7324857426882226,
      "eval_accuracy": 0.6718871896629512,
      "eval_loss": 1.2440038919448853,
      "eval_runtime": 53.2979,
      "eval_samples_per_second": 115.408,
      "eval_steps_per_second": 3.621,
      "step": 14000
    },
    {
      "epoch": 0.7586459477842307,
      "grad_norm": 1.126051902770996,
      "learning_rate": 1.2067702610788469e-05,
      "loss": 1.3014,
      "step": 14500
    },
    {
      "epoch": 0.7848061528802386,
      "grad_norm": 1.100677490234375,
      "learning_rate": 1.075969235598807e-05,
      "loss": 1.2932,
      "step": 15000
    },
    {
      "epoch": 0.7848061528802386,
      "eval_accuracy": 0.6737417864168825,
      "eval_loss": 1.2349998950958252,
      "eval_runtime": 53.6159,
      "eval_samples_per_second": 114.723,
      "eval_steps_per_second": 3.6,
      "step": 15000
    },
    {
      "epoch": 0.8109663579762465,
      "grad_norm": 1.1026638746261597,
      "learning_rate": 9.451682101187674e-06,
      "loss": 1.2913,
      "step": 15500
    },
    {
      "epoch": 0.8371265630722545,
      "grad_norm": 1.152256727218628,
      "learning_rate": 8.143671846387275e-06,
      "loss": 1.2868,
      "step": 16000
    },
    {
      "epoch": 0.8371265630722545,
      "eval_accuracy": 0.6755100895943456,
      "eval_loss": 1.2262605428695679,
      "eval_runtime": 53.3362,
      "eval_samples_per_second": 115.325,
      "eval_steps_per_second": 3.619,
      "step": 16000
    },
    {
      "epoch": 0.8632867681682624,
      "grad_norm": 1.1048023700714111,
      "learning_rate": 6.835661591586878e-06,
      "loss": 1.2809,
      "step": 16500
    },
    {
      "epoch": 0.8894469732642704,
      "grad_norm": 1.1195833683013916,
      "learning_rate": 5.527651336786481e-06,
      "loss": 1.2791,
      "step": 17000
    },
    {
      "epoch": 0.8894469732642704,
      "eval_accuracy": 0.6771291668633302,
      "eval_loss": 1.219257116317749,
      "eval_runtime": 53.5262,
      "eval_samples_per_second": 114.916,
      "eval_steps_per_second": 3.606,
      "step": 17000
    },
    {
      "epoch": 0.9156071783602784,
      "grad_norm": 1.1175094842910767,
      "learning_rate": 4.219641081986083e-06,
      "loss": 1.2743,
      "step": 17500
    },
    {
      "epoch": 0.9417673834562863,
      "grad_norm": 1.1169252395629883,
      "learning_rate": 2.911630827185685e-06,
      "loss": 1.2725,
      "step": 18000
    },
    {
      "epoch": 0.9417673834562863,
      "eval_accuracy": 0.6780440694779302,
      "eval_loss": 1.214146614074707,
      "eval_runtime": 53.7753,
      "eval_samples_per_second": 114.383,
      "eval_steps_per_second": 3.589,
      "step": 18000
    },
    {
      "epoch": 0.9679275885522942,
      "grad_norm": 1.1183288097381592,
      "learning_rate": 1.6036205723852876e-06,
      "loss": 1.2693,
      "step": 18500
    },
    {
      "epoch": 0.9940877936483022,
      "grad_norm": 1.10493803024292,
      "learning_rate": 2.956103175848899e-07,
      "loss": 1.2711,
      "step": 19000
    },
    {
      "epoch": 0.9940877936483022,
      "eval_accuracy": 0.6788121299845069,
      "eval_loss": 1.2107646465301514,
      "eval_runtime": 53.4355,
      "eval_samples_per_second": 115.111,
      "eval_steps_per_second": 3.612,
      "step": 19000
    },
    {
      "epoch": 1.0,
      "step": 19113,
      "total_flos": 3.19615549046784e+17,
      "train_loss": 1.5581016430754755,
      "train_runtime": 7493.9423,
      "train_samples_per_second": 81.613,
      "train_steps_per_second": 2.55
    }
  ],
  "logging_steps": 500,
  "max_steps": 19113,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.19615549046784e+17,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}