{
"best_global_step": 800,
"best_metric": 9.927443504333496,
"best_model_checkpoint": "./models/v-004/checkpoint-800",
"epoch": 64.52173913043478,
"eval_steps": 100,
"global_step": 2000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.6521739130434783,
"grad_norm": 0.830869197845459,
"learning_rate": 3.166666666666667e-06,
"loss": 10.3478,
"step": 20
},
{
"epoch": 1.2934782608695652,
"grad_norm": 0.9053573608398438,
"learning_rate": 6.5000000000000004e-06,
"loss": 10.3371,
"step": 40
},
{
"epoch": 1.9456521739130435,
"grad_norm": 0.9227997660636902,
"learning_rate": 9.833333333333333e-06,
"loss": 10.3133,
"step": 60
},
{
"epoch": 2.5869565217391304,
"grad_norm": 0.8464251160621643,
"learning_rate": 1.3166666666666665e-05,
"loss": 10.2729,
"step": 80
},
{
"epoch": 3.2282608695652173,
"grad_norm": 0.6436305642127991,
"learning_rate": 1.65e-05,
"loss": 10.2092,
"step": 100
},
{
"epoch": 3.2282608695652173,
"eval_accuracy": 0.0004036802852141104,
"eval_loss": 10.276432037353516,
"eval_runtime": 23.4172,
"eval_samples_per_second": 12.896,
"eval_steps_per_second": 0.811,
"step": 100
},
{
"epoch": 3.880434782608696,
"grad_norm": 0.5253298282623291,
"learning_rate": 1.9833333333333335e-05,
"loss": 10.1241,
"step": 120
},
{
"epoch": 4.521739130434782,
"grad_norm": 0.5082888603210449,
"learning_rate": 2.3166666666666666e-05,
"loss": 10.0315,
"step": 140
},
{
"epoch": 5.163043478260869,
"grad_norm": 0.4747254252433777,
"learning_rate": 2.6500000000000004e-05,
"loss": 9.9378,
"step": 160
},
{
"epoch": 5.815217391304348,
"grad_norm": 0.45188403129577637,
"learning_rate": 2.9833333333333335e-05,
"loss": 9.8383,
"step": 180
},
{
"epoch": 6.456521739130435,
"grad_norm": 0.4596181809902191,
"learning_rate": 3.316666666666667e-05,
"loss": 9.7373,
"step": 200
},
{
"epoch": 6.456521739130435,
"eval_accuracy": 0.0019784330809998482,
"eval_loss": 10.077253341674805,
"eval_runtime": 49.8628,
"eval_samples_per_second": 6.057,
"eval_steps_per_second": 0.381,
"step": 200
},
{
"epoch": 7.0978260869565215,
"grad_norm": 0.43448299169540405,
"learning_rate": 3.65e-05,
"loss": 9.6192,
"step": 220
},
{
"epoch": 7.75,
"grad_norm": 0.4260006546974182,
"learning_rate": 3.983333333333333e-05,
"loss": 9.5207,
"step": 240
},
{
"epoch": 8.391304347826088,
"grad_norm": 0.4230858385562897,
"learning_rate": 4.316666666666667e-05,
"loss": 9.409,
"step": 260
},
{
"epoch": 9.032608695652174,
"grad_norm": 0.6662802696228027,
"learning_rate": 4.6500000000000005e-05,
"loss": 9.3162,
"step": 280
},
{
"epoch": 9.684782608695652,
"grad_norm": 0.46965736150741577,
"learning_rate": 4.9833333333333336e-05,
"loss": 9.2158,
"step": 300
},
{
"epoch": 9.684782608695652,
"eval_accuracy": 0.0023421450211432546,
"eval_loss": 9.940443992614746,
"eval_runtime": 33.824,
"eval_samples_per_second": 8.929,
"eval_steps_per_second": 0.562,
"step": 300
},
{
"epoch": 10.326086956521738,
"grad_norm": 0.6454819440841675,
"learning_rate": 5.316666666666667e-05,
"loss": 9.1422,
"step": 320
},
{
"epoch": 10.978260869565217,
"grad_norm": 1.104384183883667,
"learning_rate": 5.65e-05,
"loss": 9.0368,
"step": 340
},
{
"epoch": 11.619565217391305,
"grad_norm": 0.6691080927848816,
"learning_rate": 5.983333333333334e-05,
"loss": 8.9517,
"step": 360
},
{
"epoch": 12.26086956521739,
"grad_norm": 0.669059157371521,
"learning_rate": 6.316666666666668e-05,
"loss": 8.8888,
"step": 380
},
{
"epoch": 12.91304347826087,
"grad_norm": 0.8564794659614563,
"learning_rate": 6.65e-05,
"loss": 8.8101,
"step": 400
},
{
"epoch": 12.91304347826087,
"eval_accuracy": 0.002657894947201816,
"eval_loss": 9.944500923156738,
"eval_runtime": 30.6925,
"eval_samples_per_second": 9.84,
"eval_steps_per_second": 0.619,
"step": 400
},
{
"epoch": 13.554347826086957,
"grad_norm": 0.9737327098846436,
"learning_rate": 6.983333333333334e-05,
"loss": 8.7212,
"step": 420
},
{
"epoch": 14.195652173913043,
"grad_norm": 0.8952248096466064,
"learning_rate": 7.316666666666668e-05,
"loss": 8.6471,
"step": 440
},
{
"epoch": 14.847826086956522,
"grad_norm": 1.1972002983093262,
"learning_rate": 7.65e-05,
"loss": 8.5526,
"step": 460
},
{
"epoch": 15.48913043478261,
"grad_norm": 1.041368842124939,
"learning_rate": 7.983333333333334e-05,
"loss": 8.4403,
"step": 480
},
{
"epoch": 16.130434782608695,
"grad_norm": 1.307642936706543,
"learning_rate": 8.316666666666666e-05,
"loss": 8.3439,
"step": 500
},
{
"epoch": 16.130434782608695,
"eval_accuracy": 0.0016307084788847234,
"eval_loss": 9.954625129699707,
"eval_runtime": 28.708,
"eval_samples_per_second": 10.52,
"eval_steps_per_second": 0.662,
"step": 500
},
{
"epoch": 16.782608695652176,
"grad_norm": 1.4131497144699097,
"learning_rate": 8.65e-05,
"loss": 8.2149,
"step": 520
},
{
"epoch": 17.42391304347826,
"grad_norm": 1.2959651947021484,
"learning_rate": 8.983333333333334e-05,
"loss": 8.1152,
"step": 540
},
{
"epoch": 18.065217391304348,
"grad_norm": 1.1717864274978638,
"learning_rate": 9.316666666666666e-05,
"loss": 7.9669,
"step": 560
},
{
"epoch": 18.717391304347824,
"grad_norm": 1.3156694173812866,
"learning_rate": 9.65e-05,
"loss": 7.8164,
"step": 580
},
{
"epoch": 19.358695652173914,
"grad_norm": 1.3559956550598145,
"learning_rate": 9.983333333333334e-05,
"loss": 7.7001,
"step": 600
},
{
"epoch": 19.358695652173914,
"eval_accuracy": 0.0012270281936706128,
"eval_loss": 9.93155574798584,
"eval_runtime": 59.927,
"eval_samples_per_second": 5.039,
"eval_steps_per_second": 0.317,
"step": 600
},
{
"epoch": 20.0,
"grad_norm": 1.7204277515411377,
"learning_rate": 9.995456138403733e-05,
"loss": 7.5688,
"step": 620
},
{
"epoch": 20.652173913043477,
"grad_norm": 1.426324725151062,
"learning_rate": 9.980864681729001e-05,
"loss": 7.4108,
"step": 640
},
{
"epoch": 21.293478260869566,
"grad_norm": 1.4127748012542725,
"learning_rate": 9.956242426451834e-05,
"loss": 7.2349,
"step": 660
},
{
"epoch": 21.945652173913043,
"grad_norm": 1.3751271963119507,
"learning_rate": 9.921638958517565e-05,
"loss": 7.1472,
"step": 680
},
{
"epoch": 22.58695652173913,
"grad_norm": 1.4017088413238525,
"learning_rate": 9.877123964705497e-05,
"loss": 6.9652,
"step": 700
},
{
"epoch": 22.58695652173913,
"eval_accuracy": 0.0007234270457797424,
"eval_loss": 9.93958568572998,
"eval_runtime": 59.9271,
"eval_samples_per_second": 5.039,
"eval_steps_per_second": 0.317,
"step": 700
},
{
"epoch": 23.22826086956522,
"grad_norm": 1.25152587890625,
"learning_rate": 9.822787092288991e-05,
"loss": 6.8018,
"step": 720
},
{
"epoch": 23.880434782608695,
"grad_norm": 1.3836736679077148,
"learning_rate": 9.758737768497802e-05,
"loss": 6.6247,
"step": 740
},
{
"epoch": 24.52173913043478,
"grad_norm": 1.4601590633392334,
"learning_rate": 9.685104980146193e-05,
"loss": 6.5264,
"step": 760
},
{
"epoch": 25.16304347826087,
"grad_norm": 1.3552601337432861,
"learning_rate": 9.60203701387066e-05,
"loss": 6.3179,
"step": 780
},
{
"epoch": 25.815217391304348,
"grad_norm": 1.391802191734314,
"learning_rate": 9.509701157500376e-05,
"loss": 6.2067,
"step": 800
},
{
"epoch": 25.815217391304348,
"eval_accuracy": 0.0007274238802868129,
"eval_loss": 9.927443504333496,
"eval_runtime": 32.7444,
"eval_samples_per_second": 9.223,
"eval_steps_per_second": 0.58,
"step": 800
},
{
"epoch": 26.456521739130434,
"grad_norm": 1.4579627513885498,
"learning_rate": 9.408283363161774e-05,
"loss": 5.9989,
"step": 820
},
{
"epoch": 27.097826086956523,
"grad_norm": 1.6435540914535522,
"learning_rate": 9.297987872795705e-05,
"loss": 5.9059,
"step": 840
},
{
"epoch": 27.75,
"grad_norm": 1.4759621620178223,
"learning_rate": 9.179036806841353e-05,
"loss": 5.7574,
"step": 860
},
{
"epoch": 28.391304347826086,
"grad_norm": 1.960924506187439,
"learning_rate": 9.051669716915227e-05,
"loss": 5.6674,
"step": 880
},
{
"epoch": 29.032608695652176,
"grad_norm": 1.803576111793518,
"learning_rate": 8.916143103386093e-05,
"loss": 5.5185,
"step": 900
},
{
"epoch": 29.032608695652176,
"eval_accuracy": 0.000659477693666616,
"eval_loss": 9.943469047546387,
"eval_runtime": 44.712,
"eval_samples_per_second": 6.754,
"eval_steps_per_second": 0.425,
"step": 900
},
{
"epoch": 29.684782608695652,
"grad_norm": 1.7185384035110474,
"learning_rate": 8.77272989881736e-05,
"loss": 5.3861,
"step": 920
},
{
"epoch": 30.32608695652174,
"grad_norm": 1.8554881811141968,
"learning_rate": 8.621718918317225e-05,
"loss": 5.1988,
"step": 940
},
{
"epoch": 30.97826086956522,
"grad_norm": 1.824589490890503,
"learning_rate": 8.463414277903475e-05,
"loss": 5.1284,
"step": 960
},
{
"epoch": 31.619565217391305,
"grad_norm": 1.7106842994689941,
"learning_rate": 8.298134782054305e-05,
"loss": 4.9489,
"step": 980
},
{
"epoch": 32.26086956521739,
"grad_norm": 2.0789778232574463,
"learning_rate": 8.126213281678526e-05,
"loss": 4.8318,
"step": 1000
},
{
"epoch": 32.26086956521739,
"eval_accuracy": 0.0004436486302848144,
"eval_loss": 9.991825103759766,
"eval_runtime": 36.8715,
"eval_samples_per_second": 8.191,
"eval_steps_per_second": 0.515,
"step": 1000
},
{
"epoch": 32.91304347826087,
"grad_norm": 1.6994128227233887,
"learning_rate": 7.94799600379813e-05,
"loss": 4.7451,
"step": 1020
},
{
"epoch": 33.55434782608695,
"grad_norm": 1.636572241783142,
"learning_rate": 7.763841854293145e-05,
"loss": 4.7058,
"step": 1040
},
{
"epoch": 34.19565217391305,
"grad_norm": 1.6913096904754639,
"learning_rate": 7.574121695112954e-05,
"loss": 4.5013,
"step": 1060
},
{
"epoch": 34.84782608695652,
"grad_norm": 1.8612340688705444,
"learning_rate": 7.379217597409688e-05,
"loss": 4.3998,
"step": 1080
},
{
"epoch": 35.48913043478261,
"grad_norm": 1.8019436597824097,
"learning_rate": 7.179522072097774e-05,
"loss": 4.343,
"step": 1100
},
{
"epoch": 35.48913043478261,
"eval_accuracy": 0.0004476454647918848,
"eval_loss": 10.02548885345459,
"eval_runtime": 43.794,
"eval_samples_per_second": 6.896,
"eval_steps_per_second": 0.434,
"step": 1100
},
{
"epoch": 36.130434782608695,
"grad_norm": 2.059352159500122,
"learning_rate": 6.975437279389181e-05,
"loss": 4.231,
"step": 1120
},
{
"epoch": 36.78260869565217,
"grad_norm": 1.8938504457473755,
"learning_rate": 6.767374218896286e-05,
"loss": 4.1478,
"step": 1140
},
{
"epoch": 37.42391304347826,
"grad_norm": 1.8322842121124268,
"learning_rate": 6.555751901933342e-05,
"loss": 4.0147,
"step": 1160
},
{
"epoch": 38.06521739130435,
"grad_norm": 1.706099271774292,
"learning_rate": 6.340996507683458e-05,
"loss": 3.9337,
"step": 1180
},
{
"epoch": 38.71739130434783,
"grad_norm": 1.79011070728302,
"learning_rate": 6.123540524930442e-05,
"loss": 3.9477,
"step": 1200
},
{
"epoch": 38.71739130434783,
"eval_accuracy": 0.00040767711972118084,
"eval_loss": 10.079157829284668,
"eval_runtime": 47.0549,
"eval_samples_per_second": 6.418,
"eval_steps_per_second": 0.404,
"step": 1200
},
{
"epoch": 39.358695652173914,
"grad_norm": 1.7470873594284058,
"learning_rate": 5.903821881083942e-05,
"loss": 3.7965,
"step": 1220
},
{
"epoch": 40.0,
"grad_norm": 2.18630313873291,
"learning_rate": 5.682283060251932e-05,
"loss": 3.6986,
"step": 1240
},
{
"epoch": 40.65217391304348,
"grad_norm": 1.515807032585144,
"learning_rate": 5.4593702121365955e-05,
"loss": 3.6499,
"step": 1260
},
{
"epoch": 41.29347826086956,
"grad_norm": 1.7721681594848633,
"learning_rate": 5.235532253548213e-05,
"loss": 3.559,
"step": 1280
},
{
"epoch": 41.94565217391305,
"grad_norm": 1.5675290822982788,
"learning_rate": 5.0112199643464376e-05,
"loss": 3.5394,
"step": 1300
},
{
"epoch": 41.94565217391305,
"eval_accuracy": 0.00018785122183230882,
"eval_loss": 10.124716758728027,
"eval_runtime": 92.8739,
"eval_samples_per_second": 3.252,
"eval_steps_per_second": 0.205,
"step": 1300
},
{
"epoch": 42.58695652173913,
"grad_norm": 1.6255255937576294,
"learning_rate": 4.7868850796296495e-05,
"loss": 3.5101,
"step": 1320
},
{
"epoch": 43.22826086956522,
"grad_norm": 1.5351227521896362,
"learning_rate": 4.5629793800005945e-05,
"loss": 3.3166,
"step": 1340
},
{
"epoch": 43.880434782608695,
"grad_norm": 1.4253877401351929,
"learning_rate": 4.339953781740363e-05,
"loss": 3.3786,
"step": 1360
},
{
"epoch": 44.52173913043478,
"grad_norm": 1.4174528121948242,
"learning_rate": 4.1182574287230224e-05,
"loss": 3.2175,
"step": 1380
},
{
"epoch": 45.16304347826087,
"grad_norm": 1.6721420288085938,
"learning_rate": 3.898336787899612e-05,
"loss": 3.2964,
"step": 1400
},
{
"epoch": 45.16304347826087,
"eval_accuracy": 0.000319746760565632,
"eval_loss": 10.182437896728516,
"eval_runtime": 76.0884,
"eval_samples_per_second": 3.969,
"eval_steps_per_second": 0.25,
"step": 1400
},
{
"epoch": 45.81521739130435,
"grad_norm": 1.4447053670883179,
"learning_rate": 3.680634750173137e-05,
"loss": 3.1779,
"step": 1420
},
{
"epoch": 46.45652173913044,
"grad_norm": 1.295456051826477,
"learning_rate": 3.4655897384752146e-05,
"loss": 3.1888,
"step": 1440
},
{
"epoch": 47.09782608695652,
"grad_norm": 1.1828927993774414,
"learning_rate": 3.2536348248406534e-05,
"loss": 3.0698,
"step": 1460
},
{
"epoch": 47.75,
"grad_norm": 1.3669146299362183,
"learning_rate": 3.0451968582579915e-05,
"loss": 3.0414,
"step": 1480
},
{
"epoch": 48.391304347826086,
"grad_norm": 1.146458387374878,
"learning_rate": 2.840695605052458e-05,
"loss": 3.0237,
"step": 1500
},
{
"epoch": 48.391304347826086,
"eval_accuracy": 0.00031574992605856164,
"eval_loss": 10.224663734436035,
"eval_runtime": 83.422,
"eval_samples_per_second": 3.62,
"eval_steps_per_second": 0.228,
"step": 1500
},
{
"epoch": 49.03260869565217,
"grad_norm": 1.1331515312194824,
"learning_rate": 2.6405429035324403e-05,
"loss": 3.0009,
"step": 1520
},
{
"epoch": 49.68478260869565,
"grad_norm": 1.286600112915039,
"learning_rate": 2.4451418346019573e-05,
"loss": 2.9825,
"step": 1540
},
{
"epoch": 50.32608695652174,
"grad_norm": 1.0778050422668457,
"learning_rate": 2.2548859100093407e-05,
"loss": 2.9051,
"step": 1560
},
{
"epoch": 50.97826086956522,
"grad_norm": 1.0530340671539307,
"learning_rate": 2.0701582798669676e-05,
"loss": 2.8988,
"step": 1580
},
{
"epoch": 51.619565217391305,
"grad_norm": 1.0122408866882324,
"learning_rate": 1.8913309610379015e-05,
"loss": 2.8621,
"step": 1600
},
{
"epoch": 51.619565217391305,
"eval_accuracy": 0.0002917689190161392,
"eval_loss": 10.255624771118164,
"eval_runtime": 76.4508,
"eval_samples_per_second": 3.95,
"eval_steps_per_second": 0.249,
"step": 1600
},
{
"epoch": 52.26086956521739,
"grad_norm": 1.008852243423462,
"learning_rate": 1.7187640879434553e-05,
"loss": 2.845,
"step": 1620
},
{
"epoch": 52.91304347826087,
"grad_norm": 1.016990303993225,
"learning_rate": 1.552805187300389e-05,
"loss": 2.8895,
"step": 1640
},
{
"epoch": 53.55434782608695,
"grad_norm": 0.9810470342636108,
"learning_rate": 1.3937884782483484e-05,
"loss": 2.7809,
"step": 1660
},
{
"epoch": 54.19565217391305,
"grad_norm": 1.027442216873169,
"learning_rate": 1.242034199277008e-05,
"loss": 2.8908,
"step": 1680
},
{
"epoch": 54.84782608695652,
"grad_norm": 1.0243561267852783,
"learning_rate": 1.097847963308351e-05,
"loss": 2.765,
"step": 1700
},
{
"epoch": 54.84782608695652,
"eval_accuracy": 0.0001438860422545344,
"eval_loss": 10.27558422088623,
"eval_runtime": 50.0398,
"eval_samples_per_second": 6.035,
"eval_steps_per_second": 0.38,
"step": 1700
},
{
"epoch": 55.48913043478261,
"grad_norm": 0.9682217240333557,
"learning_rate": 9.615201422329406e-06,
"loss": 2.726,
"step": 1720
},
{
"epoch": 56.130434782608695,
"grad_norm": 0.9422224760055542,
"learning_rate": 8.333252821395526e-06,
"loss": 2.7822,
"step": 1740
},
{
"epoch": 56.78260869565217,
"grad_norm": 0.9879501461982727,
"learning_rate": 7.135215504159115e-06,
"loss": 2.8115,
"step": 1760
},
{
"epoch": 57.42391304347826,
"grad_norm": 0.938834011554718,
"learning_rate": 6.023502158339078e-06,
"loss": 2.7159,
"step": 1780
},
{
"epoch": 58.06521739130435,
"grad_norm": 0.9302940368652344,
"learning_rate": 5.000351626664207e-06,
"loss": 2.7383,
"step": 1800
},
{
"epoch": 58.06521739130435,
"eval_accuracy": 0.0003037594225373504,
"eval_loss": 10.28667163848877,
"eval_runtime": 26.8193,
"eval_samples_per_second": 11.261,
"eval_steps_per_second": 0.708,
"step": 1800
},
{
"epoch": 58.71739130434783,
"grad_norm": 0.9569380879402161,
"learning_rate": 4.067824398141701e-06,
"loss": 2.7457,
"step": 1820
},
{
"epoch": 59.358695652173914,
"grad_norm": 0.9395641088485718,
"learning_rate": 3.2277984585066366e-06,
"loss": 2.7463,
"step": 1840
},
{
"epoch": 60.0,
"grad_norm": 1.2450119256973267,
"learning_rate": 2.4819655082085835e-06,
"loss": 2.7273,
"step": 1860
},
{
"epoch": 60.65217391304348,
"grad_norm": 1.0745036602020264,
"learning_rate": 1.8318275555520237e-06,
"loss": 2.6887,
"step": 1880
},
{
"epoch": 61.29347826086956,
"grad_norm": 0.9726008176803589,
"learning_rate": 1.2786938918515568e-06,
"loss": 2.7324,
"step": 1900
},
{
"epoch": 61.29347826086956,
"eval_accuracy": 0.0003037594225373504,
"eval_loss": 10.290609359741211,
"eval_runtime": 42.7369,
"eval_samples_per_second": 7.067,
"eval_steps_per_second": 0.445,
"step": 1900
},
{
"epoch": 61.94565217391305,
"grad_norm": 0.9399953484535217,
"learning_rate": 8.236784546933718e-07,
"loss": 2.7602,
"step": 1920
},
{
"epoch": 62.58695652173913,
"grad_norm": 0.9184156656265259,
"learning_rate": 4.676975846132692e-07,
"loss": 2.7586,
"step": 1940
},
{
"epoch": 63.22826086956522,
"grad_norm": 0.9342102408409119,
"learning_rate": 2.1146817970871258e-07,
"loss": 2.6681,
"step": 1960
},
{
"epoch": 63.880434782608695,
"grad_norm": 0.8547150492668152,
"learning_rate": 5.550625190150483e-08,
"loss": 2.7384,
"step": 1980
},
{
"epoch": 64.52173913043478,
"grad_norm": 0.9187237024307251,
"learning_rate": 1.2588775841204658e-10,
"loss": 2.6918,
"step": 2000
},
{
"epoch": 64.52173913043478,
"eval_accuracy": 0.0003037594225373504,
"eval_loss": 10.291138648986816,
"eval_runtime": 106.7866,
"eval_samples_per_second": 2.828,
"eval_steps_per_second": 0.178,
"step": 2000
}
],
"logging_steps": 20,
"max_steps": 2000,
"num_input_tokens_seen": 0,
"num_train_epochs": 65,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4054390970018304.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}