{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.537318712415989,
"eval_steps": 1000,
"global_step": 5000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.017686593562079942,
"grad_norm": 4.990772724151611,
"learning_rate": 5.000000000000001e-07,
"loss": 0.9549,
"step": 25
},
{
"epoch": 0.035373187124159884,
"grad_norm": 4.042325019836426,
"learning_rate": 1.0000000000000002e-06,
"loss": 0.8294,
"step": 50
},
{
"epoch": 0.05305978068623983,
"grad_norm": 3.429729461669922,
"learning_rate": 1.5e-06,
"loss": 0.5478,
"step": 75
},
{
"epoch": 0.07074637424831977,
"grad_norm": 3.084613800048828,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.482,
"step": 100
},
{
"epoch": 0.08843296781039972,
"grad_norm": 3.2256970405578613,
"learning_rate": 2.5e-06,
"loss": 0.4206,
"step": 125
},
{
"epoch": 0.10611956137247966,
"grad_norm": 3.0460891723632812,
"learning_rate": 3e-06,
"loss": 0.4171,
"step": 150
},
{
"epoch": 0.1238061549345596,
"grad_norm": 2.976763963699341,
"learning_rate": 3.5e-06,
"loss": 0.3896,
"step": 175
},
{
"epoch": 0.14149274849663954,
"grad_norm": 2.8915836811065674,
"learning_rate": 4.000000000000001e-06,
"loss": 0.4149,
"step": 200
},
{
"epoch": 0.1591793420587195,
"grad_norm": 3.01548433303833,
"learning_rate": 4.5e-06,
"loss": 0.3811,
"step": 225
},
{
"epoch": 0.17686593562079944,
"grad_norm": 3.8455846309661865,
"learning_rate": 5e-06,
"loss": 0.3872,
"step": 250
},
{
"epoch": 0.19455252918287938,
"grad_norm": 2.805778741836548,
"learning_rate": 5.500000000000001e-06,
"loss": 0.3714,
"step": 275
},
{
"epoch": 0.21223912274495932,
"grad_norm": 3.138227939605713,
"learning_rate": 6e-06,
"loss": 0.3768,
"step": 300
},
{
"epoch": 0.22992571630703926,
"grad_norm": 3.5028634071350098,
"learning_rate": 6.5000000000000004e-06,
"loss": 0.3835,
"step": 325
},
{
"epoch": 0.2476123098691192,
"grad_norm": 3.8886911869049072,
"learning_rate": 7e-06,
"loss": 0.359,
"step": 350
},
{
"epoch": 0.26529890343119916,
"grad_norm": 2.8521852493286133,
"learning_rate": 7.500000000000001e-06,
"loss": 0.3507,
"step": 375
},
{
"epoch": 0.2829854969932791,
"grad_norm": 2.901258945465088,
"learning_rate": 8.000000000000001e-06,
"loss": 0.3586,
"step": 400
},
{
"epoch": 0.30067209055535904,
"grad_norm": 2.550757884979248,
"learning_rate": 8.5e-06,
"loss": 0.3658,
"step": 425
},
{
"epoch": 0.318358684117439,
"grad_norm": 3.0283052921295166,
"learning_rate": 9e-06,
"loss": 0.3527,
"step": 450
},
{
"epoch": 0.3360452776795189,
"grad_norm": 3.272336721420288,
"learning_rate": 9.5e-06,
"loss": 0.3674,
"step": 475
},
{
"epoch": 0.3537318712415989,
"grad_norm": 2.8109354972839355,
"learning_rate": 1e-05,
"loss": 0.3592,
"step": 500
},
{
"epoch": 0.3714184648036788,
"grad_norm": 2.21569561958313,
"learning_rate": 9.944444444444445e-06,
"loss": 0.3219,
"step": 525
},
{
"epoch": 0.38910505836575876,
"grad_norm": 2.5520081520080566,
"learning_rate": 9.88888888888889e-06,
"loss": 0.3681,
"step": 550
},
{
"epoch": 0.40679165192783867,
"grad_norm": 2.903778076171875,
"learning_rate": 9.833333333333333e-06,
"loss": 0.3424,
"step": 575
},
{
"epoch": 0.42447824548991864,
"grad_norm": 2.4832980632781982,
"learning_rate": 9.777777777777779e-06,
"loss": 0.3269,
"step": 600
},
{
"epoch": 0.4421648390519986,
"grad_norm": 2.759129285812378,
"learning_rate": 9.722222222222223e-06,
"loss": 0.327,
"step": 625
},
{
"epoch": 0.4598514326140785,
"grad_norm": 2.3846707344055176,
"learning_rate": 9.666666666666667e-06,
"loss": 0.3367,
"step": 650
},
{
"epoch": 0.4775380261761585,
"grad_norm": 2.462186574935913,
"learning_rate": 9.611111111111112e-06,
"loss": 0.3175,
"step": 675
},
{
"epoch": 0.4952246197382384,
"grad_norm": 2.5651514530181885,
"learning_rate": 9.555555555555556e-06,
"loss": 0.3156,
"step": 700
},
{
"epoch": 0.5129112133003184,
"grad_norm": 2.6540584564208984,
"learning_rate": 9.5e-06,
"loss": 0.3098,
"step": 725
},
{
"epoch": 0.5305978068623983,
"grad_norm": 2.713265895843506,
"learning_rate": 9.444444444444445e-06,
"loss": 0.3226,
"step": 750
},
{
"epoch": 0.5482844004244782,
"grad_norm": 2.128851890563965,
"learning_rate": 9.38888888888889e-06,
"loss": 0.3244,
"step": 775
},
{
"epoch": 0.5659709939865581,
"grad_norm": 2.5764191150665283,
"learning_rate": 9.333333333333334e-06,
"loss": 0.3156,
"step": 800
},
{
"epoch": 0.5836575875486382,
"grad_norm": 2.1402037143707275,
"learning_rate": 9.277777777777778e-06,
"loss": 0.3029,
"step": 825
},
{
"epoch": 0.6013441811107181,
"grad_norm": 2.4773480892181396,
"learning_rate": 9.222222222222224e-06,
"loss": 0.293,
"step": 850
},
{
"epoch": 0.619030774672798,
"grad_norm": 2.6644790172576904,
"learning_rate": 9.166666666666666e-06,
"loss": 0.3111,
"step": 875
},
{
"epoch": 0.636717368234878,
"grad_norm": 2.3241961002349854,
"learning_rate": 9.111111111111112e-06,
"loss": 0.2726,
"step": 900
},
{
"epoch": 0.6544039617969579,
"grad_norm": 2.6867661476135254,
"learning_rate": 9.055555555555556e-06,
"loss": 0.3083,
"step": 925
},
{
"epoch": 0.6720905553590378,
"grad_norm": 2.138195037841797,
"learning_rate": 9e-06,
"loss": 0.3019,
"step": 950
},
{
"epoch": 0.6897771489211177,
"grad_norm": 2.6360018253326416,
"learning_rate": 8.944444444444446e-06,
"loss": 0.2917,
"step": 975
},
{
"epoch": 0.7074637424831978,
"grad_norm": 2.5191752910614014,
"learning_rate": 8.888888888888888e-06,
"loss": 0.294,
"step": 1000
},
{
"epoch": 0.7074637424831978,
"eval_loss": 0.30541032552719116,
"eval_runtime": 4620.7401,
"eval_samples_per_second": 2.329,
"eval_steps_per_second": 0.146,
"eval_wer": 0.21139403895799108,
"step": 1000
},
{
"epoch": 0.7251503360452777,
"grad_norm": 2.696348190307617,
"learning_rate": 8.833333333333334e-06,
"loss": 0.2912,
"step": 1025
},
{
"epoch": 0.7428369296073576,
"grad_norm": 2.3700759410858154,
"learning_rate": 8.777777777777778e-06,
"loss": 0.2756,
"step": 1050
},
{
"epoch": 0.7605235231694376,
"grad_norm": 2.314389705657959,
"learning_rate": 8.722222222222224e-06,
"loss": 0.2881,
"step": 1075
},
{
"epoch": 0.7782101167315175,
"grad_norm": 2.3540844917297363,
"learning_rate": 8.666666666666668e-06,
"loss": 0.291,
"step": 1100
},
{
"epoch": 0.7958967102935974,
"grad_norm": 2.3578970432281494,
"learning_rate": 8.611111111111112e-06,
"loss": 0.2846,
"step": 1125
},
{
"epoch": 0.8135833038556773,
"grad_norm": 1.976126790046692,
"learning_rate": 8.555555555555556e-06,
"loss": 0.27,
"step": 1150
},
{
"epoch": 0.8312698974177574,
"grad_norm": 2.194822072982788,
"learning_rate": 8.5e-06,
"loss": 0.2763,
"step": 1175
},
{
"epoch": 0.8489564909798373,
"grad_norm": 2.1482298374176025,
"learning_rate": 8.444444444444446e-06,
"loss": 0.2731,
"step": 1200
},
{
"epoch": 0.8666430845419172,
"grad_norm": 2.163198471069336,
"learning_rate": 8.38888888888889e-06,
"loss": 0.2602,
"step": 1225
},
{
"epoch": 0.8843296781039972,
"grad_norm": 2.482328414916992,
"learning_rate": 8.333333333333334e-06,
"loss": 0.2896,
"step": 1250
},
{
"epoch": 0.9020162716660771,
"grad_norm": 2.2900679111480713,
"learning_rate": 8.277777777777778e-06,
"loss": 0.2472,
"step": 1275
},
{
"epoch": 0.919702865228157,
"grad_norm": 2.4348435401916504,
"learning_rate": 8.222222222222222e-06,
"loss": 0.2754,
"step": 1300
},
{
"epoch": 0.937389458790237,
"grad_norm": 2.4213294982910156,
"learning_rate": 8.166666666666668e-06,
"loss": 0.2585,
"step": 1325
},
{
"epoch": 0.955076052352317,
"grad_norm": 2.6255362033843994,
"learning_rate": 8.111111111111112e-06,
"loss": 0.25,
"step": 1350
},
{
"epoch": 0.9727626459143969,
"grad_norm": 2.187410354614258,
"learning_rate": 8.055555555555557e-06,
"loss": 0.2809,
"step": 1375
},
{
"epoch": 0.9904492394764768,
"grad_norm": 2.669609546661377,
"learning_rate": 8.000000000000001e-06,
"loss": 0.2707,
"step": 1400
},
{
"epoch": 1.0081358330385568,
"grad_norm": 1.841189980506897,
"learning_rate": 7.944444444444445e-06,
"loss": 0.2272,
"step": 1425
},
{
"epoch": 1.0258224266006368,
"grad_norm": 1.8432203531265259,
"learning_rate": 7.88888888888889e-06,
"loss": 0.2036,
"step": 1450
},
{
"epoch": 1.0435090201627166,
"grad_norm": 1.961900234222412,
"learning_rate": 7.833333333333333e-06,
"loss": 0.1758,
"step": 1475
},
{
"epoch": 1.0611956137247966,
"grad_norm": 1.7587320804595947,
"learning_rate": 7.77777777777778e-06,
"loss": 0.1847,
"step": 1500
},
{
"epoch": 1.0788822072868764,
"grad_norm": 2.330167531967163,
"learning_rate": 7.722222222222223e-06,
"loss": 0.1602,
"step": 1525
},
{
"epoch": 1.0965688008489565,
"grad_norm": 2.3043272495269775,
"learning_rate": 7.666666666666667e-06,
"loss": 0.1772,
"step": 1550
},
{
"epoch": 1.1142553944110365,
"grad_norm": 2.093520164489746,
"learning_rate": 7.611111111111111e-06,
"loss": 0.1742,
"step": 1575
},
{
"epoch": 1.1319419879731163,
"grad_norm": 2.2821807861328125,
"learning_rate": 7.555555555555556e-06,
"loss": 0.1685,
"step": 1600
},
{
"epoch": 1.1496285815351963,
"grad_norm": 2.087881326675415,
"learning_rate": 7.500000000000001e-06,
"loss": 0.1722,
"step": 1625
},
{
"epoch": 1.1673151750972763,
"grad_norm": 1.7071295976638794,
"learning_rate": 7.444444444444445e-06,
"loss": 0.168,
"step": 1650
},
{
"epoch": 1.1850017686593561,
"grad_norm": 2.3514926433563232,
"learning_rate": 7.38888888888889e-06,
"loss": 0.1786,
"step": 1675
},
{
"epoch": 1.2026883622214362,
"grad_norm": 2.0674052238464355,
"learning_rate": 7.333333333333333e-06,
"loss": 0.1625,
"step": 1700
},
{
"epoch": 1.2203749557835162,
"grad_norm": 1.9389206171035767,
"learning_rate": 7.277777777777778e-06,
"loss": 0.1686,
"step": 1725
},
{
"epoch": 1.238061549345596,
"grad_norm": 2.185607433319092,
"learning_rate": 7.222222222222223e-06,
"loss": 0.168,
"step": 1750
},
{
"epoch": 1.255748142907676,
"grad_norm": 1.953815221786499,
"learning_rate": 7.166666666666667e-06,
"loss": 0.1634,
"step": 1775
},
{
"epoch": 1.2734347364697558,
"grad_norm": 2.0842204093933105,
"learning_rate": 7.111111111111112e-06,
"loss": 0.1749,
"step": 1800
},
{
"epoch": 1.2911213300318358,
"grad_norm": 2.197368621826172,
"learning_rate": 7.055555555555557e-06,
"loss": 0.173,
"step": 1825
},
{
"epoch": 1.3088079235939158,
"grad_norm": 1.8195652961730957,
"learning_rate": 7e-06,
"loss": 0.1575,
"step": 1850
},
{
"epoch": 1.3264945171559956,
"grad_norm": 1.9379323720932007,
"learning_rate": 6.944444444444445e-06,
"loss": 0.1591,
"step": 1875
},
{
"epoch": 1.3441811107180757,
"grad_norm": 1.8081250190734863,
"learning_rate": 6.88888888888889e-06,
"loss": 0.1534,
"step": 1900
},
{
"epoch": 1.3618677042801557,
"grad_norm": 2.040506601333618,
"learning_rate": 6.833333333333334e-06,
"loss": 0.1548,
"step": 1925
},
{
"epoch": 1.3795542978422355,
"grad_norm": 1.8651851415634155,
"learning_rate": 6.777777777777779e-06,
"loss": 0.161,
"step": 1950
},
{
"epoch": 1.3972408914043155,
"grad_norm": 1.9047472476959229,
"learning_rate": 6.7222222222222235e-06,
"loss": 0.1651,
"step": 1975
},
{
"epoch": 1.4149274849663955,
"grad_norm": 1.6781543493270874,
"learning_rate": 6.666666666666667e-06,
"loss": 0.1572,
"step": 2000
},
{
"epoch": 1.4149274849663955,
"eval_loss": 0.27680015563964844,
"eval_runtime": 4951.943,
"eval_samples_per_second": 2.173,
"eval_steps_per_second": 0.136,
"eval_wer": 0.18979308456543847,
"step": 2000
},
{
"epoch": 1.4326140785284753,
"grad_norm": 1.9535053968429565,
"learning_rate": 6.6111111111111115e-06,
"loss": 0.1586,
"step": 2025
},
{
"epoch": 1.4503006720905554,
"grad_norm": 1.7220150232315063,
"learning_rate": 6.555555555555556e-06,
"loss": 0.1634,
"step": 2050
},
{
"epoch": 1.4679872656526354,
"grad_norm": 2.427858352661133,
"learning_rate": 6.5000000000000004e-06,
"loss": 0.1659,
"step": 2075
},
{
"epoch": 1.4856738592147152,
"grad_norm": 1.9716796875,
"learning_rate": 6.444444444444445e-06,
"loss": 0.1511,
"step": 2100
},
{
"epoch": 1.5033604527767952,
"grad_norm": 1.8595249652862549,
"learning_rate": 6.3888888888888885e-06,
"loss": 0.1556,
"step": 2125
},
{
"epoch": 1.5210470463388752,
"grad_norm": 2.3424742221832275,
"learning_rate": 6.333333333333333e-06,
"loss": 0.1717,
"step": 2150
},
{
"epoch": 1.538733639900955,
"grad_norm": 2.172686815261841,
"learning_rate": 6.277777777777778e-06,
"loss": 0.1586,
"step": 2175
},
{
"epoch": 1.556420233463035,
"grad_norm": 1.7644214630126953,
"learning_rate": 6.222222222222223e-06,
"loss": 0.1521,
"step": 2200
},
{
"epoch": 1.574106827025115,
"grad_norm": 2.1562883853912354,
"learning_rate": 6.166666666666667e-06,
"loss": 0.1578,
"step": 2225
},
{
"epoch": 1.5917934205871949,
"grad_norm": 2.087270736694336,
"learning_rate": 6.111111111111112e-06,
"loss": 0.1556,
"step": 2250
},
{
"epoch": 1.6094800141492749,
"grad_norm": 2.0405049324035645,
"learning_rate": 6.055555555555555e-06,
"loss": 0.1654,
"step": 2275
},
{
"epoch": 1.627166607711355,
"grad_norm": 1.6984881162643433,
"learning_rate": 6e-06,
"loss": 0.1573,
"step": 2300
},
{
"epoch": 1.6448532012734347,
"grad_norm": 2.290949583053589,
"learning_rate": 5.944444444444445e-06,
"loss": 0.1655,
"step": 2325
},
{
"epoch": 1.6625397948355147,
"grad_norm": 1.9502941370010376,
"learning_rate": 5.88888888888889e-06,
"loss": 0.1462,
"step": 2350
},
{
"epoch": 1.6802263883975948,
"grad_norm": 1.9741941690444946,
"learning_rate": 5.833333333333334e-06,
"loss": 0.1636,
"step": 2375
},
{
"epoch": 1.6979129819596745,
"grad_norm": 1.9639830589294434,
"learning_rate": 5.777777777777778e-06,
"loss": 0.155,
"step": 2400
},
{
"epoch": 1.7155995755217543,
"grad_norm": 1.6349612474441528,
"learning_rate": 5.722222222222222e-06,
"loss": 0.1659,
"step": 2425
},
{
"epoch": 1.7332861690838346,
"grad_norm": 1.8674352169036865,
"learning_rate": 5.666666666666667e-06,
"loss": 0.152,
"step": 2450
},
{
"epoch": 1.7509727626459144,
"grad_norm": 2.1415135860443115,
"learning_rate": 5.611111111111112e-06,
"loss": 0.1699,
"step": 2475
},
{
"epoch": 1.7686593562079942,
"grad_norm": 1.5318387746810913,
"learning_rate": 5.555555555555557e-06,
"loss": 0.1592,
"step": 2500
},
{
"epoch": 1.7863459497700744,
"grad_norm": 1.758480191230774,
"learning_rate": 5.500000000000001e-06,
"loss": 0.1537,
"step": 2525
},
{
"epoch": 1.8040325433321542,
"grad_norm": 1.815434455871582,
"learning_rate": 5.444444444444445e-06,
"loss": 0.1533,
"step": 2550
},
{
"epoch": 1.821719136894234,
"grad_norm": 1.5665532350540161,
"learning_rate": 5.388888888888889e-06,
"loss": 0.1601,
"step": 2575
},
{
"epoch": 1.839405730456314,
"grad_norm": 2.0574448108673096,
"learning_rate": 5.333333333333334e-06,
"loss": 0.1548,
"step": 2600
},
{
"epoch": 1.857092324018394,
"grad_norm": 1.7183318138122559,
"learning_rate": 5.2777777777777785e-06,
"loss": 0.1477,
"step": 2625
},
{
"epoch": 1.8747789175804739,
"grad_norm": 1.7662074565887451,
"learning_rate": 5.2222222222222226e-06,
"loss": 0.1486,
"step": 2650
},
{
"epoch": 1.892465511142554,
"grad_norm": 1.840253472328186,
"learning_rate": 5.1666666666666675e-06,
"loss": 0.1516,
"step": 2675
},
{
"epoch": 1.910152104704634,
"grad_norm": 1.9791252613067627,
"learning_rate": 5.1111111111111115e-06,
"loss": 0.1431,
"step": 2700
},
{
"epoch": 1.9278386982667137,
"grad_norm": 1.5719287395477295,
"learning_rate": 5.0555555555555555e-06,
"loss": 0.1539,
"step": 2725
},
{
"epoch": 1.9455252918287937,
"grad_norm": 2.328838348388672,
"learning_rate": 5e-06,
"loss": 0.1502,
"step": 2750
},
{
"epoch": 1.9632118853908738,
"grad_norm": 1.7131285667419434,
"learning_rate": 4.944444444444445e-06,
"loss": 0.1469,
"step": 2775
},
{
"epoch": 1.9808984789529536,
"grad_norm": 1.751104712486267,
"learning_rate": 4.888888888888889e-06,
"loss": 0.151,
"step": 2800
},
{
"epoch": 1.9985850725150336,
"grad_norm": 1.8015530109405518,
"learning_rate": 4.833333333333333e-06,
"loss": 0.1446,
"step": 2825
},
{
"epoch": 2.0162716660771136,
"grad_norm": 1.4693965911865234,
"learning_rate": 4.777777777777778e-06,
"loss": 0.0913,
"step": 2850
},
{
"epoch": 2.0339582596391934,
"grad_norm": 1.5922882556915283,
"learning_rate": 4.722222222222222e-06,
"loss": 0.0824,
"step": 2875
},
{
"epoch": 2.0516448532012737,
"grad_norm": 1.0818850994110107,
"learning_rate": 4.666666666666667e-06,
"loss": 0.08,
"step": 2900
},
{
"epoch": 2.0693314467633535,
"grad_norm": 1.5331037044525146,
"learning_rate": 4.611111111111112e-06,
"loss": 0.0761,
"step": 2925
},
{
"epoch": 2.0870180403254333,
"grad_norm": 1.4420839548110962,
"learning_rate": 4.555555555555556e-06,
"loss": 0.072,
"step": 2950
},
{
"epoch": 2.104704633887513,
"grad_norm": 1.4210786819458008,
"learning_rate": 4.5e-06,
"loss": 0.073,
"step": 2975
},
{
"epoch": 2.1223912274495933,
"grad_norm": 1.2918660640716553,
"learning_rate": 4.444444444444444e-06,
"loss": 0.0714,
"step": 3000
},
{
"epoch": 2.1223912274495933,
"eval_loss": 0.27894869446754456,
"eval_runtime": 4698.3805,
"eval_samples_per_second": 2.291,
"eval_steps_per_second": 0.143,
"eval_wer": 0.18066963936478134,
"step": 3000
},
{
"epoch": 2.140077821011673,
"grad_norm": 1.2981915473937988,
"learning_rate": 4.388888888888889e-06,
"loss": 0.0705,
"step": 3025
},
{
"epoch": 2.157764414573753,
"grad_norm": 1.5843642950057983,
"learning_rate": 4.333333333333334e-06,
"loss": 0.0801,
"step": 3050
},
{
"epoch": 2.175451008135833,
"grad_norm": 1.4696415662765503,
"learning_rate": 4.277777777777778e-06,
"loss": 0.0828,
"step": 3075
},
{
"epoch": 2.193137601697913,
"grad_norm": 1.4634337425231934,
"learning_rate": 4.222222222222223e-06,
"loss": 0.0744,
"step": 3100
},
{
"epoch": 2.2108241952599927,
"grad_norm": 1.241773247718811,
"learning_rate": 4.166666666666667e-06,
"loss": 0.0735,
"step": 3125
},
{
"epoch": 2.228510788822073,
"grad_norm": 1.7622851133346558,
"learning_rate": 4.111111111111111e-06,
"loss": 0.0755,
"step": 3150
},
{
"epoch": 2.246197382384153,
"grad_norm": 1.7847949266433716,
"learning_rate": 4.055555555555556e-06,
"loss": 0.0697,
"step": 3175
},
{
"epoch": 2.2638839759462326,
"grad_norm": 1.7188225984573364,
"learning_rate": 4.000000000000001e-06,
"loss": 0.0783,
"step": 3200
},
{
"epoch": 2.281570569508313,
"grad_norm": 1.4745049476623535,
"learning_rate": 3.944444444444445e-06,
"loss": 0.0782,
"step": 3225
},
{
"epoch": 2.2992571630703926,
"grad_norm": 1.488161563873291,
"learning_rate": 3.88888888888889e-06,
"loss": 0.081,
"step": 3250
},
{
"epoch": 2.3169437566324724,
"grad_norm": 1.4803715944290161,
"learning_rate": 3.833333333333334e-06,
"loss": 0.0734,
"step": 3275
},
{
"epoch": 2.3346303501945527,
"grad_norm": 1.7518547773361206,
"learning_rate": 3.777777777777778e-06,
"loss": 0.0791,
"step": 3300
},
{
"epoch": 2.3523169437566325,
"grad_norm": 1.9586306810379028,
"learning_rate": 3.7222222222222225e-06,
"loss": 0.0814,
"step": 3325
},
{
"epoch": 2.3700035373187123,
"grad_norm": 1.8565925359725952,
"learning_rate": 3.6666666666666666e-06,
"loss": 0.0728,
"step": 3350
},
{
"epoch": 2.3876901308807925,
"grad_norm": 1.5781797170639038,
"learning_rate": 3.6111111111111115e-06,
"loss": 0.0826,
"step": 3375
},
{
"epoch": 2.4053767244428723,
"grad_norm": 1.5568901300430298,
"learning_rate": 3.555555555555556e-06,
"loss": 0.0802,
"step": 3400
},
{
"epoch": 2.423063318004952,
"grad_norm": 1.2662155628204346,
"learning_rate": 3.5e-06,
"loss": 0.074,
"step": 3425
},
{
"epoch": 2.4407499115670324,
"grad_norm": 1.559577226638794,
"learning_rate": 3.444444444444445e-06,
"loss": 0.0758,
"step": 3450
},
{
"epoch": 2.458436505129112,
"grad_norm": 1.5125735998153687,
"learning_rate": 3.3888888888888893e-06,
"loss": 0.0756,
"step": 3475
},
{
"epoch": 2.476123098691192,
"grad_norm": 1.4088867902755737,
"learning_rate": 3.3333333333333333e-06,
"loss": 0.0713,
"step": 3500
},
{
"epoch": 2.493809692253272,
"grad_norm": 1.6018534898757935,
"learning_rate": 3.277777777777778e-06,
"loss": 0.0656,
"step": 3525
},
{
"epoch": 2.511496285815352,
"grad_norm": 1.5624502897262573,
"learning_rate": 3.2222222222222227e-06,
"loss": 0.0802,
"step": 3550
},
{
"epoch": 2.529182879377432,
"grad_norm": 1.4694902896881104,
"learning_rate": 3.1666666666666667e-06,
"loss": 0.0713,
"step": 3575
},
{
"epoch": 2.5468694729395116,
"grad_norm": 1.6186368465423584,
"learning_rate": 3.1111111111111116e-06,
"loss": 0.0864,
"step": 3600
},
{
"epoch": 2.564556066501592,
"grad_norm": 1.6998704671859741,
"learning_rate": 3.055555555555556e-06,
"loss": 0.0721,
"step": 3625
},
{
"epoch": 2.5822426600636716,
"grad_norm": 1.7324111461639404,
"learning_rate": 3e-06,
"loss": 0.0747,
"step": 3650
},
{
"epoch": 2.599929253625752,
"grad_norm": 1.2067221403121948,
"learning_rate": 2.944444444444445e-06,
"loss": 0.0738,
"step": 3675
},
{
"epoch": 2.6176158471878317,
"grad_norm": 1.8782434463500977,
"learning_rate": 2.888888888888889e-06,
"loss": 0.0699,
"step": 3700
},
{
"epoch": 2.6353024407499115,
"grad_norm": 1.240626335144043,
"learning_rate": 2.8333333333333335e-06,
"loss": 0.0747,
"step": 3725
},
{
"epoch": 2.6529890343119913,
"grad_norm": 1.2365247011184692,
"learning_rate": 2.7777777777777783e-06,
"loss": 0.0734,
"step": 3750
},
{
"epoch": 2.6706756278740715,
"grad_norm": 1.5169546604156494,
"learning_rate": 2.7222222222222224e-06,
"loss": 0.0734,
"step": 3775
},
{
"epoch": 2.6883622214361513,
"grad_norm": 1.3478069305419922,
"learning_rate": 2.666666666666667e-06,
"loss": 0.0757,
"step": 3800
},
{
"epoch": 2.7060488149982316,
"grad_norm": 1.938991665840149,
"learning_rate": 2.6111111111111113e-06,
"loss": 0.0774,
"step": 3825
},
{
"epoch": 2.7237354085603114,
"grad_norm": 1.7492825984954834,
"learning_rate": 2.5555555555555557e-06,
"loss": 0.0679,
"step": 3850
},
{
"epoch": 2.741422002122391,
"grad_norm": 1.7618768215179443,
"learning_rate": 2.5e-06,
"loss": 0.0702,
"step": 3875
},
{
"epoch": 2.759108595684471,
"grad_norm": 1.836369276046753,
"learning_rate": 2.4444444444444447e-06,
"loss": 0.0707,
"step": 3900
},
{
"epoch": 2.776795189246551,
"grad_norm": 1.0155326128005981,
"learning_rate": 2.388888888888889e-06,
"loss": 0.0692,
"step": 3925
},
{
"epoch": 2.794481782808631,
"grad_norm": 1.5357517004013062,
"learning_rate": 2.3333333333333336e-06,
"loss": 0.064,
"step": 3950
},
{
"epoch": 2.8121683763707113,
"grad_norm": 1.413317322731018,
"learning_rate": 2.277777777777778e-06,
"loss": 0.0761,
"step": 3975
},
{
"epoch": 2.829854969932791,
"grad_norm": 1.81116783618927,
"learning_rate": 2.222222222222222e-06,
"loss": 0.0772,
"step": 4000
},
{
"epoch": 2.829854969932791,
"eval_loss": 0.27590474486351013,
"eval_runtime": 4660.1057,
"eval_samples_per_second": 2.31,
"eval_steps_per_second": 0.144,
"eval_wer": 0.18098255495580068,
"step": 4000
},
{
"epoch": 2.847541563494871,
"grad_norm": 1.819291591644287,
"learning_rate": 2.166666666666667e-06,
"loss": 0.0739,
"step": 4025
},
{
"epoch": 2.8652281570569507,
"grad_norm": 1.4449914693832397,
"learning_rate": 2.1111111111111114e-06,
"loss": 0.0672,
"step": 4050
},
{
"epoch": 2.882914750619031,
"grad_norm": 1.617101788520813,
"learning_rate": 2.0555555555555555e-06,
"loss": 0.072,
"step": 4075
},
{
"epoch": 2.9006013441811107,
"grad_norm": 1.7751729488372803,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.075,
"step": 4100
},
{
"epoch": 2.9182879377431905,
"grad_norm": 1.939385175704956,
"learning_rate": 1.944444444444445e-06,
"loss": 0.0746,
"step": 4125
},
{
"epoch": 2.9359745313052708,
"grad_norm": 1.2767341136932373,
"learning_rate": 1.888888888888889e-06,
"loss": 0.065,
"step": 4150
},
{
"epoch": 2.9536611248673506,
"grad_norm": 1.384352445602417,
"learning_rate": 1.8333333333333333e-06,
"loss": 0.0729,
"step": 4175
},
{
"epoch": 2.9713477184294304,
"grad_norm": 1.707626223564148,
"learning_rate": 1.777777777777778e-06,
"loss": 0.0699,
"step": 4200
},
{
"epoch": 2.9890343119915106,
"grad_norm": 1.5926012992858887,
"learning_rate": 1.7222222222222224e-06,
"loss": 0.0653,
"step": 4225
},
{
"epoch": 3.0067209055535904,
"grad_norm": 0.7629415988922119,
"learning_rate": 1.6666666666666667e-06,
"loss": 0.0602,
"step": 4250
},
{
"epoch": 3.02440749911567,
"grad_norm": 1.042358636856079,
"learning_rate": 1.6111111111111113e-06,
"loss": 0.0343,
"step": 4275
},
{
"epoch": 3.0420940926777504,
"grad_norm": 1.4969948530197144,
"learning_rate": 1.5555555555555558e-06,
"loss": 0.0375,
"step": 4300
},
{
"epoch": 3.0597806862398302,
"grad_norm": 2.0350911617279053,
"learning_rate": 1.5e-06,
"loss": 0.0343,
"step": 4325
},
{
"epoch": 3.07746727980191,
"grad_norm": 1.1933727264404297,
"learning_rate": 1.4444444444444445e-06,
"loss": 0.0419,
"step": 4350
},
{
"epoch": 3.0951538733639903,
"grad_norm": 1.2081432342529297,
"learning_rate": 1.3888888888888892e-06,
"loss": 0.0349,
"step": 4375
},
{
"epoch": 3.11284046692607,
"grad_norm": 1.3141449689865112,
"learning_rate": 1.3333333333333334e-06,
"loss": 0.0357,
"step": 4400
},
{
"epoch": 3.13052706048815,
"grad_norm": 1.156983494758606,
"learning_rate": 1.2777777777777779e-06,
"loss": 0.0313,
"step": 4425
},
{
"epoch": 3.14821365405023,
"grad_norm": 0.8983932137489319,
"learning_rate": 1.2222222222222223e-06,
"loss": 0.0324,
"step": 4450
},
{
"epoch": 3.16590024761231,
"grad_norm": 0.7461453676223755,
"learning_rate": 1.1666666666666668e-06,
"loss": 0.0414,
"step": 4475
},
{
"epoch": 3.1835868411743897,
"grad_norm": 1.092787265777588,
"learning_rate": 1.111111111111111e-06,
"loss": 0.0363,
"step": 4500
},
{
"epoch": 3.2012734347364695,
"grad_norm": 1.2066584825515747,
"learning_rate": 1.0555555555555557e-06,
"loss": 0.0361,
"step": 4525
},
{
"epoch": 3.2189600282985498,
"grad_norm": 0.7660400867462158,
"learning_rate": 1.0000000000000002e-06,
"loss": 0.0338,
"step": 4550
},
{
"epoch": 3.2366466218606296,
"grad_norm": 0.8779363632202148,
"learning_rate": 9.444444444444445e-07,
"loss": 0.0416,
"step": 4575
},
{
"epoch": 3.25433321542271,
"grad_norm": 1.0976966619491577,
"learning_rate": 8.88888888888889e-07,
"loss": 0.0381,
"step": 4600
},
{
"epoch": 3.2720198089847896,
"grad_norm": 0.977162778377533,
"learning_rate": 8.333333333333333e-07,
"loss": 0.0318,
"step": 4625
},
{
"epoch": 3.2897064025468694,
"grad_norm": 1.5296571254730225,
"learning_rate": 7.777777777777779e-07,
"loss": 0.037,
"step": 4650
},
{
"epoch": 3.307392996108949,
"grad_norm": 1.0584012269973755,
"learning_rate": 7.222222222222222e-07,
"loss": 0.0369,
"step": 4675
},
{
"epoch": 3.3250795896710295,
"grad_norm": 0.8479712009429932,
"learning_rate": 6.666666666666667e-07,
"loss": 0.0343,
"step": 4700
},
{
"epoch": 3.3427661832331093,
"grad_norm": 1.176857352256775,
"learning_rate": 6.111111111111112e-07,
"loss": 0.0349,
"step": 4725
},
{
"epoch": 3.360452776795189,
"grad_norm": 1.1220146417617798,
"learning_rate": 5.555555555555555e-07,
"loss": 0.0307,
"step": 4750
},
{
"epoch": 3.3781393703572693,
"grad_norm": 0.9692522883415222,
"learning_rate": 5.000000000000001e-07,
"loss": 0.0345,
"step": 4775
},
{
"epoch": 3.395825963919349,
"grad_norm": 0.8425891995429993,
"learning_rate": 4.444444444444445e-07,
"loss": 0.0373,
"step": 4800
},
{
"epoch": 3.413512557481429,
"grad_norm": 1.2310644388198853,
"learning_rate": 3.8888888888888895e-07,
"loss": 0.032,
"step": 4825
},
{
"epoch": 3.431199151043509,
"grad_norm": 1.2488874197006226,
"learning_rate": 3.3333333333333335e-07,
"loss": 0.033,
"step": 4850
},
{
"epoch": 3.448885744605589,
"grad_norm": 0.8309725522994995,
"learning_rate": 2.7777777777777776e-07,
"loss": 0.035,
"step": 4875
},
{
"epoch": 3.4665723381676687,
"grad_norm": 0.8922958970069885,
"learning_rate": 2.2222222222222224e-07,
"loss": 0.0306,
"step": 4900
},
{
"epoch": 3.484258931729749,
"grad_norm": 0.7373726963996887,
"learning_rate": 1.6666666666666668e-07,
"loss": 0.0324,
"step": 4925
},
{
"epoch": 3.501945525291829,
"grad_norm": 1.3404922485351562,
"learning_rate": 1.1111111111111112e-07,
"loss": 0.0362,
"step": 4950
},
{
"epoch": 3.5196321188539086,
"grad_norm": 1.187138319015503,
"learning_rate": 5.555555555555556e-08,
"loss": 0.0373,
"step": 4975
},
{
"epoch": 3.537318712415989,
"grad_norm": 0.8413584232330322,
"learning_rate": 0.0,
"loss": 0.0337,
"step": 5000
},
{
"epoch": 3.537318712415989,
"eval_loss": 0.3035752773284912,
"eval_runtime": 4623.5314,
"eval_samples_per_second": 2.328,
"eval_steps_per_second": 0.146,
"eval_wer": 0.18264491903309082,
"step": 5000
},
{
"epoch": 3.537318712415989,
"step": 5000,
"total_flos": 5.435079965953229e+20,
"train_loss": 0.17054095619916915,
"train_runtime": 54364.2112,
"train_samples_per_second": 2.943,
"train_steps_per_second": 0.092
}
],
"logging_steps": 25,
"max_steps": 5000,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.435079965953229e+20,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}