{
"best_global_step": 40000,
"best_metric": 0.198301300406456,
"best_model_checkpoint": "/media/user/Expansion1/multilingual-e5-small-aligned-v2-fineweb2hq-vs-c4-classifier-run2/checkpoint-40000",
"epoch": 3.0,
"eval_steps": 500,
"global_step": 120000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0125,
"grad_norm": 5.874658584594727,
"learning_rate": 4.979208333333333e-05,
"loss": 0.3436,
"num_input_tokens_seen": 512000,
"step": 500
},
{
"epoch": 0.025,
"grad_norm": 0.6375065445899963,
"learning_rate": 4.958375e-05,
"loss": 0.281,
"num_input_tokens_seen": 1024000,
"step": 1000
},
{
"epoch": 0.0375,
"grad_norm": 0.48116546869277954,
"learning_rate": 4.937541666666667e-05,
"loss": 0.2595,
"num_input_tokens_seen": 1536000,
"step": 1500
},
{
"epoch": 0.05,
"grad_norm": 0.6305193305015564,
"learning_rate": 4.9167083333333336e-05,
"loss": 0.2466,
"num_input_tokens_seen": 2048000,
"step": 2000
},
{
"epoch": 0.0625,
"grad_norm": 3.560100555419922,
"learning_rate": 4.8958750000000006e-05,
"loss": 0.2283,
"num_input_tokens_seen": 2560000,
"step": 2500
},
{
"epoch": 0.075,
"grad_norm": 43.007347106933594,
"learning_rate": 4.875041666666667e-05,
"loss": 0.2106,
"num_input_tokens_seen": 3072000,
"step": 3000
},
{
"epoch": 0.0875,
"grad_norm": 27.75348472595215,
"learning_rate": 4.854208333333334e-05,
"loss": 0.2198,
"num_input_tokens_seen": 3584000,
"step": 3500
},
{
"epoch": 0.1,
"grad_norm": 0.11089582741260529,
"learning_rate": 4.833375e-05,
"loss": 0.2145,
"num_input_tokens_seen": 4096000,
"step": 4000
},
{
"epoch": 0.1125,
"grad_norm": 0.08869020640850067,
"learning_rate": 4.812541666666667e-05,
"loss": 0.2033,
"num_input_tokens_seen": 4608000,
"step": 4500
},
{
"epoch": 0.125,
"grad_norm": 22.97330665588379,
"learning_rate": 4.791708333333333e-05,
"loss": 0.2007,
"num_input_tokens_seen": 5120000,
"step": 5000
},
{
"epoch": 0.1375,
"grad_norm": 0.12722039222717285,
"learning_rate": 4.770875e-05,
"loss": 0.1893,
"num_input_tokens_seen": 5632000,
"step": 5500
},
{
"epoch": 0.15,
"grad_norm": 24.923276901245117,
"learning_rate": 4.750041666666667e-05,
"loss": 0.2073,
"num_input_tokens_seen": 6144000,
"step": 6000
},
{
"epoch": 0.1625,
"grad_norm": 0.17447435855865479,
"learning_rate": 4.729208333333334e-05,
"loss": 0.2024,
"num_input_tokens_seen": 6656000,
"step": 6500
},
{
"epoch": 0.175,
"grad_norm": 5.2057671546936035,
"learning_rate": 4.708375e-05,
"loss": 0.2168,
"num_input_tokens_seen": 7168000,
"step": 7000
},
{
"epoch": 0.1875,
"grad_norm": 0.35154542326927185,
"learning_rate": 4.687541666666667e-05,
"loss": 0.2002,
"num_input_tokens_seen": 7680000,
"step": 7500
},
{
"epoch": 0.2,
"grad_norm": 0.17855411767959595,
"learning_rate": 4.6667083333333336e-05,
"loss": 0.1863,
"num_input_tokens_seen": 8192000,
"step": 8000
},
{
"epoch": 0.2125,
"grad_norm": 0.26302626729011536,
"learning_rate": 4.645875e-05,
"loss": 0.2023,
"num_input_tokens_seen": 8704000,
"step": 8500
},
{
"epoch": 0.225,
"grad_norm": 1.0355998277664185,
"learning_rate": 4.625041666666667e-05,
"loss": 0.1996,
"num_input_tokens_seen": 9216000,
"step": 9000
},
{
"epoch": 0.2375,
"grad_norm": 3.5437514781951904,
"learning_rate": 4.6042083333333334e-05,
"loss": 0.1928,
"num_input_tokens_seen": 9728000,
"step": 9500
},
{
"epoch": 0.25,
"grad_norm": 18.394441604614258,
"learning_rate": 4.5833750000000005e-05,
"loss": 0.1968,
"num_input_tokens_seen": 10240000,
"step": 10000
},
{
"epoch": 0.2625,
"grad_norm": 2.9275922775268555,
"learning_rate": 4.562541666666667e-05,
"loss": 0.2081,
"num_input_tokens_seen": 10752000,
"step": 10500
},
{
"epoch": 0.275,
"grad_norm": 3.922402858734131,
"learning_rate": 4.541708333333334e-05,
"loss": 0.1769,
"num_input_tokens_seen": 11264000,
"step": 11000
},
{
"epoch": 0.2875,
"grad_norm": 0.25219565629959106,
"learning_rate": 4.5208749999999996e-05,
"loss": 0.1722,
"num_input_tokens_seen": 11776000,
"step": 11500
},
{
"epoch": 0.3,
"grad_norm": 0.10917416214942932,
"learning_rate": 4.500041666666667e-05,
"loss": 0.1638,
"num_input_tokens_seen": 12288000,
"step": 12000
},
{
"epoch": 0.3125,
"grad_norm": 0.1928466558456421,
"learning_rate": 4.479208333333334e-05,
"loss": 0.1826,
"num_input_tokens_seen": 12800000,
"step": 12500
},
{
"epoch": 0.325,
"grad_norm": 1.1688095331192017,
"learning_rate": 4.458375e-05,
"loss": 0.186,
"num_input_tokens_seen": 13312000,
"step": 13000
},
{
"epoch": 0.3375,
"grad_norm": 0.07879551500082016,
"learning_rate": 4.437541666666667e-05,
"loss": 0.1638,
"num_input_tokens_seen": 13824000,
"step": 13500
},
{
"epoch": 0.35,
"grad_norm": 1.294481635093689,
"learning_rate": 4.4167083333333336e-05,
"loss": 0.1695,
"num_input_tokens_seen": 14336000,
"step": 14000
},
{
"epoch": 0.3625,
"grad_norm": 21.836345672607422,
"learning_rate": 4.395875000000001e-05,
"loss": 0.1713,
"num_input_tokens_seen": 14848000,
"step": 14500
},
{
"epoch": 0.375,
"grad_norm": 0.1329026073217392,
"learning_rate": 4.375041666666667e-05,
"loss": 0.178,
"num_input_tokens_seen": 15360000,
"step": 15000
},
{
"epoch": 0.3875,
"grad_norm": 2.196962594985962,
"learning_rate": 4.3542083333333334e-05,
"loss": 0.1616,
"num_input_tokens_seen": 15872000,
"step": 15500
},
{
"epoch": 0.4,
"grad_norm": 0.06707575172185898,
"learning_rate": 4.333375e-05,
"loss": 0.1563,
"num_input_tokens_seen": 16384000,
"step": 16000
},
{
"epoch": 0.4125,
"grad_norm": 4.03335428237915,
"learning_rate": 4.312541666666667e-05,
"loss": 0.1644,
"num_input_tokens_seen": 16896000,
"step": 16500
},
{
"epoch": 0.425,
"grad_norm": 0.7723323702812195,
"learning_rate": 4.291708333333333e-05,
"loss": 0.1695,
"num_input_tokens_seen": 17408000,
"step": 17000
},
{
"epoch": 0.4375,
"grad_norm": 0.14005227386951447,
"learning_rate": 4.270875e-05,
"loss": 0.1771,
"num_input_tokens_seen": 17920000,
"step": 17500
},
{
"epoch": 0.45,
"grad_norm": 0.22879253327846527,
"learning_rate": 4.250041666666667e-05,
"loss": 0.1652,
"num_input_tokens_seen": 18432000,
"step": 18000
},
{
"epoch": 0.4625,
"grad_norm": 41.407352447509766,
"learning_rate": 4.229208333333334e-05,
"loss": 0.1697,
"num_input_tokens_seen": 18944000,
"step": 18500
},
{
"epoch": 0.475,
"grad_norm": 0.2552178204059601,
"learning_rate": 4.208375e-05,
"loss": 0.169,
"num_input_tokens_seen": 19456000,
"step": 19000
},
{
"epoch": 0.4875,
"grad_norm": 4.208723545074463,
"learning_rate": 4.1875416666666666e-05,
"loss": 0.1887,
"num_input_tokens_seen": 19968000,
"step": 19500
},
{
"epoch": 0.5,
"grad_norm": 0.6019179821014404,
"learning_rate": 4.1667083333333336e-05,
"loss": 0.149,
"num_input_tokens_seen": 20480000,
"step": 20000
},
{
"epoch": 0.5125,
"grad_norm": 11.652756690979004,
"learning_rate": 4.145875e-05,
"loss": 0.1485,
"num_input_tokens_seen": 20992000,
"step": 20500
},
{
"epoch": 0.525,
"grad_norm": 3.790767192840576,
"learning_rate": 4.125041666666667e-05,
"loss": 0.1613,
"num_input_tokens_seen": 21504000,
"step": 21000
},
{
"epoch": 0.5375,
"grad_norm": 0.0643579512834549,
"learning_rate": 4.1042083333333335e-05,
"loss": 0.156,
"num_input_tokens_seen": 22016000,
"step": 21500
},
{
"epoch": 0.55,
"grad_norm": 6.347803115844727,
"learning_rate": 4.0833750000000005e-05,
"loss": 0.1743,
"num_input_tokens_seen": 22528000,
"step": 22000
},
{
"epoch": 0.5625,
"grad_norm": 0.45395129919052124,
"learning_rate": 4.062541666666667e-05,
"loss": 0.1566,
"num_input_tokens_seen": 23040000,
"step": 22500
},
{
"epoch": 0.575,
"grad_norm": 4.8587470054626465,
"learning_rate": 4.041708333333333e-05,
"loss": 0.156,
"num_input_tokens_seen": 23552000,
"step": 23000
},
{
"epoch": 0.5875,
"grad_norm": 0.8771836757659912,
"learning_rate": 4.0208750000000004e-05,
"loss": 0.1473,
"num_input_tokens_seen": 24064000,
"step": 23500
},
{
"epoch": 0.6,
"grad_norm": 7.632213592529297,
"learning_rate": 4.000041666666667e-05,
"loss": 0.1523,
"num_input_tokens_seen": 24576000,
"step": 24000
},
{
"epoch": 0.6125,
"grad_norm": 4.013466835021973,
"learning_rate": 3.979208333333334e-05,
"loss": 0.1646,
"num_input_tokens_seen": 25088000,
"step": 24500
},
{
"epoch": 0.625,
"grad_norm": 0.08471076935529709,
"learning_rate": 3.958375e-05,
"loss": 0.1653,
"num_input_tokens_seen": 25600000,
"step": 25000
},
{
"epoch": 0.6375,
"grad_norm": 4.263392448425293,
"learning_rate": 3.937541666666667e-05,
"loss": 0.1513,
"num_input_tokens_seen": 26112000,
"step": 25500
},
{
"epoch": 0.65,
"grad_norm": 17.002439498901367,
"learning_rate": 3.9167083333333336e-05,
"loss": 0.144,
"num_input_tokens_seen": 26624000,
"step": 26000
},
{
"epoch": 0.6625,
"grad_norm": 0.056964244693517685,
"learning_rate": 3.895875e-05,
"loss": 0.1517,
"num_input_tokens_seen": 27136000,
"step": 26500
},
{
"epoch": 0.675,
"grad_norm": 0.4403606653213501,
"learning_rate": 3.8750416666666664e-05,
"loss": 0.1501,
"num_input_tokens_seen": 27648000,
"step": 27000
},
{
"epoch": 0.6875,
"grad_norm": 0.11431742459535599,
"learning_rate": 3.8542083333333335e-05,
"loss": 0.1597,
"num_input_tokens_seen": 28160000,
"step": 27500
},
{
"epoch": 0.7,
"grad_norm": 7.810299396514893,
"learning_rate": 3.833375e-05,
"loss": 0.1437,
"num_input_tokens_seen": 28672000,
"step": 28000
},
{
"epoch": 0.7125,
"grad_norm": 5.501016616821289,
"learning_rate": 3.812541666666667e-05,
"loss": 0.1415,
"num_input_tokens_seen": 29184000,
"step": 28500
},
{
"epoch": 0.725,
"grad_norm": 0.20471607148647308,
"learning_rate": 3.791708333333333e-05,
"loss": 0.1545,
"num_input_tokens_seen": 29696000,
"step": 29000
},
{
"epoch": 0.7375,
"grad_norm": 4.6759748458862305,
"learning_rate": 3.7708750000000004e-05,
"loss": 0.1431,
"num_input_tokens_seen": 30208000,
"step": 29500
},
{
"epoch": 0.75,
"grad_norm": 0.12430200725793839,
"learning_rate": 3.7500416666666674e-05,
"loss": 0.1337,
"num_input_tokens_seen": 30720000,
"step": 30000
},
{
"epoch": 0.7625,
"grad_norm": 0.19309231638908386,
"learning_rate": 3.729208333333333e-05,
"loss": 0.1338,
"num_input_tokens_seen": 31232000,
"step": 30500
},
{
"epoch": 0.775,
"grad_norm": 0.18272100389003754,
"learning_rate": 3.708375e-05,
"loss": 0.1504,
"num_input_tokens_seen": 31744000,
"step": 31000
},
{
"epoch": 0.7875,
"grad_norm": 0.08077079057693481,
"learning_rate": 3.6875416666666666e-05,
"loss": 0.13,
"num_input_tokens_seen": 32256000,
"step": 31500
},
{
"epoch": 0.8,
"grad_norm": 0.3213728368282318,
"learning_rate": 3.666708333333334e-05,
"loss": 0.1406,
"num_input_tokens_seen": 32768000,
"step": 32000
},
{
"epoch": 0.8125,
"grad_norm": 2.6233737468719482,
"learning_rate": 3.645875e-05,
"loss": 0.1435,
"num_input_tokens_seen": 33280000,
"step": 32500
},
{
"epoch": 0.825,
"grad_norm": 0.1083766371011734,
"learning_rate": 3.625041666666667e-05,
"loss": 0.1361,
"num_input_tokens_seen": 33792000,
"step": 33000
},
{
"epoch": 0.8375,
"grad_norm": 0.08282948285341263,
"learning_rate": 3.6042083333333335e-05,
"loss": 0.1483,
"num_input_tokens_seen": 34304000,
"step": 33500
},
{
"epoch": 0.85,
"grad_norm": 0.10612857341766357,
"learning_rate": 3.583375e-05,
"loss": 0.1419,
"num_input_tokens_seen": 34816000,
"step": 34000
},
{
"epoch": 0.8625,
"grad_norm": 1.134366750717163,
"learning_rate": 3.562541666666667e-05,
"loss": 0.1381,
"num_input_tokens_seen": 35328000,
"step": 34500
},
{
"epoch": 0.875,
"grad_norm": 32.71923065185547,
"learning_rate": 3.5417083333333333e-05,
"loss": 0.1395,
"num_input_tokens_seen": 35840000,
"step": 35000
},
{
"epoch": 0.8875,
"grad_norm": 0.14211738109588623,
"learning_rate": 3.5208750000000004e-05,
"loss": 0.1543,
"num_input_tokens_seen": 36352000,
"step": 35500
},
{
"epoch": 0.9,
"grad_norm": 0.04908756539225578,
"learning_rate": 3.500041666666667e-05,
"loss": 0.1229,
"num_input_tokens_seen": 36864000,
"step": 36000
},
{
"epoch": 0.9125,
"grad_norm": 0.47926560044288635,
"learning_rate": 3.479208333333334e-05,
"loss": 0.1251,
"num_input_tokens_seen": 37376000,
"step": 36500
},
{
"epoch": 0.925,
"grad_norm": 1.389073371887207,
"learning_rate": 3.458375e-05,
"loss": 0.1718,
"num_input_tokens_seen": 37888000,
"step": 37000
},
{
"epoch": 0.9375,
"grad_norm": 0.14962467551231384,
"learning_rate": 3.437541666666667e-05,
"loss": 0.1579,
"num_input_tokens_seen": 38400000,
"step": 37500
},
{
"epoch": 0.95,
"grad_norm": 3.8970487117767334,
"learning_rate": 3.416708333333333e-05,
"loss": 0.1294,
"num_input_tokens_seen": 38912000,
"step": 38000
},
{
"epoch": 0.9625,
"grad_norm": 0.1765402853488922,
"learning_rate": 3.395875e-05,
"loss": 0.1612,
"num_input_tokens_seen": 39424000,
"step": 38500
},
{
"epoch": 0.975,
"grad_norm": 0.1407538652420044,
"learning_rate": 3.3750416666666665e-05,
"loss": 0.1336,
"num_input_tokens_seen": 39936000,
"step": 39000
},
{
"epoch": 0.9875,
"grad_norm": 8.721837997436523,
"learning_rate": 3.3542083333333335e-05,
"loss": 0.1268,
"num_input_tokens_seen": 40448000,
"step": 39500
},
{
"epoch": 1.0,
"grad_norm": 0.15711814165115356,
"learning_rate": 3.333375e-05,
"loss": 0.1387,
"num_input_tokens_seen": 40960000,
"step": 40000
},
{
"epoch": 1.0,
"eval_accuracy": 0.951525,
"eval_combined_score": 1.3493705490187269,
"eval_loss": 0.198301300406456,
"eval_runtime": 39.36,
"eval_samples_per_second": 2032.519,
"eval_steps_per_second": 254.065,
"num_input_tokens_seen": 40960000,
"step": 40000
},
{
"epoch": 1.0125,
"grad_norm": 0.02203565090894699,
"learning_rate": 3.312541666666667e-05,
"loss": 0.1092,
"num_input_tokens_seen": 41472000,
"step": 40500
},
{
"epoch": 1.025,
"grad_norm": 328.5529479980469,
"learning_rate": 3.291708333333334e-05,
"loss": 0.0844,
"num_input_tokens_seen": 41984000,
"step": 41000
},
{
"epoch": 1.0375,
"grad_norm": 0.11391662806272507,
"learning_rate": 3.270875e-05,
"loss": 0.1115,
"num_input_tokens_seen": 42496000,
"step": 41500
},
{
"epoch": 1.05,
"grad_norm": 0.07919144630432129,
"learning_rate": 3.250041666666667e-05,
"loss": 0.0878,
"num_input_tokens_seen": 43008000,
"step": 42000
},
{
"epoch": 1.0625,
"grad_norm": 0.029733452945947647,
"learning_rate": 3.229208333333333e-05,
"loss": 0.0914,
"num_input_tokens_seen": 43520000,
"step": 42500
},
{
"epoch": 1.075,
"grad_norm": 0.277174711227417,
"learning_rate": 3.208375e-05,
"loss": 0.1028,
"num_input_tokens_seen": 44032000,
"step": 43000
},
{
"epoch": 1.0875,
"grad_norm": 0.11592718958854675,
"learning_rate": 3.1875416666666666e-05,
"loss": 0.1032,
"num_input_tokens_seen": 44544000,
"step": 43500
},
{
"epoch": 1.1,
"grad_norm": 0.2565874755382538,
"learning_rate": 3.166708333333334e-05,
"loss": 0.0912,
"num_input_tokens_seen": 45056000,
"step": 44000
},
{
"epoch": 1.1125,
"grad_norm": 0.03557795658707619,
"learning_rate": 3.145875e-05,
"loss": 0.0882,
"num_input_tokens_seen": 45568000,
"step": 44500
},
{
"epoch": 1.125,
"grad_norm": 4.72824764251709,
"learning_rate": 3.125041666666667e-05,
"loss": 0.0918,
"num_input_tokens_seen": 46080000,
"step": 45000
},
{
"epoch": 1.1375,
"grad_norm": 0.10075237601995468,
"learning_rate": 3.1042083333333335e-05,
"loss": 0.1117,
"num_input_tokens_seen": 46592000,
"step": 45500
},
{
"epoch": 1.15,
"grad_norm": 0.07608671486377716,
"learning_rate": 3.083375e-05,
"loss": 0.1383,
"num_input_tokens_seen": 47104000,
"step": 46000
},
{
"epoch": 1.1625,
"grad_norm": 14.187911033630371,
"learning_rate": 3.062541666666667e-05,
"loss": 0.1062,
"num_input_tokens_seen": 47616000,
"step": 46500
},
{
"epoch": 1.175,
"grad_norm": 0.0889461487531662,
"learning_rate": 3.0417083333333334e-05,
"loss": 0.1255,
"num_input_tokens_seen": 48128000,
"step": 47000
},
{
"epoch": 1.1875,
"grad_norm": 0.10722101479768753,
"learning_rate": 3.020875e-05,
"loss": 0.0987,
"num_input_tokens_seen": 48640000,
"step": 47500
},
{
"epoch": 1.2,
"grad_norm": 42.01722717285156,
"learning_rate": 3.000041666666667e-05,
"loss": 0.1047,
"num_input_tokens_seen": 49152000,
"step": 48000
},
{
"epoch": 1.2125,
"grad_norm": 0.08817047625780106,
"learning_rate": 2.9792083333333336e-05,
"loss": 0.0931,
"num_input_tokens_seen": 49664000,
"step": 48500
},
{
"epoch": 1.225,
"grad_norm": 0.058988332748413086,
"learning_rate": 2.958375e-05,
"loss": 0.1048,
"num_input_tokens_seen": 50176000,
"step": 49000
},
{
"epoch": 1.2375,
"grad_norm": 0.025500474497675896,
"learning_rate": 2.9375416666666667e-05,
"loss": 0.0845,
"num_input_tokens_seen": 50688000,
"step": 49500
},
{
"epoch": 1.25,
"grad_norm": 0.07621040940284729,
"learning_rate": 2.9167083333333334e-05,
"loss": 0.0893,
"num_input_tokens_seen": 51200000,
"step": 50000
},
{
"epoch": 1.2625,
"grad_norm": 0.14059029519557953,
"learning_rate": 2.895875e-05,
"loss": 0.0911,
"num_input_tokens_seen": 51712000,
"step": 50500
},
{
"epoch": 1.275,
"grad_norm": 0.04900716617703438,
"learning_rate": 2.875041666666667e-05,
"loss": 0.0984,
"num_input_tokens_seen": 52224000,
"step": 51000
},
{
"epoch": 1.2875,
"grad_norm": 0.0568259134888649,
"learning_rate": 2.8542083333333336e-05,
"loss": 0.0995,
"num_input_tokens_seen": 52736000,
"step": 51500
},
{
"epoch": 1.3,
"grad_norm": 0.052453652024269104,
"learning_rate": 2.8333750000000003e-05,
"loss": 0.0875,
"num_input_tokens_seen": 53248000,
"step": 52000
},
{
"epoch": 1.3125,
"grad_norm": 0.05766982212662697,
"learning_rate": 2.812541666666667e-05,
"loss": 0.0772,
"num_input_tokens_seen": 53760000,
"step": 52500
},
{
"epoch": 1.325,
"grad_norm": 0.06809753179550171,
"learning_rate": 2.7917083333333334e-05,
"loss": 0.0849,
"num_input_tokens_seen": 54272000,
"step": 53000
},
{
"epoch": 1.3375,
"grad_norm": 0.5180730223655701,
"learning_rate": 2.770875e-05,
"loss": 0.089,
"num_input_tokens_seen": 54784000,
"step": 53500
},
{
"epoch": 1.35,
"grad_norm": 12.439111709594727,
"learning_rate": 2.750041666666667e-05,
"loss": 0.0927,
"num_input_tokens_seen": 55296000,
"step": 54000
},
{
"epoch": 1.3625,
"grad_norm": 0.12473966181278229,
"learning_rate": 2.7292083333333336e-05,
"loss": 0.1109,
"num_input_tokens_seen": 55808000,
"step": 54500
},
{
"epoch": 1.375,
"grad_norm": 1.5988309383392334,
"learning_rate": 2.7083750000000003e-05,
"loss": 0.0962,
"num_input_tokens_seen": 56320000,
"step": 55000
},
{
"epoch": 1.3875,
"grad_norm": 0.09568974375724792,
"learning_rate": 2.687541666666667e-05,
"loss": 0.1041,
"num_input_tokens_seen": 56832000,
"step": 55500
},
{
"epoch": 1.4,
"grad_norm": 105.98346710205078,
"learning_rate": 2.6667083333333338e-05,
"loss": 0.0895,
"num_input_tokens_seen": 57344000,
"step": 56000
},
{
"epoch": 1.4125,
"grad_norm": 0.04284173250198364,
"learning_rate": 2.6458749999999998e-05,
"loss": 0.09,
"num_input_tokens_seen": 57856000,
"step": 56500
},
{
"epoch": 1.425,
"grad_norm": 0.06827585399150848,
"learning_rate": 2.6250416666666665e-05,
"loss": 0.1064,
"num_input_tokens_seen": 58368000,
"step": 57000
},
{
"epoch": 1.4375,
"grad_norm": 0.10431079566478729,
"learning_rate": 2.6042083333333333e-05,
"loss": 0.1033,
"num_input_tokens_seen": 58880000,
"step": 57500
},
{
"epoch": 1.45,
"grad_norm": 0.08796288073062897,
"learning_rate": 2.583375e-05,
"loss": 0.0781,
"num_input_tokens_seen": 59392000,
"step": 58000
},
{
"epoch": 1.4625,
"grad_norm": 1.883025884628296,
"learning_rate": 2.5625416666666667e-05,
"loss": 0.1001,
"num_input_tokens_seen": 59904000,
"step": 58500
},
{
"epoch": 1.475,
"grad_norm": 0.2901429533958435,
"learning_rate": 2.5417083333333334e-05,
"loss": 0.0965,
"num_input_tokens_seen": 60416000,
"step": 59000
},
{
"epoch": 1.4875,
"grad_norm": 0.05163797363638878,
"learning_rate": 2.520875e-05,
"loss": 0.1064,
"num_input_tokens_seen": 60928000,
"step": 59500
},
{
"epoch": 1.5,
"grad_norm": 0.03811231628060341,
"learning_rate": 2.5000416666666672e-05,
"loss": 0.0865,
"num_input_tokens_seen": 61440000,
"step": 60000
},
{
"epoch": 1.5125,
"grad_norm": 8.308381080627441,
"learning_rate": 2.4792083333333336e-05,
"loss": 0.093,
"num_input_tokens_seen": 61952000,
"step": 60500
},
{
"epoch": 1.525,
"grad_norm": 0.03729177638888359,
"learning_rate": 2.458375e-05,
"loss": 0.077,
"num_input_tokens_seen": 62464000,
"step": 61000
},
{
"epoch": 1.5375,
"grad_norm": 5.803088188171387,
"learning_rate": 2.4375416666666667e-05,
"loss": 0.101,
"num_input_tokens_seen": 62976000,
"step": 61500
},
{
"epoch": 1.55,
"grad_norm": 2.017042636871338,
"learning_rate": 2.4167083333333334e-05,
"loss": 0.0896,
"num_input_tokens_seen": 63488000,
"step": 62000
},
{
"epoch": 1.5625,
"grad_norm": 32.310630798339844,
"learning_rate": 2.395875e-05,
"loss": 0.1145,
"num_input_tokens_seen": 64000000,
"step": 62500
},
{
"epoch": 1.575,
"grad_norm": 0.37863266468048096,
"learning_rate": 2.3750416666666665e-05,
"loss": 0.1017,
"num_input_tokens_seen": 64512000,
"step": 63000
},
{
"epoch": 1.5875,
"grad_norm": 0.05939273163676262,
"learning_rate": 2.3542083333333333e-05,
"loss": 0.0962,
"num_input_tokens_seen": 65024000,
"step": 63500
},
{
"epoch": 1.6,
"grad_norm": 0.045398563146591187,
"learning_rate": 2.333375e-05,
"loss": 0.0895,
"num_input_tokens_seen": 65536000,
"step": 64000
},
{
"epoch": 1.6125,
"grad_norm": 1.5717942714691162,
"learning_rate": 2.3125416666666667e-05,
"loss": 0.0788,
"num_input_tokens_seen": 66048000,
"step": 64500
},
{
"epoch": 1.625,
"grad_norm": 6.278552532196045,
"learning_rate": 2.2917083333333334e-05,
"loss": 0.0876,
"num_input_tokens_seen": 66560000,
"step": 65000
},
{
"epoch": 1.6375,
"grad_norm": 0.048090457916259766,
"learning_rate": 2.2708750000000002e-05,
"loss": 0.0709,
"num_input_tokens_seen": 67072000,
"step": 65500
},
{
"epoch": 1.65,
"grad_norm": 0.5121225714683533,
"learning_rate": 2.250041666666667e-05,
"loss": 0.0906,
"num_input_tokens_seen": 67584000,
"step": 66000
},
{
"epoch": 1.6625,
"grad_norm": 0.04399065673351288,
"learning_rate": 2.2292083333333336e-05,
"loss": 0.097,
"num_input_tokens_seen": 68096000,
"step": 66500
},
{
"epoch": 1.675,
"grad_norm": 0.051211412996053696,
"learning_rate": 2.208375e-05,
"loss": 0.0931,
"num_input_tokens_seen": 68608000,
"step": 67000
},
{
"epoch": 1.6875,
"grad_norm": 58.19650650024414,
"learning_rate": 2.1875416666666667e-05,
"loss": 0.0794,
"num_input_tokens_seen": 69120000,
"step": 67500
},
{
"epoch": 1.7,
"grad_norm": 0.07303386926651001,
"learning_rate": 2.1667083333333335e-05,
"loss": 0.1015,
"num_input_tokens_seen": 69632000,
"step": 68000
},
{
"epoch": 1.7125,
"grad_norm": 0.02853875607252121,
"learning_rate": 2.145875e-05,
"loss": 0.0684,
"num_input_tokens_seen": 70144000,
"step": 68500
},
{
"epoch": 1.725,
"grad_norm": 0.06810135394334793,
"learning_rate": 2.1250416666666666e-05,
"loss": 0.101,
"num_input_tokens_seen": 70656000,
"step": 69000
},
{
"epoch": 1.7375,
"grad_norm": 0.019835174083709717,
"learning_rate": 2.1042083333333333e-05,
"loss": 0.0719,
"num_input_tokens_seen": 71168000,
"step": 69500
},
{
"epoch": 1.75,
"grad_norm": 0.07850214093923569,
"learning_rate": 2.083375e-05,
"loss": 0.0808,
"num_input_tokens_seen": 71680000,
"step": 70000
},
{
"epoch": 1.7625,
"grad_norm": 0.050091035664081573,
"learning_rate": 2.0625416666666667e-05,
"loss": 0.0835,
"num_input_tokens_seen": 72192000,
"step": 70500
},
{
"epoch": 1.775,
"grad_norm": 0.01498348731547594,
"learning_rate": 2.0417083333333335e-05,
"loss": 0.0918,
"num_input_tokens_seen": 72704000,
"step": 71000
},
{
"epoch": 1.7875,
"grad_norm": 0.034435465931892395,
"learning_rate": 2.0208750000000002e-05,
"loss": 0.0777,
"num_input_tokens_seen": 73216000,
"step": 71500
},
{
"epoch": 1.8,
"grad_norm": 0.045340586453676224,
"learning_rate": 2.000041666666667e-05,
"loss": 0.0894,
"num_input_tokens_seen": 73728000,
"step": 72000
},
{
"epoch": 1.8125,
"grad_norm": 0.28705134987831116,
"learning_rate": 1.9792083333333333e-05,
"loss": 0.0741,
"num_input_tokens_seen": 74240000,
"step": 72500
},
{
"epoch": 1.825,
"grad_norm": 155.12445068359375,
"learning_rate": 1.958375e-05,
"loss": 0.0813,
"num_input_tokens_seen": 74752000,
"step": 73000
},
{
"epoch": 1.8375,
"grad_norm": 0.08065774291753769,
"learning_rate": 1.9375416666666668e-05,
"loss": 0.0819,
"num_input_tokens_seen": 75264000,
"step": 73500
},
{
"epoch": 1.85,
"grad_norm": 0.40842482447624207,
"learning_rate": 1.9167083333333335e-05,
"loss": 0.0669,
"num_input_tokens_seen": 75776000,
"step": 74000
},
{
"epoch": 1.8625,
"grad_norm": 0.09376771003007889,
"learning_rate": 1.895875e-05,
"loss": 0.0749,
"num_input_tokens_seen": 76288000,
"step": 74500
},
{
"epoch": 1.875,
"grad_norm": 0.035654786974191666,
"learning_rate": 1.8750416666666666e-05,
"loss": 0.0757,
"num_input_tokens_seen": 76800000,
"step": 75000
},
{
"epoch": 1.8875,
"grad_norm": 0.04763418436050415,
"learning_rate": 1.8542083333333337e-05,
"loss": 0.0652,
"num_input_tokens_seen": 77312000,
"step": 75500
},
{
"epoch": 1.9,
"grad_norm": 0.04799911379814148,
"learning_rate": 1.833375e-05,
"loss": 0.0688,
"num_input_tokens_seen": 77824000,
"step": 76000
},
{
"epoch": 1.9125,
"grad_norm": 0.22011104226112366,
"learning_rate": 1.8125416666666668e-05,
"loss": 0.0797,
"num_input_tokens_seen": 78336000,
"step": 76500
},
{
"epoch": 1.925,
"grad_norm": 0.05009845644235611,
"learning_rate": 1.7917083333333335e-05,
"loss": 0.0542,
"num_input_tokens_seen": 78848000,
"step": 77000
},
{
"epoch": 1.9375,
"grad_norm": 1.8440918922424316,
"learning_rate": 1.7708750000000002e-05,
"loss": 0.0773,
"num_input_tokens_seen": 79360000,
"step": 77500
},
{
"epoch": 1.95,
"grad_norm": 0.025103362277150154,
"learning_rate": 1.7500416666666666e-05,
"loss": 0.0692,
"num_input_tokens_seen": 79872000,
"step": 78000
},
{
"epoch": 1.9625,
"grad_norm": 14.473067283630371,
"learning_rate": 1.7292083333333333e-05,
"loss": 0.0658,
"num_input_tokens_seen": 80384000,
"step": 78500
},
{
"epoch": 1.975,
"grad_norm": 0.051201172173023224,
"learning_rate": 1.708375e-05,
"loss": 0.0648,
"num_input_tokens_seen": 80896000,
"step": 79000
},
{
"epoch": 1.9875,
"grad_norm": 0.05888315662741661,
"learning_rate": 1.6875416666666668e-05,
"loss": 0.0792,
"num_input_tokens_seen": 81408000,
"step": 79500
},
{
"epoch": 2.0,
"grad_norm": 0.08364333212375641,
"learning_rate": 1.666708333333333e-05,
"loss": 0.0682,
"num_input_tokens_seen": 81920000,
"step": 80000
},
{
"epoch": 2.0,
"eval_accuracy": 0.9528,
"eval_combined_score": 1.327030219983165,
"eval_loss": 0.22639968991279602,
"eval_runtime": 39.5018,
"eval_samples_per_second": 2025.225,
"eval_steps_per_second": 253.153,
"num_input_tokens_seen": 81920000,
"step": 80000
},
{
"epoch": 2.0125,
"grad_norm": 1.8050274848937988,
"learning_rate": 1.645875e-05,
"loss": 0.0508,
"num_input_tokens_seen": 82432000,
"step": 80500
},
{
"epoch": 2.025,
"grad_norm": 0.02845793031156063,
"learning_rate": 1.625041666666667e-05,
"loss": 0.0525,
"num_input_tokens_seen": 82944000,
"step": 81000
},
{
"epoch": 2.0375,
"grad_norm": 0.025743141770362854,
"learning_rate": 1.6042083333333337e-05,
"loss": 0.0483,
"num_input_tokens_seen": 83456000,
"step": 81500
},
{
"epoch": 2.05,
"grad_norm": 8.399374008178711,
"learning_rate": 1.583375e-05,
"loss": 0.0652,
"num_input_tokens_seen": 83968000,
"step": 82000
},
{
"epoch": 2.0625,
"grad_norm": 0.017835861071944237,
"learning_rate": 1.5625416666666668e-05,
"loss": 0.0482,
"num_input_tokens_seen": 84480000,
"step": 82500
},
{
"epoch": 2.075,
"grad_norm": 0.06692575663328171,
"learning_rate": 1.5417083333333335e-05,
"loss": 0.0521,
"num_input_tokens_seen": 84992000,
"step": 83000
},
{
"epoch": 2.0875,
"grad_norm": 0.035685233771800995,
"learning_rate": 1.5208749999999999e-05,
"loss": 0.0582,
"num_input_tokens_seen": 85504000,
"step": 83500
},
{
"epoch": 2.1,
"grad_norm": 0.13481773436069489,
"learning_rate": 1.5000416666666666e-05,
"loss": 0.0468,
"num_input_tokens_seen": 86016000,
"step": 84000
},
{
"epoch": 2.1125,
"grad_norm": 17.486289978027344,
"learning_rate": 1.4792083333333334e-05,
"loss": 0.0559,
"num_input_tokens_seen": 86528000,
"step": 84500
},
{
"epoch": 2.125,
"grad_norm": 0.03201691806316376,
"learning_rate": 1.458375e-05,
"loss": 0.0425,
"num_input_tokens_seen": 87040000,
"step": 85000
},
{
"epoch": 2.1375,
"grad_norm": 0.021227147430181503,
"learning_rate": 1.4375416666666666e-05,
"loss": 0.05,
"num_input_tokens_seen": 87552000,
"step": 85500
},
{
"epoch": 2.15,
"grad_norm": 0.040053412318229675,
"learning_rate": 1.4167083333333334e-05,
"loss": 0.048,
"num_input_tokens_seen": 88064000,
"step": 86000
},
{
"epoch": 2.1625,
"grad_norm": 0.01835712045431137,
"learning_rate": 1.395875e-05,
"loss": 0.038,
"num_input_tokens_seen": 88576000,
"step": 86500
},
{
"epoch": 2.175,
"grad_norm": 0.03529110550880432,
"learning_rate": 1.3750416666666668e-05,
"loss": 0.051,
"num_input_tokens_seen": 89088000,
"step": 87000
},
{
"epoch": 2.1875,
"grad_norm": 97.47898864746094,
"learning_rate": 1.3542083333333334e-05,
"loss": 0.0373,
"num_input_tokens_seen": 89600000,
"step": 87500
},
{
"epoch": 2.2,
"grad_norm": 0.014346601441502571,
"learning_rate": 1.3333750000000001e-05,
"loss": 0.0557,
"num_input_tokens_seen": 90112000,
"step": 88000
},
{
"epoch": 2.2125,
"grad_norm": 0.01878521591424942,
"learning_rate": 1.3125416666666668e-05,
"loss": 0.0526,
"num_input_tokens_seen": 90624000,
"step": 88500
},
{
"epoch": 2.225,
"grad_norm": 0.04132438451051712,
"learning_rate": 1.2917083333333335e-05,
"loss": 0.0464,
"num_input_tokens_seen": 91136000,
"step": 89000
},
{
"epoch": 2.2375,
"grad_norm": 0.02642699144780636,
"learning_rate": 1.270875e-05,
"loss": 0.0464,
"num_input_tokens_seen": 91648000,
"step": 89500
},
{
"epoch": 2.25,
"grad_norm": 877.631591796875,
"learning_rate": 1.2500416666666666e-05,
"loss": 0.0374,
"num_input_tokens_seen": 92160000,
"step": 90000
},
{
"epoch": 2.2625,
"grad_norm": 0.4528743326663971,
"learning_rate": 1.2292083333333334e-05,
"loss": 0.059,
"num_input_tokens_seen": 92672000,
"step": 90500
},
{
"epoch": 2.275,
"grad_norm": 0.1183973178267479,
"learning_rate": 1.2083750000000001e-05,
"loss": 0.0505,
"num_input_tokens_seen": 93184000,
"step": 91000
},
{
"epoch": 2.2875,
"grad_norm": 0.04196188971400261,
"learning_rate": 1.1875416666666667e-05,
"loss": 0.0517,
"num_input_tokens_seen": 93696000,
"step": 91500
},
{
"epoch": 2.3,
"grad_norm": 0.03194092586636543,
"learning_rate": 1.1667083333333334e-05,
"loss": 0.0511,
"num_input_tokens_seen": 94208000,
"step": 92000
},
{
"epoch": 2.3125,
"grad_norm": 0.0184203889220953,
"learning_rate": 1.145875e-05,
"loss": 0.0501,
"num_input_tokens_seen": 94720000,
"step": 92500
},
{
"epoch": 2.325,
"grad_norm": 0.053758785128593445,
"learning_rate": 1.1250416666666667e-05,
"loss": 0.0412,
"num_input_tokens_seen": 95232000,
"step": 93000
},
{
"epoch": 2.3375,
"grad_norm": 0.036872394382953644,
"learning_rate": 1.1042083333333334e-05,
"loss": 0.0418,
"num_input_tokens_seen": 95744000,
"step": 93500
},
{
"epoch": 2.35,
"grad_norm": 0.014922083355486393,
"learning_rate": 1.0833750000000001e-05,
"loss": 0.0507,
"num_input_tokens_seen": 96256000,
"step": 94000
},
{
"epoch": 2.3625,
"grad_norm": 0.020171664655208588,
"learning_rate": 1.0625416666666667e-05,
"loss": 0.0444,
"num_input_tokens_seen": 96768000,
"step": 94500
},
{
"epoch": 2.375,
"grad_norm": 1.7357385158538818,
"learning_rate": 1.0417083333333334e-05,
"loss": 0.0545,
"num_input_tokens_seen": 97280000,
"step": 95000
},
{
"epoch": 2.3875,
"grad_norm": 0.023304857313632965,
"learning_rate": 1.020875e-05,
"loss": 0.0515,
"num_input_tokens_seen": 97792000,
"step": 95500
},
{
"epoch": 2.4,
"grad_norm": 0.01939631998538971,
"learning_rate": 1.0000416666666668e-05,
"loss": 0.0495,
"num_input_tokens_seen": 98304000,
"step": 96000
},
{
"epoch": 2.4125,
"grad_norm": 0.019845569506287575,
"learning_rate": 9.792083333333334e-06,
"loss": 0.0411,
"num_input_tokens_seen": 98816000,
"step": 96500
},
{
"epoch": 2.425,
"grad_norm": 0.014959324151277542,
"learning_rate": 9.583750000000001e-06,
"loss": 0.0342,
"num_input_tokens_seen": 99328000,
"step": 97000
},
{
"epoch": 2.4375,
"grad_norm": 0.01649474911391735,
"learning_rate": 9.375416666666667e-06,
"loss": 0.0472,
"num_input_tokens_seen": 99840000,
"step": 97500
},
{
"epoch": 2.45,
"grad_norm": 0.031929146498441696,
"learning_rate": 9.167083333333332e-06,
"loss": 0.0384,
"num_input_tokens_seen": 100352000,
"step": 98000
},
{
"epoch": 2.4625,
"grad_norm": 0.10980285704135895,
"learning_rate": 8.958750000000001e-06,
"loss": 0.0513,
"num_input_tokens_seen": 100864000,
"step": 98500
},
{
"epoch": 2.475,
"grad_norm": 0.020933426916599274,
"learning_rate": 8.750416666666667e-06,
"loss": 0.0478,
"num_input_tokens_seen": 101376000,
"step": 99000
},
{
"epoch": 2.4875,
"grad_norm": 0.017404716461896896,
"learning_rate": 8.542083333333334e-06,
"loss": 0.039,
"num_input_tokens_seen": 101888000,
"step": 99500
},
{
"epoch": 2.5,
"grad_norm": 0.11211936920881271,
"learning_rate": 8.33375e-06,
"loss": 0.0473,
"num_input_tokens_seen": 102400000,
"step": 100000
},
{
"epoch": 2.5125,
"grad_norm": 0.023433908820152283,
"learning_rate": 8.125416666666667e-06,
"loss": 0.0417,
"num_input_tokens_seen": 102912000,
"step": 100500
},
{
"epoch": 2.525,
"grad_norm": 0.011504637077450752,
"learning_rate": 7.917083333333334e-06,
"loss": 0.0396,
"num_input_tokens_seen": 103424000,
"step": 101000
},
{
"epoch": 2.5375,
"grad_norm": 0.012843768112361431,
"learning_rate": 7.708750000000001e-06,
"loss": 0.0475,
"num_input_tokens_seen": 103936000,
"step": 101500
},
{
"epoch": 2.55,
"grad_norm": 0.020524220541119576,
"learning_rate": 7.500416666666667e-06,
"loss": 0.0472,
"num_input_tokens_seen": 104448000,
"step": 102000
},
{
"epoch": 2.5625,
"grad_norm": 0.01861303672194481,
"learning_rate": 7.292083333333334e-06,
"loss": 0.0294,
"num_input_tokens_seen": 104960000,
"step": 102500
},
{
"epoch": 2.575,
"grad_norm": 0.021343663334846497,
"learning_rate": 7.08375e-06,
"loss": 0.0376,
"num_input_tokens_seen": 105472000,
"step": 103000
},
{
"epoch": 2.5875,
"grad_norm": 0.8902124166488647,
"learning_rate": 6.875416666666668e-06,
"loss": 0.0456,
"num_input_tokens_seen": 105984000,
"step": 103500
},
{
"epoch": 2.6,
"grad_norm": 0.06912536174058914,
"learning_rate": 6.667083333333333e-06,
"loss": 0.0365,
"num_input_tokens_seen": 106496000,
"step": 104000
},
{
"epoch": 2.6125,
"grad_norm": 0.17968295514583588,
"learning_rate": 6.458750000000001e-06,
"loss": 0.0372,
"num_input_tokens_seen": 107008000,
"step": 104500
},
{
"epoch": 2.625,
"grad_norm": 0.01591988280415535,
"learning_rate": 6.250416666666667e-06,
"loss": 0.0334,
"num_input_tokens_seen": 107520000,
"step": 105000
},
{
"epoch": 2.6375,
"grad_norm": 0.015928415581583977,
"learning_rate": 6.0420833333333334e-06,
"loss": 0.0425,
"num_input_tokens_seen": 108032000,
"step": 105500
},
{
"epoch": 2.65,
"grad_norm": 0.04025963693857193,
"learning_rate": 5.833750000000001e-06,
"loss": 0.0371,
"num_input_tokens_seen": 108544000,
"step": 106000
},
{
"epoch": 2.6625,
"grad_norm": 0.07384547591209412,
"learning_rate": 5.625416666666667e-06,
"loss": 0.043,
"num_input_tokens_seen": 109056000,
"step": 106500
},
{
"epoch": 2.675,
"grad_norm": 0.0424518883228302,
"learning_rate": 5.4170833333333335e-06,
"loss": 0.0349,
"num_input_tokens_seen": 109568000,
"step": 107000
},
{
"epoch": 2.6875,
"grad_norm": 0.01949002780020237,
"learning_rate": 5.208750000000001e-06,
"loss": 0.04,
"num_input_tokens_seen": 110080000,
"step": 107500
},
{
"epoch": 2.7,
"grad_norm": 0.011860487051308155,
"learning_rate": 5.000416666666667e-06,
"loss": 0.0274,
"num_input_tokens_seen": 110592000,
"step": 108000
},
{
"epoch": 2.7125,
"grad_norm": 0.010354108177125454,
"learning_rate": 4.7920833333333335e-06,
"loss": 0.0305,
"num_input_tokens_seen": 111104000,
"step": 108500
},
{
"epoch": 2.725,
"grad_norm": 13.753798484802246,
"learning_rate": 4.583750000000001e-06,
"loss": 0.0446,
"num_input_tokens_seen": 111616000,
"step": 109000
},
{
"epoch": 2.7375,
"grad_norm": 0.017097918316721916,
"learning_rate": 4.375416666666666e-06,
"loss": 0.0398,
"num_input_tokens_seen": 112128000,
"step": 109500
},
{
"epoch": 2.75,
"grad_norm": 0.01976764015853405,
"learning_rate": 4.167083333333334e-06,
"loss": 0.0318,
"num_input_tokens_seen": 112640000,
"step": 110000
},
{
"epoch": 2.7625,
"grad_norm": 6.2214531898498535,
"learning_rate": 3.95875e-06,
"loss": 0.0424,
"num_input_tokens_seen": 113152000,
"step": 110500
},
{
"epoch": 2.775,
"grad_norm": 0.007995002903044224,
"learning_rate": 3.750416666666667e-06,
"loss": 0.0442,
"num_input_tokens_seen": 113664000,
"step": 111000
},
{
"epoch": 2.7875,
"grad_norm": 0.05575885996222496,
"learning_rate": 3.5420833333333332e-06,
"loss": 0.037,
"num_input_tokens_seen": 114176000,
"step": 111500
},
{
"epoch": 2.8,
"grad_norm": 0.1336035579442978,
"learning_rate": 3.33375e-06,
"loss": 0.0385,
"num_input_tokens_seen": 114688000,
"step": 112000
},
{
"epoch": 2.8125,
"grad_norm": 0.019037237390875816,
"learning_rate": 3.125416666666667e-06,
"loss": 0.0326,
"num_input_tokens_seen": 115200000,
"step": 112500
},
{
"epoch": 2.825,
"grad_norm": 0.00875825248658657,
"learning_rate": 2.9170833333333333e-06,
"loss": 0.0277,
"num_input_tokens_seen": 115712000,
"step": 113000
},
{
"epoch": 2.8375,
"grad_norm": 0.052571795880794525,
"learning_rate": 2.70875e-06,
"loss": 0.037,
"num_input_tokens_seen": 116224000,
"step": 113500
},
{
"epoch": 2.85,
"grad_norm": 0.019845254719257355,
"learning_rate": 2.500416666666667e-06,
"loss": 0.0445,
"num_input_tokens_seen": 116736000,
"step": 114000
},
{
"epoch": 2.8625,
"grad_norm": 0.01046211551874876,
"learning_rate": 2.2920833333333338e-06,
"loss": 0.0294,
"num_input_tokens_seen": 117248000,
"step": 114500
},
{
"epoch": 2.875,
"grad_norm": 0.012272953987121582,
"learning_rate": 2.0837499999999997e-06,
"loss": 0.0396,
"num_input_tokens_seen": 117760000,
"step": 115000
},
{
"epoch": 2.8875,
"grad_norm": 0.022472262382507324,
"learning_rate": 1.8754166666666666e-06,
"loss": 0.047,
"num_input_tokens_seen": 118272000,
"step": 115500
},
{
"epoch": 2.9,
"grad_norm": 0.07120255380868912,
"learning_rate": 1.6670833333333334e-06,
"loss": 0.0437,
"num_input_tokens_seen": 118784000,
"step": 116000
},
{
"epoch": 2.9125,
"grad_norm": 0.060723673552274704,
"learning_rate": 1.45875e-06,
"loss": 0.035,
"num_input_tokens_seen": 119296000,
"step": 116500
},
{
"epoch": 2.925,
"grad_norm": 0.012350406497716904,
"learning_rate": 1.2504166666666668e-06,
"loss": 0.03,
"num_input_tokens_seen": 119808000,
"step": 117000
},
{
"epoch": 2.9375,
"grad_norm": 0.18025244772434235,
"learning_rate": 1.0420833333333334e-06,
"loss": 0.0306,
"num_input_tokens_seen": 120320000,
"step": 117500
},
{
"epoch": 2.95,
"grad_norm": 0.007316856179386377,
"learning_rate": 8.3375e-07,
"loss": 0.0328,
"num_input_tokens_seen": 120832000,
"step": 118000
},
{
"epoch": 2.9625,
"grad_norm": 0.06496240198612213,
"learning_rate": 6.254166666666667e-07,
"loss": 0.039,
"num_input_tokens_seen": 121344000,
"step": 118500
},
{
"epoch": 2.975,
"grad_norm": 0.0057182470336556435,
"learning_rate": 4.170833333333334e-07,
"loss": 0.0367,
"num_input_tokens_seen": 121856000,
"step": 119000
},
{
"epoch": 2.9875,
"grad_norm": 0.017466630786657333,
"learning_rate": 2.0875e-07,
"loss": 0.0381,
"num_input_tokens_seen": 122368000,
"step": 119500
},
{
"epoch": 3.0,
"grad_norm": 0.013719202019274235,
"learning_rate": 4.1666666666666673e-10,
"loss": 0.0424,
"num_input_tokens_seen": 122880000,
"step": 120000
},
{
"epoch": 3.0,
"eval_accuracy": 0.955225,
"eval_combined_score": 1.2845397902488602,
"eval_loss": 0.2597787082195282,
"eval_runtime": 39.5285,
"eval_samples_per_second": 2023.856,
"eval_steps_per_second": 252.982,
"num_input_tokens_seen": 122880000,
"step": 120000
},
{
"epoch": 3.0,
"num_input_tokens_seen": 122880000,
"step": 120000,
"total_flos": 1.580945522688e+16,
"train_loss": 0.10143799341519674,
"train_runtime": 4258.2582,
"train_samples_per_second": 225.444,
"train_steps_per_second": 28.181,
"train_tokens_per_second": 28856.869
}
],
"logging_steps": 500,
"max_steps": 120000,
"num_input_tokens_seen": 122880000,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.580945522688e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}