{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 100.0,
"eval_steps": 500,
"global_step": 113400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.0,
"grad_norm": 1.0368574857711792,
"learning_rate": 9.900440917107584e-06,
"loss": 2.4497,
"step": 1134
},
{
"epoch": 2.0,
"grad_norm": 2.962113380432129,
"learning_rate": 9.800440917107585e-06,
"loss": 2.3441,
"step": 2268
},
{
"epoch": 3.0,
"grad_norm": 2.04465913772583,
"learning_rate": 9.700529100529101e-06,
"loss": 2.2934,
"step": 3402
},
{
"epoch": 4.0,
"grad_norm": 3.710998773574829,
"learning_rate": 9.6005291005291e-06,
"loss": 2.2626,
"step": 4536
},
{
"epoch": 5.0,
"grad_norm": 2.217125177383423,
"learning_rate": 9.500529100529102e-06,
"loss": 2.2311,
"step": 5670
},
{
"epoch": 6.0,
"grad_norm": 4.2306671142578125,
"learning_rate": 9.400617283950619e-06,
"loss": 2.1809,
"step": 6804
},
{
"epoch": 7.0,
"grad_norm": 3.223266124725342,
"learning_rate": 9.300617283950618e-06,
"loss": 2.1686,
"step": 7938
},
{
"epoch": 8.0,
"grad_norm": 2.7807767391204834,
"learning_rate": 9.200705467372135e-06,
"loss": 2.139,
"step": 9072
},
{
"epoch": 9.0,
"grad_norm": 6.859907150268555,
"learning_rate": 9.100705467372136e-06,
"loss": 2.1258,
"step": 10206
},
{
"epoch": 10.0,
"grad_norm": 3.691579818725586,
"learning_rate": 9.000793650793651e-06,
"loss": 2.1096,
"step": 11340
},
{
"epoch": 11.0,
"grad_norm": 4.632028579711914,
"learning_rate": 8.90079365079365e-06,
"loss": 2.1051,
"step": 12474
},
{
"epoch": 12.0,
"grad_norm": 5.086824417114258,
"learning_rate": 8.800881834215167e-06,
"loss": 2.0945,
"step": 13608
},
{
"epoch": 13.0,
"grad_norm": 3.4331443309783936,
"learning_rate": 8.700881834215168e-06,
"loss": 2.0912,
"step": 14742
},
{
"epoch": 14.0,
"grad_norm": 4.4025092124938965,
"learning_rate": 8.600881834215168e-06,
"loss": 2.0722,
"step": 15876
},
{
"epoch": 15.0,
"grad_norm": 10.316903114318848,
"learning_rate": 8.500881834215169e-06,
"loss": 2.0726,
"step": 17010
},
{
"epoch": 16.0,
"grad_norm": 5.012073040008545,
"learning_rate": 8.400970017636686e-06,
"loss": 2.0657,
"step": 18144
},
{
"epoch": 17.0,
"grad_norm": 6.108023166656494,
"learning_rate": 8.301058201058203e-06,
"loss": 2.0583,
"step": 19278
},
{
"epoch": 18.0,
"grad_norm": 3.4143471717834473,
"learning_rate": 8.201058201058202e-06,
"loss": 2.038,
"step": 20412
},
{
"epoch": 19.0,
"grad_norm": 4.766075134277344,
"learning_rate": 8.101146384479719e-06,
"loss": 2.0292,
"step": 21546
},
{
"epoch": 20.0,
"grad_norm": 2.7671144008636475,
"learning_rate": 8.001146384479718e-06,
"loss": 2.0344,
"step": 22680
},
{
"epoch": 21.0,
"grad_norm": 3.447026491165161,
"learning_rate": 7.901234567901235e-06,
"loss": 2.0324,
"step": 23814
},
{
"epoch": 22.0,
"grad_norm": 3.207721471786499,
"learning_rate": 7.801234567901234e-06,
"loss": 2.0119,
"step": 24948
},
{
"epoch": 23.0,
"grad_norm": 2.789856433868408,
"learning_rate": 7.701234567901235e-06,
"loss": 2.0157,
"step": 26082
},
{
"epoch": 24.0,
"grad_norm": 2.2711899280548096,
"learning_rate": 7.601234567901235e-06,
"loss": 2.0048,
"step": 27216
},
{
"epoch": 25.0,
"grad_norm": 1.5879839658737183,
"learning_rate": 7.5013227513227514e-06,
"loss": 2.0111,
"step": 28350
},
{
"epoch": 26.0,
"grad_norm": 4.620934963226318,
"learning_rate": 7.401410934744268e-06,
"loss": 1.9837,
"step": 29484
},
{
"epoch": 27.0,
"grad_norm": 1.8832050561904907,
"learning_rate": 7.3014109347442685e-06,
"loss": 1.9818,
"step": 30618
},
{
"epoch": 28.0,
"grad_norm": 3.1029512882232666,
"learning_rate": 7.2014991181657855e-06,
"loss": 1.9868,
"step": 31752
},
{
"epoch": 29.0,
"grad_norm": 1.3787024021148682,
"learning_rate": 7.101499118165786e-06,
"loss": 1.988,
"step": 32886
},
{
"epoch": 30.0,
"grad_norm": 4.151069164276123,
"learning_rate": 7.001499118165786e-06,
"loss": 1.975,
"step": 34020
},
{
"epoch": 31.0,
"grad_norm": 3.1992034912109375,
"learning_rate": 6.901587301587303e-06,
"loss": 1.9814,
"step": 35154
},
{
"epoch": 32.0,
"grad_norm": 5.592874526977539,
"learning_rate": 6.801675485008819e-06,
"loss": 1.9739,
"step": 36288
},
{
"epoch": 33.0,
"grad_norm": 1.8030545711517334,
"learning_rate": 6.701675485008818e-06,
"loss": 1.9758,
"step": 37422
},
{
"epoch": 34.0,
"grad_norm": 3.9878218173980713,
"learning_rate": 6.601675485008819e-06,
"loss": 1.9726,
"step": 38556
},
{
"epoch": 35.0,
"grad_norm": 3.259308099746704,
"learning_rate": 6.501763668430335e-06,
"loss": 1.9649,
"step": 39690
},
{
"epoch": 36.0,
"grad_norm": 3.823164224624634,
"learning_rate": 6.401763668430335e-06,
"loss": 1.9547,
"step": 40824
},
{
"epoch": 37.0,
"grad_norm": 4.652733325958252,
"learning_rate": 6.301763668430335e-06,
"loss": 1.9673,
"step": 41958
},
{
"epoch": 38.0,
"grad_norm": 3.522451162338257,
"learning_rate": 6.201851851851852e-06,
"loss": 1.9544,
"step": 43092
},
{
"epoch": 39.0,
"grad_norm": 4.213411331176758,
"learning_rate": 6.1018518518518525e-06,
"loss": 1.9601,
"step": 44226
},
{
"epoch": 40.0,
"grad_norm": 5.826420783996582,
"learning_rate": 6.0019400352733694e-06,
"loss": 1.9553,
"step": 45360
},
{
"epoch": 41.0,
"grad_norm": 3.5787088871002197,
"learning_rate": 5.902028218694886e-06,
"loss": 1.9603,
"step": 46494
},
{
"epoch": 42.0,
"grad_norm": 2.380449056625366,
"learning_rate": 5.8020282186948865e-06,
"loss": 1.9551,
"step": 47628
},
{
"epoch": 43.0,
"grad_norm": 5.958215236663818,
"learning_rate": 5.702028218694886e-06,
"loss": 1.9515,
"step": 48762
},
{
"epoch": 44.0,
"grad_norm": 1.0750882625579834,
"learning_rate": 5.602116402116402e-06,
"loss": 1.9578,
"step": 49896
},
{
"epoch": 45.0,
"grad_norm": 2.135838031768799,
"learning_rate": 5.502116402116403e-06,
"loss": 1.9412,
"step": 51030
},
{
"epoch": 46.0,
"grad_norm": 2.9327504634857178,
"learning_rate": 5.402204585537919e-06,
"loss": 1.948,
"step": 52164
},
{
"epoch": 47.0,
"grad_norm": 5.212012767791748,
"learning_rate": 5.302204585537919e-06,
"loss": 1.9581,
"step": 53298
},
{
"epoch": 48.0,
"grad_norm": 3.5629754066467285,
"learning_rate": 5.202292768959436e-06,
"loss": 1.9394,
"step": 54432
},
{
"epoch": 49.0,
"grad_norm": 2.700446844100952,
"learning_rate": 5.102292768959436e-06,
"loss": 1.943,
"step": 55566
},
{
"epoch": 50.0,
"grad_norm": 5.429810523986816,
"learning_rate": 5.002380952380953e-06,
"loss": 1.9499,
"step": 56700
},
{
"epoch": 51.0,
"grad_norm": 2.2767255306243896,
"learning_rate": 4.902469135802469e-06,
"loss": 1.933,
"step": 57834
},
{
"epoch": 52.0,
"grad_norm": 4.359825611114502,
"learning_rate": 4.802469135802469e-06,
"loss": 1.9319,
"step": 58968
},
{
"epoch": 53.0,
"grad_norm": 3.3374125957489014,
"learning_rate": 4.702557319223986e-06,
"loss": 1.9303,
"step": 60102
},
{
"epoch": 54.0,
"grad_norm": 1.666292428970337,
"learning_rate": 4.6025573192239865e-06,
"loss": 1.9391,
"step": 61236
},
{
"epoch": 55.0,
"grad_norm": 2.920473575592041,
"learning_rate": 4.502645502645503e-06,
"loss": 1.9339,
"step": 62370
},
{
"epoch": 56.0,
"grad_norm": 3.5226597785949707,
"learning_rate": 4.402645502645503e-06,
"loss": 1.924,
"step": 63504
},
{
"epoch": 57.0,
"grad_norm": 1.056983232498169,
"learning_rate": 4.30273368606702e-06,
"loss": 1.939,
"step": 64638
},
{
"epoch": 58.0,
"grad_norm": 1.7839380502700806,
"learning_rate": 4.20273368606702e-06,
"loss": 1.9302,
"step": 65772
},
{
"epoch": 59.0,
"grad_norm": 2.453362464904785,
"learning_rate": 4.102821869488536e-06,
"loss": 1.932,
"step": 66906
},
{
"epoch": 60.0,
"grad_norm": 1.79325532913208,
"learning_rate": 4.002910052910054e-06,
"loss": 1.9347,
"step": 68040
},
{
"epoch": 61.0,
"grad_norm": 4.606123924255371,
"learning_rate": 3.902910052910053e-06,
"loss": 1.9163,
"step": 69174
},
{
"epoch": 62.0,
"grad_norm": 3.7879464626312256,
"learning_rate": 3.80299823633157e-06,
"loss": 1.9314,
"step": 70308
},
{
"epoch": 63.0,
"grad_norm": 3.875440835952759,
"learning_rate": 3.7029982363315697e-06,
"loss": 1.9364,
"step": 71442
},
{
"epoch": 64.0,
"grad_norm": 3.8616816997528076,
"learning_rate": 3.6030864197530867e-06,
"loss": 1.9183,
"step": 72576
},
{
"epoch": 65.0,
"grad_norm": 3.232619285583496,
"learning_rate": 3.5030864197530868e-06,
"loss": 1.9164,
"step": 73710
},
{
"epoch": 66.0,
"grad_norm": 3.7224302291870117,
"learning_rate": 3.4031746031746033e-06,
"loss": 1.9225,
"step": 74844
},
{
"epoch": 67.0,
"grad_norm": 2.774463653564453,
"learning_rate": 3.303174603174604e-06,
"loss": 1.9328,
"step": 75978
},
{
"epoch": 68.0,
"grad_norm": 1.1616075038909912,
"learning_rate": 3.2032627865961204e-06,
"loss": 1.914,
"step": 77112
},
{
"epoch": 69.0,
"grad_norm": 1.613097071647644,
"learning_rate": 3.103350970017637e-06,
"loss": 1.9147,
"step": 78246
},
{
"epoch": 70.0,
"grad_norm": 4.327018737792969,
"learning_rate": 3.003350970017637e-06,
"loss": 1.924,
"step": 79380
},
{
"epoch": 71.0,
"grad_norm": 1.2719597816467285,
"learning_rate": 2.9034391534391536e-06,
"loss": 1.938,
"step": 80514
},
{
"epoch": 72.0,
"grad_norm": 2.238760232925415,
"learning_rate": 2.8034391534391537e-06,
"loss": 1.9153,
"step": 81648
},
{
"epoch": 73.0,
"grad_norm": 1.070393443107605,
"learning_rate": 2.7035273368606703e-06,
"loss": 1.9171,
"step": 82782
},
{
"epoch": 74.0,
"grad_norm": 3.3792688846588135,
"learning_rate": 2.603527336860671e-06,
"loss": 1.9204,
"step": 83916
},
{
"epoch": 75.0,
"grad_norm": 1.406660795211792,
"learning_rate": 2.503615520282187e-06,
"loss": 1.9139,
"step": 85050
},
{
"epoch": 76.0,
"grad_norm": 1.7133662700653076,
"learning_rate": 2.403615520282187e-06,
"loss": 1.9238,
"step": 86184
},
{
"epoch": 77.0,
"grad_norm": 1.5862336158752441,
"learning_rate": 2.303703703703704e-06,
"loss": 1.9193,
"step": 87318
},
{
"epoch": 78.0,
"grad_norm": 2.752800226211548,
"learning_rate": 2.2037918871252206e-06,
"loss": 1.9147,
"step": 88452
},
{
"epoch": 79.0,
"grad_norm": 1.8123775720596313,
"learning_rate": 2.1037918871252207e-06,
"loss": 1.9007,
"step": 89586
},
{
"epoch": 80.0,
"grad_norm": 3.6397151947021484,
"learning_rate": 2.0038800705467372e-06,
"loss": 1.9123,
"step": 90720
},
{
"epoch": 81.0,
"grad_norm": 2.8387656211853027,
"learning_rate": 1.9038800705467373e-06,
"loss": 1.9165,
"step": 91854
},
{
"epoch": 82.0,
"grad_norm": 0.9121940732002258,
"learning_rate": 1.803968253968254e-06,
"loss": 1.9179,
"step": 92988
},
{
"epoch": 83.0,
"grad_norm": 3.447791576385498,
"learning_rate": 1.7039682539682542e-06,
"loss": 1.9049,
"step": 94122
},
{
"epoch": 84.0,
"grad_norm": 2.4387011528015137,
"learning_rate": 1.6040564373897708e-06,
"loss": 1.9186,
"step": 95256
},
{
"epoch": 85.0,
"grad_norm": 4.287496566772461,
"learning_rate": 1.5040564373897709e-06,
"loss": 1.9137,
"step": 96390
},
{
"epoch": 86.0,
"grad_norm": 1.9334492683410645,
"learning_rate": 1.4041446208112876e-06,
"loss": 1.9245,
"step": 97524
},
{
"epoch": 87.0,
"grad_norm": 3.3120267391204834,
"learning_rate": 1.3042328042328042e-06,
"loss": 1.9022,
"step": 98658
},
{
"epoch": 88.0,
"grad_norm": 3.476644992828369,
"learning_rate": 1.2042328042328043e-06,
"loss": 1.9157,
"step": 99792
},
{
"epoch": 89.0,
"grad_norm": 1.7216417789459229,
"learning_rate": 1.104320987654321e-06,
"loss": 1.9212,
"step": 100926
},
{
"epoch": 90.0,
"grad_norm": 1.2965023517608643,
"learning_rate": 1.0043209876543212e-06,
"loss": 1.9098,
"step": 102060
},
{
"epoch": 91.0,
"grad_norm": 2.355569362640381,
"learning_rate": 9.044091710758378e-07,
"loss": 1.9128,
"step": 103194
},
{
"epoch": 92.0,
"grad_norm": 1.394773006439209,
"learning_rate": 8.044091710758378e-07,
"loss": 1.9084,
"step": 104328
},
{
"epoch": 93.0,
"grad_norm": 2.99224591255188,
"learning_rate": 7.044973544973546e-07,
"loss": 1.902,
"step": 105462
},
{
"epoch": 94.0,
"grad_norm": 1.2547388076782227,
"learning_rate": 6.044973544973545e-07,
"loss": 1.9185,
"step": 106596
},
{
"epoch": 95.0,
"grad_norm": 0.7210695147514343,
"learning_rate": 5.045855379188714e-07,
"loss": 1.9131,
"step": 107730
},
{
"epoch": 96.0,
"grad_norm": 0.7888159155845642,
"learning_rate": 4.0467372134038806e-07,
"loss": 1.9087,
"step": 108864
},
{
"epoch": 97.0,
"grad_norm": 0.9536716341972351,
"learning_rate": 3.04673721340388e-07,
"loss": 1.9029,
"step": 109998
},
{
"epoch": 98.0,
"grad_norm": 2.2886030673980713,
"learning_rate": 2.047619047619048e-07,
"loss": 1.9094,
"step": 111132
},
{
"epoch": 99.0,
"grad_norm": 2.0867316722869873,
"learning_rate": 1.0476190476190476e-07,
"loss": 1.9115,
"step": 112266
},
{
"epoch": 100.0,
"grad_norm": 0.59788578748703,
"learning_rate": 4.8500881834215175e-09,
"loss": 1.8996,
"step": 113400
},
{
"epoch": 100.0,
"step": 113400,
"total_flos": 3.571154905966387e+18,
"train_loss": 1.9808363689442792,
"train_runtime": 84553.2908,
"train_samples_per_second": 160.897,
"train_steps_per_second": 1.341
}
],
"logging_steps": 1134,
"max_steps": 113400,
"num_input_tokens_seen": 0,
"num_train_epochs": 100,
"save_steps": 530,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.571154905966387e+18,
"train_batch_size": 60,
"trial_name": null,
"trial_params": null
}