{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 100.0,
"eval_steps": 500,
"global_step": 113400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.0,
"grad_norm": 1.912750482559204,
"learning_rate": 9.900352733686068e-06,
"loss": 2.4424,
"step": 1134
},
{
"epoch": 2.0,
"grad_norm": 2.366978645324707,
"learning_rate": 9.800440917107585e-06,
"loss": 2.3536,
"step": 2268
},
{
"epoch": 3.0,
"grad_norm": 2.4429948329925537,
"learning_rate": 9.700440917107584e-06,
"loss": 2.3138,
"step": 3402
},
{
"epoch": 4.0,
"grad_norm": 3.427384853363037,
"learning_rate": 9.600440917107585e-06,
"loss": 2.2871,
"step": 4536
},
{
"epoch": 5.0,
"grad_norm": 2.3265113830566406,
"learning_rate": 9.500529100529102e-06,
"loss": 2.2576,
"step": 5670
},
{
"epoch": 6.0,
"grad_norm": 4.512721538543701,
"learning_rate": 9.400529100529101e-06,
"loss": 2.2087,
"step": 6804
},
{
"epoch": 7.0,
"grad_norm": 3.427581548690796,
"learning_rate": 9.300617283950618e-06,
"loss": 2.1972,
"step": 7938
},
{
"epoch": 8.0,
"grad_norm": 2.985769510269165,
"learning_rate": 9.200617283950617e-06,
"loss": 2.1687,
"step": 9072
},
{
"epoch": 9.0,
"grad_norm": 5.713807106018066,
"learning_rate": 9.100705467372136e-06,
"loss": 2.1537,
"step": 10206
},
{
"epoch": 10.0,
"grad_norm": 3.9860029220581055,
"learning_rate": 9.000705467372135e-06,
"loss": 2.1393,
"step": 11340
},
{
"epoch": 11.0,
"grad_norm": null,
"learning_rate": 8.900705467372134e-06,
"loss": 2.1331,
"step": 12474
},
{
"epoch": 12.0,
"grad_norm": 5.642363548278809,
"learning_rate": 8.800793650793651e-06,
"loss": 2.1241,
"step": 13608
},
{
"epoch": 13.0,
"grad_norm": 3.955806016921997,
"learning_rate": 8.700881834215168e-06,
"loss": 2.1189,
"step": 14742
},
{
"epoch": 14.0,
"grad_norm": 4.731072425842285,
"learning_rate": 8.600881834215168e-06,
"loss": 2.1023,
"step": 15876
},
{
"epoch": 15.0,
"grad_norm": 5.199740886688232,
"learning_rate": 8.500881834215169e-06,
"loss": 2.0996,
"step": 17010
},
{
"epoch": 16.0,
"grad_norm": 8.10754680633545,
"learning_rate": 8.400970017636686e-06,
"loss": 2.0948,
"step": 18144
},
{
"epoch": 17.0,
"grad_norm": 7.051990509033203,
"learning_rate": 8.300970017636685e-06,
"loss": 2.0864,
"step": 19278
},
{
"epoch": 18.0,
"grad_norm": 3.324284553527832,
"learning_rate": 8.200970017636684e-06,
"loss": 2.0647,
"step": 20412
},
{
"epoch": 19.0,
"grad_norm": 4.6707682609558105,
"learning_rate": 8.101058201058201e-06,
"loss": 2.0577,
"step": 21546
},
{
"epoch": 20.0,
"grad_norm": 3.083761692047119,
"learning_rate": 8.001058201058202e-06,
"loss": 2.0609,
"step": 22680
},
{
"epoch": 21.0,
"grad_norm": 4.806012153625488,
"learning_rate": 7.901058201058201e-06,
"loss": 2.0601,
"step": 23814
},
{
"epoch": 22.0,
"grad_norm": 4.715734958648682,
"learning_rate": 7.801146384479718e-06,
"loss": 2.0389,
"step": 24948
},
{
"epoch": 23.0,
"grad_norm": 3.61401104927063,
"learning_rate": 7.70114638447972e-06,
"loss": 2.0428,
"step": 26082
},
{
"epoch": 24.0,
"grad_norm": 2.6958115100860596,
"learning_rate": 7.6011463844797185e-06,
"loss": 2.0315,
"step": 27216
},
{
"epoch": 25.0,
"grad_norm": 2.96954345703125,
"learning_rate": 7.5011463844797186e-06,
"loss": 2.0378,
"step": 28350
},
{
"epoch": 26.0,
"grad_norm": 5.391402721405029,
"learning_rate": 7.4013227513227516e-06,
"loss": 2.0096,
"step": 29484
},
{
"epoch": 27.0,
"grad_norm": 2.2738614082336426,
"learning_rate": 7.301322751322752e-06,
"loss": 2.0087,
"step": 30618
},
{
"epoch": 28.0,
"grad_norm": 4.0258378982543945,
"learning_rate": 7.201322751322753e-06,
"loss": 2.0135,
"step": 31752
},
{
"epoch": 29.0,
"grad_norm": 3.196429491043091,
"learning_rate": 7.101410934744269e-06,
"loss": 2.013,
"step": 32886
},
{
"epoch": 30.0,
"grad_norm": 3.6472020149230957,
"learning_rate": 7.001410934744268e-06,
"loss": 2.0002,
"step": 34020
},
{
"epoch": 31.0,
"grad_norm": 3.440370559692383,
"learning_rate": 6.901499118165785e-06,
"loss": 2.005,
"step": 35154
},
{
"epoch": 32.0,
"grad_norm": 5.474171161651611,
"learning_rate": 6.801587301587303e-06,
"loss": 1.9974,
"step": 36288
},
{
"epoch": 33.0,
"grad_norm": 2.209702253341675,
"learning_rate": 6.701587301587302e-06,
"loss": 2.0003,
"step": 37422
},
{
"epoch": 34.0,
"grad_norm": 5.641165256500244,
"learning_rate": 6.601587301587302e-06,
"loss": 1.9966,
"step": 38556
},
{
"epoch": 35.0,
"grad_norm": 3.8642516136169434,
"learning_rate": 6.501587301587302e-06,
"loss": 1.9866,
"step": 39690
},
{
"epoch": 36.0,
"grad_norm": 3.8914637565612793,
"learning_rate": 6.401675485008819e-06,
"loss": 1.9769,
"step": 40824
},
{
"epoch": 37.0,
"grad_norm": 2.3981971740722656,
"learning_rate": 6.301675485008819e-06,
"loss": 1.9695,
"step": 41958
},
{
"epoch": 38.0,
"grad_norm": 3.656935453414917,
"learning_rate": 6.201763668430335e-06,
"loss": 1.9774,
"step": 43092
},
{
"epoch": 39.0,
"grad_norm": 5.288485050201416,
"learning_rate": 6.1018518518518525e-06,
"loss": 1.984,
"step": 44226
},
{
"epoch": 40.0,
"grad_norm": 5.823598861694336,
"learning_rate": 6.001851851851852e-06,
"loss": 1.9786,
"step": 45360
},
{
"epoch": 41.0,
"grad_norm": 9.096850395202637,
"learning_rate": 5.901940035273369e-06,
"loss": 1.9815,
"step": 46494
},
{
"epoch": 42.0,
"grad_norm": 2.720540761947632,
"learning_rate": 5.801940035273369e-06,
"loss": 1.9756,
"step": 47628
},
{
"epoch": 43.0,
"grad_norm": 6.2244696617126465,
"learning_rate": 5.702028218694886e-06,
"loss": 1.9715,
"step": 48762
},
{
"epoch": 44.0,
"grad_norm": 1.7180719375610352,
"learning_rate": 5.602028218694886e-06,
"loss": 1.9783,
"step": 49896
},
{
"epoch": 45.0,
"grad_norm": 3.5073482990264893,
"learning_rate": 5.502116402116403e-06,
"loss": 1.9619,
"step": 51030
},
{
"epoch": 46.0,
"grad_norm": 3.795732021331787,
"learning_rate": 5.402204585537919e-06,
"loss": 1.9664,
"step": 52164
},
{
"epoch": 47.0,
"grad_norm": 2.003103733062744,
"learning_rate": 5.302204585537919e-06,
"loss": 1.9772,
"step": 53298
},
{
"epoch": 48.0,
"grad_norm": 8.892159461975098,
"learning_rate": 5.202292768959436e-06,
"loss": 1.9586,
"step": 54432
},
{
"epoch": 49.0,
"grad_norm": 3.5988032817840576,
"learning_rate": 5.102292768959436e-06,
"loss": 1.9623,
"step": 55566
},
{
"epoch": 50.0,
"grad_norm": 6.822943687438965,
"learning_rate": 5.002380952380953e-06,
"loss": 1.972,
"step": 56700
},
{
"epoch": 51.0,
"grad_norm": 3.6781959533691406,
"learning_rate": 4.902380952380953e-06,
"loss": 1.9518,
"step": 57834
},
{
"epoch": 52.0,
"grad_norm": 6.689939975738525,
"learning_rate": 4.802469135802469e-06,
"loss": 1.9515,
"step": 58968
},
{
"epoch": 53.0,
"grad_norm": 3.9311599731445312,
"learning_rate": 4.7024691358024695e-06,
"loss": 1.9482,
"step": 60102
},
{
"epoch": 54.0,
"grad_norm": 2.7226245403289795,
"learning_rate": 4.6025573192239865e-06,
"loss": 1.9569,
"step": 61236
},
{
"epoch": 55.0,
"grad_norm": 4.933896541595459,
"learning_rate": 4.502645502645503e-06,
"loss": 1.9498,
"step": 62370
},
{
"epoch": 56.0,
"grad_norm": 5.327332019805908,
"learning_rate": 4.402645502645503e-06,
"loss": 1.9405,
"step": 63504
},
{
"epoch": 57.0,
"grad_norm": 2.307694911956787,
"learning_rate": 4.30273368606702e-06,
"loss": 1.9582,
"step": 64638
},
{
"epoch": 58.0,
"grad_norm": 2.4888343811035156,
"learning_rate": 4.20273368606702e-06,
"loss": 1.9462,
"step": 65772
},
{
"epoch": 59.0,
"grad_norm": 3.256455898284912,
"learning_rate": 4.102821869488536e-06,
"loss": 1.9481,
"step": 66906
},
{
"epoch": 60.0,
"grad_norm": 2.2037665843963623,
"learning_rate": 4.002821869488536e-06,
"loss": 1.9526,
"step": 68040
},
{
"epoch": 61.0,
"grad_norm": 5.307345390319824,
"learning_rate": 3.902910052910053e-06,
"loss": 1.9317,
"step": 69174
},
{
"epoch": 62.0,
"grad_norm": 5.948610305786133,
"learning_rate": 3.802910052910053e-06,
"loss": 1.9486,
"step": 70308
},
{
"epoch": 63.0,
"grad_norm": 4.689316272735596,
"learning_rate": 3.7029982363315697e-06,
"loss": 1.9503,
"step": 71442
},
{
"epoch": 64.0,
"grad_norm": 4.029778957366943,
"learning_rate": 3.6030864197530867e-06,
"loss": 1.9316,
"step": 72576
},
{
"epoch": 65.0,
"grad_norm": 4.771517276763916,
"learning_rate": 3.5030864197530868e-06,
"loss": 1.9331,
"step": 73710
},
{
"epoch": 66.0,
"grad_norm": 2.0614895820617676,
"learning_rate": 3.4031746031746033e-06,
"loss": 1.9369,
"step": 74844
},
{
"epoch": 67.0,
"grad_norm": 4.460591793060303,
"learning_rate": 3.303174603174604e-06,
"loss": 1.9512,
"step": 75978
},
{
"epoch": 68.0,
"grad_norm": 2.2756810188293457,
"learning_rate": 3.2032627865961204e-06,
"loss": 1.9298,
"step": 77112
},
{
"epoch": 69.0,
"grad_norm": 2.6889700889587402,
"learning_rate": 3.10326278659612e-06,
"loss": 1.9309,
"step": 78246
},
{
"epoch": 70.0,
"grad_norm": 6.319788455963135,
"learning_rate": 3.003350970017637e-06,
"loss": 1.9377,
"step": 79380
},
{
"epoch": 71.0,
"grad_norm": 3.2527384757995605,
"learning_rate": 2.9034391534391536e-06,
"loss": 1.9509,
"step": 80514
},
{
"epoch": 72.0,
"grad_norm": 3.7406935691833496,
"learning_rate": 2.8034391534391537e-06,
"loss": 1.9285,
"step": 81648
},
{
"epoch": 73.0,
"grad_norm": 1.6912527084350586,
"learning_rate": 2.7035273368606703e-06,
"loss": 1.9321,
"step": 82782
},
{
"epoch": 74.0,
"grad_norm": 4.06993293762207,
"learning_rate": 2.603527336860671e-06,
"loss": 1.9334,
"step": 83916
},
{
"epoch": 75.0,
"grad_norm": 1.849129319190979,
"learning_rate": 2.503615520282187e-06,
"loss": 1.9308,
"step": 85050
},
{
"epoch": 76.0,
"grad_norm": 3.185353994369507,
"learning_rate": 2.403615520282187e-06,
"loss": 1.9368,
"step": 86184
},
{
"epoch": 77.0,
"grad_norm": 2.2649121284484863,
"learning_rate": 2.303703703703704e-06,
"loss": 1.9328,
"step": 87318
},
{
"epoch": 78.0,
"grad_norm": 3.588413953781128,
"learning_rate": 2.203703703703704e-06,
"loss": 1.9286,
"step": 88452
},
{
"epoch": 79.0,
"grad_norm": 2.376042127609253,
"learning_rate": 2.1037918871252207e-06,
"loss": 1.9131,
"step": 89586
},
{
"epoch": 80.0,
"grad_norm": 5.023166179656982,
"learning_rate": 2.0038800705467372e-06,
"loss": 1.9246,
"step": 90720
},
{
"epoch": 81.0,
"grad_norm": 3.989335775375366,
"learning_rate": 1.9038800705467373e-06,
"loss": 1.9278,
"step": 91854
},
{
"epoch": 82.0,
"grad_norm": 1.964154601097107,
"learning_rate": 1.803968253968254e-06,
"loss": 1.9324,
"step": 92988
},
{
"epoch": 83.0,
"grad_norm": 3.225398302078247,
"learning_rate": 1.7039682539682542e-06,
"loss": 1.9182,
"step": 94122
},
{
"epoch": 84.0,
"grad_norm": 4.1602935791015625,
"learning_rate": 1.6040564373897708e-06,
"loss": 1.9336,
"step": 95256
},
{
"epoch": 85.0,
"grad_norm": 4.833250522613525,
"learning_rate": 1.5040564373897709e-06,
"loss": 1.9248,
"step": 96390
},
{
"epoch": 86.0,
"grad_norm": 2.619542121887207,
"learning_rate": 1.4041446208112876e-06,
"loss": 1.9369,
"step": 97524
},
{
"epoch": 87.0,
"grad_norm": 2.0560250282287598,
"learning_rate": 1.3041446208112875e-06,
"loss": 1.9184,
"step": 98658
},
{
"epoch": 88.0,
"grad_norm": 7.1246209144592285,
"learning_rate": 1.2042328042328043e-06,
"loss": 1.9279,
"step": 99792
},
{
"epoch": 89.0,
"grad_norm": 3.3801794052124023,
"learning_rate": 1.104320987654321e-06,
"loss": 1.9329,
"step": 100926
},
{
"epoch": 90.0,
"grad_norm": 1.7479180097579956,
"learning_rate": 1.0043209876543212e-06,
"loss": 1.9211,
"step": 102060
},
{
"epoch": 91.0,
"grad_norm": 3.091583013534546,
"learning_rate": 9.044091710758378e-07,
"loss": 1.9236,
"step": 103194
},
{
"epoch": 92.0,
"grad_norm": 2.1435961723327637,
"learning_rate": 8.044091710758378e-07,
"loss": 1.9214,
"step": 104328
},
{
"epoch": 93.0,
"grad_norm": 3.5798821449279785,
"learning_rate": 7.044973544973546e-07,
"loss": 1.9142,
"step": 105462
},
{
"epoch": 94.0,
"grad_norm": 1.9765084981918335,
"learning_rate": 6.044973544973545e-07,
"loss": 1.9336,
"step": 106596
},
{
"epoch": 95.0,
"grad_norm": 1.3698736429214478,
"learning_rate": 5.045855379188714e-07,
"loss": 1.927,
"step": 107730
},
{
"epoch": 96.0,
"grad_norm": 1.0919588804244995,
"learning_rate": 4.045855379188713e-07,
"loss": 1.9213,
"step": 108864
},
{
"epoch": 97.0,
"grad_norm": 1.2900702953338623,
"learning_rate": 3.04673721340388e-07,
"loss": 1.9118,
"step": 109998
},
{
"epoch": 98.0,
"grad_norm": 1.611956238746643,
"learning_rate": 2.047619047619048e-07,
"loss": 1.9227,
"step": 111132
},
{
"epoch": 99.0,
"grad_norm": 4.691267490386963,
"learning_rate": 1.0476190476190476e-07,
"loss": 1.9204,
"step": 112266
},
{
"epoch": 100.0,
"grad_norm": 1.494165301322937,
"learning_rate": 4.8500881834215175e-09,
"loss": 1.9094,
"step": 113400
},
{
"epoch": 100.0,
"step": 113400,
"total_flos": 3.571154905966387e+18,
"train_loss": 1.224746272576549,
"train_runtime": 52658.2042,
"train_samples_per_second": 258.353,
"train_steps_per_second": 2.154
}
],
"logging_steps": 1134,
"max_steps": 113400,
"num_input_tokens_seen": 0,
"num_train_epochs": 100,
"save_steps": 530,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.571154905966387e+18,
"train_batch_size": 60,
"trial_name": null,
"trial_params": null
}