gpt-neo-125M-magicprompt-SD / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 9.992481203007518,
"global_step": 330,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.15,
"learning_rate": 2.9411764705882354e-05,
"loss": 4.7948,
"step": 5
},
{
"epoch": 0.3,
"learning_rate": 5.882352941176471e-05,
"loss": 4.6565,
"step": 10
},
{
"epoch": 0.45,
"learning_rate": 8.823529411764706e-05,
"loss": 4.2672,
"step": 15
},
{
"epoch": 0.6,
"learning_rate": 9.997733473639876e-05,
"loss": 3.8103,
"step": 20
},
{
"epoch": 0.75,
"learning_rate": 9.983889919973586e-05,
"loss": 3.4219,
"step": 25
},
{
"epoch": 0.9,
"learning_rate": 9.957496810072027e-05,
"loss": 3.2189,
"step": 30
},
{
"epoch": 0.99,
"eval_loss": 3.00506591796875,
"eval_runtime": 5.8168,
"eval_samples_per_second": 76.331,
"eval_steps_per_second": 19.083,
"step": 33
},
{
"epoch": 1.06,
"learning_rate": 9.918620602428915e-05,
"loss": 3.1568,
"step": 35
},
{
"epoch": 1.21,
"learning_rate": 9.867359188282192e-05,
"loss": 2.8976,
"step": 40
},
{
"epoch": 1.36,
"learning_rate": 9.803841645121504e-05,
"loss": 2.8159,
"step": 45
},
{
"epoch": 1.51,
"learning_rate": 9.728227911667934e-05,
"loss": 2.7279,
"step": 50
},
{
"epoch": 1.66,
"learning_rate": 9.640708385144403e-05,
"loss": 2.6442,
"step": 55
},
{
"epoch": 1.81,
"learning_rate": 9.541503441850843e-05,
"loss": 2.5975,
"step": 60
},
{
"epoch": 1.96,
"learning_rate": 9.430862882251278e-05,
"loss": 2.5466,
"step": 65
},
{
"epoch": 1.99,
"eval_loss": 2.5214502811431885,
"eval_runtime": 5.9909,
"eval_samples_per_second": 74.113,
"eval_steps_per_second": 18.528,
"step": 66
},
{
"epoch": 2.12,
"learning_rate": 9.309065301970193e-05,
"loss": 2.6158,
"step": 70
},
{
"epoch": 2.27,
"learning_rate": 9.176417390281944e-05,
"loss": 2.4248,
"step": 75
},
{
"epoch": 2.42,
"learning_rate": 9.033253157859714e-05,
"loss": 2.3915,
"step": 80
},
{
"epoch": 2.57,
"learning_rate": 8.879933095728485e-05,
"loss": 2.3519,
"step": 85
},
{
"epoch": 2.72,
"learning_rate": 8.716843267539869e-05,
"loss": 2.3216,
"step": 90
},
{
"epoch": 2.87,
"learning_rate": 8.544394337454409e-05,
"loss": 2.2791,
"step": 95
},
{
"epoch": 2.99,
"eval_loss": 2.2881205081939697,
"eval_runtime": 6.1549,
"eval_samples_per_second": 72.138,
"eval_steps_per_second": 18.034,
"step": 99
},
{
"epoch": 3.03,
"learning_rate": 8.363020536079239e-05,
"loss": 2.3846,
"step": 100
},
{
"epoch": 3.18,
"learning_rate": 8.17317856706482e-05,
"loss": 2.2154,
"step": 105
},
{
"epoch": 3.33,
"learning_rate": 7.975346457114034e-05,
"loss": 2.1769,
"step": 110
},
{
"epoch": 3.48,
"learning_rate": 7.770022352299293e-05,
"loss": 2.1613,
"step": 115
},
{
"epoch": 3.63,
"learning_rate": 7.557723263718596e-05,
"loss": 2.1198,
"step": 120
},
{
"epoch": 3.78,
"learning_rate": 7.338983765648985e-05,
"loss": 2.1125,
"step": 125
},
{
"epoch": 3.93,
"learning_rate": 7.114354649475499e-05,
"loss": 2.107,
"step": 130
},
{
"epoch": 3.99,
"eval_loss": 2.1322436332702637,
"eval_runtime": 5.9353,
"eval_samples_per_second": 74.807,
"eval_steps_per_second": 18.702,
"step": 132
},
{
"epoch": 4.09,
"learning_rate": 6.884401536785045e-05,
"loss": 2.1638,
"step": 135
},
{
"epoch": 4.24,
"learning_rate": 6.649703455117458e-05,
"loss": 2.0297,
"step": 140
},
{
"epoch": 4.39,
"learning_rate": 6.41085137996006e-05,
"loss": 1.9982,
"step": 145
},
{
"epoch": 4.54,
"learning_rate": 6.168446746656973e-05,
"loss": 1.9981,
"step": 150
},
{
"epoch": 4.69,
"learning_rate": 5.9230999359802784e-05,
"loss": 1.9916,
"step": 155
},
{
"epoch": 4.84,
"learning_rate": 5.675428737176367e-05,
"loss": 1.9654,
"step": 160
},
{
"epoch": 4.99,
"learning_rate": 5.426056792357551e-05,
"loss": 1.9458,
"step": 165
},
{
"epoch": 4.99,
"eval_loss": 2.02701735496521,
"eval_runtime": 6.09,
"eval_samples_per_second": 72.906,
"eval_steps_per_second": 18.227,
"step": 165
},
{
"epoch": 5.15,
"learning_rate": 5.1756120261560446e-05,
"loss": 2.0128,
"step": 170
},
{
"epoch": 5.3,
"learning_rate": 4.924725064594447e-05,
"loss": 1.9078,
"step": 175
},
{
"epoch": 5.45,
"learning_rate": 4.674027647154037e-05,
"loss": 1.8843,
"step": 180
},
{
"epoch": 5.6,
"learning_rate": 4.4241510360393804e-05,
"loss": 1.8944,
"step": 185
},
{
"epoch": 5.75,
"learning_rate": 4.1757244266447245e-05,
"loss": 1.8812,
"step": 190
},
{
"epoch": 5.9,
"learning_rate": 3.9293733632246544e-05,
"loss": 1.8664,
"step": 195
},
{
"epoch": 5.99,
"eval_loss": 1.9580020904541016,
"eval_runtime": 5.9675,
"eval_samples_per_second": 74.403,
"eval_steps_per_second": 18.601,
"step": 198
},
{
"epoch": 6.06,
"learning_rate": 3.685718163758427e-05,
"loss": 1.9469,
"step": 200
},
{
"epoch": 6.21,
"learning_rate": 3.445372357974194e-05,
"loss": 1.8484,
"step": 205
},
{
"epoch": 6.36,
"learning_rate": 3.208941142466187e-05,
"loss": 1.8128,
"step": 210
},
{
"epoch": 6.51,
"learning_rate": 2.9770198567949546e-05,
"loss": 1.8096,
"step": 215
},
{
"epoch": 6.66,
"learning_rate": 2.7501924844078534e-05,
"loss": 1.8189,
"step": 220
},
{
"epoch": 6.81,
"learning_rate": 2.5290301821544825e-05,
"loss": 1.8172,
"step": 225
},
{
"epoch": 6.96,
"learning_rate": 2.3140898420998426e-05,
"loss": 1.8083,
"step": 230
},
{
"epoch": 6.99,
"eval_loss": 1.9176976680755615,
"eval_runtime": 6.163,
"eval_samples_per_second": 72.043,
"eval_steps_per_second": 18.011,
"step": 231
},
{
"epoch": 7.12,
"learning_rate": 2.105912689256533e-05,
"loss": 1.8905,
"step": 235
},
{
"epoch": 7.27,
"learning_rate": 1.905022918766995e-05,
"loss": 1.7894,
"step": 240
},
{
"epoch": 7.42,
"learning_rate": 1.7119263759673675e-05,
"loss": 1.7816,
"step": 245
},
{
"epoch": 7.57,
"learning_rate": 1.527109282656611e-05,
"loss": 1.7818,
"step": 250
},
{
"epoch": 7.72,
"learning_rate": 1.3510370127781635e-05,
"loss": 1.7792,
"step": 255
},
{
"epoch": 7.87,
"learning_rate": 1.184152920597028e-05,
"loss": 1.7631,
"step": 260
},
{
"epoch": 7.99,
"eval_loss": 1.896404504776001,
"eval_runtime": 6.0238,
"eval_samples_per_second": 73.708,
"eval_steps_per_second": 18.427,
"step": 264
},
{
"epoch": 8.03,
"learning_rate": 1.026877224322923e-05,
"loss": 1.8595,
"step": 265
},
{
"epoch": 8.18,
"learning_rate": 8.7960594799059e-06,
"loss": 1.7476,
"step": 270
},
{
"epoch": 8.33,
"learning_rate": 7.427099242616348e-06,
"loss": 1.7775,
"step": 275
},
{
"epoch": 8.48,
"learning_rate": 6.1653386065885165e-06,
"loss": 1.7544,
"step": 280
},
{
"epoch": 8.63,
"learning_rate": 5.0139547158427e-06,
"loss": 1.7617,
"step": 285
},
{
"epoch": 8.78,
"learning_rate": 3.975846783065662e-06,
"loss": 1.7706,
"step": 290
},
{
"epoch": 8.93,
"learning_rate": 3.0536287893223604e-06,
"loss": 1.7369,
"step": 295
},
{
"epoch": 8.99,
"eval_loss": 1.8884940147399902,
"eval_runtime": 5.932,
"eval_samples_per_second": 74.849,
"eval_steps_per_second": 18.712,
"step": 297
},
{
"epoch": 9.09,
"learning_rate": 2.249622901987963e-06,
"loss": 1.8462,
"step": 300
},
{
"epoch": 9.24,
"learning_rate": 1.5658536274738621e-06,
"loss": 1.7371,
"step": 305
},
{
"epoch": 9.39,
"learning_rate": 1.004042713471165e-06,
"loss": 1.7526,
"step": 310
},
{
"epoch": 9.54,
"learning_rate": 5.656048135480763e-07,
"loss": 1.7684,
"step": 315
},
{
"epoch": 9.69,
"learning_rate": 2.5164392501777487e-07,
"loss": 1.7508,
"step": 320
},
{
"epoch": 9.84,
"learning_rate": 6.295060904623617e-08,
"loss": 1.7465,
"step": 325
},
{
"epoch": 9.99,
"learning_rate": 0.0,
"loss": 1.766,
"step": 330
},
{
"epoch": 9.99,
"eval_loss": 1.887495756149292,
"eval_runtime": 6.0398,
"eval_samples_per_second": 73.513,
"eval_steps_per_second": 18.378,
"step": 330
},
{
"epoch": 9.99,
"step": 330,
"total_flos": 2.2217230871691264e+16,
"train_loss": 2.230022551796653,
"train_runtime": 1073.0912,
"train_samples_per_second": 79.36,
"train_steps_per_second": 0.308
}
],
"max_steps": 330,
"num_train_epochs": 10,
"total_flos": 2.2217230871691264e+16,
"trial_name": null,
"trial_params": null
}
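
A minimal sketch of how one might inspect this log, assuming the JSON above is saved locally as trainer_state.json (the path and the matplotlib plot are illustrative assumptions, not part of the original file): entries in log_history that carry "loss" are periodic training logs, while entries with "eval_loss" are the per-epoch evaluations.

```python
import json

import matplotlib.pyplot as plt

# Load the trainer state dumped by the training run (assumed local path).
with open("trainer_state.json") as f:
    state = json.load(f)

# log_history mixes periodic training logs ("loss") and per-epoch eval logs ("eval_loss");
# the final summary entry uses "train_loss" and is excluded by both filters.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

# Plot training loss against evaluation loss over optimizer steps.
plt.plot([e["step"] for e in train_logs], [e["loss"] for e in train_logs], label="train loss")
plt.plot([e["step"] for e in eval_logs], [e["eval_loss"] for e in eval_logs], label="eval loss")
plt.xlabel("step")
plt.ylabel("loss")
plt.legend()
plt.show()
```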