{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 325,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 0.2686200088875347,
"learning_rate": 6.060606060606061e-06,
"loss": 0.7782,
"step": 1
},
{
"epoch": 0.02,
"grad_norm": 0.21475453367529,
"learning_rate": 3.0303030303030306e-05,
"loss": 0.7797,
"step": 5
},
{
"epoch": 0.03,
"grad_norm": 0.2247471478195798,
"learning_rate": 6.060606060606061e-05,
"loss": 0.7851,
"step": 10
},
{
"epoch": 0.05,
"grad_norm": 0.20570321309918235,
"learning_rate": 9.090909090909092e-05,
"loss": 0.7854,
"step": 15
},
{
"epoch": 0.06,
"grad_norm": 0.18031349892436885,
"learning_rate": 0.00012121212121212122,
"loss": 0.7919,
"step": 20
},
{
"epoch": 0.08,
"grad_norm": 0.24146229252382978,
"learning_rate": 0.00015151515151515152,
"loss": 0.738,
"step": 25
},
{
"epoch": 0.09,
"grad_norm": 0.3320926970306046,
"learning_rate": 0.00018181818181818183,
"loss": 0.7418,
"step": 30
},
{
"epoch": 0.11,
"grad_norm": 0.19858557544056218,
"learning_rate": 0.00019997685019798912,
"loss": 0.764,
"step": 35
},
{
"epoch": 0.12,
"grad_norm": 0.19954091433241658,
"learning_rate": 0.0001997165380022878,
"loss": 0.7607,
"step": 40
},
{
"epoch": 0.14,
"grad_norm": 0.1889847467880647,
"learning_rate": 0.000199167731989929,
"loss": 0.7949,
"step": 45
},
{
"epoch": 0.15,
"grad_norm": 0.19722641297574658,
"learning_rate": 0.0001983320199330545,
"loss": 0.7642,
"step": 50
},
{
"epoch": 0.17,
"grad_norm": 0.19369172161622056,
"learning_rate": 0.00019721181966290613,
"loss": 0.7837,
"step": 55
},
{
"epoch": 0.18,
"grad_norm": 0.18633655144200606,
"learning_rate": 0.00019581037207470382,
"loss": 0.7569,
"step": 60
},
{
"epoch": 0.2,
"grad_norm": 0.17978045786081515,
"learning_rate": 0.00019413173175128473,
"loss": 0.7615,
"step": 65
},
{
"epoch": 0.22,
"grad_norm": 0.1946967703536159,
"learning_rate": 0.00019218075523263104,
"loss": 0.7537,
"step": 70
},
{
"epoch": 0.23,
"grad_norm": 0.1697959010297178,
"learning_rate": 0.00018996308696522433,
"loss": 0.7617,
"step": 75
},
{
"epoch": 0.25,
"grad_norm": 0.19424776053519632,
"learning_rate": 0.00018748514297187648,
"loss": 0.7995,
"step": 80
},
{
"epoch": 0.26,
"grad_norm": 0.17591012762345642,
"learning_rate": 0.00018475409228928312,
"loss": 0.754,
"step": 85
},
{
"epoch": 0.28,
"grad_norm": 0.18668198691171894,
"learning_rate": 0.00018177783622700327,
"loss": 0.7754,
"step": 90
},
{
"epoch": 0.29,
"grad_norm": 0.1741754965716253,
"learning_rate": 0.00017856498550787144,
"loss": 0.7923,
"step": 95
},
{
"epoch": 0.31,
"grad_norm": 0.18855993719083636,
"learning_rate": 0.00017512483535597867,
"loss": 0.7546,
"step": 100
},
{
"epoch": 0.32,
"grad_norm": 0.16677585411695353,
"learning_rate": 0.00017146733860429612,
"loss": 0.7752,
"step": 105
},
{
"epoch": 0.34,
"grad_norm": 0.2057091607658948,
"learning_rate": 0.0001676030768997445,
"loss": 0.745,
"step": 110
},
{
"epoch": 0.35,
"grad_norm": 0.18783062116028282,
"learning_rate": 0.00016354323008901776,
"loss": 0.7499,
"step": 115
},
{
"epoch": 0.37,
"grad_norm": 0.1625675859425024,
"learning_rate": 0.00015929954387373103,
"loss": 0.7347,
"step": 120
},
{
"epoch": 0.38,
"grad_norm": 0.19803022161416892,
"learning_rate": 0.00015488429582847192,
"loss": 0.791,
"step": 125
},
{
"epoch": 0.4,
"grad_norm": 0.19340769155849927,
"learning_rate": 0.00015031025988006936,
"loss": 0.7844,
"step": 130
},
{
"epoch": 0.42,
"grad_norm": 0.18701663817803796,
"learning_rate": 0.00014559066935084588,
"loss": 0.7883,
"step": 135
},
{
"epoch": 0.43,
"grad_norm": 0.16387851112042653,
"learning_rate": 0.00014073917867277557,
"loss": 0.7477,
"step": 140
},
{
"epoch": 0.45,
"grad_norm": 0.16958293060069174,
"learning_rate": 0.0001357698238833126,
"loss": 0.7577,
"step": 145
},
{
"epoch": 0.46,
"grad_norm": 0.17304291851159886,
"learning_rate": 0.000130696982017182,
"loss": 0.7349,
"step": 150
},
{
"epoch": 0.48,
"grad_norm": 0.20233311247327937,
"learning_rate": 0.0001255353295116187,
"loss": 0.7579,
"step": 155
},
{
"epoch": 0.49,
"grad_norm": 0.18036287897941758,
"learning_rate": 0.00012029979974539234,
"loss": 0.7428,
"step": 160
},
{
"epoch": 0.51,
"grad_norm": 0.18711131061340178,
"learning_rate": 0.00011500553983446527,
"loss": 0.7913,
"step": 165
},
{
"epoch": 0.52,
"grad_norm": 0.17301090514375908,
"learning_rate": 0.00010966786680927874,
"loss": 0.7278,
"step": 170
},
{
"epoch": 0.54,
"grad_norm": 0.19697238676167705,
"learning_rate": 0.00010430222330045304,
"loss": 0.7802,
"step": 175
},
{
"epoch": 0.55,
"grad_norm": 0.1688341794712563,
"learning_rate": 9.892413286110886e-05,
"loss": 0.7521,
"step": 180
},
{
"epoch": 0.57,
"grad_norm": 0.1770041663872631,
"learning_rate": 9.354915505506839e-05,
"loss": 0.7652,
"step": 185
},
{
"epoch": 0.58,
"grad_norm": 0.17975920055364886,
"learning_rate": 8.81928404408726e-05,
"loss": 0.7423,
"step": 190
},
{
"epoch": 0.6,
"grad_norm": 0.16596750546842118,
"learning_rate": 8.287068558185225e-05,
"loss": 0.785,
"step": 195
},
{
"epoch": 0.62,
"grad_norm": 0.17922768297335867,
"learning_rate": 7.759808821241406e-05,
"loss": 0.7469,
"step": 200
},
{
"epoch": 0.63,
"grad_norm": 0.16797159131863615,
"learning_rate": 7.239030269025311e-05,
"loss": 0.7434,
"step": 205
},
{
"epoch": 0.65,
"grad_norm": 0.17963390706599044,
"learning_rate": 6.726239586337408e-05,
"loss": 0.76,
"step": 210
},
{
"epoch": 0.66,
"grad_norm": 0.1632820830493435,
"learning_rate": 6.22292034796035e-05,
"loss": 0.753,
"step": 215
},
{
"epoch": 0.68,
"grad_norm": 0.1826122995064924,
"learning_rate": 5.730528726470792e-05,
"loss": 0.7681,
"step": 220
},
{
"epoch": 0.69,
"grad_norm": 0.1613160734854796,
"learning_rate": 5.2504892793295e-05,
"loss": 0.743,
"step": 225
},
{
"epoch": 0.71,
"grad_norm": 0.18621725463530472,
"learning_rate": 4.7841908274384616e-05,
"loss": 0.7717,
"step": 230
},
{
"epoch": 0.72,
"grad_norm": 0.1744166288344059,
"learning_rate": 4.332982437088825e-05,
"loss": 0.7149,
"step": 235
},
{
"epoch": 0.74,
"grad_norm": 0.17659716102656,
"learning_rate": 3.898169516924398e-05,
"loss": 0.751,
"step": 240
},
{
"epoch": 0.75,
"grad_norm": 0.1715227056995353,
"learning_rate": 3.4810100412128747e-05,
"loss": 0.7561,
"step": 245
},
{
"epoch": 0.77,
"grad_norm": 0.17506626977459022,
"learning_rate": 3.0827109103512643e-05,
"loss": 0.7669,
"step": 250
},
{
"epoch": 0.78,
"grad_norm": 0.17815639423266896,
"learning_rate": 2.7044244591351232e-05,
"loss": 0.7589,
"step": 255
},
{
"epoch": 0.8,
"grad_norm": 0.1636846820858173,
"learning_rate": 2.3472451228937253e-05,
"loss": 0.7393,
"step": 260
},
{
"epoch": 0.82,
"grad_norm": 0.17100053615356214,
"learning_rate": 2.0122062711363532e-05,
"loss": 0.7764,
"step": 265
},
{
"epoch": 0.83,
"grad_norm": 0.17768328277125636,
"learning_rate": 1.7002772178705716e-05,
"loss": 0.7331,
"step": 270
},
{
"epoch": 0.85,
"grad_norm": 0.16988419995617995,
"learning_rate": 1.4123604172419713e-05,
"loss": 0.7719,
"step": 275
},
{
"epoch": 0.86,
"grad_norm": 0.18468128236342862,
"learning_rate": 1.149288852608743e-05,
"loss": 0.7558,
"step": 280
},
{
"epoch": 0.88,
"grad_norm": 0.16594356186499187,
"learning_rate": 9.118236266049707e-06,
"loss": 0.7309,
"step": 285
},
{
"epoch": 0.89,
"grad_norm": 0.1726455301629271,
"learning_rate": 7.0065175916482095e-06,
"loss": 0.76,
"step": 290
},
{
"epoch": 0.91,
"grad_norm": 0.1521864857478625,
"learning_rate": 5.163841998782837e-06,
"loss": 0.7642,
"step": 295
},
{
"epoch": 0.92,
"grad_norm": 0.1629366183790264,
"learning_rate": 3.595540604290437e-06,
"loss": 0.7578,
"step": 300
},
{
"epoch": 0.94,
"grad_norm": 0.17095198936108816,
"learning_rate": 2.30615072228183e-06,
"loss": 0.7332,
"step": 305
},
{
"epoch": 0.95,
"grad_norm": 0.17577071939294034,
"learning_rate": 1.2994027370611173e-06,
"loss": 0.7624,
"step": 310
},
{
"epoch": 0.97,
"grad_norm": 0.15277306413297514,
"learning_rate": 5.782093106048159e-07,
"loss": 0.7367,
"step": 315
},
{
"epoch": 0.98,
"grad_norm": 0.17579866149327247,
"learning_rate": 1.446569558255395e-07,
"loss": 0.7431,
"step": 320
},
{
"epoch": 1.0,
"grad_norm": 0.16982851480438013,
"learning_rate": 0.0,
"loss": 0.749,
"step": 325
},
{
"epoch": 1.0,
"eval_loss": 0.7775599956512451,
"eval_runtime": 47.5812,
"eval_samples_per_second": 4.855,
"eval_steps_per_second": 0.168,
"step": 325
},
{
"epoch": 1.0,
"step": 325,
"total_flos": 4962652747988992.0,
"train_loss": 0.7600007471671472,
"train_runtime": 11976.0052,
"train_samples_per_second": 1.736,
"train_steps_per_second": 0.027
}
],
"logging_steps": 5,
"max_steps": 325,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 4962652747988992.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}