pythia-70m-lmsys-prompts / trainer_state.json
{
"best_global_step": 24815,
"best_metric": 2.6661746501922607,
"best_model_checkpoint": "lmsys-prompts/checkpoint-24815",
"epoch": 5.0,
"eval_steps": 500,
"global_step": 24815,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.10074551682450131,
"grad_norm": 216.5968017578125,
"learning_rate": 4.832426623681913e-05,
"loss": 5.5953,
"step": 500
},
{
"epoch": 0.20149103364900262,
"grad_norm": 90.29191589355469,
"learning_rate": 4.664517428974411e-05,
"loss": 4.1114,
"step": 1000
},
{
"epoch": 0.30223655047350395,
"grad_norm": 27.48844337463379,
"learning_rate": 4.496608234266908e-05,
"loss": 3.9607,
"step": 1500
},
{
"epoch": 0.40298206729800523,
"grad_norm": 29.192981719970703,
"learning_rate": 4.328699039559406e-05,
"loss": 3.7888,
"step": 2000
},
{
"epoch": 0.5037275841225065,
"grad_norm": 27.68057632446289,
"learning_rate": 4.160789844851904e-05,
"loss": 3.6636,
"step": 2500
},
{
"epoch": 0.6044731009470079,
"grad_norm": 26.73339080810547,
"learning_rate": 3.992880650144402e-05,
"loss": 3.5414,
"step": 3000
},
{
"epoch": 0.7052186177715092,
"grad_norm": 13.406410217285156,
"learning_rate": 3.8249714554368995e-05,
"loss": 3.467,
"step": 3500
},
{
"epoch": 0.8059641345960105,
"grad_norm": 17.33591079711914,
"learning_rate": 3.657062260729397e-05,
"loss": 3.3742,
"step": 4000
},
{
"epoch": 0.9067096514205117,
"grad_norm": 11.469970703125,
"learning_rate": 3.4891530660218955e-05,
"loss": 3.3254,
"step": 4500
},
{
"epoch": 1.0,
"eval_accuracy": 0.42128131925101936,
"eval_loss": 3.2208595275878906,
"eval_runtime": 16.4691,
"eval_samples_per_second": 133.037,
"eval_steps_per_second": 16.637,
"step": 4963
},
{
"epoch": 1.007455168245013,
"grad_norm": 10.576227188110352,
"learning_rate": 3.321243871314393e-05,
"loss": 3.2632,
"step": 5000
},
{
"epoch": 1.1082006850695145,
"grad_norm": 10.408350944519043,
"learning_rate": 3.153334676606891e-05,
"loss": 3.177,
"step": 5500
},
{
"epoch": 1.2089462018940158,
"grad_norm": 8.68959903717041,
"learning_rate": 2.9854254818993887e-05,
"loss": 3.1321,
"step": 6000
},
{
"epoch": 1.309691718718517,
"grad_norm": 10.844682693481445,
"learning_rate": 2.817516287191887e-05,
"loss": 3.0864,
"step": 6500
},
{
"epoch": 1.4104372355430184,
"grad_norm": 11.559004783630371,
"learning_rate": 2.6496070924843847e-05,
"loss": 3.0802,
"step": 7000
},
{
"epoch": 1.5111827523675196,
"grad_norm": 11.332049369812012,
"learning_rate": 2.4816978977768823e-05,
"loss": 3.021,
"step": 7500
},
{
"epoch": 1.611928269192021,
"grad_norm": 8.480116844177246,
"learning_rate": 2.3137887030693803e-05,
"loss": 3.0068,
"step": 8000
},
{
"epoch": 1.7126737860165222,
"grad_norm": 11.450610160827637,
"learning_rate": 2.145879508361878e-05,
"loss": 2.9865,
"step": 8500
},
{
"epoch": 1.8134193028410235,
"grad_norm": 7.126410007476807,
"learning_rate": 1.977970313654376e-05,
"loss": 2.946,
"step": 9000
},
{
"epoch": 1.9141648196655248,
"grad_norm": 9.249116897583008,
"learning_rate": 1.8100611189468735e-05,
"loss": 2.9236,
"step": 9500
},
{
"epoch": 2.0,
"eval_accuracy": 0.4685653073780457,
"eval_loss": 2.902540445327759,
"eval_runtime": 17.401,
"eval_samples_per_second": 125.912,
"eval_steps_per_second": 15.746,
"step": 9926
},
{
"epoch": 2.014910336490026,
"grad_norm": 7.414788246154785,
"learning_rate": 1.6421519242393715e-05,
"loss": 2.9077,
"step": 10000
},
{
"epoch": 2.1156558533145273,
"grad_norm": 10.728218078613281,
"learning_rate": 1.4742427295318693e-05,
"loss": 2.8403,
"step": 10500
},
{
"epoch": 2.216401370139029,
"grad_norm": 9.297683715820312,
"learning_rate": 1.3063335348243671e-05,
"loss": 2.8482,
"step": 11000
},
{
"epoch": 2.3171468869635303,
"grad_norm": 6.445867538452148,
"learning_rate": 1.1384243401168649e-05,
"loss": 2.8066,
"step": 11500
},
{
"epoch": 2.4178924037880316,
"grad_norm": 8.223403930664062,
"learning_rate": 9.705151454093627e-06,
"loss": 2.8088,
"step": 12000
},
{
"epoch": 2.518637920612533,
"grad_norm": 6.288245677947998,
"learning_rate": 8.026059507018605e-06,
"loss": 2.7869,
"step": 12500
},
{
"epoch": 2.619383437437034,
"grad_norm": 7.528985977172852,
"learning_rate": 6.346967559943583e-06,
"loss": 2.7774,
"step": 13000
},
{
"epoch": 2.7201289542615354,
"grad_norm": 8.420631408691406,
"learning_rate": 4.667875612868561e-06,
"loss": 2.7582,
"step": 13500
},
{
"epoch": 2.8208744710860367,
"grad_norm": 7.883354187011719,
"learning_rate": 2.988783665793539e-06,
"loss": 2.7613,
"step": 14000
},
{
"epoch": 2.921619987910538,
"grad_norm": 7.071943759918213,
"learning_rate": 1.309691718718517e-06,
"loss": 2.7526,
"step": 14500
},
{
"epoch": 3.0,
"eval_accuracy": 0.48612581550848066,
"eval_loss": 2.79274320602417,
"eval_runtime": 16.6412,
"eval_samples_per_second": 131.661,
"eval_steps_per_second": 16.465,
"step": 14889
},
{
"epoch": 3.0223655047350393,
"grad_norm": 7.703913688659668,
"learning_rate": 1.97783598629861e-05,
"loss": 2.7412,
"step": 15000
},
{
"epoch": 3.1231110215595406,
"grad_norm": 9.797307968139648,
"learning_rate": 1.8770904694741083e-05,
"loss": 2.7521,
"step": 15500
},
{
"epoch": 3.223856538384042,
"grad_norm": 11.222009658813477,
"learning_rate": 1.776344952649607e-05,
"loss": 2.7435,
"step": 16000
},
{
"epoch": 3.324602055208543,
"grad_norm": 9.812458038330078,
"learning_rate": 1.675599435825106e-05,
"loss": 2.7507,
"step": 16500
},
{
"epoch": 3.4253475720330444,
"grad_norm": 7.502840995788574,
"learning_rate": 1.5748539190006045e-05,
"loss": 2.7181,
"step": 17000
},
{
"epoch": 3.5260930888575457,
"grad_norm": 6.958070278167725,
"learning_rate": 1.4741084021761032e-05,
"loss": 2.7264,
"step": 17500
},
{
"epoch": 3.626838605682047,
"grad_norm": 7.174421787261963,
"learning_rate": 1.373362885351602e-05,
"loss": 2.7289,
"step": 18000
},
{
"epoch": 3.7275841225065482,
"grad_norm": 5.985280990600586,
"learning_rate": 1.2726173685271006e-05,
"loss": 2.6752,
"step": 18500
},
{
"epoch": 3.8283296393310495,
"grad_norm": 7.28540563583374,
"learning_rate": 1.1718718517025991e-05,
"loss": 2.6896,
"step": 19000
},
{
"epoch": 3.929075156155551,
"grad_norm": 9.331765174865723,
"learning_rate": 1.071126334878098e-05,
"loss": 2.683,
"step": 19500
},
{
"epoch": 4.0,
"eval_accuracy": 0.4999047467356238,
"eval_loss": 2.7130751609802246,
"eval_runtime": 16.5004,
"eval_samples_per_second": 132.785,
"eval_steps_per_second": 16.606,
"step": 19852
},
{
"epoch": 4.029820672980052,
"grad_norm": 8.58707046508789,
"learning_rate": 9.703808180535967e-06,
"loss": 2.6525,
"step": 20000
},
{
"epoch": 4.130566189804553,
"grad_norm": 7.965948581695557,
"learning_rate": 8.696353012290953e-06,
"loss": 2.6099,
"step": 20500
},
{
"epoch": 4.231311706629055,
"grad_norm": 5.951071739196777,
"learning_rate": 7.68889784404594e-06,
"loss": 2.633,
"step": 21000
},
{
"epoch": 4.332057223453556,
"grad_norm": 6.427402973175049,
"learning_rate": 6.681442675800926e-06,
"loss": 2.6239,
"step": 21500
},
{
"epoch": 4.432802740278058,
"grad_norm": 6.754240989685059,
"learning_rate": 5.673987507555914e-06,
"loss": 2.6033,
"step": 22000
},
{
"epoch": 4.533548257102559,
"grad_norm": 6.711804389953613,
"learning_rate": 4.666532339310901e-06,
"loss": 2.6101,
"step": 22500
},
{
"epoch": 4.634293773927061,
"grad_norm": 7.764159202575684,
"learning_rate": 3.6590771710658874e-06,
"loss": 2.6117,
"step": 23000
},
{
"epoch": 4.735039290751562,
"grad_norm": 8.959000587463379,
"learning_rate": 2.6516220028208746e-06,
"loss": 2.6101,
"step": 23500
},
{
"epoch": 4.835784807576063,
"grad_norm": 6.05490255355835,
"learning_rate": 1.6441668345758613e-06,
"loss": 2.5942,
"step": 24000
},
{
"epoch": 4.9365303244005645,
"grad_norm": 6.731525897979736,
"learning_rate": 6.367116663308483e-07,
"loss": 2.6099,
"step": 24500
},
{
"epoch": 5.0,
"eval_accuracy": 0.5068343659501033,
"eval_loss": 2.6661746501922607,
"eval_runtime": 16.4299,
"eval_samples_per_second": 133.355,
"eval_steps_per_second": 16.677,
"step": 24815
},
{
"epoch": 5.0,
"step": 24815,
"total_flos": 5.448549676351488e+16,
"train_loss": 1.0647943758421696,
"train_runtime": 2230.6964,
"train_samples_per_second": 88.995,
"train_steps_per_second": 11.124
}
],
"logging_steps": 500,
"max_steps": 24815,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.448549676351488e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
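
For reference, the log above can be inspected programmatically. The following is a minimal sketch (Python, standard library only) that loads the file, prints the best checkpoint, and lists the per-epoch evaluation metrics; the local path "trainer_state.json" is an assumption about where the file has been saved, not something stated in the upload.

import json

# Minimal sketch: read the trainer state emitted by the Hugging Face Trainer.
# The local path below is an assumption; point it at wherever this file is saved.
with open("trainer_state.json") as f:
    state = json.load(f)

# "log_history" mixes three record types: per-500-step training logs (key "loss"),
# end-of-epoch evaluations (key "eval_loss"), and a final training summary.
train_log = [entry for entry in state["log_history"] if "loss" in entry]
eval_log = [entry for entry in state["log_history"] if "eval_loss" in entry]

print(f"best checkpoint: {state['best_model_checkpoint']} "
      f"(eval_loss = {state['best_metric']:.4f})")

for entry in eval_log:
    print(f"epoch {entry['epoch']:.0f}: "
          f"eval_loss = {entry['eval_loss']:.4f}, "
          f"eval_accuracy = {entry['eval_accuracy']:.4f}")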