{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.788732394366197,
"eval_steps": 54,
"global_step": 594,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.25,
"grad_norm": 1.9276669025421143,
"learning_rate": 1.267605633802817e-05,
"loss": 1.2779,
"step": 54
},
{
"epoch": 0.25,
"eval_loss": 1.222019910812378,
"eval_runtime": 54.1446,
"eval_samples_per_second": 4.156,
"eval_steps_per_second": 1.053,
"step": 54
},
{
"epoch": 0.51,
"grad_norm": 2.256397008895874,
"learning_rate": 2.535211267605634e-05,
"loss": 1.0801,
"step": 108
},
{
"epoch": 0.51,
"eval_loss": 0.9641969203948975,
"eval_runtime": 54.1532,
"eval_samples_per_second": 4.155,
"eval_steps_per_second": 1.053,
"step": 108
},
{
"epoch": 0.76,
"grad_norm": 2.1346447467803955,
"learning_rate": 3.802816901408451e-05,
"loss": 0.9403,
"step": 162
},
{
"epoch": 0.76,
"eval_loss": 0.9075038433074951,
"eval_runtime": 54.1623,
"eval_samples_per_second": 4.154,
"eval_steps_per_second": 1.052,
"step": 162
},
{
"epoch": 1.01,
"grad_norm": 3.086144208908081,
"learning_rate": 4.992175273865415e-05,
"loss": 0.9201,
"step": 216
},
{
"epoch": 1.01,
"eval_loss": 0.888337254524231,
"eval_runtime": 54.1657,
"eval_samples_per_second": 4.154,
"eval_steps_per_second": 1.052,
"step": 216
},
{
"epoch": 1.27,
"grad_norm": 2.150139331817627,
"learning_rate": 4.85133020344288e-05,
"loss": 0.8568,
"step": 270
},
{
"epoch": 1.27,
"eval_loss": 0.8800536394119263,
"eval_runtime": 54.1792,
"eval_samples_per_second": 4.153,
"eval_steps_per_second": 1.052,
"step": 270
},
{
"epoch": 1.52,
"grad_norm": 2.6009371280670166,
"learning_rate": 4.710485133020345e-05,
"loss": 0.8316,
"step": 324
},
{
"epoch": 1.52,
"eval_loss": 0.8666479587554932,
"eval_runtime": 54.1596,
"eval_samples_per_second": 4.154,
"eval_steps_per_second": 1.052,
"step": 324
},
{
"epoch": 1.77,
"grad_norm": 3.0447170734405518,
"learning_rate": 4.569640062597809e-05,
"loss": 0.863,
"step": 378
},
{
"epoch": 1.77,
"eval_loss": 0.8673966526985168,
"eval_runtime": 54.1837,
"eval_samples_per_second": 4.153,
"eval_steps_per_second": 1.052,
"step": 378
},
{
"epoch": 2.03,
"grad_norm": 3.341876745223999,
"learning_rate": 4.428794992175274e-05,
"loss": 0.8366,
"step": 432
},
{
"epoch": 2.03,
"eval_loss": 0.8631945848464966,
"eval_runtime": 54.1757,
"eval_samples_per_second": 4.153,
"eval_steps_per_second": 1.052,
"step": 432
},
{
"epoch": 2.28,
"grad_norm": 2.3844501972198486,
"learning_rate": 4.287949921752739e-05,
"loss": 0.7709,
"step": 486
},
{
"epoch": 2.28,
"eval_loss": 0.8624871373176575,
"eval_runtime": 54.1608,
"eval_samples_per_second": 4.154,
"eval_steps_per_second": 1.052,
"step": 486
},
{
"epoch": 2.54,
"grad_norm": 3.0776872634887695,
"learning_rate": 4.1471048513302035e-05,
"loss": 0.7905,
"step": 540
},
{
"epoch": 2.54,
"eval_loss": 0.864746630191803,
"eval_runtime": 54.1833,
"eval_samples_per_second": 4.153,
"eval_steps_per_second": 1.052,
"step": 540
},
{
"epoch": 2.79,
"grad_norm": 2.175617218017578,
"learning_rate": 4.0062597809076686e-05,
"loss": 0.7826,
"step": 594
},
{
"epoch": 2.79,
"eval_loss": 0.8620367050170898,
"eval_runtime": 54.1597,
"eval_samples_per_second": 4.154,
"eval_steps_per_second": 1.052,
"step": 594
}
],
"logging_steps": 54,
"max_steps": 2130,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 54,
"total_flos": 4.336297242697728e+16,
"train_batch_size": 6,
"trial_name": null,
"trial_params": null
}