{
"best_global_step": 800,
"best_metric": 0.5450772643089294,
"best_model_checkpoint": "Llama-3.2-1B-it-Medical-LoRA/checkpoint-800",
"epoch": 1.8131370328425822,
"eval_steps": 100,
"global_step": 800,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.11325028312570781,
"grad_norm": 0.6456167697906494,
"learning_rate": 0.00019075425790754258,
"loss": 0.643,
"step": 50
},
{
"epoch": 0.22650056625141562,
"grad_norm": 1.0792498588562012,
"learning_rate": 0.0001664233576642336,
"loss": 0.6465,
"step": 100
},
{
"epoch": 0.22650056625141562,
"eval_loss": 0.5826964378356934,
"eval_runtime": 394.3597,
"eval_samples_per_second": 3.979,
"eval_steps_per_second": 0.5,
"step": 100
},
{
"epoch": 0.33975084937712347,
"grad_norm": 0.5628879070281982,
"learning_rate": 0.0001420924574209246,
"loss": 0.5815,
"step": 150
},
{
"epoch": 0.45300113250283125,
"grad_norm": 0.5532649755477905,
"learning_rate": 0.00011776155717761557,
"loss": 0.5435,
"step": 200
},
{
"epoch": 0.45300113250283125,
"eval_loss": 0.5700864791870117,
"eval_runtime": 394.4221,
"eval_samples_per_second": 3.978,
"eval_steps_per_second": 0.499,
"step": 200
},
{
"epoch": 0.5662514156285391,
"grad_norm": 0.7321934700012207,
"learning_rate": 9.343065693430657e-05,
"loss": 0.5816,
"step": 250
},
{
"epoch": 0.6795016987542469,
"grad_norm": 0.43280959129333496,
"learning_rate": 6.909975669099758e-05,
"loss": 0.5611,
"step": 300
},
{
"epoch": 0.6795016987542469,
"eval_loss": 0.5585977435112,
"eval_runtime": 393.6528,
"eval_samples_per_second": 3.986,
"eval_steps_per_second": 0.5,
"step": 300
},
{
"epoch": 0.7927519818799547,
"grad_norm": 0.5519748330116272,
"learning_rate": 4.476885644768857e-05,
"loss": 0.5964,
"step": 350
},
{
"epoch": 0.9060022650056625,
"grad_norm": 0.43155646324157715,
"learning_rate": 2.0437956204379563e-05,
"loss": 0.5656,
"step": 400
},
{
"epoch": 0.9060022650056625,
"eval_loss": 0.5522705912590027,
"eval_runtime": 393.6183,
"eval_samples_per_second": 3.986,
"eval_steps_per_second": 0.5,
"step": 400
},
{
"epoch": 1.0203850509626273,
"grad_norm": 0.5300919413566589,
"learning_rate": 0.0001614712643678161,
"loss": 0.5,
"step": 450
},
{
"epoch": 1.1336353340883352,
"grad_norm": 0.6625114679336548,
"learning_rate": 0.00015687356321839082,
"loss": 0.4964,
"step": 500
},
{
"epoch": 1.1336353340883352,
"eval_loss": 0.5636059045791626,
"eval_runtime": 394.3412,
"eval_samples_per_second": 3.979,
"eval_steps_per_second": 0.5,
"step": 500
},
{
"epoch": 1.246885617214043,
"grad_norm": 0.635853111743927,
"learning_rate": 0.0001522758620689655,
"loss": 0.5245,
"step": 550
},
{
"epoch": 1.3601359003397508,
"grad_norm": 0.6611766219139099,
"learning_rate": 0.00014767816091954024,
"loss": 0.5212,
"step": 600
},
{
"epoch": 1.3601359003397508,
"eval_loss": 0.5590068101882935,
"eval_runtime": 394.5384,
"eval_samples_per_second": 3.977,
"eval_steps_per_second": 0.499,
"step": 600
},
{
"epoch": 1.4733861834654587,
"grad_norm": 0.5332358479499817,
"learning_rate": 0.00014308045977011496,
"loss": 0.5143,
"step": 650
},
{
"epoch": 1.5866364665911665,
"grad_norm": 0.7008891701698303,
"learning_rate": 0.00013848275862068967,
"loss": 0.506,
"step": 700
},
{
"epoch": 1.5866364665911665,
"eval_loss": 0.5510138869285583,
"eval_runtime": 394.522,
"eval_samples_per_second": 3.977,
"eval_steps_per_second": 0.499,
"step": 700
},
{
"epoch": 1.6998867497168741,
"grad_norm": 0.6186133623123169,
"learning_rate": 0.00013388505747126436,
"loss": 0.5094,
"step": 750
},
{
"epoch": 1.8131370328425822,
"grad_norm": 0.5614038109779358,
"learning_rate": 0.00012928735632183907,
"loss": 0.5112,
"step": 800
},
{
"epoch": 1.8131370328425822,
"eval_loss": 0.5450772643089294,
"eval_runtime": 394.4384,
"eval_samples_per_second": 3.978,
"eval_steps_per_second": 0.499,
"step": 800
}
],
"logging_steps": 50,
"max_steps": 2205,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 9.048744699120845e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}