{
  "best_global_step": 400,
  "best_metric": 0.9678384954963574,
  "best_model_checkpoint": "BTX24/hiera-finetuned-busi/checkpoint-400",
  "epoch": 48.0,
  "eval_steps": 100,
  "global_step": 480,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 5.0,
      "grad_norm": 10.227621078491211,
      "learning_rate": 1.9999735576321776e-05,
      "loss": 0.6534,
      "step": 50
    },
    {
      "epoch": 10.0,
      "grad_norm": 8.809008598327637,
      "learning_rate": 1.9346189253489888e-05,
      "loss": 0.2276,
      "step": 100
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.9615384615384616,
      "eval_f1": 0.9610639894395369,
      "eval_loss": 0.16655339300632477,
      "eval_precision": 0.964046822742475,
      "eval_recall": 0.9615384615384616,
      "eval_runtime": 2.0085,
      "eval_samples_per_second": 77.671,
      "eval_steps_per_second": 9.958,
      "step": 100
    },
    {
      "epoch": 15.0,
      "grad_norm": 5.131218910217285,
      "learning_rate": 1.7470250712409963e-05,
      "loss": 0.1848,
      "step": 150
    },
    {
      "epoch": 20.0,
      "grad_norm": 27.55030059814453,
      "learning_rate": 1.4617486132350343e-05,
      "loss": 0.1447,
      "step": 200
    },
    {
      "epoch": 20.0,
      "eval_accuracy": 0.9358974358974359,
      "eval_f1": 0.9351834928758006,
      "eval_loss": 0.18257668614387512,
      "eval_precision": 0.9425747863247864,
      "eval_recall": 0.9358974358974359,
      "eval_runtime": 1.8774,
      "eval_samples_per_second": 83.092,
      "eval_steps_per_second": 10.653,
      "step": 200
    },
    {
      "epoch": 25.0,
      "grad_norm": 4.577565670013428,
      "learning_rate": 1.1160929141252303e-05,
      "loss": 0.1148,
      "step": 250
    },
    {
      "epoch": 30.0,
      "grad_norm": 7.3700270652771,
      "learning_rate": 7.623141076738271e-06,
      "loss": 0.0882,
      "step": 300
    },
    {
      "epoch": 30.0,
      "eval_accuracy": 0.9615384615384616,
      "eval_f1": 0.9611905417400026,
      "eval_loss": 0.16116316616535187,
      "eval_precision": 0.9612915234951416,
      "eval_recall": 0.9615384615384616,
      "eval_runtime": 1.8625,
      "eval_samples_per_second": 83.757,
      "eval_steps_per_second": 10.738,
      "step": 300
    },
    {
      "epoch": 35.0,
      "grad_norm": 1.2752153873443604,
      "learning_rate": 4.323957196315714e-06,
      "loss": 0.0728,
      "step": 350
    },
    {
      "epoch": 40.0,
      "grad_norm": 5.241044044494629,
      "learning_rate": 1.7669848724331984e-06,
      "loss": 0.0606,
      "step": 400
    },
    {
      "epoch": 40.0,
      "eval_accuracy": 0.967948717948718,
      "eval_f1": 0.9678384954963574,
      "eval_loss": 0.1380443274974823,
      "eval_precision": 0.9678410301752478,
      "eval_recall": 0.967948717948718,
      "eval_runtime": 1.8643,
      "eval_samples_per_second": 83.679,
      "eval_steps_per_second": 10.728,
      "step": 400
    },
    {
      "epoch": 45.0,
      "grad_norm": 13.990592956542969,
      "learning_rate": 2.865793018673857e-07,
      "loss": 0.0628,
      "step": 450
    },
    {
      "epoch": 48.0,
      "step": 480,
      "total_flos": 1.3730970154696704e+18,
      "train_loss": 0.17117963035901387,
      "train_runtime": 645.3789,
      "train_samples_per_second": 46.41,
      "train_steps_per_second": 0.744
    }
  ],
  "logging_steps": 50,
  "max_steps": 480,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 48,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.3730970154696704e+18,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}