{
  "best_metric": 0.9535481125844637,
  "best_model_checkpoint": "microsoft-swinv2-small-patch4-window16-256-finetuned-xblockm\\checkpoint-160",
  "epoch": 7.901234567901234,
  "eval_steps": 500,
  "global_step": 160,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.49382716049382713,
      "grad_norm": 1.6179720163345337,
      "learning_rate": 1.5503875968992248e-05,
      "loss": 0.6579,
      "step": 10
    },
    {
      "epoch": 0.9876543209876543,
      "grad_norm": 2.4816274642944336,
      "learning_rate": 3.1007751937984497e-05,
      "loss": 0.4357,
      "step": 20
    },
    {
      "epoch": 0.9876543209876543,
      "eval_loss": 0.2544282078742981,
      "eval_roc_auc": 0.7784430091957527,
      "eval_runtime": 57.6433,
      "eval_samples_per_second": 5.621,
      "eval_steps_per_second": 0.364,
      "step": 20
    },
    {
      "epoch": 1.4814814814814814,
      "grad_norm": 1.2008687257766724,
      "learning_rate": 4.651162790697675e-05,
      "loss": 0.2163,
      "step": 30
    },
    {
      "epoch": 1.9753086419753085,
      "grad_norm": 1.6955859661102295,
      "learning_rate": 6.201550387596899e-05,
      "loss": 0.2027,
      "step": 40
    },
    {
      "epoch": 1.9753086419753085,
      "eval_loss": 0.20164352655410767,
      "eval_roc_auc": 0.8430768302596149,
      "eval_runtime": 62.1569,
      "eval_samples_per_second": 5.213,
      "eval_steps_per_second": 0.338,
      "step": 40
    },
    {
      "epoch": 2.4691358024691357,
      "grad_norm": 0.869544267654419,
      "learning_rate": 7.751937984496124e-05,
      "loss": 0.1889,
      "step": 50
    },
    {
      "epoch": 2.962962962962963,
      "grad_norm": 0.8159006834030151,
      "learning_rate": 9.30232558139535e-05,
      "loss": 0.1743,
      "step": 60
    },
    {
      "epoch": 2.962962962962963,
      "eval_loss": 0.17011196911334991,
      "eval_roc_auc": 0.8911886208911244,
      "eval_runtime": 79.4085,
      "eval_samples_per_second": 4.08,
      "eval_steps_per_second": 0.264,
      "step": 60
    },
    {
      "epoch": 3.45679012345679,
      "grad_norm": 0.8456718921661377,
      "learning_rate": 0.00010852713178294573,
      "loss": 0.1567,
      "step": 70
    },
    {
      "epoch": 3.950617283950617,
      "grad_norm": 0.9450457692146301,
      "learning_rate": 0.00012403100775193799,
      "loss": 0.1625,
      "step": 80
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.16766950488090515,
      "eval_roc_auc": 0.9083226959813038,
      "eval_runtime": 84.3656,
      "eval_samples_per_second": 3.84,
      "eval_steps_per_second": 0.249,
      "step": 81
    },
    {
      "epoch": 4.444444444444445,
      "grad_norm": 0.6654064655303955,
      "learning_rate": 0.00013953488372093025,
      "loss": 0.1553,
      "step": 90
    },
    {
      "epoch": 4.938271604938271,
      "grad_norm": 0.7665498852729797,
      "learning_rate": 0.0001550387596899225,
      "loss": 0.1321,
      "step": 100
    },
    {
      "epoch": 4.987654320987654,
      "eval_loss": 0.144697904586792,
      "eval_roc_auc": 0.9246192780572068,
      "eval_runtime": 39.0205,
      "eval_samples_per_second": 8.303,
      "eval_steps_per_second": 0.538,
      "step": 101
    },
    {
      "epoch": 5.432098765432099,
      "grad_norm": 0.6621432900428772,
      "learning_rate": 0.00017054263565891473,
      "loss": 0.1232,
      "step": 110
    },
    {
      "epoch": 5.925925925925926,
      "grad_norm": 0.7424349188804626,
      "learning_rate": 0.000186046511627907,
      "loss": 0.1155,
      "step": 120
    },
    {
      "epoch": 5.9753086419753085,
      "eval_loss": 0.14180859923362732,
      "eval_roc_auc": 0.9311112381242697,
      "eval_runtime": 132.4426,
      "eval_samples_per_second": 2.446,
      "eval_steps_per_second": 0.159,
      "step": 121
    },
    {
      "epoch": 6.419753086419753,
      "grad_norm": 0.6604060530662537,
      "learning_rate": 0.00019999963640522314,
      "loss": 0.1009,
      "step": 130
    },
    {
      "epoch": 6.91358024691358,
      "grad_norm": 0.7176498174667358,
      "learning_rate": 0.0001999560082311786,
      "loss": 0.0959,
      "step": 140
    },
    {
      "epoch": 6.962962962962963,
      "eval_loss": 0.13807977735996246,
      "eval_roc_auc": 0.9460352842554489,
      "eval_runtime": 148.6943,
      "eval_samples_per_second": 2.179,
      "eval_steps_per_second": 0.141,
      "step": 141
    },
    {
      "epoch": 7.407407407407407,
      "grad_norm": 0.7749078869819641,
      "learning_rate": 0.0001998396974527365,
      "loss": 0.0748,
      "step": 150
    },
    {
      "epoch": 7.901234567901234,
      "grad_norm": 0.5961336493492126,
      "learning_rate": 0.00019965078864480587,
      "loss": 0.0788,
      "step": 160
    },
    {
      "epoch": 7.901234567901234,
      "eval_loss": 0.12518148124217987,
      "eval_roc_auc": 0.9535481125844637,
      "eval_runtime": 101.3985,
      "eval_samples_per_second": 3.195,
      "eval_steps_per_second": 0.207,
      "step": 160
    },
    {
      "epoch": 7.901234567901234,
      "step": 160,
      "total_flos": 4.522715256123187e+17,
      "train_loss": 0.19196589663624763,
      "train_runtime": 9159.5769,
      "train_samples_per_second": 1.13,
      "train_steps_per_second": 0.017
    }
  ],
  "logging_steps": 10,
  "max_steps": 160,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 8,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4.522715256123187e+17,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}