{
  "best_metric": 82.67898383371825,
  "best_model_checkpoint": "whisper-small-chinese/checkpoint-1000",
  "epoch": 192.30769230769232,
  "eval_steps": 1000,
  "global_step": 5000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 19.23076923076923,
      "grad_norm": 1.2309635877609253,
      "learning_rate": 1.7395e-05,
      "loss": 0.2754,
      "step": 500
    },
    {
      "epoch": 38.46153846153846,
      "grad_norm": 0.0204775370657444,
      "learning_rate": 1.556722222222222e-05,
      "loss": 0.006,
      "step": 1000
    },
    {
      "epoch": 38.46153846153846,
      "eval_loss": 0.9139542579650879,
      "eval_runtime": 11.2242,
      "eval_samples_per_second": 8.107,
      "eval_steps_per_second": 0.356,
      "eval_wer": 82.67898383371825,
      "step": 1000
    },
    {
      "epoch": 57.69230769230769,
      "grad_norm": 0.007038629613816738,
      "learning_rate": 1.3622777777777778e-05,
      "loss": 0.0024,
      "step": 1500
    },
    {
      "epoch": 76.92307692307692,
      "grad_norm": 0.004221443086862564,
      "learning_rate": 1.1678333333333333e-05,
      "loss": 0.0003,
      "step": 2000
    },
    {
      "epoch": 76.92307692307692,
      "eval_loss": 0.9765084385871887,
      "eval_runtime": 11.0976,
      "eval_samples_per_second": 8.2,
      "eval_steps_per_second": 0.36,
      "eval_wer": 84.52655889145497,
      "step": 2000
    },
    {
      "epoch": 96.15384615384616,
      "grad_norm": 0.0029376207385212183,
      "learning_rate": 9.733888888888888e-06,
      "loss": 0.0002,
      "step": 2500
    },
    {
      "epoch": 115.38461538461539,
      "grad_norm": 0.0019433604320511222,
      "learning_rate": 7.789444444444444e-06,
      "loss": 0.0001,
      "step": 3000
    },
    {
      "epoch": 115.38461538461539,
      "eval_loss": 1.022544264793396,
      "eval_runtime": 11.1249,
      "eval_samples_per_second": 8.18,
      "eval_steps_per_second": 0.36,
      "eval_wer": 84.06466512702079,
      "step": 3000
    },
    {
      "epoch": 134.6153846153846,
      "grad_norm": 0.001670741941779852,
      "learning_rate": 5.845e-06,
      "loss": 0.0001,
      "step": 3500
    },
    {
      "epoch": 153.84615384615384,
      "grad_norm": 0.0014259717427194118,
      "learning_rate": 3.9005555555555556e-06,
      "loss": 0.0001,
      "step": 4000
    },
    {
      "epoch": 153.84615384615384,
      "eval_loss": 1.0503652095794678,
      "eval_runtime": 11.0802,
      "eval_samples_per_second": 8.213,
      "eval_steps_per_second": 0.361,
      "eval_wer": 83.37182448036951,
      "step": 4000
    },
    {
      "epoch": 173.07692307692307,
      "grad_norm": 0.0013439098838716745,
      "learning_rate": 1.956111111111111e-06,
      "loss": 0.0001,
      "step": 4500
    },
    {
      "epoch": 192.30769230769232,
      "grad_norm": 0.0017614001408219337,
      "learning_rate": 1.1666666666666665e-08,
      "loss": 0.0001,
      "step": 5000
    },
    {
      "epoch": 192.30769230769232,
      "eval_loss": 1.0611838102340698,
      "eval_runtime": 11.1867,
      "eval_samples_per_second": 8.135,
      "eval_steps_per_second": 0.358,
      "eval_wer": 83.37182448036951,
      "step": 5000
    }
  ],
  "logging_steps": 500,
  "max_steps": 5000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 193,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 6.876182050504704e+19,
  "train_batch_size": 48,
  "trial_name": null,
  "trial_params": null
}