|
{
  "best_global_step": 4000,
  "best_metric": 61.04808211777417,
  "best_model_checkpoint": "./whisper-base-en/checkpoint-4000",
  "epoch": 40.0,
  "eval_steps": 500,
  "global_step": 4000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 5.0,
      "grad_norm": 8.189814567565918,
      "learning_rate": 9.9e-06,
      "loss": 1.1053,
      "step": 500
    },
    {
      "epoch": 5.0,
      "eval_loss": 0.7731186151504517,
      "eval_runtime": 124.1426,
      "eval_samples_per_second": 1.603,
      "eval_steps_per_second": 1.603,
      "eval_wer": 69.74608319827121,
      "step": 500
    },
    {
      "epoch": 10.0,
      "grad_norm": 6.20778751373291,
      "learning_rate": 9.658620689655173e-06,
      "loss": 0.1669,
      "step": 1000
    },
    {
      "epoch": 10.0,
      "eval_loss": 0.890454113483429,
      "eval_runtime": 117.5449,
      "eval_samples_per_second": 1.693,
      "eval_steps_per_second": 1.693,
      "eval_wer": 65.09994597514857,
      "step": 1000
    },
    {
      "epoch": 15.0,
      "grad_norm": 2.8285183906555176,
      "learning_rate": 9.313793103448276e-06,
      "loss": 0.0208,
      "step": 1500
    },
    {
      "epoch": 15.0,
      "eval_loss": 1.1541786193847656,
      "eval_runtime": 117.6046,
      "eval_samples_per_second": 1.692,
      "eval_steps_per_second": 1.692,
      "eval_wer": 62.452728254997304,
      "step": 1500
    },
    {
      "epoch": 20.0,
      "grad_norm": 0.5594582557678223,
      "learning_rate": 8.96896551724138e-06,
      "loss": 0.0045,
      "step": 2000
    },
    {
      "epoch": 20.0,
      "eval_loss": 1.191721796989441,
      "eval_runtime": 118.3942,
      "eval_samples_per_second": 1.681,
      "eval_steps_per_second": 1.681,
      "eval_wer": 62.452728254997304,
      "step": 2000
    },
    {
      "epoch": 25.0,
      "grad_norm": 0.05242369323968887,
      "learning_rate": 8.624137931034482e-06,
      "loss": 0.0019,
      "step": 2500
    },
    {
      "epoch": 25.0,
      "eval_loss": 1.2775946855545044,
      "eval_runtime": 117.3796,
      "eval_samples_per_second": 1.695,
      "eval_steps_per_second": 1.695,
      "eval_wer": 61.85845488924906,
      "step": 2500
    },
    {
      "epoch": 30.0,
      "grad_norm": 10.651979446411133,
      "learning_rate": 8.279310344827587e-06,
      "loss": 0.0029,
      "step": 3000
    },
    {
      "epoch": 30.0,
      "eval_loss": 1.2370944023132324,
      "eval_runtime": 119.3897,
      "eval_samples_per_second": 1.667,
      "eval_steps_per_second": 1.667,
      "eval_wer": 65.53214478660183,
      "step": 3000
    },
    {
      "epoch": 35.0,
      "grad_norm": 1.879603385925293,
      "learning_rate": 7.934482758620691e-06,
      "loss": 0.0052,
      "step": 3500
    },
    {
      "epoch": 35.0,
      "eval_loss": 1.2851910591125488,
      "eval_runtime": 118.7346,
      "eval_samples_per_second": 1.676,
      "eval_steps_per_second": 1.676,
      "eval_wer": 62.02052944354403,
      "step": 3500
    },
    {
      "epoch": 40.0,
      "grad_norm": 2.0357964038848877,
      "learning_rate": 7.589655172413793e-06,
      "loss": 0.0014,
      "step": 4000
    },
    {
      "epoch": 40.0,
      "eval_loss": 1.3512685298919678,
      "eval_runtime": 119.2661,
      "eval_samples_per_second": 1.669,
      "eval_steps_per_second": 1.669,
      "eval_wer": 61.04808211777417,
      "step": 4000
    }
  ],
  "logging_steps": 500,
  "max_steps": 15000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 150,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 4.1484453543936e+18,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}