sardinian-whisper / trainer_state.json
{
"best_global_step": 774,
"best_metric": 0.3903301886792453,
"best_model_checkpoint": "./whisper-sardu-checkpoints/checkpoint-774",
"epoch": 7.0,
"eval_steps": 100,
"global_step": 903,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.38910505836575876,
"grad_norm": 11.551776885986328,
"learning_rate": 1.3800000000000002e-05,
"loss": 2.7016,
"step": 50
},
{
"epoch": 0.7782101167315175,
"grad_norm": 8.857993125915527,
"learning_rate": 2.88e-05,
"loss": 0.9443,
"step": 100
},
{
"epoch": 1.0,
"eval_loss": 0.5204761028289795,
"eval_runtime": 546.4632,
"eval_samples_per_second": 0.375,
"eval_steps_per_second": 0.048,
"eval_wer": 0.5660377358490566,
"step": 129
},
{
"epoch": 1.1634241245136188,
"grad_norm": 5.102855205535889,
"learning_rate": 2.9953507804049928e-05,
"loss": 0.4937,
"step": 150
},
{
"epoch": 1.5525291828793775,
"grad_norm": 5.129488945007324,
"learning_rate": 2.979785942784645e-05,
"loss": 0.3193,
"step": 200
},
{
"epoch": 1.9416342412451362,
"grad_norm": 4.888234615325928,
"learning_rate": 2.953384299136067e-05,
"loss": 0.2984,
"step": 250
},
{
"epoch": 2.0,
"eval_loss": 0.41961559653282166,
"eval_runtime": 542.4905,
"eval_samples_per_second": 0.378,
"eval_steps_per_second": 0.048,
"eval_wer": 0.5660377358490566,
"step": 258
},
{
"epoch": 2.3268482490272375,
"grad_norm": 3.4571590423583984,
"learning_rate": 2.9163391946470812e-05,
"loss": 0.1334,
"step": 300
},
{
"epoch": 2.7159533073929962,
"grad_norm": 3.795649528503418,
"learning_rate": 2.8689219189684655e-05,
"loss": 0.1045,
"step": 350
},
{
"epoch": 3.0,
"eval_loss": 0.4368809163570404,
"eval_runtime": 437.9643,
"eval_samples_per_second": 0.468,
"eval_steps_per_second": 0.059,
"eval_wer": 0.42452830188679247,
"step": 387
},
{
"epoch": 3.1011673151750974,
"grad_norm": 1.8026232719421387,
"learning_rate": 2.811479719498683e-05,
"loss": 0.1069,
"step": 400
},
{
"epoch": 3.490272373540856,
"grad_norm": 3.5268654823303223,
"learning_rate": 2.7444332584129128e-05,
"loss": 0.0503,
"step": 450
},
{
"epoch": 3.8793774319066148,
"grad_norm": 2.7765798568725586,
"learning_rate": 2.6682735320591115e-05,
"loss": 0.0516,
"step": 500
},
{
"epoch": 4.0,
"eval_loss": 0.4473298192024231,
"eval_runtime": 439.5871,
"eval_samples_per_second": 0.466,
"eval_steps_per_second": 0.059,
"eval_wer": 0.4033018867924528,
"step": 516
},
{
"epoch": 4.264591439688716,
"grad_norm": 3.099731206893921,
"learning_rate": 2.5835582752810806e-05,
"loss": 0.0333,
"step": 550
},
{
"epoch": 4.653696498054475,
"grad_norm": 1.8574504852294922,
"learning_rate": 2.4909078770004833e-05,
"loss": 0.0267,
"step": 600
},
{
"epoch": 5.0,
"eval_loss": 0.4518275558948517,
"eval_runtime": 439.9833,
"eval_samples_per_second": 0.466,
"eval_steps_per_second": 0.059,
"eval_wer": 0.41509433962264153,
"step": 645
},
{
"epoch": 5.038910505836576,
"grad_norm": 1.2988134622573853,
"learning_rate": 2.3910008369689226e-05,
"loss": 0.0315,
"step": 650
},
{
"epoch": 5.428015564202335,
"grad_norm": 0.6612567901611328,
"learning_rate": 2.2845687969613155e-05,
"loss": 0.0168,
"step": 700
},
{
"epoch": 5.817120622568093,
"grad_norm": 0.6520644426345825,
"learning_rate": 2.1723911827982513e-05,
"loss": 0.0177,
"step": 750
},
{
"epoch": 6.0,
"eval_loss": 0.45992329716682434,
"eval_runtime": 436.4339,
"eval_samples_per_second": 0.47,
"eval_steps_per_second": 0.06,
"eval_wer": 0.3903301886792453,
"step": 774
},
{
"epoch": 6.202334630350195,
"grad_norm": 1.1170883178710938,
"learning_rate": 2.0552894964350105e-05,
"loss": 0.0134,
"step": 800
},
{
"epoch": 6.591439688715953,
"grad_norm": 1.3675938844680786,
"learning_rate": 1.9341212999175674e-05,
"loss": 0.009,
"step": 850
},
{
"epoch": 6.980544747081712,
"grad_norm": 2.0311999320983887,
"learning_rate": 1.8097739352624154e-05,
"loss": 0.0087,
"step": 900
},
{
"epoch": 7.0,
"eval_loss": 0.4803558588027954,
"eval_runtime": 437.6131,
"eval_samples_per_second": 0.468,
"eval_steps_per_second": 0.059,
"eval_wer": 0.41037735849056606,
"step": 903
}
],
"logging_steps": 50,
"max_steps": 1935,
"num_input_tokens_seen": 0,
"num_train_epochs": 15,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 4.1513009836032e+18,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
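
The JSON above is the standard state file the Hugging Face Trainer writes alongside each checkpoint. As a minimal sketch (not part of this repository), the snippet below shows one way to inspect it offline: it loads the file, splits log_history into training and evaluation records, and prints the per-epoch WER alongside the stored best checkpoint. The local path is an assumption; point it at whichever checkpoint folder actually contains this trainer_state.json.

# Minimal inspection sketch; the STATE_PATH below is assumed, adjust to your local layout.
import json

STATE_PATH = "./whisper-sardu-checkpoints/checkpoint-903/trainer_state.json"  # assumed location

with open(STATE_PATH, encoding="utf-8") as f:
    state = json.load(f)

# log_history is a flat list: entries with "loss" are training logs (every 50 steps),
# entries with "eval_wer" are the end-of-epoch evaluations.
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_wer" in e]

print(f"best checkpoint: {state['best_model_checkpoint']} "
      f"(WER {state['best_metric']:.4f} at step {state['best_global_step']})")

for e in eval_log:
    print(f"epoch {e['epoch']:>4}: eval_loss={e['eval_loss']:.4f}  "
          f"eval_wer={e['eval_wer']:.4f}  (step {e['step']})")

Walking the eval entries this way matches the stored best_model_checkpoint: eval_wer reaches its minimum of roughly 0.3903 at step 774 (epoch 6) and rises again at epoch 7, which is why checkpoint-774 is recorded as best.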