{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.029961549345007242,
"eval_steps": 1000,
"global_step": 3600,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 8.322652595835345e-06,
"grad_norm": 14.9375,
"learning_rate": 2e-06,
"loss": 0.6484,
"step": 1
},
{
"epoch": 0.0008322652595835345,
"grad_norm": 0.3671875,
"learning_rate": 0.0002,
"loss": 0.4345,
"step": 100
},
{
"epoch": 0.001664530519167069,
"grad_norm": 0.1689453125,
"learning_rate": 0.0004,
"loss": 0.2537,
"step": 200
},
{
"epoch": 0.0024967957787506035,
"grad_norm": 0.55859375,
"learning_rate": 0.0006,
"loss": 0.2344,
"step": 300
},
{
"epoch": 0.003329061038334138,
"grad_norm": 0.154296875,
"learning_rate": 0.0008,
"loss": 0.222,
"step": 400
},
{
"epoch": 0.004161326297917672,
"grad_norm": 0.126953125,
"learning_rate": 0.001,
"loss": 0.2142,
"step": 500
},
{
"epoch": 0.004993591557501207,
"grad_norm": 0.12353515625,
"learning_rate": 0.0012,
"loss": 0.2011,
"step": 600
},
{
"epoch": 0.005825856817084741,
"grad_norm": 0.10693359375,
"learning_rate": 0.0014,
"loss": 0.1897,
"step": 700
},
{
"epoch": 0.006658122076668276,
"grad_norm": 0.10595703125,
"learning_rate": 0.0016,
"loss": 0.1775,
"step": 800
},
{
"epoch": 0.0074903873362518105,
"grad_norm": 0.0859375,
"learning_rate": 0.0018000000000000002,
"loss": 0.1615,
"step": 900
},
{
"epoch": 0.008322652595835344,
"grad_norm": 0.083984375,
"learning_rate": 0.002,
"loss": 0.1444,
"step": 1000
},
{
"epoch": 0.008322652595835344,
"eval_peoplespeech-clean-transcription_loss": 2.2110702991485596,
"eval_peoplespeech-clean-transcription_model_preparation_time": 0.0065,
"eval_peoplespeech-clean-transcription_runtime": 9.5405,
"eval_peoplespeech-clean-transcription_samples_per_second": 6.708,
"eval_peoplespeech-clean-transcription_steps_per_second": 0.839,
"step": 1000
},
{
"epoch": 0.009154917855418878,
"grad_norm": 0.0771484375,
"learning_rate": 0.001999725185109816,
"loss": 0.1323,
"step": 1100
},
{
"epoch": 0.009987183115002414,
"grad_norm": 0.083984375,
"learning_rate": 0.0019989008914857113,
"loss": 0.1242,
"step": 1200
},
{
"epoch": 0.010819448374585948,
"grad_norm": 0.07861328125,
"learning_rate": 0.00199752757218401,
"loss": 0.1162,
"step": 1300
},
{
"epoch": 0.011651713634169482,
"grad_norm": 0.07275390625,
"learning_rate": 0.001995605982021898,
"loss": 0.1128,
"step": 1400
},
{
"epoch": 0.012483978893753018,
"grad_norm": 0.07666015625,
"learning_rate": 0.0019931371771625545,
"loss": 0.1094,
"step": 1500
},
{
"epoch": 0.013316244153336551,
"grad_norm": 0.06201171875,
"learning_rate": 0.001990122514534651,
"loss": 0.1052,
"step": 1600
},
{
"epoch": 0.014148509412920085,
"grad_norm": 0.058837890625,
"learning_rate": 0.0019865636510865464,
"loss": 0.1022,
"step": 1700
},
{
"epoch": 0.014980774672503621,
"grad_norm": 0.0625,
"learning_rate": 0.001982462542875576,
"loss": 0.1011,
"step": 1800
},
{
"epoch": 0.015813039932087155,
"grad_norm": 0.06787109375,
"learning_rate": 0.001977821443992945,
"loss": 0.0983,
"step": 1900
},
{
"epoch": 0.01664530519167069,
"grad_norm": 0.0537109375,
"learning_rate": 0.001972642905324813,
"loss": 0.0975,
"step": 2000
},
{
"epoch": 0.01664530519167069,
"eval_peoplespeech-clean-transcription_loss": 1.6914767026901245,
"eval_peoplespeech-clean-transcription_model_preparation_time": 0.0065,
"eval_peoplespeech-clean-transcription_runtime": 9.5301,
"eval_peoplespeech-clean-transcription_samples_per_second": 6.716,
"eval_peoplespeech-clean-transcription_steps_per_second": 0.839,
"step": 2000
},
{
"epoch": 0.017477570451254223,
"grad_norm": 0.05859375,
"learning_rate": 0.0019669297731502505,
"loss": 0.0947,
"step": 2100
},
{
"epoch": 0.018309835710837757,
"grad_norm": 0.062255859375,
"learning_rate": 0.00196068518757684,
"loss": 0.0935,
"step": 2200
},
{
"epoch": 0.019142100970421294,
"grad_norm": 0.059326171875,
"learning_rate": 0.001953912580814779,
"loss": 0.0911,
"step": 2300
},
{
"epoch": 0.019974366230004828,
"grad_norm": 0.060302734375,
"learning_rate": 0.0019466156752904343,
"loss": 0.0904,
"step": 2400
},
{
"epoch": 0.020806631489588362,
"grad_norm": 0.0615234375,
"learning_rate": 0.0019387984816003866,
"loss": 0.0882,
"step": 2500
},
{
"epoch": 0.021638896749171896,
"grad_norm": 0.056884765625,
"learning_rate": 0.0019304652963070869,
"loss": 0.0874,
"step": 2600
},
{
"epoch": 0.02247116200875543,
"grad_norm": 0.049072265625,
"learning_rate": 0.0019216206995773372,
"loss": 0.0871,
"step": 2700
},
{
"epoch": 0.023303427268338964,
"grad_norm": 0.053955078125,
"learning_rate": 0.0019122695526648968,
"loss": 0.0856,
"step": 2800
},
{
"epoch": 0.0241356925279225,
"grad_norm": 0.05517578125,
"learning_rate": 0.0019024169952385887,
"loss": 0.0845,
"step": 2900
},
{
"epoch": 0.024967957787506035,
"grad_norm": 0.0546875,
"learning_rate": 0.0018920684425573864,
"loss": 0.0852,
"step": 3000
},
{
"epoch": 0.024967957787506035,
"eval_peoplespeech-clean-transcription_loss": 1.6414048671722412,
"eval_peoplespeech-clean-transcription_model_preparation_time": 0.0065,
"eval_peoplespeech-clean-transcription_runtime": 9.8151,
"eval_peoplespeech-clean-transcription_samples_per_second": 6.521,
"eval_peoplespeech-clean-transcription_steps_per_second": 0.815,
"step": 3000
},
{
"epoch": 0.02580022304708957,
"grad_norm": 0.052978515625,
"learning_rate": 0.0018812295824940284,
"loss": 0.0836,
"step": 3100
},
{
"epoch": 0.026632488306673103,
"grad_norm": 0.05126953125,
"learning_rate": 0.0018699063724087904,
"loss": 0.0821,
"step": 3200
},
{
"epoch": 0.027464753566256637,
"grad_norm": 0.056884765625,
"learning_rate": 0.0018581050358751443,
"loss": 0.0816,
"step": 3300
},
{
"epoch": 0.02829701882584017,
"grad_norm": 0.045654296875,
"learning_rate": 0.0018458320592590974,
"loss": 0.0814,
"step": 3400
},
{
"epoch": 0.029129284085423705,
"grad_norm": 0.04638671875,
"learning_rate": 0.0018330941881540914,
"loss": 0.0791,
"step": 3500
},
{
"epoch": 0.029961549345007242,
"grad_norm": 0.043212890625,
"learning_rate": 0.0018198984236734246,
"loss": 0.0804,
"step": 3600
}
],
"logging_steps": 100,
"max_steps": 14400,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 3600,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.3112799838724096e+17,
"train_batch_size": 24,
"trial_name": null,
"trial_params": null
}