Abduraxim committed
Commit 6553188 · 1 Parent(s): 0084c0d
README.md ADDED
@@ -0,0 +1,73 @@
+ ---
+ language:
+ - uz
+ license: apache-2.0
+ tags:
+ - automatic-speech-recognition
+ - mozilla-foundation/common_voice_10_0
+ - generated_from_trainer
+ datasets:
+ - common_voice_10_0
+ model-index:
+ - name: uzbek_stt_5_version
+ results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # uzbek_stt_5_version
+
+ This model is a fine-tuned version of [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) on the MOZILLA-FOUNDATION/COMMON_VOICE_10_0 - UZ dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 1.8085
+ - Wer: 0.9421
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 3e-05
+ - train_batch_size: 32
+ - eval_batch_size: 16
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 2
+ - total_train_batch_size: 64
+ - total_eval_batch_size: 32
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_steps: 500
+ - num_epochs: 30.0
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Wer |
+ |:-------------:|:-----:|:-----:|:---------------:|:------:|
+ | 0.3452 | 5.45 | 5000 | 0.3839 | 0.4574 |
+ | 0.2466 | 10.91 | 10000 | 0.4011 | 0.4067 |
+ | 1.5753 | 16.36 | 15000 | 1.2937 | 0.8844 |
+ | 1.9454 | 21.81 | 20000 | 1.8227 | 0.9392 |
+ | 1.922 | 27.26 | 25000 | 1.8085 | 0.9421 |
+
+
+ ### Framework versions
+
+ - Transformers 4.22.0.dev0
+ - Pytorch 1.10.0+cu113
+ - Datasets 2.4.0
+ - Tokenizers 0.12.1
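The generated card above leaves the usage sections as "More information needed". A minimal inference sketch, assuming the files in this commit are published under a repo id such as `Abduraxim/uzbek_stt_5_version` (a hypothetical placeholder) and that the input audio is 16 kHz mono, as the preprocessor config added below requires:

```python
# Minimal sketch, not part of the original card. The repo id is a placeholder;
# point `model=` at the actual Hub id or a local clone of this repository.
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="Abduraxim/uzbek_stt_5_version",  # hypothetical repo id
)

# Accepts a path to an audio file (or a 16 kHz float array); returns {"text": ...}.
print(asr("sample_uz.wav")["text"])
```

Note that the weights in this commit correspond to an evaluation WER of 0.9421; per the training-results table, the checkpoint logged at step 10000 (WER 0.4067) scored considerably better, so the shipped weights may not be the strongest point of this run.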
added_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "</s>": 31,
+ "<s>": 30
+ }
all_results.json ADDED
@@ -0,0 +1,14 @@
+ {
+ "epoch": 30.0,
+ "eval_loss": 1.8085108995437622,
+ "eval_runtime": 205.3429,
+ "eval_samples": 12242,
+ "eval_samples_per_second": 59.617,
+ "eval_steps_per_second": 1.865,
+ "eval_wer": 0.9420627621915116,
+ "train_loss": 1.198725398988214,
+ "train_runtime": 26753.1928,
+ "train_samples": 58652,
+ "train_samples_per_second": 65.77,
+ "train_steps_per_second": 1.028
+ }
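A quick consistency check on the figures above (not part of the original log): the reported throughput follows directly from the sample count, epoch count, and runtime.

```python
# Sanity check of the reported throughput, using only values from all_results.json.
train_samples = 58652
num_epochs = 30.0
train_runtime_s = 26753.1928
global_steps = 27510  # from trainer_state.json below

print(train_samples * num_epochs / train_runtime_s)  # ~65.77 samples/s, as reported
print(global_steps / train_runtime_s)                # ~1.028 steps/s, as reported
```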
config.json ADDED
@@ -0,0 +1,117 @@
+ {
+ "_name_or_path": "facebook/wav2vec2-base",
+ "activation_dropout": 0.0,
+ "adapter_kernel_size": 3,
+ "adapter_stride": 2,
+ "add_adapter": false,
+ "apply_spec_augment": true,
+ "architectures": [
+ "Wav2Vec2ForCTC"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 1,
+ "classifier_proj_size": 256,
+ "codevector_dim": 256,
+ "contrastive_logits_temperature": 0.1,
+ "conv_bias": false,
+ "conv_dim": [
+ 512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512
+ ],
+ "conv_kernel": [
+ 10,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2
+ ],
+ "conv_stride": [
+ 5,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2
+ ],
+ "ctc_loss_reduction": "mean",
+ "ctc_zero_infinity": false,
+ "diversity_loss_weight": 0.1,
+ "do_stable_layer_norm": false,
+ "eos_token_id": 2,
+ "feat_extract_activation": "gelu",
+ "feat_extract_norm": "group",
+ "feat_proj_dropout": 0.0,
+ "feat_quantizer_dropout": 0.0,
+ "final_dropout": 0.0,
+ "freeze_feat_extract_train": true,
+ "hidden_act": "gelu",
+ "hidden_dropout": 0.0,
+ "hidden_size": 768,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "layer_norm_eps": 1e-05,
+ "layerdrop": 0.0,
+ "mask_channel_length": 10,
+ "mask_channel_min_space": 1,
+ "mask_channel_other": 0.0,
+ "mask_channel_prob": 0.0,
+ "mask_channel_selection": "static",
+ "mask_feature_length": 10,
+ "mask_feature_min_masks": 0,
+ "mask_feature_prob": 0.0,
+ "mask_time_length": 10,
+ "mask_time_min_masks": 2,
+ "mask_time_min_space": 1,
+ "mask_time_other": 0.0,
+ "mask_time_prob": 0.05,
+ "mask_time_selection": "static",
+ "model_type": "wav2vec2",
+ "no_mask_channel_overlap": false,
+ "no_mask_time_overlap": false,
+ "num_adapter_layers": 3,
+ "num_attention_heads": 12,
+ "num_codevector_groups": 2,
+ "num_codevectors_per_group": 320,
+ "num_conv_pos_embedding_groups": 16,
+ "num_conv_pos_embeddings": 128,
+ "num_feat_extract_layers": 7,
+ "num_hidden_layers": 12,
+ "num_negatives": 100,
+ "output_hidden_size": 768,
+ "pad_token_id": 29,
+ "proj_codevector_dim": 256,
+ "tdnn_dilation": [
+ 1,
+ 2,
+ 3,
+ 1,
+ 1
+ ],
+ "tdnn_dim": [
+ 512,
+ 512,
+ 512,
+ 512,
+ 1500
+ ],
+ "tdnn_kernel": [
+ 5,
+ 3,
+ 3,
+ 1,
+ 1
+ ],
+ "torch_dtype": "float32",
+ "transformers_version": "4.22.0.dev0",
+ "use_weighted_layer_sum": false,
+ "vocab_size": 32,
+ "xvector_output_dim": 512
+ }
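A small sketch (assuming a local clone of this repository as the working directory) that loads the config above and confirms the pieces relevant for CTC decoding: `vocab_size` 32 matches the 32-entry vocab.json added below, and `pad_token_id` 29 is the `[PAD]` entry that doubles as the CTC blank.

```python
# Sketch only; "." assumes the current directory is a clone of this repository.
from transformers import Wav2Vec2Config, Wav2Vec2ForCTC

config = Wav2Vec2Config.from_pretrained(".")
print(config.model_type, config.vocab_size, config.pad_token_id)  # wav2vec2 32 29

# from_pretrained(".") would also load pytorch_model.bin; building from the config
# alone gives a randomly initialised model with the same 32-way CTC output layer.
model = Wav2Vec2ForCTC(config)
print(model.lm_head.out_features)  # 32
```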
eval_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "epoch": 30.0,
+ "eval_loss": 1.8085108995437622,
+ "eval_runtime": 205.3429,
+ "eval_samples": 12242,
+ "eval_samples_per_second": 59.617,
+ "eval_steps_per_second": 1.865,
+ "eval_wer": 0.9420627621915116
+ }
preprocessor_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "do_normalize": true,
+ "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+ "feature_size": 1,
+ "padding_side": "right",
+ "padding_value": 0.0,
+ "return_attention_mask": false,
+ "sampling_rate": 16000
+ }
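The feature extractor config above means inputs are expected as 16 kHz mono waveforms, zero-mean/unit-variance normalised, with no attention mask. A short sketch of running it on a dummy waveform (the path and input are illustrative assumptions):

```python
# Illustrative sketch; "." assumes a local clone of this repository.
import numpy as np
from transformers import Wav2Vec2FeatureExtractor

feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(".")

audio = np.zeros(16000, dtype=np.float32)  # one second at the required 16 kHz
inputs = feature_extractor(audio, sampling_rate=16000, return_tensors="pt")
print(inputs.input_values.shape)  # torch.Size([1, 16000])
```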
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:860b235c5359efc6401e7ab0e580c2dbc76ebf9a089be02e27b47e2617672c49
+ size 377656855
special_tokens_map.json ADDED
@@ -0,0 +1,22 @@
+ {
+ "additional_special_tokens": [
+ {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ }
+ ],
+ "bos_token": "<s>",
+ "eos_token": "</s>",
+ "pad_token": "[PAD]",
+ "unk_token": "[UNK]"
+ }
tokenizer_config.json ADDED
@@ -0,0 +1,12 @@
+ {
+ "bos_token": "<s>",
+ "do_lower_case": false,
+ "eos_token": "</s>",
+ "name_or_path": "uzbek_stt_5_version",
+ "pad_token": "[PAD]",
+ "replace_word_delimiter_char": " ",
+ "special_tokens_map_file": null,
+ "tokenizer_class": "Wav2Vec2CTCTokenizer",
+ "unk_token": "[UNK]",
+ "word_delimiter_token": "|"
+ }
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 30.0,
+ "train_loss": 1.198725398988214,
+ "train_runtime": 26753.1928,
+ "train_samples": 58652,
+ "train_samples_per_second": 65.77,
+ "train_steps_per_second": 1.028
+ }
trainer_state.json ADDED
@@ -0,0 +1,400 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 30.0,
+ "global_step": 27510,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.55,
+ "learning_rate": 2.982e-05,
+ "loss": 4.6277,
+ "step": 500
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 2.9449092928544985e-05,
+ "loss": 1.5086,
+ "step": 1000
+ },
+ {
+ "epoch": 1.64,
+ "learning_rate": 2.889374305812662e-05,
+ "loss": 0.7041,
+ "step": 1500
+ },
+ {
+ "epoch": 2.18,
+ "learning_rate": 2.834061458718993e-05,
+ "loss": 0.5814,
+ "step": 2000
+ },
+ {
+ "epoch": 2.73,
+ "learning_rate": 2.7785264716771567e-05,
+ "loss": 0.5086,
+ "step": 2500
+ },
+ {
+ "epoch": 3.27,
+ "learning_rate": 2.7232136245834875e-05,
+ "loss": 0.462,
+ "step": 3000
+ },
+ {
+ "epoch": 3.82,
+ "learning_rate": 2.667789707515735e-05,
+ "loss": 0.4247,
+ "step": 3500
+ },
+ {
+ "epoch": 4.36,
+ "learning_rate": 2.6123657904479824e-05,
+ "loss": 0.3919,
+ "step": 4000
+ },
+ {
+ "epoch": 4.91,
+ "learning_rate": 2.556830803406146e-05,
+ "loss": 0.3743,
+ "step": 4500
+ },
+ {
+ "epoch": 5.45,
+ "learning_rate": 2.5014068863383933e-05,
+ "loss": 0.3452,
+ "step": 5000
+ },
+ {
+ "epoch": 5.45,
+ "eval_loss": 0.38393500447273254,
+ "eval_runtime": 207.46,
+ "eval_samples_per_second": 59.009,
+ "eval_steps_per_second": 1.846,
+ "eval_wer": 0.4573501807782915,
+ "step": 5000
+ },
+ {
+ "epoch": 6.0,
+ "learning_rate": 2.445871899296557e-05,
+ "loss": 0.3357,
+ "step": 5500
+ },
+ {
+ "epoch": 6.54,
+ "learning_rate": 2.3904479822288042e-05,
+ "loss": 0.3131,
+ "step": 6000
+ },
+ {
+ "epoch": 7.09,
+ "learning_rate": 2.334912995186968e-05,
+ "loss": 0.3039,
+ "step": 6500
+ },
+ {
+ "epoch": 7.63,
+ "learning_rate": 2.279489078119215e-05,
+ "loss": 0.2879,
+ "step": 7000
+ },
+ {
+ "epoch": 8.18,
+ "learning_rate": 2.2240651610514624e-05,
+ "loss": 0.2772,
+ "step": 7500
+ },
+ {
+ "epoch": 8.72,
+ "learning_rate": 2.1686412439837097e-05,
+ "loss": 0.2649,
+ "step": 8000
+ },
+ {
+ "epoch": 9.27,
+ "learning_rate": 2.1132173269159573e-05,
+ "loss": 0.2596,
+ "step": 8500
+ },
+ {
+ "epoch": 9.81,
+ "learning_rate": 2.057682339874121e-05,
+ "loss": 0.2526,
+ "step": 9000
+ },
+ {
+ "epoch": 10.36,
+ "learning_rate": 2.0021473528322843e-05,
+ "loss": 0.2451,
+ "step": 9500
+ },
+ {
+ "epoch": 10.91,
+ "learning_rate": 1.946834505738615e-05,
+ "loss": 0.2466,
+ "step": 10000
+ },
+ {
+ "epoch": 10.91,
+ "eval_loss": 0.40105822682380676,
+ "eval_runtime": 206.4585,
+ "eval_samples_per_second": 59.295,
+ "eval_steps_per_second": 1.855,
+ "eval_wer": 0.4067448569520906,
+ "step": 10000
+ },
+ {
+ "epoch": 11.45,
+ "learning_rate": 1.8914105886708627e-05,
+ "loss": 0.2564,
+ "step": 10500
+ },
+ {
+ "epoch": 12.0,
+ "learning_rate": 1.8358756016290264e-05,
+ "loss": 0.3219,
+ "step": 11000
+ },
+ {
+ "epoch": 12.54,
+ "learning_rate": 1.7804516845612736e-05,
+ "loss": 0.3501,
+ "step": 11500
+ },
+ {
+ "epoch": 13.09,
+ "learning_rate": 1.7249166975194373e-05,
+ "loss": 0.375,
+ "step": 12000
+ },
+ {
+ "epoch": 13.63,
+ "learning_rate": 1.669381710477601e-05,
+ "loss": 0.3621,
+ "step": 12500
+ },
+ {
+ "epoch": 14.18,
+ "learning_rate": 1.614068863383932e-05,
+ "loss": 0.4168,
+ "step": 13000
+ },
+ {
+ "epoch": 14.72,
+ "learning_rate": 1.5585338763420955e-05,
+ "loss": 0.515,
+ "step": 13500
+ },
+ {
+ "epoch": 15.27,
+ "learning_rate": 1.5033320992225103e-05,
+ "loss": 0.8026,
+ "step": 14000
+ },
+ {
+ "epoch": 15.81,
+ "learning_rate": 1.4477971121806738e-05,
+ "loss": 1.3623,
+ "step": 14500
+ },
+ {
+ "epoch": 16.36,
+ "learning_rate": 1.3922621251388376e-05,
+ "loss": 1.5753,
+ "step": 15000
+ },
+ {
+ "epoch": 16.36,
+ "eval_loss": 1.2936806678771973,
+ "eval_runtime": 205.5873,
+ "eval_samples_per_second": 59.546,
+ "eval_steps_per_second": 1.863,
+ "eval_wer": 0.8843522846092795,
+ "step": 15000
+ },
+ {
+ "epoch": 16.9,
+ "learning_rate": 1.3368382080710849e-05,
+ "loss": 1.6596,
+ "step": 15500
+ },
+ {
+ "epoch": 17.45,
+ "learning_rate": 1.2813032210292485e-05,
+ "loss": 1.6553,
+ "step": 16000
+ },
+ {
+ "epoch": 17.99,
+ "learning_rate": 1.2258793039614958e-05,
+ "loss": 1.7339,
+ "step": 16500
+ },
+ {
+ "epoch": 18.54,
+ "learning_rate": 1.1703443169196594e-05,
+ "loss": 1.8133,
+ "step": 17000
+ },
+ {
+ "epoch": 19.08,
+ "learning_rate": 1.1149203998519067e-05,
+ "loss": 1.7555,
+ "step": 17500
+ },
+ {
+ "epoch": 19.63,
+ "learning_rate": 1.059496482784154e-05,
+ "loss": 1.7767,
+ "step": 18000
+ },
+ {
+ "epoch": 20.17,
+ "learning_rate": 1.0039614957423176e-05,
+ "loss": 1.8686,
+ "step": 18500
+ },
+ {
+ "epoch": 20.72,
+ "learning_rate": 9.485375786745649e-06,
+ "loss": 1.8748,
+ "step": 19000
+ },
+ {
+ "epoch": 21.26,
+ "learning_rate": 8.931136616068122e-06,
+ "loss": 1.9346,
+ "step": 19500
+ },
+ {
+ "epoch": 21.81,
+ "learning_rate": 8.37578674564976e-06,
+ "loss": 1.9454,
+ "step": 20000
+ },
+ {
+ "epoch": 21.81,
+ "eval_loss": 1.822685718536377,
+ "eval_runtime": 205.1767,
+ "eval_samples_per_second": 59.666,
+ "eval_steps_per_second": 1.867,
+ "eval_wer": 0.9391778682019174,
+ "step": 20000
+ },
+ {
+ "epoch": 22.36,
+ "learning_rate": 7.820436875231396e-06,
+ "loss": 1.9335,
+ "step": 20500
+ },
+ {
+ "epoch": 22.9,
+ "learning_rate": 7.267308404294706e-06,
+ "loss": 1.9261,
+ "step": 21000
+ },
+ {
+ "epoch": 23.45,
+ "learning_rate": 6.7130692336171785e-06,
+ "loss": 1.9216,
+ "step": 21500
+ },
+ {
+ "epoch": 23.99,
+ "learning_rate": 6.158830062939652e-06,
+ "loss": 1.9228,
+ "step": 22000
+ },
+ {
+ "epoch": 24.54,
+ "learning_rate": 5.604590892262125e-06,
+ "loss": 1.9233,
+ "step": 22500
+ },
+ {
+ "epoch": 25.08,
+ "learning_rate": 5.049241021843762e-06,
+ "loss": 1.9197,
+ "step": 23000
+ },
+ {
+ "epoch": 25.63,
+ "learning_rate": 4.495001851166235e-06,
+ "loss": 1.9206,
+ "step": 23500
+ },
+ {
+ "epoch": 26.17,
+ "learning_rate": 3.939651980747871e-06,
+ "loss": 1.9235,
+ "step": 24000
+ },
+ {
+ "epoch": 26.72,
+ "learning_rate": 3.384302110329508e-06,
+ "loss": 1.9229,
+ "step": 24500
+ },
+ {
+ "epoch": 27.26,
+ "learning_rate": 2.8311736393928178e-06,
+ "loss": 1.922,
+ "step": 25000
+ },
+ {
+ "epoch": 27.26,
+ "eval_loss": 1.8085108995437622,
+ "eval_runtime": 206.76,
+ "eval_samples_per_second": 59.209,
+ "eval_steps_per_second": 1.852,
+ "eval_wer": 0.9420627621915116,
+ "step": 25000
+ },
+ {
+ "epoch": 27.81,
+ "learning_rate": 2.275823768974454e-06,
+ "loss": 1.9211,
+ "step": 25500
+ },
+ {
+ "epoch": 28.35,
+ "learning_rate": 1.7204738985560903e-06,
+ "loss": 1.9205,
+ "step": 26000
+ },
+ {
+ "epoch": 28.9,
+ "learning_rate": 1.1662347278785636e-06,
+ "loss": 1.9253,
+ "step": 26500
+ },
+ {
+ "epoch": 29.44,
+ "learning_rate": 6.119955572010366e-07,
+ "loss": 1.92,
+ "step": 27000
+ },
+ {
+ "epoch": 29.99,
+ "learning_rate": 5.6645686782673083e-08,
+ "loss": 1.9231,
+ "step": 27500
+ },
+ {
+ "epoch": 30.0,
+ "step": 27510,
+ "total_flos": 6.935706045316176e+19,
+ "train_loss": 1.198725398988214,
+ "train_runtime": 26753.1928,
+ "train_samples_per_second": 65.77,
+ "train_steps_per_second": 1.028
+ }
+ ],
+ "max_steps": 27510,
+ "num_train_epochs": 30,
+ "total_flos": 6.935706045316176e+19,
+ "trial_name": null,
+ "trial_params": null
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d0f3ca167f441e5b90cd92ecce770d5e6d913b1039df7d0b10dcfe9e89660d43
+ size 3375
vocab.json ADDED
@@ -0,0 +1,32 @@
+ {
+ "[PAD]": 29,
+ "[UNK]": 28,
+ "a": 1,
+ "b": 2,
+ "c": 3,
+ "d": 4,
+ "e": 5,
+ "f": 6,
+ "g": 7,
+ "h": 8,
+ "i": 9,
+ "j": 10,
+ "k": 11,
+ "l": 12,
+ "m": 13,
+ "n": 14,
+ "o": 15,
+ "p": 16,
+ "q": 17,
+ "r": 18,
+ "s": 19,
+ "t": 20,
+ "u": 21,
+ "v": 22,
+ "x": 23,
+ "y": 24,
+ "z": 25,
+ "|": 0,
+ "ʻ": 26,
+ "ʼ": 27
+ }
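Together with tokenizer_config.json above, this 32-entry vocabulary defines a character-level CTC tokenizer: `|` (id 0) stands in for the space between words, `[UNK]` is 28, and `[PAD]` (id 29) also serves as the CTC blank. A small decoding sketch, assuming a local clone of this repository; the id sequence is a hand-made example of what an argmax over the CTC logits might look like:

```python
# Sketch; "." assumes a local clone of this repository containing vocab.json
# and tokenizer_config.json.
from transformers import Wav2Vec2CTCTokenizer

tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(".")

# "salom dunyo" spelled out with a repeated character and a [PAD]/blank in the
# middle; decode() groups repeats, drops the pad, and maps "|" to a space.
ids = [19, 1, 12, 12, 29, 15, 13, 0, 4, 21, 14, 24, 15]
print(tokenizer.decode(ids))  # salom dunyo
```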