bagasshw committed · Commit 78e2725 · verified · 1 Parent(s): d55644e

End of training

README.md CHANGED
@@ -1,22 +1,27 @@
 ---
 library_name: transformers
+language:
+- jv
 license: apache-2.0
-base_model: openai/whisper-large-v2
+base_model: openai/whisper-tiny
 tags:
+- whisper
+- javanese
+- asr
 - generated_from_trainer
 metrics:
 - wer
 model-index:
-- name: whisper-large-v2-javanese-openslr-v2
+- name: Whisper-Large-v2-Java-v2
   results: []
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
 should probably proofread and complete it, then remove this comment. -->
 
-# whisper-large-v2-javanese-openslr-v2
+# Whisper-Large-v2-Java-v2
 
-This model is a fine-tuned version of [openai/whisper-large-v2](https://huggingface.co/openai/whisper-large-v2) on the None dataset.
+This model is a fine-tuned version of [openai/whisper-tiny](https://huggingface.co/openai/whisper-tiny) on the None dataset.
 It achieves the following results on the evaluation set:
 - Loss: 0.1112
 - Wer: 0.0612
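The updated card reports the evaluation metrics but no usage snippet; a minimal sketch is given below. The repo id is an assumption (hypothetical), taken from the checkpoint directory name in this commit — substitute the model's actual Hugging Face Hub id — and a reasonably recent version of transformers is assumed.

```python
# Minimal sketch: transcribing Javanese audio with the fine-tuned checkpoint.
# The repo id below is an assumption; replace it with the model's actual Hub id.
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="bagasshw/whisper-large-v2-javanese-openslr-v2",  # hypothetical repo id
)

# Whisper pipelines accept a path to an audio file (or an array of samples).
result = asr(
    "sample_jv.wav",
    generate_kwargs={"language": "javanese", "task": "transcribe"},
)
print(result["text"])
```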
all_results.json ADDED
@@ -0,0 +1,15 @@
+{
+    "epoch": 8.0,
+    "total_flos": 1.698366962958336e+20,
+    "train_loss": 0.16200368287563324,
+    "train_runtime": 40923.5162,
+    "train_samples": 2000,
+    "train_samples_per_second": 1.955,
+    "train_steps_per_second": 0.489,
+    "val_loss": 0.11124598234891891,
+    "val_runtime": 893.4451,
+    "val_samples": 250,
+    "val_samples_per_second": 1.399,
+    "val_steps_per_second": 0.7,
+    "val_wer": 0.06115037298782882
+}
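For context on the val_wer value above: WER is the fraction of word-level errors between reference and predicted transcripts. A minimal sketch using the evaluate library is shown below (an assumption — the training script may have computed WER with jiwer or another implementation); the toy Javanese sentences are illustrative only.

```python
# Minimal sketch of how a WER like val_wer above is computed.
# Using the `evaluate` library is an assumption; the training code may differ.
import evaluate

wer_metric = evaluate.load("wer")

references = ["aku arep lunga menyang pasar"]   # ground-truth transcript
predictions = ["aku arep lungo menyang pasar"]  # model output

wer = wer_metric.compute(predictions=predictions, references=references)
print(f"WER: {wer:.4f}")  # 1 substitution out of 5 words -> 0.2 for this toy pair
```

The reported val_wer of 0.0612 corresponds to roughly 6.1% of words in the 250-sample validation set being wrong.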
runs/May01_23-52-56_dgx-a100/events.out.tfevents.1746160531.dgx-a100.2099488.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:136ba24cefbc3d727301c4814fb7406377ba0010934aa9fcb61641e5b0629334
+size 437
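The three lines above are a Git LFS pointer, not the TensorBoard event file itself; the actual file is stored in LFS. A hedged sketch for fetching the resolved file with huggingface_hub follows (the repo id is again an assumption).

```python
# Sketch: download the LFS-backed TensorBoard event file from the Hub.
# The repo id is an assumption; replace it with the actual model repository.
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="bagasshw/whisper-large-v2-javanese-openslr-v2",  # hypothetical
    filename="runs/May01_23-52-56_dgx-a100/events.out.tfevents.1746160531.dgx-a100.2099488.1",
)
print(path)  # local cache path of the resolved (non-pointer) event file
```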
train_results.json ADDED
@@ -0,0 +1,9 @@
+{
+    "epoch": 8.0,
+    "total_flos": 1.698366962958336e+20,
+    "train_loss": 0.16200368287563324,
+    "train_runtime": 40923.5162,
+    "train_samples": 2000,
+    "train_samples_per_second": 1.955,
+    "train_steps_per_second": 0.489
+}
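The throughput figures in train_results.json can be cross-checked against each other; a quick sketch (assuming the file is read from this commit's working tree) is below.

```python
# Quick consistency check of the throughput numbers in train_results.json.
import json

with open("train_results.json") as f:
    r = json.load(f)

steps = r["train_steps_per_second"] * r["train_runtime"]               # ~20,000 optimizer steps
samples = r["train_samples_per_second"] * r["train_runtime"]           # ~80,000 samples seen
per_step = r["train_samples_per_second"] / r["train_steps_per_second"]  # ~4 samples per step

print(round(steps), round(samples), round(per_step, 2))
```

With train_batch_size 2 recorded in trainer_state.json, roughly 4 samples per optimizer step suggests gradient accumulation or data parallelism, though the commit itself does not say which.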
trainer_state.json ADDED
@@ -0,0 +1,923 @@
1
+ {
2
+ "best_global_step": 20000,
3
+ "best_metric": 0.06115037298782882,
4
+ "best_model_checkpoint": "/home/cluster-dgxa100/slp01/bagas-fine-tune-whisper/whisper-large-v2-javanese-openslr-v2/checkpoint-20000",
5
+ "epoch": 8.0,
6
+ "eval_steps": 1000,
7
+ "global_step": 20000,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.08,
14
+ "grad_norm": 19.06084442138672,
15
+ "learning_rate": 1.97e-06,
16
+ "loss": 1.8907,
17
+ "step": 200
18
+ },
19
+ {
20
+ "epoch": 0.16,
21
+ "grad_norm": 16.973052978515625,
22
+ "learning_rate": 3.97e-06,
23
+ "loss": 0.6026,
24
+ "step": 400
25
+ },
26
+ {
27
+ "epoch": 0.24,
28
+ "grad_norm": 8.261700630187988,
29
+ "learning_rate": 5.9700000000000004e-06,
30
+ "loss": 0.5317,
31
+ "step": 600
32
+ },
33
+ {
34
+ "epoch": 0.32,
35
+ "grad_norm": 7.87921667098999,
36
+ "learning_rate": 7.970000000000002e-06,
37
+ "loss": 0.491,
38
+ "step": 800
39
+ },
40
+ {
41
+ "epoch": 0.4,
42
+ "grad_norm": 7.55085563659668,
43
+ "learning_rate": 9.970000000000001e-06,
44
+ "loss": 0.4821,
45
+ "step": 1000
46
+ },
47
+ {
48
+ "epoch": 0.4,
49
+ "eval_loss": 0.3569938838481903,
50
+ "eval_runtime": 880.4774,
51
+ "eval_samples_per_second": 1.42,
52
+ "eval_steps_per_second": 0.71,
53
+ "eval_wer": 0.22683549273655282,
54
+ "step": 1000
55
+ },
56
+ {
57
+ "epoch": 0.48,
58
+ "grad_norm": 5.795778751373291,
59
+ "learning_rate": 1.1970000000000002e-05,
60
+ "loss": 0.4608,
61
+ "step": 1200
62
+ },
63
+ {
64
+ "epoch": 0.56,
65
+ "grad_norm": 8.08930492401123,
66
+ "learning_rate": 1.3970000000000002e-05,
67
+ "loss": 0.4761,
68
+ "step": 1400
69
+ },
70
+ {
71
+ "epoch": 0.64,
72
+ "grad_norm": 7.31475305557251,
73
+ "learning_rate": 1.597e-05,
74
+ "loss": 0.4942,
75
+ "step": 1600
76
+ },
77
+ {
78
+ "epoch": 0.72,
79
+ "grad_norm": 7.746321201324463,
80
+ "learning_rate": 1.796e-05,
81
+ "loss": 0.4758,
82
+ "step": 1800
83
+ },
84
+ {
85
+ "epoch": 0.8,
86
+ "grad_norm": 5.482341289520264,
87
+ "learning_rate": 1.9960000000000002e-05,
88
+ "loss": 0.4761,
89
+ "step": 2000
90
+ },
91
+ {
92
+ "epoch": 0.8,
93
+ "eval_loss": 0.34740468859672546,
94
+ "eval_runtime": 901.2547,
95
+ "eval_samples_per_second": 1.387,
96
+ "eval_steps_per_second": 0.693,
97
+ "eval_wer": 0.2396937573616019,
98
+ "step": 2000
99
+ },
100
+ {
101
+ "epoch": 0.88,
102
+ "grad_norm": 7.196951866149902,
103
+ "learning_rate": 1.9782222222222226e-05,
104
+ "loss": 0.4924,
105
+ "step": 2200
106
+ },
107
+ {
108
+ "epoch": 0.96,
109
+ "grad_norm": 6.221120834350586,
110
+ "learning_rate": 1.9560000000000002e-05,
111
+ "loss": 0.4779,
112
+ "step": 2400
113
+ },
114
+ {
115
+ "epoch": 1.04,
116
+ "grad_norm": 6.380108833312988,
117
+ "learning_rate": 1.933777777777778e-05,
118
+ "loss": 0.3806,
119
+ "step": 2600
120
+ },
121
+ {
122
+ "epoch": 1.12,
123
+ "grad_norm": 5.491684913635254,
124
+ "learning_rate": 1.9115555555555555e-05,
125
+ "loss": 0.3465,
126
+ "step": 2800
127
+ },
128
+ {
129
+ "epoch": 1.2,
130
+ "grad_norm": 4.4581379890441895,
131
+ "learning_rate": 1.8893333333333334e-05,
132
+ "loss": 0.3099,
133
+ "step": 3000
134
+ },
135
+ {
136
+ "epoch": 1.2,
137
+ "eval_loss": 0.30754217505455017,
138
+ "eval_runtime": 891.4205,
139
+ "eval_samples_per_second": 1.402,
140
+ "eval_steps_per_second": 0.701,
141
+ "eval_wer": 0.20259128386336867,
142
+ "step": 3000
143
+ },
144
+ {
145
+ "epoch": 1.28,
146
+ "grad_norm": 7.791854381561279,
147
+ "learning_rate": 1.8671111111111114e-05,
148
+ "loss": 0.3275,
149
+ "step": 3200
150
+ },
151
+ {
152
+ "epoch": 1.3599999999999999,
153
+ "grad_norm": 5.212815761566162,
154
+ "learning_rate": 1.844888888888889e-05,
155
+ "loss": 0.3296,
156
+ "step": 3400
157
+ },
158
+ {
159
+ "epoch": 1.44,
160
+ "grad_norm": 9.706353187561035,
161
+ "learning_rate": 1.822666666666667e-05,
162
+ "loss": 0.3142,
163
+ "step": 3600
164
+ },
165
+ {
166
+ "epoch": 1.52,
167
+ "grad_norm": 5.222635269165039,
168
+ "learning_rate": 1.8004444444444446e-05,
169
+ "loss": 0.3097,
170
+ "step": 3800
171
+ },
172
+ {
173
+ "epoch": 1.6,
174
+ "grad_norm": 3.670520067214966,
175
+ "learning_rate": 1.7782222222222222e-05,
176
+ "loss": 0.3108,
177
+ "step": 4000
178
+ },
179
+ {
180
+ "epoch": 1.6,
181
+ "eval_loss": 0.26660874485969543,
182
+ "eval_runtime": 896.6083,
183
+ "eval_samples_per_second": 1.394,
184
+ "eval_steps_per_second": 0.697,
185
+ "eval_wer": 0.19709462112288967,
186
+ "step": 4000
187
+ },
188
+ {
189
+ "epoch": 1.6800000000000002,
190
+ "grad_norm": 5.417144298553467,
191
+ "learning_rate": 1.756e-05,
192
+ "loss": 0.3096,
193
+ "step": 4200
194
+ },
195
+ {
196
+ "epoch": 1.76,
197
+ "grad_norm": 5.865799427032471,
198
+ "learning_rate": 1.733777777777778e-05,
199
+ "loss": 0.3266,
200
+ "step": 4400
201
+ },
202
+ {
203
+ "epoch": 1.8399999999999999,
204
+ "grad_norm": 3.2357375621795654,
205
+ "learning_rate": 1.7115555555555557e-05,
206
+ "loss": 0.3134,
207
+ "step": 4600
208
+ },
209
+ {
210
+ "epoch": 1.92,
211
+ "grad_norm": 6.072234630584717,
212
+ "learning_rate": 1.6893333333333336e-05,
213
+ "loss": 0.289,
214
+ "step": 4800
215
+ },
216
+ {
217
+ "epoch": 2.0,
218
+ "grad_norm": 3.143366813659668,
219
+ "learning_rate": 1.6671111111111113e-05,
220
+ "loss": 0.3131,
221
+ "step": 5000
222
+ },
223
+ {
224
+ "epoch": 2.0,
225
+ "eval_loss": 0.21725231409072876,
226
+ "eval_runtime": 885.5009,
227
+ "eval_samples_per_second": 1.412,
228
+ "eval_steps_per_second": 0.706,
229
+ "eval_wer": 0.15312131919905772,
230
+ "step": 5000
231
+ },
232
+ {
233
+ "epoch": 2.08,
234
+ "grad_norm": 4.550586700439453,
235
+ "learning_rate": 1.644888888888889e-05,
236
+ "loss": 0.1525,
237
+ "step": 5200
238
+ },
239
+ {
240
+ "epoch": 2.16,
241
+ "grad_norm": 3.604379892349243,
242
+ "learning_rate": 1.6226666666666668e-05,
243
+ "loss": 0.1656,
244
+ "step": 5400
245
+ },
246
+ {
247
+ "epoch": 2.24,
248
+ "grad_norm": 3.664323568344116,
249
+ "learning_rate": 1.6004444444444444e-05,
250
+ "loss": 0.1743,
251
+ "step": 5600
252
+ },
253
+ {
254
+ "epoch": 2.32,
255
+ "grad_norm": 2.5190961360931396,
256
+ "learning_rate": 1.5782222222222224e-05,
257
+ "loss": 0.175,
258
+ "step": 5800
259
+ },
260
+ {
261
+ "epoch": 2.4,
262
+ "grad_norm": 3.1211793422698975,
263
+ "learning_rate": 1.556e-05,
264
+ "loss": 0.1796,
265
+ "step": 6000
266
+ },
267
+ {
268
+ "epoch": 2.4,
269
+ "eval_loss": 0.2026291936635971,
270
+ "eval_runtime": 894.7957,
271
+ "eval_samples_per_second": 1.397,
272
+ "eval_steps_per_second": 0.698,
273
+ "eval_wer": 0.13839811542991756,
274
+ "step": 6000
275
+ },
276
+ {
277
+ "epoch": 2.48,
278
+ "grad_norm": 3.203092336654663,
279
+ "learning_rate": 1.533777777777778e-05,
280
+ "loss": 0.1721,
281
+ "step": 6200
282
+ },
283
+ {
284
+ "epoch": 2.56,
285
+ "grad_norm": 1.1959407329559326,
286
+ "learning_rate": 1.5115555555555557e-05,
287
+ "loss": 0.1722,
288
+ "step": 6400
289
+ },
290
+ {
291
+ "epoch": 2.64,
292
+ "grad_norm": 4.8345160484313965,
293
+ "learning_rate": 1.4893333333333335e-05,
294
+ "loss": 0.17,
295
+ "step": 6600
296
+ },
297
+ {
298
+ "epoch": 2.7199999999999998,
299
+ "grad_norm": 5.333383560180664,
300
+ "learning_rate": 1.4671111111111111e-05,
301
+ "loss": 0.1751,
302
+ "step": 6800
303
+ },
304
+ {
305
+ "epoch": 2.8,
306
+ "grad_norm": 1.5021018981933594,
307
+ "learning_rate": 1.444888888888889e-05,
308
+ "loss": 0.17,
309
+ "step": 7000
310
+ },
311
+ {
312
+ "epoch": 2.8,
313
+ "eval_loss": 0.19221611320972443,
314
+ "eval_runtime": 891.3109,
315
+ "eval_samples_per_second": 1.402,
316
+ "eval_steps_per_second": 0.701,
317
+ "eval_wer": 0.13780918727915195,
318
+ "step": 7000
319
+ },
320
+ {
321
+ "epoch": 2.88,
322
+ "grad_norm": 3.0855863094329834,
323
+ "learning_rate": 1.4226666666666669e-05,
324
+ "loss": 0.1672,
325
+ "step": 7200
326
+ },
327
+ {
328
+ "epoch": 2.96,
329
+ "grad_norm": 4.673288822174072,
330
+ "learning_rate": 1.4004444444444445e-05,
331
+ "loss": 0.1724,
332
+ "step": 7400
333
+ },
334
+ {
335
+ "epoch": 3.04,
336
+ "grad_norm": 2.8060922622680664,
337
+ "learning_rate": 1.3782222222222223e-05,
338
+ "loss": 0.1262,
339
+ "step": 7600
340
+ },
341
+ {
342
+ "epoch": 3.12,
343
+ "grad_norm": 1.5213590860366821,
344
+ "learning_rate": 1.3560000000000002e-05,
345
+ "loss": 0.094,
346
+ "step": 7800
347
+ },
348
+ {
349
+ "epoch": 3.2,
350
+ "grad_norm": 5.938966751098633,
351
+ "learning_rate": 1.3337777777777778e-05,
352
+ "loss": 0.0995,
353
+ "step": 8000
354
+ },
355
+ {
356
+ "epoch": 3.2,
357
+ "eval_loss": 0.17917467653751373,
358
+ "eval_runtime": 900.0369,
359
+ "eval_samples_per_second": 1.389,
360
+ "eval_steps_per_second": 0.694,
361
+ "eval_wer": 0.12063211621515509,
362
+ "step": 8000
363
+ },
364
+ {
365
+ "epoch": 3.2800000000000002,
366
+ "grad_norm": 3.5296432971954346,
367
+ "learning_rate": 1.3115555555555556e-05,
368
+ "loss": 0.1002,
369
+ "step": 8200
370
+ },
371
+ {
372
+ "epoch": 3.36,
373
+ "grad_norm": 5.334218502044678,
374
+ "learning_rate": 1.2893333333333336e-05,
375
+ "loss": 0.1061,
376
+ "step": 8400
377
+ },
378
+ {
379
+ "epoch": 3.44,
380
+ "grad_norm": 5.724509239196777,
381
+ "learning_rate": 1.2671111111111112e-05,
382
+ "loss": 0.1012,
383
+ "step": 8600
384
+ },
385
+ {
386
+ "epoch": 3.52,
387
+ "grad_norm": 2.5498409271240234,
388
+ "learning_rate": 1.244888888888889e-05,
389
+ "loss": 0.099,
390
+ "step": 8800
391
+ },
392
+ {
393
+ "epoch": 3.6,
394
+ "grad_norm": 6.009134769439697,
395
+ "learning_rate": 1.2226666666666666e-05,
396
+ "loss": 0.0972,
397
+ "step": 9000
398
+ },
399
+ {
400
+ "epoch": 3.6,
401
+ "eval_loss": 0.16701579093933105,
402
+ "eval_runtime": 899.6835,
403
+ "eval_samples_per_second": 1.389,
404
+ "eval_steps_per_second": 0.695,
405
+ "eval_wer": 0.11493914409108755,
406
+ "step": 9000
407
+ },
408
+ {
409
+ "epoch": 3.68,
410
+ "grad_norm": 2.472468137741089,
411
+ "learning_rate": 1.2004444444444445e-05,
412
+ "loss": 0.0933,
413
+ "step": 9200
414
+ },
415
+ {
416
+ "epoch": 3.76,
417
+ "grad_norm": 2.989001750946045,
418
+ "learning_rate": 1.1782222222222223e-05,
419
+ "loss": 0.0999,
420
+ "step": 9400
421
+ },
422
+ {
423
+ "epoch": 3.84,
424
+ "grad_norm": 3.3118479251861572,
425
+ "learning_rate": 1.156e-05,
426
+ "loss": 0.0949,
427
+ "step": 9600
428
+ },
429
+ {
430
+ "epoch": 3.92,
431
+ "grad_norm": 4.284938812255859,
432
+ "learning_rate": 1.1337777777777779e-05,
433
+ "loss": 0.0964,
434
+ "step": 9800
435
+ },
436
+ {
437
+ "epoch": 4.0,
438
+ "grad_norm": 3.4380695819854736,
439
+ "learning_rate": 1.1115555555555557e-05,
440
+ "loss": 0.097,
441
+ "step": 10000
442
+ },
443
+ {
444
+ "epoch": 4.0,
445
+ "eval_loss": 0.1545259952545166,
446
+ "eval_runtime": 903.5859,
447
+ "eval_samples_per_second": 1.383,
448
+ "eval_steps_per_second": 0.692,
449
+ "eval_wer": 0.10963879073419709,
450
+ "step": 10000
451
+ },
452
+ {
453
+ "epoch": 4.08,
454
+ "grad_norm": 1.92518949508667,
455
+ "learning_rate": 1.0893333333333333e-05,
456
+ "loss": 0.0524,
457
+ "step": 10200
458
+ },
459
+ {
460
+ "epoch": 4.16,
461
+ "grad_norm": 2.2278802394866943,
462
+ "learning_rate": 1.0671111111111112e-05,
463
+ "loss": 0.0523,
464
+ "step": 10400
465
+ },
466
+ {
467
+ "epoch": 4.24,
468
+ "grad_norm": 0.5611541271209717,
469
+ "learning_rate": 1.044888888888889e-05,
470
+ "loss": 0.0521,
471
+ "step": 10600
472
+ },
473
+ {
474
+ "epoch": 4.32,
475
+ "grad_norm": 1.7205854654312134,
476
+ "learning_rate": 1.0226666666666666e-05,
477
+ "loss": 0.0499,
478
+ "step": 10800
479
+ },
480
+ {
481
+ "epoch": 4.4,
482
+ "grad_norm": 2.563293218612671,
483
+ "learning_rate": 1.0004444444444446e-05,
484
+ "loss": 0.0553,
485
+ "step": 11000
486
+ },
487
+ {
488
+ "epoch": 4.4,
489
+ "eval_loss": 0.1574954092502594,
490
+ "eval_runtime": 893.7485,
491
+ "eval_samples_per_second": 1.399,
492
+ "eval_steps_per_second": 0.699,
493
+ "eval_wer": 0.10296427169218689,
494
+ "step": 11000
495
+ },
496
+ {
497
+ "epoch": 4.48,
498
+ "grad_norm": 2.2215042114257812,
499
+ "learning_rate": 9.783333333333335e-06,
500
+ "loss": 0.0537,
501
+ "step": 11200
502
+ },
503
+ {
504
+ "epoch": 4.5600000000000005,
505
+ "grad_norm": 1.360498070716858,
506
+ "learning_rate": 9.561111111111113e-06,
507
+ "loss": 0.0588,
508
+ "step": 11400
509
+ },
510
+ {
511
+ "epoch": 4.64,
512
+ "grad_norm": 4.086482048034668,
513
+ "learning_rate": 9.340000000000002e-06,
514
+ "loss": 0.0537,
515
+ "step": 11600
516
+ },
517
+ {
518
+ "epoch": 4.72,
519
+ "grad_norm": 2.1945817470550537,
520
+ "learning_rate": 9.117777777777778e-06,
521
+ "loss": 0.0599,
522
+ "step": 11800
523
+ },
524
+ {
525
+ "epoch": 4.8,
526
+ "grad_norm": 2.7617948055267334,
527
+ "learning_rate": 8.895555555555556e-06,
528
+ "loss": 0.0526,
529
+ "step": 12000
530
+ },
531
+ {
532
+ "epoch": 4.8,
533
+ "eval_loss": 0.14308780431747437,
534
+ "eval_runtime": 905.6641,
535
+ "eval_samples_per_second": 1.38,
536
+ "eval_steps_per_second": 0.69,
537
+ "eval_wer": 0.08902630545740087,
538
+ "step": 12000
539
+ },
540
+ {
541
+ "epoch": 4.88,
542
+ "grad_norm": 0.3309612572193146,
543
+ "learning_rate": 8.673333333333334e-06,
544
+ "loss": 0.0527,
545
+ "step": 12200
546
+ },
547
+ {
548
+ "epoch": 4.96,
549
+ "grad_norm": 0.3702127933502197,
550
+ "learning_rate": 8.451111111111112e-06,
551
+ "loss": 0.0574,
552
+ "step": 12400
553
+ },
554
+ {
555
+ "epoch": 5.04,
556
+ "grad_norm": 1.9310526847839355,
557
+ "learning_rate": 8.22888888888889e-06,
558
+ "loss": 0.0385,
559
+ "step": 12600
560
+ },
561
+ {
562
+ "epoch": 5.12,
563
+ "grad_norm": 0.8932788372039795,
564
+ "learning_rate": 8.006666666666667e-06,
565
+ "loss": 0.0369,
566
+ "step": 12800
567
+ },
568
+ {
569
+ "epoch": 5.2,
570
+ "grad_norm": 2.246431589126587,
571
+ "learning_rate": 7.784444444444445e-06,
572
+ "loss": 0.0299,
573
+ "step": 13000
574
+ },
575
+ {
576
+ "epoch": 5.2,
577
+ "eval_loss": 0.13719478249549866,
578
+ "eval_runtime": 896.1487,
579
+ "eval_samples_per_second": 1.395,
580
+ "eval_steps_per_second": 0.697,
581
+ "eval_wer": 0.08333333333333333,
582
+ "step": 13000
583
+ },
584
+ {
585
+ "epoch": 5.28,
586
+ "grad_norm": 2.8669216632843018,
587
+ "learning_rate": 7.562222222222223e-06,
588
+ "loss": 0.0276,
589
+ "step": 13200
590
+ },
591
+ {
592
+ "epoch": 5.36,
593
+ "grad_norm": 0.38674354553222656,
594
+ "learning_rate": 7.341111111111112e-06,
595
+ "loss": 0.0334,
596
+ "step": 13400
597
+ },
598
+ {
599
+ "epoch": 5.44,
600
+ "grad_norm": 4.218255996704102,
601
+ "learning_rate": 7.11888888888889e-06,
602
+ "loss": 0.0253,
603
+ "step": 13600
604
+ },
605
+ {
606
+ "epoch": 5.52,
607
+ "grad_norm": 2.424956798553467,
608
+ "learning_rate": 6.896666666666667e-06,
609
+ "loss": 0.0287,
610
+ "step": 13800
611
+ },
612
+ {
613
+ "epoch": 5.6,
614
+ "grad_norm": 1.3083245754241943,
615
+ "learning_rate": 6.674444444444445e-06,
616
+ "loss": 0.0311,
617
+ "step": 14000
618
+ },
619
+ {
620
+ "epoch": 5.6,
621
+ "eval_loss": 0.12584801018238068,
622
+ "eval_runtime": 892.859,
623
+ "eval_samples_per_second": 1.4,
624
+ "eval_steps_per_second": 0.7,
625
+ "eval_wer": 0.07803297997644287,
626
+ "step": 14000
627
+ },
628
+ {
629
+ "epoch": 5.68,
630
+ "grad_norm": 1.7961013317108154,
631
+ "learning_rate": 6.452222222222223e-06,
632
+ "loss": 0.0291,
633
+ "step": 14200
634
+ },
635
+ {
636
+ "epoch": 5.76,
637
+ "grad_norm": 0.16184474527835846,
638
+ "learning_rate": 6.2300000000000005e-06,
639
+ "loss": 0.0287,
640
+ "step": 14400
641
+ },
642
+ {
643
+ "epoch": 5.84,
644
+ "grad_norm": 0.13447080552577972,
645
+ "learning_rate": 6.007777777777778e-06,
646
+ "loss": 0.0303,
647
+ "step": 14600
648
+ },
649
+ {
650
+ "epoch": 5.92,
651
+ "grad_norm": 2.1226162910461426,
652
+ "learning_rate": 5.785555555555556e-06,
653
+ "loss": 0.0261,
654
+ "step": 14800
655
+ },
656
+ {
657
+ "epoch": 6.0,
658
+ "grad_norm": 1.8055452108383179,
659
+ "learning_rate": 5.563333333333334e-06,
660
+ "loss": 0.0295,
661
+ "step": 15000
662
+ },
663
+ {
664
+ "epoch": 6.0,
665
+ "eval_loss": 0.12012948095798492,
666
+ "eval_runtime": 888.9428,
667
+ "eval_samples_per_second": 1.406,
668
+ "eval_steps_per_second": 0.703,
669
+ "eval_wer": 0.07253631723596388,
670
+ "step": 15000
671
+ },
672
+ {
673
+ "epoch": 6.08,
674
+ "grad_norm": 0.1165083572268486,
675
+ "learning_rate": 5.341111111111111e-06,
676
+ "loss": 0.0143,
677
+ "step": 15200
678
+ },
679
+ {
680
+ "epoch": 6.16,
681
+ "grad_norm": 0.12827950716018677,
682
+ "learning_rate": 5.118888888888889e-06,
683
+ "loss": 0.0169,
684
+ "step": 15400
685
+ },
686
+ {
687
+ "epoch": 6.24,
688
+ "grad_norm": 3.075695276260376,
689
+ "learning_rate": 4.896666666666667e-06,
690
+ "loss": 0.0144,
691
+ "step": 15600
692
+ },
693
+ {
694
+ "epoch": 6.32,
695
+ "grad_norm": 0.4469108581542969,
696
+ "learning_rate": 4.6744444444444445e-06,
697
+ "loss": 0.0124,
698
+ "step": 15800
699
+ },
700
+ {
701
+ "epoch": 6.4,
702
+ "grad_norm": 0.264642596244812,
703
+ "learning_rate": 4.452222222222223e-06,
704
+ "loss": 0.0151,
705
+ "step": 16000
706
+ },
707
+ {
708
+ "epoch": 6.4,
709
+ "eval_loss": 0.12285350263118744,
710
+ "eval_runtime": 883.7262,
711
+ "eval_samples_per_second": 1.414,
712
+ "eval_steps_per_second": 0.707,
713
+ "eval_wer": 0.07420494699646643,
714
+ "step": 16000
715
+ },
716
+ {
717
+ "epoch": 6.48,
718
+ "grad_norm": 3.0525002479553223,
719
+ "learning_rate": 4.23e-06,
720
+ "loss": 0.0122,
721
+ "step": 16200
722
+ },
723
+ {
724
+ "epoch": 6.5600000000000005,
725
+ "grad_norm": 1.7435777187347412,
726
+ "learning_rate": 4.007777777777778e-06,
727
+ "loss": 0.0134,
728
+ "step": 16400
729
+ },
730
+ {
731
+ "epoch": 6.64,
732
+ "grad_norm": 0.03412042185664177,
733
+ "learning_rate": 3.785555555555556e-06,
734
+ "loss": 0.0146,
735
+ "step": 16600
736
+ },
737
+ {
738
+ "epoch": 6.72,
739
+ "grad_norm": 4.467521667480469,
740
+ "learning_rate": 3.5633333333333337e-06,
741
+ "loss": 0.0139,
742
+ "step": 16800
743
+ },
744
+ {
745
+ "epoch": 6.8,
746
+ "grad_norm": 1.6740899085998535,
747
+ "learning_rate": 3.3411111111111115e-06,
748
+ "loss": 0.0163,
749
+ "step": 17000
750
+ },
751
+ {
752
+ "epoch": 6.8,
753
+ "eval_loss": 0.1136574074625969,
754
+ "eval_runtime": 890.0494,
755
+ "eval_samples_per_second": 1.404,
756
+ "eval_steps_per_second": 0.702,
757
+ "eval_wer": 0.06576364350215941,
758
+ "step": 17000
759
+ },
760
+ {
761
+ "epoch": 6.88,
762
+ "grad_norm": 0.25668439269065857,
763
+ "learning_rate": 3.118888888888889e-06,
764
+ "loss": 0.0143,
765
+ "step": 17200
766
+ },
767
+ {
768
+ "epoch": 6.96,
769
+ "grad_norm": 1.1925643682479858,
770
+ "learning_rate": 2.896666666666667e-06,
771
+ "loss": 0.0132,
772
+ "step": 17400
773
+ },
774
+ {
775
+ "epoch": 7.04,
776
+ "grad_norm": 0.45678913593292236,
777
+ "learning_rate": 2.6744444444444446e-06,
778
+ "loss": 0.0103,
779
+ "step": 17600
780
+ },
781
+ {
782
+ "epoch": 7.12,
783
+ "grad_norm": 0.25028079748153687,
784
+ "learning_rate": 2.4522222222222224e-06,
785
+ "loss": 0.0089,
786
+ "step": 17800
787
+ },
788
+ {
789
+ "epoch": 7.2,
790
+ "grad_norm": 0.04460795596241951,
791
+ "learning_rate": 2.2300000000000002e-06,
792
+ "loss": 0.0082,
793
+ "step": 18000
794
+ },
795
+ {
796
+ "epoch": 7.2,
797
+ "eval_loss": 0.1142345443367958,
798
+ "eval_runtime": 885.5644,
799
+ "eval_samples_per_second": 1.412,
800
+ "eval_steps_per_second": 0.706,
801
+ "eval_wer": 0.06389870435806831,
802
+ "step": 18000
803
+ },
804
+ {
805
+ "epoch": 7.28,
806
+ "grad_norm": 6.106534957885742,
807
+ "learning_rate": 2.007777777777778e-06,
808
+ "loss": 0.0063,
809
+ "step": 18200
810
+ },
811
+ {
812
+ "epoch": 7.36,
813
+ "grad_norm": 0.5640923380851746,
814
+ "learning_rate": 1.7855555555555557e-06,
815
+ "loss": 0.0088,
816
+ "step": 18400
817
+ },
818
+ {
819
+ "epoch": 7.44,
820
+ "grad_norm": 0.09705500304698944,
821
+ "learning_rate": 1.5633333333333333e-06,
822
+ "loss": 0.0091,
823
+ "step": 18600
824
+ },
825
+ {
826
+ "epoch": 7.52,
827
+ "grad_norm": 0.042120128870010376,
828
+ "learning_rate": 1.3411111111111112e-06,
829
+ "loss": 0.0066,
830
+ "step": 18800
831
+ },
832
+ {
833
+ "epoch": 7.6,
834
+ "grad_norm": 0.024996856227517128,
835
+ "learning_rate": 1.118888888888889e-06,
836
+ "loss": 0.0092,
837
+ "step": 19000
838
+ },
839
+ {
840
+ "epoch": 7.6,
841
+ "eval_loss": 0.11208222806453705,
842
+ "eval_runtime": 886.2816,
843
+ "eval_samples_per_second": 1.41,
844
+ "eval_steps_per_second": 0.705,
845
+ "eval_wer": 0.0627208480565371,
846
+ "step": 19000
847
+ },
848
+ {
849
+ "epoch": 7.68,
850
+ "grad_norm": 0.38041412830352783,
851
+ "learning_rate": 8.966666666666668e-07,
852
+ "loss": 0.0068,
853
+ "step": 19200
854
+ },
855
+ {
856
+ "epoch": 7.76,
857
+ "grad_norm": 3.13415265083313,
858
+ "learning_rate": 6.744444444444446e-07,
859
+ "loss": 0.0062,
860
+ "step": 19400
861
+ },
862
+ {
863
+ "epoch": 7.84,
864
+ "grad_norm": 0.7191887497901917,
865
+ "learning_rate": 4.5222222222222224e-07,
866
+ "loss": 0.0067,
867
+ "step": 19600
868
+ },
869
+ {
870
+ "epoch": 7.92,
871
+ "grad_norm": 0.09878556430339813,
872
+ "learning_rate": 2.3000000000000002e-07,
873
+ "loss": 0.0077,
874
+ "step": 19800
875
+ },
876
+ {
877
+ "epoch": 8.0,
878
+ "grad_norm": 0.007985732518136501,
879
+ "learning_rate": 7.777777777777778e-09,
880
+ "loss": 0.006,
881
+ "step": 20000
882
+ },
883
+ {
884
+ "epoch": 8.0,
885
+ "eval_loss": 0.11124598234891891,
886
+ "eval_runtime": 889.6592,
887
+ "eval_samples_per_second": 1.405,
888
+ "eval_steps_per_second": 0.703,
889
+ "eval_wer": 0.06115037298782882,
890
+ "step": 20000
891
+ },
892
+ {
893
+ "epoch": 8.0,
894
+ "step": 20000,
895
+ "total_flos": 1.698366962958336e+20,
896
+ "train_loss": 0.16200368287563324,
897
+ "train_runtime": 40923.5162,
898
+ "train_samples_per_second": 1.955,
899
+ "train_steps_per_second": 0.489
900
+ }
901
+ ],
902
+ "logging_steps": 200,
903
+ "max_steps": 20000,
904
+ "num_input_tokens_seen": 0,
905
+ "num_train_epochs": 8,
906
+ "save_steps": 1000,
907
+ "stateful_callbacks": {
908
+ "TrainerControl": {
909
+ "args": {
910
+ "should_epoch_stop": false,
911
+ "should_evaluate": false,
912
+ "should_log": false,
913
+ "should_save": true,
914
+ "should_training_stop": true
915
+ },
916
+ "attributes": {}
917
+ }
918
+ },
919
+ "total_flos": 1.698366962958336e+20,
920
+ "train_batch_size": 2,
921
+ "trial_name": null,
922
+ "trial_params": null
923
+ }
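The log_history array above interleaves training-loss entries (every 200 steps) with evaluation entries (every 1000 steps, carrying eval_loss and eval_wer). A small sketch for extracting the eval WER curve from this file is shown below.

```python
# Minimal sketch: extract the eval WER curve from trainer_state.json's log_history.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Eval entries are the ones carrying an "eval_wer" key (logged every 1000 steps here).
eval_points = [(e["step"], e["eval_wer"]) for e in state["log_history"] if "eval_wer" in e]

for step, wer in eval_points:
    print(f"step {step:>6}: WER {wer:.4f}")
# The last point matches best_metric: step 20000, WER ~0.0612.
```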
val_results.json ADDED
@@ -0,0 +1,9 @@
+{
+    "epoch": 8.0,
+    "val_loss": 0.11124598234891891,
+    "val_runtime": 893.4451,
+    "val_samples": 250,
+    "val_samples_per_second": 1.399,
+    "val_steps_per_second": 0.7,
+    "val_wer": 0.06115037298782882
+}