NMP123 committed
Commit 86999b5 · verified · Parent: d77a1c0

Upload tokenizer

Files changed (2):
  1. README.md +3 -3
  2. tokenizer_config.json +0 -1
README.md CHANGED
@@ -12,8 +12,8 @@ model-index:
 - name: w2v-bert-2.0-Vietnamese-colab-CV17.0
   results:
   - task:
-      name: Automatic Speech Recognition
       type: automatic-speech-recognition
+      name: Automatic Speech Recognition
     dataset:
       name: common_voice_17_0
       type: common_voice_17_0
@@ -21,9 +21,9 @@ model-index:
       split: test
       args: vi
     metrics:
-    - name: Wer
-      type: wer
+    - type: wer
       value: 0.26461245235069886
+      name: Wer
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
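The `wer` metric in this block is word error rate: the reported 0.26461245235069886 on the Common Voice 17.0 Vietnamese test split means roughly 26.5% of reference words were substituted, deleted, or inserted during decoding. As a minimal sketch of how such a score is computed (assuming the third-party `jiwer` package, which is not part of this repo):

```python
import jiwer

# Toy reference/hypothesis pair; the real score in README.md comes from
# decoding the Common Voice 17.0 "vi" test split with the trained model.
reference = "xin chào thế giới"
hypothesis = "xin chào thế giơi"

# WER = (substitutions + deletions + insertions) / reference word count.
print(jiwer.wer(reference, hypothesis))  # 0.25 — one wrong word out of four
```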
tokenizer_config.json CHANGED
@@ -40,7 +40,6 @@
   "extra_special_tokens": {},
   "model_max_length": 1000000000000000019884624838656,
   "pad_token": "[PAD]",
-  "processor_class": "Wav2Vec2BertProcessor",
   "replace_word_delimiter_char": " ",
   "target_lang": null,
   "tokenizer_class": "Wav2Vec2CTCTokenizer",