Seosnaps committed on
Commit d923ee8
1 Parent(s): 4a0f246

Delete common_voice_16_0.py

Files changed (1)
  1. common_voice_16_0.py +0 -122
common_voice_16_0.py DELETED
@@ -1,122 +0,0 @@
- _CITATION = """\
- @inproceedings{commonvoice:2020,
-   author = {Ardila, R. and Branson, M. and Davis, K. and Henretty, M. and Kohler, M. and Meyer, J. and Morais, R. and Saunders, L. and Tyers, F. M. and Weber, G.},
-   title = {Common Voice: A Massively-Multilingual Speech Corpus},
-   booktitle = {Proceedings of the 12th Conference on Language Resources and Evaluation (LREC 2020)},
-   pages = {4211--4215},
-   year = 2020
- }
- """
-
- _HOMEPAGE = "https://huggingface.co/datasets/Seon25/common_voice_16_0/tree/main"
-
- _LICENSE = "https://creativecommons.org/publicdomain/zero/1.0/"
-
- # TODO: change "streaming" to "main" after merge!
- _BASE_URL = "https://huggingface.co/datasets/Seon25/common_voice_16_0/tree/main"
-
- _AUDIO_URL ="audio/ha/{split}/ha_{split}_0.tar"
-
- _TRANSCRIPT_URL ="transcript/ha/{split}.tsv"
- _N_SHARDS_URL ="n_shards.json"
-
-
-
-
-
- import csv
- import os
- import json
-
- import datasets
- from datasets.utils.py_utils import size_str
- from tqdm import tqdm
-
- from .languages import LANGUAGES
- from .release_stats import STATS
-
- class CommonVoice(datasets.GeneratorBasedBuilder):
-     """Common Voice."""
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             features=datasets.Features(
-                 {
-                     "speaker_id": datasets.Value("string"),
-                     "path": datasets.Value("string"),
-                     "audio": datasets.Audio(sampling_rate=16_000),
-                     "sentence": datasets.Value("string"),
-                 }
-             ),
-             supervised_keys=None,
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         lang = self.config.name
-         n_shards_path = dl_manager.download_and_extract(_N_SHARDS_URL)
-         with open(n_shards_path, encoding="utf-8") as f:
-             n_shards = json.load(f)
-
-         audio_urls = {}
-         splits = ("train", "dev", "test", "other", "invalidated")
-         for split in splits:
-             audio_urls[split] = [
-                 _AUDIO_URL.format(lang=lang, split=split, shard_idx=0) for i in range(n_shards[lang][split])
-             ]
-         archive_paths = dl_manager.download(audio_urls)
-         local_extracted_archive_paths = dl_manager.extract(archive_paths) if not dl_manager.is_streaming else {}
-
-         meta_urls = {split: _TRANSCRIPT_URL.format(lang=lang, split=split) for split in splits}
-         meta_paths = dl_manager.download_and_extract(meta_urls)
-
-         split_generators = []
-         split_names = {
-             "train": datasets.Split.TRAIN,
-             "dev": datasets.Split.VALIDATION,
-             "test": datasets.Split.TEST,
-         }
-         for split in splits:
-             split_generators.append(
-                 datasets.SplitGenerator(
-                     name=split_names.get(split, split),
-                     gen_kwargs={
-                         "local_extracted_archive_paths": local_extracted_archive_paths.get(split),
-                         "archives": [dl_manager.iter_archive(path) for path in archive_paths.get(split)],
-                         "meta_path": meta_paths[split],
-                     },
-                 ),
-             )
-
-         return split_generators
-
-     def _generate_examples(self, local_extracted_archive_paths, archives, meta_path):
-         data_fields = list(self._info().features.keys())
-         metadata = {}
-         with open(meta_path, encoding="utf-8") as f:
-             reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
-             for row in tqdm(reader, desc="Reading metadata..."):
-                 if not row["path"].endswith(".mp3"):
-                     row["path"] += ".mp3"
-                 # accent -> accents in CV 8.0
-                 if "accents" in row:
-                     row["accent"] = row["accents"]
-                     del row["accents"]
-                 # if data is incomplete, fill with empty values
-                 for field in data_fields:
-                     if field not in row:
-                         row[field] = ""
-                 metadata[row["path"]] = row
-
-         for i, audio_archive in enumerate(archives):
-             for path, file in audio_archive:
-                 _, filename = os.path.split(path)
-                 if filename in metadata:
-                     result = dict(metadata[filename])
-                     # set the audio feature and the path to the extracted file
-                     path = os.path.join(local_extracted_archive_paths[i], path) if local_extracted_archive_paths else path
-                     result["audio"] = {"path": path, "bytes": file.read()}
-                     result["path"] = path
-                     yield path, result
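
For reference, the deleted file was a `datasets` loading script (a GeneratorBasedBuilder) that read shard counts from n_shards.json and streamed the Hausa audio archives and TSV transcripts hosted in this repo. A minimal usage sketch only, not part of this commit: the config name "ha" mirrors the language code hard-coded in _AUDIO_URL/_TRANSCRIPT_URL, and trust_remote_code is assumed to be required because the repo shipped its own script.

from datasets import load_dataset

# Hypothetical invocation of the script before its removal.
ds = load_dataset(
    "Seon25/common_voice_16_0",
    "ha",                     # language config the script exposed via self.config.name
    split="train",
    streaming=True,           # the script supported streaming via dl_manager.iter_archive
    trust_remote_code=True,   # needed to execute a repo-hosted loading script
)
sample = next(iter(ds))
print(sample["sentence"], sample["audio"]["sampling_rate"])

With the script removed, load_dataset falls back to the library's no-script path (data files discovered in the repo), which may or may not match the audio/transcript layout this builder expected.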