ssolito committed · Commit 80670c0 · verified · 1 Parent(s): 6ac0d4f

Upload corts_valencianes_asr_a.py

Files changed (1): corts_valencianes_asr_a.py (+304, -0)
corts_valencianes_asr_a.py ADDED
@@ -0,0 +1,304 @@
from collections import defaultdict
import os
import json
import csv
import datasets

_NAME = "corts_valencianes_asr_a"
_VERSION = "1.0.0"

_DESCRIPTION = """
This is the first version of the Corts Valencianes speech corpus for Valencian: a collection of speech recordings with transcriptions intended for Automatic Speech Recognition (ASR) applications.
"""

_CITATION = """
@misc{bscib32024,
    title={Corts Valencianes - Speech Corpus for Valencian ASR},
    author={Baybars, Kulebi},
    publisher={Barcelona Supercomputing Center},
    year={2024},
    url={},
}
"""

_HOMEPAGE = "https://huggingface.co/datasets/projecte-aina/corts_valencianes_asr_a"

_LICENSE = "CC-BY-4.0, See https://creativecommons.org/licenses/by/4.0/deed.es"


_BASE_DATA_DIR = "corpus/"

# Per-split metadata CSVs (identifier, segment_path, text) for the short-segment subsets.
_METADATA_CLEAN_TRAIN_SHORT = os.path.join(_BASE_DATA_DIR, "files", "clean_train_corts_short.csv")
_METADATA_CLEAN_TEST_SHORT = os.path.join(_BASE_DATA_DIR, "files", "clean_test_corts_short.csv")
_METADATA_CLEAN_DEV_SHORT = os.path.join(_BASE_DATA_DIR, "files", "clean_dev_corts_short.csv")

_METADATA_OTHER_TRAIN_SHORT = os.path.join(_BASE_DATA_DIR, "files", "other_train_corts_short.csv")
_METADATA_OTHER_TEST_SHORT = os.path.join(_BASE_DATA_DIR, "files", "other_test_corts_short.csv")
_METADATA_OTHER_DEV_SHORT = os.path.join(_BASE_DATA_DIR, "files", "other_dev_corts_short.csv")

# .paths files listing, one per line, the tar archives that hold the audio segments.
_TARS_CLEAN_TRAIN_SHORT = os.path.join(_BASE_DATA_DIR, "files", "tars_clean_train_short.paths")
_TARS_CLEAN_TEST_SHORT = os.path.join(_BASE_DATA_DIR, "files", "tars_clean_test_short.paths")
_TARS_CLEAN_DEV_SHORT = os.path.join(_BASE_DATA_DIR, "files", "tars_clean_dev_short.paths")

_TARS_OTHER_TRAIN_SHORT = os.path.join(_BASE_DATA_DIR, "files", "tars_other_train_short.paths")
_TARS_OTHER_TEST_SHORT = os.path.join(_BASE_DATA_DIR, "files", "tars_other_test_short.paths")
_TARS_OTHER_DEV_SHORT = os.path.join(_BASE_DATA_DIR, "files", "tars_other_dev_short.paths")


# The same layout again for the long-segment subsets.
_METADATA_CLEAN_TRAIN_LONG = os.path.join(_BASE_DATA_DIR, "files", "clean_train_corts_long.csv")
_METADATA_CLEAN_TEST_LONG = os.path.join(_BASE_DATA_DIR, "files", "clean_test_corts_long.csv")
_METADATA_CLEAN_DEV_LONG = os.path.join(_BASE_DATA_DIR, "files", "clean_dev_corts_long.csv")

_METADATA_OTHER_TRAIN_LONG = os.path.join(_BASE_DATA_DIR, "files", "other_train_corts_long.csv")
_METADATA_OTHER_TEST_LONG = os.path.join(_BASE_DATA_DIR, "files", "other_test_corts_long.csv")
_METADATA_OTHER_DEV_LONG = os.path.join(_BASE_DATA_DIR, "files", "other_dev_corts_long.csv")

_TARS_CLEAN_TRAIN_LONG = os.path.join(_BASE_DATA_DIR, "files", "tars_clean_train_long.paths")
_TARS_CLEAN_TEST_LONG = os.path.join(_BASE_DATA_DIR, "files", "tars_clean_test_long.paths")
_TARS_CLEAN_DEV_LONG = os.path.join(_BASE_DATA_DIR, "files", "tars_clean_dev_long.paths")

_TARS_OTHER_TRAIN_LONG = os.path.join(_BASE_DATA_DIR, "files", "tars_other_train_long.paths")
_TARS_OTHER_TEST_LONG = os.path.join(_BASE_DATA_DIR, "files", "tars_other_test_long.paths")
_TARS_OTHER_DEV_LONG = os.path.join(_BASE_DATA_DIR, "files", "tars_other_dev_long.paths")


class CortsValencianesASRConfig(datasets.BuilderConfig):
    """BuilderConfig for Corts Valencianes ASR."""

    def __init__(self, name, **kwargs):
        # The dataset exposes a single configuration, so the name is pinned to _NAME.
        name = _NAME
        super().__init__(name=name, **kwargs)


class CortsValencianesASR(datasets.GeneratorBasedBuilder):
    """Corts Valencianes ASR."""

    VERSION = datasets.Version(_VERSION)
    BUILDER_CONFIGS = [
        CortsValencianesASRConfig(
            name=_NAME,
            version=datasets.Version(_VERSION),
        )
    ]

    def _info(self):
        features = datasets.Features(
            {
                "identifier": datasets.Value("string"),
                "audio": datasets.Audio(sampling_rate=16000),
                "segment_path": datasets.Value("string"),
                "text": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Fetch the metadata CSVs for every split.
        metadata_clean_train_short = dl_manager.download_and_extract(_METADATA_CLEAN_TRAIN_SHORT)
        metadata_clean_test_short = dl_manager.download_and_extract(_METADATA_CLEAN_TEST_SHORT)
        metadata_clean_dev_short = dl_manager.download_and_extract(_METADATA_CLEAN_DEV_SHORT)

        metadata_other_train_short = dl_manager.download_and_extract(_METADATA_OTHER_TRAIN_SHORT)
        metadata_other_test_short = dl_manager.download_and_extract(_METADATA_OTHER_TEST_SHORT)
        metadata_other_dev_short = dl_manager.download_and_extract(_METADATA_OTHER_DEV_SHORT)

        # Fetch the .paths files listing the tar archives of every split.
        tars_clean_train_short = dl_manager.download_and_extract(_TARS_CLEAN_TRAIN_SHORT)
        tars_clean_test_short = dl_manager.download_and_extract(_TARS_CLEAN_TEST_SHORT)
        tars_clean_dev_short = dl_manager.download_and_extract(_TARS_CLEAN_DEV_SHORT)

        tars_other_train_short = dl_manager.download_and_extract(_TARS_OTHER_TRAIN_SHORT)
        tars_other_test_short = dl_manager.download_and_extract(_TARS_OTHER_TEST_SHORT)
        tars_other_dev_short = dl_manager.download_and_extract(_TARS_OTHER_DEV_SHORT)

        metadata_clean_train_long = dl_manager.download_and_extract(_METADATA_CLEAN_TRAIN_LONG)
        metadata_clean_test_long = dl_manager.download_and_extract(_METADATA_CLEAN_TEST_LONG)
        metadata_clean_dev_long = dl_manager.download_and_extract(_METADATA_CLEAN_DEV_LONG)

        metadata_other_train_long = dl_manager.download_and_extract(_METADATA_OTHER_TRAIN_LONG)
        metadata_other_test_long = dl_manager.download_and_extract(_METADATA_OTHER_TEST_LONG)
        metadata_other_dev_long = dl_manager.download_and_extract(_METADATA_OTHER_DEV_LONG)

        tars_clean_train_long = dl_manager.download_and_extract(_TARS_CLEAN_TRAIN_LONG)
        tars_clean_test_long = dl_manager.download_and_extract(_TARS_CLEAN_TEST_LONG)
        tars_clean_dev_long = dl_manager.download_and_extract(_TARS_CLEAN_DEV_LONG)

        tars_other_train_long = dl_manager.download_and_extract(_TARS_OTHER_TRAIN_LONG)
        tars_other_test_long = dl_manager.download_and_extract(_TARS_OTHER_TEST_LONG)
        tars_other_dev_long = dl_manager.download_and_extract(_TARS_OTHER_DEV_LONG)

        # Read the tar paths of each split, stripping the trailing newline of every line.
        hash_tar_files = defaultdict(dict)
        with open(tars_clean_train_short, 'r') as f:
            hash_tar_files['clean_train_short'] = [path.replace('\n', '') for path in f]
        with open(tars_clean_test_short, 'r') as f:
            hash_tar_files['clean_test_short'] = [path.replace('\n', '') for path in f]
        with open(tars_clean_dev_short, 'r') as f:
            hash_tar_files['clean_dev_short'] = [path.replace('\n', '') for path in f]

        with open(tars_other_train_short, 'r') as f:
            hash_tar_files['other_train_short'] = [path.replace('\n', '') for path in f]
        with open(tars_other_test_short, 'r') as f:
            hash_tar_files['other_test_short'] = [path.replace('\n', '') for path in f]
        with open(tars_other_dev_short, 'r') as f:
            hash_tar_files['other_dev_short'] = [path.replace('\n', '') for path in f]

        with open(tars_clean_train_long, 'r') as f:
            hash_tar_files['clean_train_long'] = [path.replace('\n', '') for path in f]
        with open(tars_clean_test_long, 'r') as f:
            hash_tar_files['clean_test_long'] = [path.replace('\n', '') for path in f]
        with open(tars_clean_dev_long, 'r') as f:
            hash_tar_files['clean_dev_long'] = [path.replace('\n', '') for path in f]

        with open(tars_other_train_long, 'r') as f:
            hash_tar_files['other_train_long'] = [path.replace('\n', '') for path in f]
        with open(tars_other_test_long, 'r') as f:
            hash_tar_files['other_test_long'] = [path.replace('\n', '') for path in f]
        with open(tars_other_dev_long, 'r') as f:
            hash_tar_files['other_dev_long'] = [path.replace('\n', '') for path in f]

        hash_meta_paths = {
            "clean_train_short": metadata_clean_train_short,
            "clean_test_short": metadata_clean_test_short,
            "clean_dev_short": metadata_clean_dev_short,
            "other_train_short": metadata_other_train_short,
            "other_test_short": metadata_other_test_short,
            "other_dev_short": metadata_other_dev_short,
            "clean_train_long": metadata_clean_train_long,
            "clean_test_long": metadata_clean_test_long,
            "clean_dev_long": metadata_clean_dev_long,
            "other_train_long": metadata_other_train_long,
            "other_test_long": metadata_other_test_long,
            "other_dev_long": metadata_other_dev_long,
        }

        audio_paths = dl_manager.download(hash_tar_files)

        splits = [
            "clean_train_short", "clean_test_short", "clean_dev_short",
            "other_train_short", "other_test_short", "other_dev_short",
            "clean_train_long", "clean_test_long", "clean_dev_long",
            "other_train_long", "other_test_long", "other_dev_long",
        ]
        # In streaming mode the tar archives are iterated directly, so there are
        # no locally extracted paths.
        local_extracted_audio_paths = (
            dl_manager.extract(audio_paths) if not dl_manager.is_streaming else
            {split: [None] * len(audio_paths[split]) for split in splits}
        )

        return [
            datasets.SplitGenerator(
                name="clean_train_short",
                gen_kwargs={
                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_paths["clean_train_short"]],
                    "local_extracted_archives_paths": local_extracted_audio_paths["clean_train_short"],
                    "metadata_paths": hash_meta_paths["clean_train_short"],
                }
            ),
            datasets.SplitGenerator(
                name="clean_test_short",
                gen_kwargs={
                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_paths["clean_test_short"]],
                    "local_extracted_archives_paths": local_extracted_audio_paths["clean_test_short"],
                    "metadata_paths": hash_meta_paths["clean_test_short"],
                }
            ),
            datasets.SplitGenerator(
                name="clean_dev_short",
                gen_kwargs={
                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_paths["clean_dev_short"]],
                    "local_extracted_archives_paths": local_extracted_audio_paths["clean_dev_short"],
                    "metadata_paths": hash_meta_paths["clean_dev_short"],
                }
            ),
            datasets.SplitGenerator(
                name="other_train_short",
                gen_kwargs={
                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_paths["other_train_short"]],
                    "local_extracted_archives_paths": local_extracted_audio_paths["other_train_short"],
                    "metadata_paths": hash_meta_paths["other_train_short"],
                }
            ),
            datasets.SplitGenerator(
                name="other_test_short",
                gen_kwargs={
                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_paths["other_test_short"]],
                    "local_extracted_archives_paths": local_extracted_audio_paths["other_test_short"],
                    "metadata_paths": hash_meta_paths["other_test_short"],
                }
            ),
            datasets.SplitGenerator(
                name="other_dev_short",
                gen_kwargs={
                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_paths["other_dev_short"]],
                    "local_extracted_archives_paths": local_extracted_audio_paths["other_dev_short"],
                    "metadata_paths": hash_meta_paths["other_dev_short"],
                }
            ),
            datasets.SplitGenerator(
                name="clean_train_long",
                gen_kwargs={
                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_paths["clean_train_long"]],
                    "local_extracted_archives_paths": local_extracted_audio_paths["clean_train_long"],
                    "metadata_paths": hash_meta_paths["clean_train_long"],
                }
            ),
            datasets.SplitGenerator(
                name="clean_test_long",
                gen_kwargs={
                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_paths["clean_test_long"]],
                    "local_extracted_archives_paths": local_extracted_audio_paths["clean_test_long"],
                    "metadata_paths": hash_meta_paths["clean_test_long"],
                }
            ),
            datasets.SplitGenerator(
                name="clean_dev_long",
                gen_kwargs={
                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_paths["clean_dev_long"]],
                    "local_extracted_archives_paths": local_extracted_audio_paths["clean_dev_long"],
                    "metadata_paths": hash_meta_paths["clean_dev_long"],
                }
            ),
            datasets.SplitGenerator(
                name="other_train_long",
                gen_kwargs={
                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_paths["other_train_long"]],
                    "local_extracted_archives_paths": local_extracted_audio_paths["other_train_long"],
                    "metadata_paths": hash_meta_paths["other_train_long"],
                }
            ),
            datasets.SplitGenerator(
                name="other_test_long",
                gen_kwargs={
                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_paths["other_test_long"]],
                    "local_extracted_archives_paths": local_extracted_audio_paths["other_test_long"],
                    "metadata_paths": hash_meta_paths["other_test_long"],
                }
            ),
            datasets.SplitGenerator(
                name="other_dev_long",
                gen_kwargs={
                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_paths["other_dev_long"]],
                    "local_extracted_archives_paths": local_extracted_audio_paths["other_dev_long"],
                    "metadata_paths": hash_meta_paths["other_dev_long"],
                }
            ),
        ]

    def _generate_examples(self, audio_archives, local_extracted_archives_paths, metadata_paths):
        features = ["segment_path", "text"]

        # Index the metadata rows by segment identifier.
        with open(metadata_paths) as f:
            metadata = {x["identifier"]: x for x in csv.DictReader(f, delimiter=",")}

        for audio_archive, local_extracted_archive_path in zip(audio_archives, local_extracted_archives_paths):
            for audio_filename, audio_file in audio_archive:
                audio_id = os.path.splitext(os.path.basename(audio_filename))[0]
                # When the archive was extracted locally, point at the file on disk;
                # in streaming mode, keep the in-archive filename.
                path = os.path.join(local_extracted_archive_path, audio_filename) if local_extracted_archive_path else audio_filename

                yield audio_id, {
                    "identifier": audio_id,
                    **{feature: metadata[audio_id][feature] for feature in features},
                    "audio": {"path": path, "bytes": audio_file.read()},
                }
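
A minimal usage sketch for the loader above, assuming the script is served from the projecte-aina/corts_valencianes_asr_a repository named in _HOMEPAGE, that a recent release of the `datasets` library is installed (script-based datasets there require trust_remote_code=True), and that an audio backend such as soundfile is available for decoding; the split names are the ones declared in _split_generators.

from datasets import load_dataset

# Stream one of the twelve splits; streaming iterates the tar archives
# directly instead of downloading and extracting all of them up front.
corts = load_dataset(
    "projecte-aina/corts_valencianes_asr_a",
    split="clean_train_short",
    streaming=True,
    trust_remote_code=True,  # required for script-based datasets in recent `datasets` releases
)

# Each example carries the features declared in _info():
# identifier, audio (16 kHz), segment_path, text.
example = next(iter(corts))
print(example["identifier"], example["segment_path"])
print(example["text"])
print(example["audio"]["sampling_rate"])  # 16000

Loading without streaming=True works the same way, but first downloads and extracts every tar archive of the requested split.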