Datasets:
				
			
			
	
			
	
		
			
	
		Tasks:
	
	
	
	
	Automatic Speech Recognition
	
	
	Formats:
	
	
	
		
	
	parquet
	
	
	Sub-tasks:
	
	
	
	
	keyword-spotting
	
	
	Size:
	
	
	
	
	10K - 100K
	
	
	ArXiv:
	
	
	
	
	
	
	
	
Tags:
	
	
	
	
	speech-recognition
	
	
	License:
	
	
	
	
	
	
	
# coding=utf-8
# Copyright 2022 The PolyAI and HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import os

import datasets

logger = datasets.logging.get_logger(__name__)

# MInDS-14 Dataset
# NOTE(review): in the original this line was a bare string expression placed
# after the imports, so it never acted as a module docstring; kept as a comment.

_CITATION = """\
@article{gerz2021multilingual,
title={Multilingual and cross-lingual intent detection from spoken data},
author={Gerz, Daniela and Su, Pei-Hao and Kusztos, Razvan and Mondal, Avishek and Lis, Michal and Singhal, Eshan and Mrk{\v{s}}i{\'c}, Nikola and Wen, Tsung-Hsien and Vuli{\'c}, Ivan},
journal={arXiv preprint arXiv:2104.08524},
year={2021}
}
"""

# BUG FIX: _DESCRIPTION was assigned twice in the original; only the second
# (one-line) assignment ever took effect, so the dead multiline assignment
# has been removed and the effective value kept.
_DESCRIPTION = "MINDS-14 is a dataset for the intent detection task with spoken data. It covers 14 intents extracted from a commercial system in the e-banking domain, associated with spoken examples in 14 diverse language varieties."

# The 14 language-variety config names, kept sorted so config order is stable.
_ALL_CONFIGS = sorted([
    "cs-CZ", "de-DE", "en-AU", "en-GB", "en-US", "es-ES", "fr-FR", "it-IT", "ko-KR", "nl-NL", "pl-PL", "pt-PT", "ru-RU", "zh-CN"
])

_HOMEPAGE_URL = "https://arxiv.org/abs/2104.08524"
# Path of the zipped data archive, relative to the dataset repository root.
_DATA_URL = "data/MInDS-14.zip"
class Minds14Config(datasets.BuilderConfig):
    """BuilderConfig for MInDS-14.

    Args:
        name: Config name — one of the 14 locale codes (e.g. ``"en-US"``) or ``"all"``.
        description: Human-readable description of the dataset.
        homepage: URL of the dataset homepage / paper.
        data_url: Location of the zipped data archive.
    """

    def __init__(self, name, description, homepage, data_url):
        # BUG FIX: the original passed ``name=self.name`` and
        # ``description=self.description`` to ``super().__init__`` before those
        # attributes were ever assigned — depending on the base-class version
        # this either raises AttributeError or silently uses the class-level
        # default for every config. Pass the constructor parameters directly.
        super(Minds14Config, self).__init__(
            name=name,
            version=datasets.Version("1.0.0", ""),
            description=description,
        )
        self.name = name
        self.description = description
        self.homepage = homepage
        self.data_url = data_url
def _build_config(name):
    """Create a :class:`Minds14Config` for the given config *name*.

    The description, homepage and data URL are the module-level defaults;
    only the config name varies between the 14 locales and "all".
    """
    config = Minds14Config(
        name=name,
        description=_DESCRIPTION,
        homepage=_HOMEPAGE_URL,
        data_url=_DATA_URL,
    )
    return config
class Minds14(datasets.GeneratorBasedBuilder):
    """Dataset builder for MInDS-14: spoken intent detection across 14 language varieties."""

    DEFAULT_WRITER_BATCH_SIZE = 1000
    # One config per locale, plus an "all" config that aggregates every language.
    BUILDER_CONFIGS = [_build_config(name) for name in _ALL_CONFIGS + ["all"]]

    def _info(self):
        # The fixed set of 14 e-banking intent labels.
        intent_names = [
            "abroad",
            "address",
            "app_error",
            "atm_limit",
            "balance",
            "business_loan",
            "card_issues",
            "cash_deposit",
            "direct_debit",
            "freeze",
            "high_value_payment",
            "joint_account",
            "latest_transactions",
            "pay_bill",
        ]
        features = datasets.Features(
            {
                "path": datasets.Value("string"),
                "audio": datasets.Audio(sampling_rate=8_000),
                "transcription": datasets.Value("string"),
                "english_transcription": datasets.Value("string"),
                "intent_class": datasets.ClassLabel(names=intent_names),
                "lang_id": datasets.ClassLabel(names=_ALL_CONFIGS),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=("audio", "transcription"),
            homepage=self.config.homepage,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # "all" covers every locale; otherwise only the selected one.
        if self.config.name == "all":
            langs = _ALL_CONFIGS
        else:
            langs = [self.config.name]

        archive_path = dl_manager.download_and_extract(self.config.data_url)
        # The outer archive contains two nested zips: audio clips and per-locale CSVs.
        audio_path = dl_manager.extract(os.path.join(archive_path, "MInDS-14", "audio.zip"))
        extracted_texts = dl_manager.extract(os.path.join(archive_path, "MInDS-14", "text.zip"))
        text_paths = {lang: os.path.join(extracted_texts, f"{lang}.csv") for lang in langs}

        # Single TRAIN split; the upstream release provides no predefined splits.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "audio_path": audio_path,
                    "text_paths": text_paths,
                },
            )
        ]

    def _generate_examples(self, audio_path, text_paths):
        """Yield (key, example) pairs read from each locale's CSV metadata file."""
        key = 0
        for lang, csv_path in text_paths.items():
            with open(csv_path, encoding="utf-8") as csv_file:
                rows = csv.reader(csv_file, delimiter=",", skipinitialspace=True)
                next(rows)  # skip the header row
                for file_path, transcription, english_transcription, intent_class in rows:
                    # CSV paths use "/" separators; re-join for the local OS.
                    full_path = os.path.join(audio_path, *file_path.split("/"))
                    yield key, {
                        "path": full_path,
                        "audio": full_path,
                        "transcription": transcription,
                        "english_transcription": english_transcription,
                        "intent_class": intent_class.lower(),
                        "lang_id": _ALL_CONFIGS.index(lang),
                    }
                    key += 1

