import csv
import os

import datasets
from datasets import Audio

_DESCRIPTION = "Cleaned Nepali ASR dataset with audio and transcriptions."
_CITATION = ""
_HOMEPAGE = ""


class NepaliASRConfig(datasets.BuilderConfig):
    """BuilderConfig for the Nepali ASR dataset."""

    def __init__(self, **kwargs):
        super(NepaliASRConfig, self).__init__(**kwargs)


class NepaliASRDataset(datasets.GeneratorBasedBuilder):
    """Cleaned Nepali ASR dataset: 16 kHz audio paired with transcriptions."""

    BUILDER_CONFIGS = [
        NepaliASRConfig(
            name="default",
            version=datasets.Version("1.0.0"),
            description="Nepali ASR Dataset",
        )
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "utterance_id": datasets.Value("string"),
                    "speaker_id": datasets.Value("string"),
                    "transcription": datasets.Value("string"),
                    "audio": Audio(sampling_rate=16_000),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download/extract the archive referenced by `data_dir` (a local path or URL
        # passed through load_dataset(..., data_dir=...)).
        archive_path = dl_manager.download_and_extract(self.config.data_dir)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "tsv_path": os.path.join(archive_path, "validation_transcriptions.tsv"),
                    "data_dir": archive_path,
                },
            )
        ]

    def _generate_examples(self, tsv_path, data_dir):
        # Each TSV row carries an utterance id, speaker id, transcription, and a
        # relative path to the corresponding audio file.
        with open(tsv_path, encoding="utf-8") as f:
            reader = csv.DictReader(f, delimiter="\t")
            for idx, row in enumerate(reader):
                audio_path = os.path.join(data_dir, row["utterance_path"])
                yield idx, {
                    "utterance_id": row["utterance_id"],
                    "speaker_id": row["speaker_id"],
                    "transcription": row["transcription"],
                    "audio": audio_path,
                }
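

# Usage sketch (an assumption, not part of the dataset release): if this script is
# saved locally as e.g. nepali_asr.py and `data_dir` points at an archive containing
# validation_transcriptions.tsv plus the referenced audio files, it can be loaded with:
#
#     from datasets import load_dataset
#
#     ds = load_dataset("path/to/nepali_asr.py", data_dir="path/or/url/to/archive")
#     example = ds["validation"][0]
#     print(example["transcription"], example["audio"]["sampling_rate"])
#
# The "audio" column is decoded lazily by the Audio feature into an array plus
# sampling rate on access.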