""" |
|
Copyright 2025 RobotsMali AI4D Lab. |
|
|
|
Licensed under the Creative Commons Attribution 4.0 International License (the "License"); |
|
you may not use this file except in compliance with the License. |
|
You may obtain a copy of the License at |
|
|
|
https://creativecommons.org/licenses/by/4.0/ |
|
|
|
Unless required by applicable law or agreed to in writing, software |
|
distributed under the License is distributed on an "AS IS" BASIS, |
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|
See the License for the specific language governing permissions and |
|
limitations under the License. |
|
""" |
|
|
|
import csv |
|
import datasets |
|
from datasets import Split, SplitGenerator |
|
|
|
|
|
|
|
|
|
_CITATION = """\
@inproceedings{bam_asr_all_2025,
  title={Bam-ASR-All Audio Dataset},
  author={RobotsMali AI4D Lab},
  year={2025},
  publisher={Hugging Face}
}
"""

_DESCRIPTION = """
The **Bam-ASR-All** dataset is a combined Bambara speech dataset made up of three subsets:
- Oza-Mali-Pense
- Jeli-ASR
- RT-Data-Collection

All subsets contain Bambara audio samples with transcriptions and, where available,
French translations.
"""

_HOMEPAGE = "https://huggingface.co/datasets/RobotsMali/bam-asr-all"
_LICENSE = "CC-BY-4.0"
_VERSION = datasets.Version("1.0.0")

# Base URL for resolving raw files in the dataset repository on the Hub.
_BASE_URL = "https://huggingface.co/datasets/RobotsMali/bam-asr-all/resolve/main"
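# The code below assumes the repository exposes a top-level metadata.csv whose rows
# point at the audio files. Illustrative sketch of the expected columns (paths and
# values here are hypothetical; only the column names are relied upon by this script):
#
#   file_name,duration,bam,french
#   jeli-asr/train/<clip>.wav,3.1,"<Bambara transcription>","<French translation>"
#
# Audio URLs are built as f"{_BASE_URL}/{file_name}".
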
class BamASRAllConfig(datasets.BuilderConfig):
    """BuilderConfig for the subsets of the Bam-ASR-All dataset."""


class BamASRAll(datasets.GeneratorBasedBuilder):
    """
    Loads and parses the Bam-ASR-All dataset from metadata.csv and the audio
    files hosted on the Hub.
    """

    BUILDER_CONFIGS = [
        BamASRAllConfig(
            name="oza-mali-pense",
            version=_VERSION,
            description="Load only the Oza-Mali-Pense subset (files under oza-mali-pense/).",
        ),
        BamASRAllConfig(
            name="jeli-asr",
            version=_VERSION,
            description="Load only the Jeli-ASR subset (files under jeli-asr/).",
        ),
        BamASRAllConfig(
            name="rt-data-collection",
            version=_VERSION,
            description="Load only the RT-Data-Collection subset (files under rt-data-collection/).",
        ),
        BamASRAllConfig(
            name="bam-asr-all",
            version=_VERSION,
            description="Combine oza-mali-pense, jeli-asr, and rt-data-collection (all rows).",
        ),
    ]

    DEFAULT_CONFIG_NAME = "bam-asr-all"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "audio": datasets.Audio(sampling_rate=16_000),
                    "duration": datasets.Value("float32"),
                    "bam": datasets.Value("string"),
                    "french": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
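    # Illustrative sketch only (hypothetical values): once loaded, an example is
    # expected to look roughly like
    #   {
    #       "audio": {"path": "...", "array": <float32 array>, "sampling_rate": 16000},
    #       "duration": 4.2,
    #       "bam": "<Bambara transcription>",
    #       "french": "<French translation, possibly empty>",
    #   }
    # because the Audio feature declared above decodes the downloaded file on access.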
    def _split_generators(self, dl_manager):
        """
        Download metadata.csv from the Hub, then yield two splits (TRAIN and TEST)
        whose examples are selected by filtering the CSV rows on '/train/' or
        '/test/' in their file paths.
        """
        metadata_url = f"{_BASE_URL}/metadata.csv"
        local_metadata_path = dl_manager.download(metadata_url)

        return [
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={
                    "metadata_path": local_metadata_path,
                    "split": "train",
                    "dl_manager": dl_manager,
                },
            ),
            SplitGenerator(
                name=Split.TEST,
                gen_kwargs={
                    "metadata_path": local_metadata_path,
                    "split": "test",
                    "dl_manager": dl_manager,
                },
            ),
        ]

    def _generate_examples(self, metadata_path, split, dl_manager):
        """
        Read metadata.csv row by row and keep only the rows that match:
          - the selected config (oza-mali-pense, jeli-asr, rt-data-collection,
            or bam-asr-all for everything), and
          - the requested split ('/train/' vs '/test/' in the file path).
        Then download the matching audio files from the Hub and yield each
        local path together with its metadata.
        """
        audios_to_download = []
        metadata_dict = {}

        with open(metadata_path, "r", encoding="utf-8") as f:
            reader = csv.DictReader(f)
            for row in reader:
                file_path = row["file_name"]

                # Keep only rows belonging to the selected subset; the
                # "bam-asr-all" config keeps every row.
                if self.config.name != "bam-asr-all" and f"{self.config.name}/" not in file_path:
                    continue

                # Keep only rows belonging to the requested split.
                if split == "train" and "/train/" not in file_path:
                    continue
                if split == "test" and "/test/" not in file_path:
                    continue

                # Queue the audio file for download and remember its metadata.
                audio_url = f"{_BASE_URL}/{file_path}"
                audios_to_download.append(audio_url)
                metadata_dict[audio_url] = {
                    "duration": float(row["duration"]),
                    "bam": row["bam"],
                    "french": row["french"],
                }

        # Download all selected audio files at once, then yield one example per file.
        local_audio_paths = dl_manager.download(audios_to_download)
        for idx, (audio_url, local_audio_path) in enumerate(
            zip(audios_to_download, local_audio_paths)
        ):
            yield idx, {
                "audio": local_audio_path,
                "duration": metadata_dict[audio_url]["duration"],
                "bam": metadata_dict[audio_url]["bam"],
                "french": metadata_dict[audio_url]["french"],
            }
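

# Minimal usage sketch, guarded so it never runs when the Hub imports this module.
# It assumes the standard `datasets.load_dataset` API (with `trust_remote_code=True`,
# required for community loading scripts) and uses the "jeli-asr" config as an
# arbitrary example.
if __name__ == "__main__":
    from datasets import load_dataset

    # Load only the train split of one subset; pass "bam-asr-all" (the default
    # config) to combine every subset instead.
    ds = load_dataset(
        "RobotsMali/bam-asr-all",
        "jeli-asr",
        split="train",
        trust_remote_code=True,
    )
    example = ds[0]
    print(example["bam"], example["duration"], example["audio"]["sampling_rate"])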