# stt-pseudo-labeled-whisper-large-v3-multilingual.py
# coding=utf-8
import json
import re
from collections import OrderedDict

import datasets

from .audio_utils import get_waveform_from_audio_or_stored_zip
from .meta import SUBSET_NAMES_AND_PATHS

# Remote root of the dataset repository on the Hugging Face Hub.
# Switch to "" (see commented line below) to resolve paths against a local copy.
BASE_DIR = "https://huggingface.co/datasets/bofenghuang/stt-pseudo-labeled-whisper-large-v3-multilingual/resolve/main/"
# BASE_DIR = ""

VERSION = "0.0.1"

_DESCRIPTION = ""  # todo
def jload(f, mode="r"):
    """Load a .json file into a dictionary.

    Args:
        f: Path of the JSON file to read.
        mode: File open mode (default ``"r"``; a binary mode is honored as-is).

    Returns:
        The deserialized JSON content (typically a dict).
    """
    # JSON is UTF-8 by spec: force the encoding for text modes instead of
    # relying on the platform default; binary modes take no encoding argument.
    encoding = None if "b" in mode else "utf-8"
    with open(f, mode, encoding=encoding) as fin:  # avoid shadowing the `f` parameter
        return json.load(fin)
def jsonl_load(f, mode="r"):
    """Load a .jsonl file into a list of dictionaries.

    Args:
        f: Path of the JSON-lines file to read.
        mode: File open mode (default ``"r"``; a binary mode is honored as-is).

    Returns:
        A list with one deserialized object per non-empty line.
    """
    # JSON is UTF-8 by spec: force the encoding for text modes instead of
    # relying on the platform default; binary modes take no encoding argument.
    encoding = None if "b" in mode else "utf-8"
    with open(f, mode, encoding=encoding) as fin:  # avoid shadowing the `f` parameter
        stripped = (line.strip() for line in fin)
        # Skip blank lines (e.g. a trailing newline) instead of crashing on them.
        return [json.loads(line) for line in stripped if line]
# SUBSET_NAMES_AND_PATHS = jload("./meta.json")
class MultilingualWhisperLargeV3PseudoLabeledSpeechDatasetConfig(datasets.BuilderConfig):
    """BuilderConfig for stt-pseudo-labeled-whisper-large-v3-multilingual."""

    def __init__(self, name, path, audio_zip_files, text_file, version, **kwargs):
        """Create a config for one subset.

        Args:
            name: Subset name (also used as the config name).
            path: Directory of the subset, relative to the repository root.
            audio_zip_files: Zip archives holding the subset's audio files.
            text_file: JSON-lines manifest with transcripts and metadata.
            version: Version string, e.g. ``"0.0.1"``.
            **kwargs: Forwarded to :class:`datasets.BuilderConfig`.
        """
        self.base_data_path = path
        self.audio_zip_files = audio_zip_files
        self.text_file = text_file
        super().__init__(
            name=name,
            version=datasets.Version(version),
            description=f"stt-pseudo-labeled-whisper-large-v3-multilingual speech to text dataset in {name}.",
            **kwargs,
        )
class MultilingualWhisperLargeV3PseudoLabeledSpeechDataset(datasets.GeneratorBasedBuilder):
    """stt-pseudo-labeled-whisper-large-v3-multilingual dataset.

    One config per subset listed in ``SUBSET_NAMES_AND_PATHS``; every subset
    exposes a single "train" split. Audio is decoded straight out of the
    downloaded zip archives.
    """

    VERSION = datasets.Version(VERSION)
    DEFAULT_CONFIG_NAME = "en-yodas-000"
    BUILDER_CONFIGS = [
        MultilingualWhisperLargeV3PseudoLabeledSpeechDatasetConfig(
            name,
            SUBSET_NAMES_AND_PATHS[name]["dir"],
            SUBSET_NAMES_AND_PATHS[name]["audio_zip_files"],
            SUBSET_NAMES_AND_PATHS[name]["text_file"],
            version=VERSION,
        )
        for name in SUBSET_NAMES_AND_PATHS
    ]

    def _info(self):
        """Return the dataset metadata (features schema, homepage, citation)."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                OrderedDict(
                    [
                        ("audio_filepath", datasets.Value("string")),
                        ("audio", datasets.Audio(sampling_rate=16_000)),
                        ("duration", datasets.Value("float")),
                        ("text", datasets.Value("string")),
                        ("whisper_transcript", datasets.Value("string")),
                        ("text_norm", datasets.Value("string")),
                        ("whisper_transcript_norm", datasets.Value("string")),
                        ("wer", datasets.Value("float")),
                        ("prev_text", datasets.Value("string")),
                        ("prev_whisper_transcript", datasets.Value("string")),
                    ]
                )
            ),
            supervised_keys=None,
            homepage="",  # TODO
            citation="",  # TODO
        )

    def _split_generators(self, dl_manager):
        """Download the subset's archives and manifest, return the TRAIN split."""
        if dl_manager.is_streaming:
            raise NotImplementedError("The streaming mode is not supported yet.")

        print("Downloading audio and text...")
        # Download (or pull from cache) every audio zip of this subset so the
        # paths referenced by the manifest exist locally before generation.
        # The returned local paths are not needed here; _generate_examples
        # reconstructs them from the manifest entries.
        dl_manager.download(
            [f"{BASE_DIR}{self.config.base_data_path}/{audio_zip_file}" for audio_zip_file in self.config.audio_zip_files]
        )
        text_file = dl_manager.download(f"{BASE_DIR}{self.config.base_data_path}/{self.config.text_file}")

        # Everything before the subset directory in the manifest's local path
        # is the cache root; used to relocate the manifest's recorded paths.
        snapshot_path = text_file.split(self.config.base_data_path)[0]

        text_archives = jsonl_load(text_file)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "text_archives": text_archives,
                    "snapshot_path": snapshot_path,
                },
            ),
        ]

    def _generate_examples(self, text_archives, snapshot_path):
        """Yields examples, keyed by their position in the manifest."""
        for key, sample in enumerate(text_archives):
            # Rewrite the path recorded in the manifest so it points inside the
            # local download cache. re.escape guards against regex
            # metacharacters in the subset directory name.
            audio_filepath = re.sub(
                rf"^.*/{re.escape(self.config.base_data_path)}",
                f"{snapshot_path}/{self.config.base_data_path}",
                sample["audio_zip_filepath"],
            )

            # Read the wav directly from inside the zipped file.
            waveform, sample_rate = get_waveform_from_audio_or_stored_zip(audio_filepath)

            # NOTE: deliberately no "id" field here — the features schema in
            # _info() does not declare one, and an extra key can make the
            # datasets writer reject the example with a keys mismatch.
            yield key, {
                "audio_filepath": audio_filepath,
                "audio": {
                    "path": audio_filepath,
                    "array": waveform,
                    "sampling_rate": sample_rate,
                },
                "duration": sample["duration"],
                "text": sample["text"],
                "whisper_transcript": sample["whisper_transcript"],
                "text_norm": sample["text_norm"],
                "whisper_transcript_norm": sample["whisper_transcript_norm"],
                "wer": sample["wer"],
                "prev_text": sample["prev_text"],
                "prev_whisper_transcript": sample["prev_whisper_transcript"],
            }