|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os |
|
|
|
import datasets |
|
|
|
# BibTeX citation for the LibriTTS-R paper (Koizumi et al., 2023, arXiv:2305.18802).
_CITATION = """\

@ARTICLE{Koizumi2023-hs,

title = "{LibriTTS-R}: A restored multi-speaker text-to-speech corpus",

author = "Koizumi, Yuma and Zen, Heiga and Karita, Shigeki and Ding,

Yifan and Yatabe, Kohei and Morioka, Nobuyuki and Bacchiani,

Michiel and Zhang, Yu and Han, Wei and Bapna, Ankur",

abstract = "This paper introduces a new speech dataset called

``LibriTTS-R'' designed for text-to-speech (TTS) use. It is

derived by applying speech restoration to the LibriTTS

corpus, which consists of 585 hours of speech data at 24 kHz

sampling rate from 2,456 speakers and the corresponding

texts. The constituent samples of LibriTTS-R are identical

to those of LibriTTS, with only the sound quality improved.

Experimental results show that the LibriTTS-R ground-truth

samples showed significantly improved sound quality compared

to those in LibriTTS. In addition, neural end-to-end TTS

trained with LibriTTS-R achieved speech naturalness on par

with that of the ground-truth samples. The corpus is freely

available for download from

\textbackslashurl\{http://www.openslr.org/141/\}.",

month = may,

year = 2023,

copyright = "http://creativecommons.org/licenses/by-nc-nd/4.0/",

archivePrefix = "arXiv",

primaryClass = "eess.AS",

eprint = "2305.18802"

}

"""

# Human-readable summary used for the dataset card / DatasetInfo.description.
_DESCRIPTION = """\

LibriTTS-R [1] is a sound quality improved version of the LibriTTS corpus (http://www.openslr.org/60/) which is a

multi-speaker English corpus of approximately 585 hours of read English speech at 24kHz sampling rate,

published in 2019. The constituent samples of LibriTTS-R are identical to those of LibriTTS, with only the sound

quality improved. To improve sound quality, a speech restoration model, Miipher proposed by Yuma Koizumi [2], was used.

"""

# Dataset homepage on OpenSLR.
_HOMEPAGE = "https://www.openslr.org/141/"

_LICENSE = "CC BY 4.0"

# Base URL for all split archives on the OpenSLR mirror.
_DL_URL = "https://us.openslr.org/resources/141/"

# One tar.gz archive per split; the keys are the split names used by this builder.
_DATA_URLS = {
    'dev.clean': _DL_URL + 'dev_clean.tar.gz',
    'dev.other': _DL_URL + 'dev_other.tar.gz',
    'test.clean': _DL_URL + 'test_clean.tar.gz',
    'test.other': _DL_URL + 'test_other.tar.gz',
    'train.clean.100': _DL_URL + 'train_clean_100.tar.gz',
    'train.clean.360': _DL_URL + 'train_clean_360.tar.gz',
    'train.other.500': _DL_URL + 'train_other_500.tar.gz',
}
|
|
|
|
|
def _generate_transcripts(transcript_csv_file): |
|
"""Generates partial examples from transcript CSV file.""" |
|
for line in transcript_csv_file: |
|
key, text_original, text_normalized = line.decode("utf-8").replace('\n', '').split("\t") |
|
speaker_id, chapter_id = [int(el) for el in key.split("_")[:2]] |
|
example = { |
|
"text_normalized": text_normalized, |
|
"text_original": text_original, |
|
"speaker_id": speaker_id, |
|
"chapter_id": chapter_id, |
|
"id_": key, |
|
} |
|
yield example |
|
|
|
|
|
class LibriTTS_R_Dataset(datasets.GeneratorBasedBuilder):
    """Dataset builder for LibriTTS-R.

    LibriTTS-R [1] is a sound quality improved version of the LibriTTS corpus
    (http://www.openslr.org/60/) which is a multi-speaker English corpus of
    approximately 585 hours of read English speech at 24kHz sampling rate,
    published in 2019.
    """

    VERSION = datasets.Version("1.0.0")

    DEFAULT_CONFIG_NAME = "all"
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="dev", description="Only the 'dev.clean' split."),
        datasets.BuilderConfig(name="clean", description="'Clean' speech."),
        datasets.BuilderConfig(name="other", description="'Other', more challenging, speech."),
        datasets.BuilderConfig(name="all", description="Combined clean and other dataset."),
    ]

    def _info(self):
        """Return dataset metadata: features schema, homepage, license, citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "audio": datasets.Audio(sampling_rate=24_000),
                    "text_normalized": datasets.Value("string"),
                    "text_original": datasets.Value("string"),
                    "speaker_id": datasets.Value("string"),
                    "path": datasets.Value("string"),
                    "chapter_id": datasets.Value("string"),
                    "id": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download (and, when not streaming, extract) only the archives the
        active config needs, and build one SplitGenerator per split."""
        if self.config.name == "dev":
            # BUG FIX: the "dev" config is documented as "Only the 'dev.clean'
            # split" but previously fell through to downloading every archive.
            split_names = ["dev.clean"]
        elif self.config.name == "clean":
            split_names = [k for k in _DATA_URLS if "clean" in k]
        elif self.config.name == "other":
            split_names = [k for k in _DATA_URLS if "other" in k]
        else:  # "all" (default)
            split_names = list(_DATA_URLS)

        archive_path = dl_manager.download(
            {k: v for k, v in _DATA_URLS.items() if k in split_names}
        )

        # In streaming mode nothing is extracted locally; examples are read
        # straight out of the tar archives via iter_archive below.
        local_extracted_archive = (
            dl_manager.extract(archive_path) if not dl_manager.is_streaming else {}
        )

        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "local_extracted_archive": local_extracted_archive.get(split_name),
                    "files": dl_manager.iter_archive(archive_path[split_name]),
                    "split_name": split_name,
                },
            )
            for split_name in split_names
        ]

    def _generate_examples(self, split_name, files, local_extracted_archive):
        """Generate examples from a LibriTTS-R archive.

        Audio files (*.wav) and transcript files (*.trans.tsv) may appear in
        any order inside the tar archive, so whichever half of a pair arrives
        first is buffered until its counterpart shows up.
        """
        audio_extension = ".wav"

        key = 0
        all_audio_data = {}  # utterance id -> wav bytes waiting for a transcript
        transcripts = {}  # utterance id -> transcript dict waiting for audio

        def get_return_data(transcript, audio_data):
            # Assemble one (key, example) pair and advance the running key.
            nonlocal key
            audio = {"path": transcript["path"], "bytes": audio_data}
            key += 1
            return key, {"audio": audio, **transcript}

        for path, f in files:
            if path.endswith(audio_extension):
                id_ = path.split("/")[-1][: -len(audio_extension)]
                # Archive members are single-pass streams: read once, reuse.
                audio_data = f.read()

                transcript = transcripts.get(id_, None)
                if transcript is not None:
                    yield get_return_data(transcript, audio_data)
                    del transcripts[id_]
                else:
                    # BUG FIX: the original buffered a *second* f.read() here,
                    # which returns b"" and silently produced empty audio for
                    # every wav encountered before its transcript.
                    all_audio_data[id_] = audio_data

            elif path.endswith(".trans.tsv"):
                for example in _generate_transcripts(f):
                    example_id = example["id_"]

                    audio_file = f"{example_id}{audio_extension}"
                    # With a local extraction, point at the real on-disk path;
                    # when streaming, keep just the bare file name.
                    audio_file = (
                        os.path.join(
                            local_extracted_archive,
                            "LibriTTS_R",
                            split_name.replace(".", "-"),
                            str(example["speaker_id"]),
                            str(example["chapter_id"]),
                            audio_file,
                        )
                        if local_extracted_archive
                        else audio_file
                    )

                    transcript = {
                        "id": example_id,
                        "speaker_id": example["speaker_id"],
                        "chapter_id": example["chapter_id"],
                        "text_normalized": example["text_normalized"],
                        "text_original": example["text_original"],
                        "path": audio_file,
                    }

                    audio_data = all_audio_data.get(example_id, None)
                    if audio_data is not None:
                        yield get_return_data(transcript, audio_data)
                        del all_audio_data[example_id]
                    else:
                        transcripts[example_id] = transcript

        # Flush pairs where the audio arrived before its transcript.
        for id_, audio_data in all_audio_data.items():
            transcript = transcripts.get(id_, None)
            if transcript is None:
                # Orphan audio with no transcript: nothing to emit.
                continue
            yield get_return_data(transcript, audio_data)
            del transcripts[id_]

        # Any transcript still unmatched at this point has no audio.
        for id_, transcript in transcripts.items():
            audio_data = all_audio_data.get(id_, None)
            if audio_data is None:
                continue
            # BUG FIX: the original called get_return_data(audio_data,
            # transcript) with the arguments swapped, which would raise
            # TypeError (bytes is not subscriptable by "path") if this
            # branch were ever reached.
            yield get_return_data(transcript, audio_data)
|
|
|
|