speech_segment/speech_segment.py

# Lint as: python3
"""Speech Segment dataset.
"""
import os
from pathlib import Path
import datasets
import torchaudio
class SpeechSegmentConfig(datasets.BuilderConfig):
"""BuilderConfig for Speech Segment.
For long audio files, segment them into smaller segments of fixed length.
For short audio files, return the whole audio file.
"""
def __init__(self, segment_length, **kwargs):
super(SpeechSegmentConfig, self).__init__(**kwargs)
self.segment_length = segment_length
class SpeechSegment(datasets.GeneratorBasedBuilder):
"""Speech Segment dataset."""
BUILDER_CONFIGS = [
SpeechSegmentConfig(name="all", segment_length=60.0,),
]
@property
def manual_download_instructions(self):
return (
"Specify the data_dir as the path to the folder, will recursively search for .flac and .wav files. "
"`datasets.load_dataset('subatomicseer/speech_segment', data_dir='path/to/folder/folder_name')`"
)
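    # Example (an assumed layout, not required by the script): pointing data_dir at a
    # LibriSpeech-style tree such as path/to/LibriSpeech/train-clean-100/19/198/19-198-0001.flac
    # yields example ids built from the last four path components,
    # e.g. "train-clean-100/19/198/19-198-0001".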
def _info(self):
features = datasets.Features(
{
"id": datasets.Value("string"),
"file": datasets.Value("string"),
'sample_rate': datasets.Value('int64'),
'offset': datasets.Value('int64'),
'num_frames': datasets.Value('int64'),
}
)
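        # Note: examples only describe a segment; the waveform itself is not decoded
        # here and has to be loaded by the consumer from `file` using `offset` and
        # `num_frames` (see the usage sketch at the bottom of this file).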
return datasets.DatasetInfo(
features=features,
)
    def _split_generators(self, dl_manager):
        if dl_manager.manual_dir is None:
            raise FileNotFoundError(
                f"data_dir was not passed to load_dataset. Manual download instructions: {self.manual_download_instructions}"
            )
        base_data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
        if not os.path.exists(base_data_dir):
            raise FileNotFoundError(
                f"{base_data_dir} does not exist. Manual download instructions: {self.manual_download_instructions}"
            )
data_dirs = [str(p) for p in Path(base_data_dir).rglob('*') if p.suffix in ['.flac', '.wav']]
print(f"Found {len(data_dirs)} audio files in {base_data_dir}")
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"data_dirs": data_dirs},
),
]
def _generate_examples(self, data_dirs):
        for path in data_dirs:
            # Build the example id from the last four path components, minus the audio extension.
            path_split = os.path.splitext(path)[0].split(os.sep)
            id_ = '/'.join(path_split[-4:])
audio_metadata = torchaudio.info(path)
segment_length = int(self.config.segment_length * audio_metadata.sample_rate)
total_length = audio_metadata.num_frames
if total_length <= segment_length:
yield id_, {
"id": id_,
"file": path,
'sample_rate': audio_metadata.sample_rate,
'offset': 0,
'num_frames': total_length,
}
else:
# generate non-overlapping segments of segment_length
offsets = list(range(0, total_length, segment_length))
if total_length - offsets[-1] < 1 * audio_metadata.sample_rate:
                    # if the last segment is shorter than 1 second, discard it
offsets.pop()
                for segment_id, start in enumerate(offsets):
                    # clamp the last segment to the end of the file so num_frames never exceeds the frames available
                    num_frames = min(segment_length, total_length - start)
                    yield f'{id_}_{segment_id}', {
                        "id": f'{id_}_{segment_id}',
                        "file": path,
                        'sample_rate': audio_metadata.sample_rate,
                        'offset': start,
                        'num_frames': num_frames,
                    }
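

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustration only, not part of the loading script).
# "path/to/folder" is a placeholder for a local directory containing .flac/.wav
# files; newer versions of `datasets` may additionally require trust_remote_code=True.
# Each example only describes a segment, so the waveform is read on demand with
# torchaudio using the stored frame offset and frame count.
if __name__ == "__main__":
    ds = datasets.load_dataset(
        "subatomicseer/speech_segment", data_dir="path/to/folder"
    )["train"]

    example = ds[0]
    waveform, sample_rate = torchaudio.load(
        example["file"],
        frame_offset=example["offset"],
        num_frames=example["num_frames"],
    )
    print(example["id"], waveform.shape, sample_rate)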