# wikitongues-darija / prepare_dataset.py
# Author: BrunoHays — "Update prepare_dataset.py" (commit acf97bd, verified)
import dataclasses
from pathlib import Path
import shutil
import csv
from typing import Any, Optional
import srt
import soundfile as sf
@dataclasses.dataclass
class Subtitle:
    """A single subtitle cue, with timestamps expressed in seconds."""

    # Times are plain floats (seconds), unlike srt's timedelta-based cues.
    start: float
    end: float
    content: str

    @property
    def duration(self) -> float:
        """Length of the cue in seconds."""
        return self.end - self.start

    @classmethod
    def from_sub(cls, sub: "srt.Subtitle") -> "Subtitle":
        """Build a :class:`Subtitle` from an ``srt`` cue, converting timedeltas to seconds."""
        start_s = sub.start.total_seconds()
        end_s = sub.end.total_seconds()
        return cls(start=start_s, end=end_s, content=sub.content)

    @classmethod
    def merge(cls, sub1: "Subtitle", sub2: "Subtitle") -> "Subtitle":
        """Concatenate two consecutive cues, spanning from ``sub1``'s start to ``sub2``'s end."""
        joined_text = " ".join((sub1.content.strip(), sub2.content.strip()))
        return cls(start=sub1.start, end=sub2.end, content=joined_text)
def merge_subs(subs: "list[Subtitle]", max_sample_duration_s: float) -> "list[Subtitle]":
    """Greedily merge consecutive subtitles separated by short silences.

    Two consecutive cues are merged while the pause between them is at most
    0.2 seconds and their combined speech duration stays within
    ``max_sample_duration_s``; otherwise the accumulated cue is flushed.

    Args:
        subs: subtitle cues sorted by start time.
        max_sample_duration_s: cap on the summed duration of merged cues.

    Returns:
        A new list of (possibly merged) cues; empty input yields ``[]``.

    Raises:
        ValueError: if the cues are not sorted by start time.
    """
    if not subs:  # the original indexed subs[0] and raised IndexError on []
        return []
    merged_subs: list = []
    previous_sub = subs[0]
    for sub in subs[1:]:
        if sub.start < previous_sub.start:
            # The original guarded this in an unreachable `else` branch (its
            # `elif` condition was the exact negation of the first disjunct),
            # so the intended validation never fired. Make it real.
            raise ValueError("Subtitles are not in order")
        # Flush when there is a real silence between cues, or when merging
        # would exceed the maximum sample duration.
        if previous_sub.end + 0.2 < sub.start or previous_sub.duration + sub.duration > max_sample_duration_s:
            merged_subs.append(previous_sub)
            previous_sub = sub
        else:
            # Cues overlap or are separated by <= 0.2 s: merge them.
            # Dispatch through the instance's class so any cue type providing
            # a compatible `merge` classmethod works (identical for Subtitle).
            previous_sub = type(previous_sub).merge(previous_sub, sub)
    merged_subs.append(previous_sub)  # flush the trailing accumulated cue
    return merged_subs
# ---- Script: cut each .wav into per-subtitle samples and emit metadata.csv ----

# Source folder containing paired <name>.wav / <name>.srt files.
original_audios_path = Path("darija-test-folder")
# Output folder for the cut samples (Hugging Face audiofolder layout).
dataset_path = Path("audio_dataset/data/test")
max_sample_duration_s = 30

# Start from a clean output directory.
if dataset_path.exists():
    shutil.rmtree(dataset_path)
dataset_path.mkdir(parents=True)

# Parse and merge the subtitles of every wav file in the source folder.
file_to_subs: dict[Path, list[Subtitle]] = {}
for file in original_audios_path.iterdir():
    if file.suffix == ".wav":
        # Explicit encoding so decoding does not depend on the locale
        # (assumes the .srt files are UTF-8 — TODO confirm with the data).
        with open(file.parent / f"{file.stem}.srt", "r", encoding="utf-8") as f:
            subs = srt.parse(f.read())
        file_to_subs[file] = merge_subs([Subtitle.from_sub(sub) for sub in subs],
                                        max_sample_duration_s)

columns = ["file_name", "transcription", "sample_id", "start_timestamp", "end_timestamp", "audio_name"]
csv_lines: list[dict[str, Any]] = []
for file, merged in file_to_subs.items():
    audio, sr = sf.read(file)
    for sub in merged:
        file_name = f"{file.stem}-{sub.start:.2f}-{sub.end:.2f}.wav"
        # Slice the sample out of the full waveform (indices in frames).
        audio_cut = audio[int(round(sub.start * sr)): int(round(sub.end * sr))]
        sf.write(dataset_path / file_name, audio_cut, sr)
        csv_lines.append({
            # .as_posix() keeps forward slashes on every OS (the original
            # wrote a Path, which would serialize with '\' on Windows).
            "file_name": (Path("data") / "test" / file_name).as_posix(),
            "transcription": sub.content,
            "sample_id": Path(file_name).stem,
            "start_timestamp": sub.start,
            "end_timestamp": sub.end,
            "audio_name": file.stem
        })

# newline="" is required by the csv module; without it the writer emits
# blank lines between rows on Windows.
with (dataset_path.parent / "metadata.csv").open('w', encoding="utf-8", newline="") as f:
    writer = csv.DictWriter(f, fieldnames=columns)
    writer.writeheader()
    writer.writerows(csv_lines)