"""Create a Hugging Face dataset of line-level audio segments from the JamendoLyrics
``jam-alt`` dataset, stored in its original layout (audio files plus a
``metadata.jsonl`` file per subset and language)."""

import json
import logging
import shutil
from pathlib import Path

import datasets
import numpy as np
import soundfile as sf
from scipy.sparse import csr_array
from scipy.sparse.csgraph import connected_components as scipy_connected_components


logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
logger.setLevel(logging.DEBUG)
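

# Load the source song-level dataset, pinned to a fixed revision for reproducibility.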
dataset_src = datasets.load_dataset(
    "jamendolyrics/jam-alt", revision="v1.4.0", split="test"
)
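

# Optional manual language overrides: one JSON object per line with
# "file_name", "text" (used as a consistency check), and "language" keys.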
language_fixes = (
    [json.loads(li) for li in Path("language_fixes.jsonl").read_text().splitlines()]
    if Path("language_fixes.jsonl").exists()
    else []
)
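

# Segmentation parameters (in seconds). Lines overlapping by more than
# OVERLAP_HARD_THRESHOLD are merged into one segment; overlaps of up to
# OVERLAP_SOFT_THRESHOLD with neighboring lines are tolerated; each segment
# is padded with up to PADDING of context per side and capped at MAX_DURATION.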
OVERLAP_SOFT_THRESHOLD = 0.1
OVERLAP_HARD_THRESHOLD = 0.2
PADDING = 0.5
MAX_DURATION = 20.0

SUBSETS_DIR = Path(".")
SUBSETS = ["pure", "groups"]
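

# Schema of the output dataset: one record per extracted audio segment.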
features = datasets.Features(
    {
        "song_name": datasets.Value("string"),
        "file_name": datasets.Value("string"),
        "text": datasets.Value("string"),
        "language": datasets.Value("string"),
        "song_language": datasets.Value("string"),
        "line_indices": [datasets.Value("int64")],
        "start": datasets.Value("float64"),
        "end": datasets.Value("float64"),
        "merged": datasets.Value("bool"),
        "artist": datasets.Value("string"),
        "title": datasets.Value("string"),
        "genre": datasets.Value("string"),
        "license_type": datasets.Value("string"),
    }
)


def find_connected_components(matrix: np.ndarray) -> list[list[int]]:
    """Return the connected components of an adjacency matrix as sorted index lists.

    E.g. for ``[[1, 1, 0], [1, 1, 0], [0, 0, 1]]`` the result is ``[[0, 1], [2]]``.
    """
    num_components, labels = scipy_connected_components(
        csgraph=csr_array(matrix), directed=False, return_labels=True
    )
    components = [np.where(labels == i)[0].tolist() for i in range(num_components)]
    return sorted(components)
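

# Start from a clean slate so stale audio files from previous runs do not linger.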
for subset in SUBSETS:
    if (SUBSETS_DIR / subset).exists():
        shutil.rmtree(SUBSETS_DIR / subset)


records = []
stats = []
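
# Process each song: compute pairwise line overlaps, merge heavily overlapping
# lines into groups, trim and pad each group, and export it as an audio clip.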
for item in dataset_src:
    name = item["name"]
    song_language = item["language"]
    lines = item["lines"]
    audio = item["audio"]["array"]
    sr = item["audio"]["sampling_rate"]
    audio_duration = len(audio) / sr

    starts = np.array([line["start"] for line in lines], dtype=float)
    ends = np.array([line["end"] for line in lines], dtype=float)
    texts = [line["text"] for line in lines]
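
    # Pairwise temporal overlap (in seconds) between all lines:
    # overlaps[i, j] = max(0, min(ends[i], ends[j]) - max(starts[i], starts[j])).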
    overlap_ends = np.minimum(ends[:, None], ends)
    overlap_starts = np.maximum(starts[:, None], starts)
    overlaps = np.maximum(0.0, overlap_ends - overlap_starts)
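
    # Lines that overlap by more than the hard threshold cannot be separated
    # cleanly, so each connected component of such lines becomes one segment.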
    overlap_groups = find_connected_components(overlaps > OVERLAP_HARD_THRESHOLD)

    for indices in overlap_groups:
        group = [lines[i] for i in indices]
        group_name = (
            f"{min(indices):03d}-{max(indices):03d}"
            if len(indices) > 1
            else f"{indices[0]:03d}"
        )

        text = "\n".join(line["text"] for line in group)

        group_starts = [line["start"] for line in group]
        group_ends = [line["end"] for line in group]
        start, end = min(group_starts), max(group_ends)
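
        # Handle lines with a small (at most OVERLAP_HARD_THRESHOLD) overlap
        # with the group: if such a line extends past the segment end, trim the
        # end so that at most OVERLAP_SOFT_THRESHOLD seconds of it leak in.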
        _, small_overlap_indices = np.where(
            (overlaps[indices] > 0) & (overlaps[indices] <= OVERLAP_HARD_THRESHOLD)
        )
        for i in small_overlap_indices:
            line = lines[i]
            if line["start"] < end and line["end"] >= end:
                msg = f"{name}: Group {group_name} has {overlaps[small_overlap_indices][:, indices].max():.2f} s overlap with line {i:03d}."
                if end - line["start"] < OVERLAP_SOFT_THRESHOLD:
                    msg += " Ignoring"
                else:
                    new_end = line["start"] + OVERLAP_SOFT_THRESHOLD
                    msg += f"\n Adjusting end from {end:.2f} to {new_end:.2f} ({new_end - end:.2f} s)"
                    end = new_end
                logger.debug(msg)
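
        # Pad the segment with surrounding context, but stay 0.1 s clear of any
        # line outside the group and never cross the audio boundaries.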
        non_group_indices = [i for i in range(len(lines)) if i not in indices]
        l_limit = max(
            [0.0] + [ends[i] + 0.1 for i in non_group_indices if ends[i] < end]
        )
        r_limit = min(
            [audio_duration]
            + [starts[i] - 0.1 for i in non_group_indices if starts[i] > start]
        )

        max_total_pad = max(0.0, MAX_DURATION - (end - start))
        l_pad = min(max(0.0, start - l_limit), PADDING)
        r_pad = min(max(0.0, r_limit - end), PADDING)
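
        # If padding would push the segment past MAX_DURATION, shrink both pads
        # to a symmetric size that fits, then split the remaining budget evenly
        # (minus a small epsilon so the total stays strictly under the cap).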
        if l_pad + r_pad > max_total_pad:
            l_pad = r_pad = min(l_pad, r_pad, max_total_pad / 2)
            extra = max(0.0, max_total_pad - (l_pad + r_pad)) - 1e-3
            l_pad += extra / 2
            r_pad += extra / 2
            assert l_pad + r_pad < max_total_pad

        start, end = start - l_pad, end + r_pad
        duration = end - start

        stats.append(
            {
                "name": name,
                "group_name": group_name,
                "duration": duration,
                "group_size": len(indices),
                "excluded": False,
            }
        )

        if duration > MAX_DURATION:
            logger.info(f"Excluding segment {name}.{group_name} of duration {duration}")
            stats[-1]["excluded"] = True
            continue

        start_frame, end_frame = round(start * sr), round(end * sr)
        line_audio = audio[start_frame:end_frame]

        file_name = f"{name}.{group_name}.flac"
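
        # Apply a manual language override if one exists for this segment,
        # checking that it still refers to the same text.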
        language = song_language
        for fix in language_fixes:
            if fix["file_name"] == file_name:
                assert fix["text"] == text, (
                    f"Text mismatch for {file_name}: {fix['text']} != {text}"
                )
                language = fix["language"]
                logger.debug(
                    f"{name}: Fixing language of group {group_name} to {language}"
                )
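
        # Multi-line segments go to the "groups" subset, single lines to "pure".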
        if len(group) > 1:
            subset_dir = SUBSETS_DIR / "groups" / language
        else:
            subset_dir = SUBSETS_DIR / "pure" / language
        out_audio_path = subset_dir / "audio" / file_name
        out_audio_path.parent.mkdir(parents=True, exist_ok=True)
        sf.write(out_audio_path, line_audio, sr)

        records.append(
            {
                "song_name": name,
                "file_name": str(out_audio_path.relative_to(subset_dir)),
                "text": text,
                "language": language,
                "song_language": song_language,
                "line_indices": indices,
                "start": start,
                "end": end,
                "merged": len(group) > 1,
                **{k: item[k] for k in ["artist", "title", "genre", "license_type"]},
            }
        )

dataset_out = datasets.Dataset.from_list(records, features=features, split="test")
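
# ``stats`` is collected above but not consumed in this script; a minimal sketch
# of how it could be summarized (assuming pandas is available):
#
#     import pandas as pd
#     print(pd.DataFrame(stats).groupby("excluded")["duration"].describe())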


# Write per-language metadata for each configuration: "pure" keeps only
# single-line segments, "groups" only the merged multi-line ones.
for config_name in ["pure", "groups"]:
    for subset_language in ["en", "es", "de", "fr"]:
        subset_dir = SUBSETS_DIR / config_name / subset_language
        subset_dir.mkdir(parents=True, exist_ok=True)

        subset = dataset_out.filter(lambda x: x["language"] == subset_language)
        if config_name == "pure":
            subset = subset.filter(lambda x: not x["merged"])
        elif config_name == "groups":
            subset = subset.filter(lambda x: x["merged"])

        subset.to_json(subset_dir / "metadata.jsonl")