#!/usr/bin/env python3
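"""
Generate JSONL manifests for the MTT dataset splits (train/valid/test).

For each split tsv under the given data path, this script reads the metadata
of every referenced .mp3 and writes one JSON object per line to
MTT.<split>.jsonl (the "valid" split is written out as "val").

Usage (the script name is a placeholder):
    python make_mtt_jsonl.py /path/to/MTT

Each output line looks like this (illustrative values):
    {"audio_path": ".../mp3/48/948.low.mp3", "label": ["guitar", "slow"],
     "duration": 29.1, "sample_rate": 16000, "num_samples": 465984,
     "bit_depth": null, "bitrate": 32000, "channels": 1}
"""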
import os
import json
import argparse

import numpy as np
import pandas as pd
import soundfile as sf
from tqdm import tqdm


def generate_split_jsonl(data_path: str, split_name: str, tags: np.ndarray, binary: np.ndarray):
    """
    Reads {split_name}.tsv, processes all .mp3 files, and generates MTT.{split_name}.jsonl.
    Includes error handling, a progress bar, and bitrate calculation for MP3s.
    """
    mt_dir = data_path
    tsv_path = os.path.join(mt_dir, f'{split_name}.tsv')
    # The "valid" split is written out as "val".
    out_split_name = "val" if split_name == "valid" else split_name
    out_path = os.path.join(mt_dir, f'MTT.{out_split_name}.jsonl')
    # Failures from every split are appended to the shared log that main() resets once per run.
    fail_log_path = os.path.join(mt_dir, 'processing_failures.log')

    # Read index and filenames
    df = pd.read_csv(tsv_path, sep='\t', header=None, names=['idx', 'title'])
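    # df has two columns: 'idx' indexes rows of the binary label matrix, and
    # 'title' is the mp3 path relative to <data_path>/mp3 (the tsv has no header).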

    failed_count = 0
    failed_records = []

print(f"Processing split: {split_name}")
with open(out_path, 'w', encoding='utf-8') as fw:
# Use tqdm for a progress bar
for _, row in tqdm(df.iterrows(), total=df.shape[0], desc=f"-> Generating {split_name}.jsonl"):
try:
i = int(row['idx'])
title = row['title'] # e.g., "48/948.low.mp3"
audio_path = os.path.join(mt_dir, 'mp3', title)
# Read audio metadata (supports .mp3)
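                # Note: MP3 support in soundfile requires a build of libsndfile >= 1.1.0;
                # older builds raise here, which routes the file to the failure log.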
                info = sf.info(audio_path)
                duration = info.frames / info.samplerate
                num_samples = info.frames
                sample_rate = info.samplerate
                channels = info.channels

                # Calculate bitrate, which is more meaningful for compressed formats like MP3
                bitrate = None
                # Check if duration is valid to avoid division by zero
                if duration > 0:
                    try:
                        file_size_bytes = os.path.getsize(audio_path)
                        # Bitrate in bits per second (bps)
                        bitrate = int((file_size_bytes * 8) / duration)
                    except OSError:
                        # File might not exist, or another OS-level error occurred
                        pass
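                # This is the average bitrate implied by the file size; for VBR MP3s it
                # can differ from the encoder's nominal setting.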

                # Infer bit depth from the subtype; it will be None for MP3
                bit_depth = None
                if hasattr(info, 'subtype') and info.subtype and info.subtype.startswith('PCM_'):
                    try:
                        bit_depth = int(info.subtype.split('_', 1)[1])
                    except (ValueError, IndexError):
                        pass  # Could not parse bit depth from subtype

                # Get the list of labels for this sample
                labels = tags[binary[i].astype(bool)].tolist()
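                # binary[i] is the multi-hot row for clip i over the tag vocabulary, so
                # boolean indexing selects the names of its active tags.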

                # Assemble the JSON object and write it to the file
                record = {
                    "audio_path": audio_path,
                    "label": labels,
                    "duration": duration,
                    "sample_rate": sample_rate,
                    "num_samples": num_samples,
                    "bit_depth": bit_depth,  # null for MP3 files
                    "bitrate": bitrate,
                    "channels": channels
                }
                fw.write(json.dumps(record, ensure_ascii=False) + "\n")
            except Exception as e:
                # If any error occurs, log it and skip the file
                failed_count += 1
                failed_records.append(f"File: {title}, Error: {str(e)}")
                continue

    print(f"Successfully generated {out_path}")

    # After the loop, report and log any failures
    if failed_count > 0:
        print(f"Skipped {failed_count} corrupted or problematic files for split '{split_name}'.")
        # Append the failures to the shared log
        with open(fail_log_path, 'a', encoding='utf-8') as f_fail:
            f_fail.write(f"--- Failures for split: {split_name} ({failed_count} files) ---\n")
            for record in failed_records:
                f_fail.write(record + "\n")
            f_fail.write("\n")


def main():
    parser = argparse.ArgumentParser(
        description="Generate JSONL files for MTT dataset splits (train/valid/test) for .mp3 files.")
    parser.add_argument(
        "data_path",
        help="Root directory of the MTT dataset, containing annotations, tags, labels, and tsv splits.")
    args = parser.parse_args()

    mt_dir = args.data_path

    # Use a generic failure log name that doesn't need to be cleaned for each split
    fail_log_path = os.path.join(mt_dir, 'processing_failures.log')
    if os.path.exists(fail_log_path):
        os.remove(fail_log_path)
        print(f"Removed old log file: {fail_log_path}")

    try:
        # Load tags and the binary label matrix
        tags = np.load(os.path.join(mt_dir, 'tags.npy'))
        binary = np.load(os.path.join(mt_dir, 'binary_label.npy'))
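        # Expected shapes: tags is a 1-D array of tag names, and binary is a
        # (num_clips, num_tags) multi-hot matrix indexed by the tsv 'idx' column.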
    except FileNotFoundError as e:
        print(f"Error: Could not find required .npy file. {e}")
        return

    # Generate JSONL for each split
    for split in ['train', 'valid', 'test']:
        generate_split_jsonl(args.data_path, split, tags, binary)

    print("\nProcessing complete.")
    if os.path.exists(fail_log_path):
        print(f"A log of all failed files has been saved to: {fail_log_path}")


if __name__ == "__main__":
    main()