# pardi-speech/codec/scripts/eval_asr_from_filelist.py
# Author: Mehdi Lakbar — initial demo of Lina-speech (pardi-speech), commit 56cfa73
import argparse
import json
from pathlib import Path
import nemo.collections.asr as nemo_asr
import torch
import yaml
from torchaudio import load
from torchaudio.functional import resample
from tqdm import tqdm
def load_config(config_path):
    """Parse the YAML file at *config_path* and return its contents."""
    with open(config_path, "r") as cfg_file:
        parsed = yaml.safe_load(cfg_file)
    return parsed
def transcribe(audio: torch.Tensor, asr_model) -> str:
    """Run ASR on a single waveform tensor and return the transcript text.

    Args:
        audio: Waveform tensor; only the first row (channel 0) is sent to
            the model. Assumes shape (channels, samples) — TODO confirm
            against the caller's torchaudio.load output.
        asr_model: Object exposing ``transcribe(list) -> list`` where each
            result carries a ``.text`` attribute (e.g. a NeMo ASRModel).

    Returns:
        Transcript string for the first channel.
    """
    # numpy(force=True) already detaches and moves the tensor to CPU, so
    # the explicit .cpu() call before it was redundant.
    samples = audio.numpy(force=True)
    with torch.inference_mode():
        return asr_model.transcribe([samples[0]])[0].text
def main(args):
    """Transcribe every wav in the config's file list and write a JSONL file.

    Args:
        args: Parsed CLI namespace carrying a ``config`` attribute that
            points at a YAML file with keys ``file_list`` (required),
            ``asr_model`` and ``output_jsonl`` (optional).
    """
    config = load_config(args.config)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Load ASR model (downloaded/cached by NeMo on first use).
    asr_model = nemo_asr.models.ASRModel.from_pretrained(
        model_name=config.get("asr_model", "nvidia/parakeet-tdt-0.6b-v2")
    )

    # Read file list, skipping blank lines. Explicit encoding so paths with
    # non-ASCII characters don't depend on the locale default.
    with open(config["file_list"], "r", encoding="utf-8") as f:
        wav_files = [line.strip() for line in f if line.strip()]

    target_sr = 16000  # presumably the model's expected rate — verify for non-Parakeet models
    results = []
    for wav_path in tqdm(wav_files, desc="Transcribing"):
        wav, sr = load(wav_path)
        # Only run the resample kernel when the file is not already 16 kHz.
        if sr != target_sr:
            wav = resample(wav, orig_freq=sr, new_freq=target_sr)
        wav = wav.to(device)
        transcript = transcribe(wav, asr_model)
        results.append({"file": wav_path, "transcript": transcript})

    # Save output: one JSON object per line. utf-8 is required because the
    # dump uses ensure_ascii=False.
    out_path = Path(config.get("output_jsonl", "asr_transcripts.jsonl"))
    with out_path.open("w", encoding="utf-8") as f:
        for entry in results:
            f.write(json.dumps(entry, ensure_ascii=False) + "\n")
    print(f"\nSaved {len(results)} transcripts to {out_path}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--config", type=str, required=True, help="Path to YAML config")
args = parser.parse_args()
main(args)