"""SVQ data reading."""
import io
import os
try:
  from array_record.python import array_record_module as array_record
except ImportError:
  import array_record
import datasets
import librosa
import numpy as np
import pandas as pd
from scipy.io import wavfile


def read_wav_bytes_to_normalized_float(
wav_bytes, resample_hz: float | None = None
):
"""Reads WAV bytes object and returns normalized float numpy array.
Args:
wav_bytes: WAV bytes object.
resample_hz: Optional resample rate.
Returns:
(waveform, original sample rate before any resample)
"""
rate, data = wavfile.read(io.BytesIO(wav_bytes))
if data.ndim > 1 and data.shape[1] > 1:
raise ValueError("Only mono WAV files are supported.")
# Convert data to float and normalize
if data.dtype == np.int16:
x = data.astype(np.float32) / np.iinfo(np.int16).max
elif data.dtype == np.int32:
x = data.astype(np.float32) / np.iinfo(np.int32).max
elif data.dtype == np.float32:
x = data
else:
raise TypeError(f"Unsupported data type: {data.dtype}")
if resample_hz is not None and resample_hz != rate:
x = librosa.resample(x, orig_sr=rate, target_sr=resample_hz)
return x, rate
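

# A minimal usage sketch (file name hypothetical; any mono WAV with int16,
# int32, or float32 samples works):
#
#   with open("query.wav", "rb") as f:
#     waveform, orig_rate = read_wav_bytes_to_normalized_float(
#         f.read(), resample_hz=16000
#     )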


def read_utt_index(basepath):
  """Reads utt_index.jsonl into a dict of {utt_id: "path:index"}."""
  df = pd.read_json(os.path.join(basepath, "utt_index.jsonl"), lines=True)
  return dict(zip(df["utt_id"], df["index"]))
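

# Each line of utt_index.jsonl maps an utterance id to an ArrayRecord shard
# and a record position within that shard (values hypothetical):
#
#   {"utt_id": "utt_00042", "index": "audio_000:17"}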


class UttLookup:
"""Lookup utterances by utt_id with optional resampling.
Usage:
utt_lookup = UttLookup(basepath)
waveform = utt_lookup(utt_id)
"""

  def __init__(self, basepath, resample_hz: float | None = None):
self.basepath = basepath
self.resample_hz = resample_hz
self.utt_id_to_path_idx = read_utt_index(basepath)
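    # Cache of open ArrayRecordReader handles, keyed by shard path.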
self.readers = {}
self.orig_sample_rate_ = None

  @property
def orig_sample_rate(self):
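    # Determined lazily: reading any utterance records the original rate.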
if self.orig_sample_rate_ is None:
utt_id = next(iter(self.utt_id_to_path_idx))
self(utt_id)
return self.orig_sample_rate_

  def __call__(self, utt_id: str):
    path, idx = self.utt_id_to_path_idx[utt_id].split(":")
    if path not in self.readers:
      array_record_path = os.path.join(self.basepath, f"{path}.array_record")
      self.readers[path] = array_record.ArrayRecordReader(array_record_path)
    b = self.readers[path].read([int(idx)])
    waveform, sample_rate = read_wav_bytes_to_normalized_float(
        b[0], resample_hz=self.resample_hz
    )
    if self.orig_sample_rate_ is None:
      self.orig_sample_rate_ = sample_rate
    if sample_rate != self.orig_sample_rate_:
      raise ValueError(
          f"Sample rate mismatch: {sample_rate} != {self.orig_sample_rate_}"
      )
    return waveform


def generate_examples(filepath, resample_hz: float | None = None):
  """Generates examples from a JSONL task file."""
basepath = os.path.dirname(filepath)
utt_lookup = UttLookup(basepath, resample_hz=resample_hz)
task = pd.read_json(filepath, lines=True)
for ex in task.to_dict(orient="records"):
utt = utt_lookup(ex["utt_id"])
ex["waveform"] = utt
yield ex
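

# A minimal usage sketch (task file name hypothetical; it must sit next to
# utt_index.jsonl and the .array_record shards):
#
#   for ex in generate_examples("span_retrieval_in_lang.jsonl", resample_hz=16000):
#     print(ex["utt_id"], len(ex["waveform"]))
#     break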
_CITATION = """\
@InProceedings{mseb,
  title={Massive Sound Embedding Benchmark (MSEB)},
  author={Georg Heigold, Ehsan Variani, Tom Bagby, Ji Ma, Cyril Allauzen, Shankar Kumar, Michael Riley},
  year={2025}
}
"""

_NUM_SHARDS = 128  # Internal sharding for parallel data loading.


class SvqDataset(datasets.GeneratorBasedBuilder):
"""SVQ dataset."""
VERSION = datasets.Version("1.1.0")
BUILDER_CONFIGS = [
datasets.BuilderConfig(name=name, description=desc)
for name, desc in [
("span_reasoning_in_lang", "Span reasoning in language."),
("span_retrieval_in_lang", "Span retrieval in language."),
("span_reasoning_cross_lang", "Span reasoning cross language."),
("span_retrieval_cross_lang", "Span retrieval cross language."),
("passage_retrieval_in_lang", "Passage retrieval in language."),
("passage_retrieval_cross_lang", "Passage retrieval cross language."),
("document_retrieval_in_lang", "Document retrieval in language."),
(
"document_retrieval_cross_lang",
"Document retrieval cross language.",
),
]
]
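
  # Likely kept small because each row carries a full waveform.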
DEFAULT_WRITER_BATCH_SIZE = 64

  def _info(self):
task = self.config.name
features = {
"utt_id": datasets.Value("string"),
"waveform": datasets.Sequence(datasets.Value("float32")),
"text": datasets.Value("string"),
"locale": datasets.Value("string"),
"environment": datasets.Value("string"),
"speaker_id": datasets.Value("string"),
"speaker_age": datasets.Value("int32"),
"speaker_gender": datasets.Value("string"),
"page_id": datasets.Value("string"),
"page_title": datasets.Value("string"),
"passage_id": datasets.Value("string"),
"passage_text": datasets.Value("string"),
}
if "span" in task:
features["span"] = datasets.Value("string")
    return datasets.DatasetInfo(
        description=f"Simple Voice Queries (SVQ) dataset. Task: {task}.",
features=datasets.Features(**features),
homepage="https://huggingface.co/datasets/google/svq",
license="Apache 2.0",
citation=_CITATION,
)

  def _split_generators(self, dl_manager):
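    # Data files are resolved relative to the current working directory.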
basepath = os.getcwd()
task = self.config.name
    return [
        datasets.SplitGenerator(
            name="eval",
            gen_kwargs={
                "filepath": os.path.join(basepath, f"{task}.jsonl"),
                "shards": list(range(_NUM_SHARDS)),
                "resample_hz": 16000,
                "task_name": task,
            },
        ),
    ]

  def _generate_examples(
self, filepath=None, shards=None, resample_hz=None, task_name=None
):
basepath = os.path.dirname(filepath)
utt_lookup = UttLookup(basepath, resample_hz=resample_hz)
task = pd.read_json(filepath, lines=True)
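    # Splitting into _NUM_SHARDS lets the `shards` gen_kwarg be distributed
    # across worker processes by the datasets library.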
task = np.array_split(task, _NUM_SHARDS)
task_shards = [task[idx].to_dict(orient="records") for idx in shards]
del task
for shard in task_shards:
for ex in shard:
utt = utt_lookup(ex["utt_id"])
ex["waveform"] = utt
del ex["task"]
if "span" not in task_name:
del ex["span"]
yield "_".join([ex["utt_id"], ex["passage_id"]]), ex