import json
import os

import datasets
from datasets import Features, Value, DatasetInfo, SplitGenerator, BuilderConfig, LargeList, Sequence


TASKS = [
    "word_localization",
    "advertisement_localization",
    "named_entity_localization",
    "speaker_number_estimation",
    "entire_duration",
    "event_duration",
    "emotion_ranking",
    "emotion_reasoning",
]

_DOCUMENT_DATASET_VERSION = "1.0.0"


class BLAB(datasets.GeneratorBasedBuilder):
    """A dataset builder supporting various audio QA tasks,
    each with its own specific data schema.
    """

    BUILDER_CONFIGS = [
        BuilderConfig(
            name=task,
            version=datasets.Version(_DOCUMENT_DATASET_VERSION),
            description=f"BLAB dataset for task: {task}",
        )
        for task in TASKS
    ]
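
    # Illustrative sketch only (not taken from the source files): judging from the parsing
    # in _generate_examples below, a raw "word_localization" record in
    # blab_long_audio/word_localization.json is assumed to look roughly like
    #   {
    #       "video_url": "https://...",
    #       "audio": "path/to/audio",
    #       "question": "...",
    #       "groundtruth": [{"word": "...", "start": 0.0, "end": 0.5}, ...]
    #   }
    # Other tasks replace "groundtruth" with their own structures, as defined in _info.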

    def _info(self):
        """Defines the dataset schema (features) based on the selected task configuration."""
        if self.config.name == "word_localization":
            return DatasetInfo(
                features=Features({
                    "video_url": Value("string"),
                    "audio": Value("string"),
                    "question": Value("string"),
                    "groundtruth": LargeList(
                        feature=Features({
                            "word": Value("string"),
                            "start": Value("float32"),
                            "end": Value("float32"),
                        })
                    ),
                }),
                description="Schema for the Word Localization task: segmenting and labeling words.",
                license="MIT",
            )

        elif self.config.name == "advertisement_localization":
            return DatasetInfo(
                features=Features({
                    "video_url": Value("string"),
                    "audio": Value("string"),
                    "question": Value("string"),
                    "groundtruth": Features({
                        "ads_segment": LargeList(
                            feature=Features({
                                "text": Value("string"),
                                "start": Value("float32"),
                                "end": Value("float32"),
                            }),
                        ),
                        "word_timestamp": LargeList(
                            feature=Features({
                                "word": Value("string"),
                                "start": Value("float32"),
                                "end": Value("float32"),
                            }),
                        ),
                    }),
                }),
                description="Schema for the Advertisement Localization task: identifying ad segments and their transcripts.",
            )

        elif self.config.name == "named_entity_localization":
            return DatasetInfo(
                features=Features({
                    "video_url": Value("string"),
                    "audio": Value("string"),
                    "question": Value("string"),
                    "groundtruth": Features({
                        "entities": LargeList(
                            feature=Features({
                                "entity_type": Value("string"),
                                "entity": Value("string"),
                                "start": Value("float32"),
                                "end": Value("float32"),
                            }),
                        ),
                        "word_timestamp": LargeList(
                            feature=Features({
                                "word": Value("string"),
                                "start": Value("float32"),
                                "end": Value("float32"),
                            }),
                        ),
                    }),
                }),
                description="Schema for the Named Entity Localization task: identifying specific entities and their timestamps.",
            )

        elif self.config.name == "speaker_number_estimation":
            return DatasetInfo(
                features=Features({
                    "video_url": Value("string"),
                    "audio": Value("string"),
                    "question": Value("string"),
                    "groundtruth": Sequence(Value("int32")),
                }),
                description="Schema for the Speaker Number Estimation task: counting speakers in a segment.",
            )

        elif self.config.name == "entire_duration":
            return DatasetInfo(
                features=Features({
                    "video_url": Value("string"),
                    "audio": Value("string"),
                    "question": Value("string"),
                    "groundtruth": Value("float32"),
                }),
                description="Schema for the Entire Duration task: determining the total duration of the audio.",
            )

        elif self.config.name == "event_duration":
            return DatasetInfo(
                features=Features({
                    "video_url": Value("string"),
                    "audio": Value("string"),
                    "question": Value("string"),
                    "groundtruth": Value("float32"),
                    "answer_type": Value("string"),
                }),
                description="Schema for the Event Duration task: identifying and timing specific events.",
            )

        elif self.config.name == "emotion_ranking":
            return DatasetInfo(
                features=Features({
                    "video_url": Value("string"),
                    "audio": Value("string"),
                    "question": Value("string"),
                    "type": Value("string"),
                    "correct_option": Value("string"),
                    "option_A": Value("string"),
                    "option_B": Value("string"),
                    "option_C": Value("string"),
                    "option_D": Value("string"),
                    "option_E": Value("string"),
                    "correct_answer": Value("string"),
                }),
                description="Schema for the Emotion Ranking task: selecting the best emotion option.",
            )

        elif self.config.name == "emotion_reasoning":
            return DatasetInfo(
                features=Features({
                    "video_url": Value("string"),
                    "audio": Value("string"),
                    "question": Value("string"),
                    "type": Value("string"),
                    "correct_option": Value("string"),
                    "option_A": Value("string"),
                    "option_B": Value("string"),
                    "option_C": Value("string"),
                    "option_D": Value("string"),
                    "correct_answer": Value("string"),
                }),
                description="Schema for the Emotion Reasoning task: explaining emotional context.",
            )

        else:
            raise ValueError(f"Unknown config name: {self.config.name}")

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators based on the selected task configuration."""
        if self.config.name not in TASKS:
            raise ValueError(f"Unknown config name: {self.config.name}")

        # Each task configuration maps to a single JSON annotation file named after the task.
        data_files = {self.config.name: f"blab_long_audio/{self.config.name}.json"}

        resolved_data_files = dl_manager.download_and_extract(data_files)

        return [
            SplitGenerator(
                name=split_name,
                gen_kwargs={"filepath": filepath},
            )
            for split_name, filepath in resolved_data_files.items()
        ]

    def _generate_examples(self, filepath):
        """Yields examples from the dataset file, parsing data based on the active config."""
        with open(filepath, 'r', encoding='utf-8') as f:
            all_data = json.load(f)

        for id_, data in enumerate(all_data):
            try:
                video_url = data.get("video_url", None)
                audio = data.get("audio", None)
                question = data.get("question", None)

                # Fields shared by every task configuration.
                example = {
                    "video_url": video_url,
                    "audio": audio,
                    "question": question,
                }
if self.config.name == "word_localization": |
|
raw_groundtruth = data.get("groundtruth", []) |
|
processed_groundtruth = [] |
|
for item in raw_groundtruth: |
|
if isinstance(item, dict): |
|
processed_groundtruth.append({ |
|
"word": item.get("word", None), |
|
"start": item.get("start", None), |
|
"end": item.get("end", None), |
|
}) |
|
example["groundtruth"] = processed_groundtruth |
|
|
|
elif self.config.name == "advertisement_localization": |
|
raw_groundtruth = data.get("groundtruth", {}) |
|
raw_ads_segments = raw_groundtruth.get("ads_segment", []) |
|
processed_ads_segments = [] |
|
for ad_item in raw_ads_segments: |
|
if isinstance(ad_item, dict): |
|
processed_ads_segments.append({ |
|
"text": ad_item.get("text", None), |
|
"start": ad_item.get("start", None), |
|
"end": ad_item.get("end", None), |
|
}) |
|
raw_word_timestamps = raw_groundtruth.get("word_timestamp", []) |
|
processed_word_timestamps = [] |
|
for word_item in raw_word_timestamps: |
|
if isinstance(word_item, dict): |
|
processed_word_timestamps.append({ |
|
"word": word_item.get("word", None), |
|
"start": word_item.get("start", None), |
|
"end": word_item.get("end", None), |
|
}) |
|
example["groundtruth"] = { |
|
"ads_segment": processed_ads_segments, |
|
"word_timestamp": processed_word_timestamps, |
|
} |
|
|
|
elif self.config.name == "named_entity_localization": |
|
raw_groundtruth = data.get("groundtruth", {}) |
|
raw_entities = raw_groundtruth.get("entities", []) |
|
processed_entities = [] |
|
for entity_item in raw_entities: |
|
if isinstance(entity_item, dict): |
|
processed_entities.append({ |
|
"entity_type": entity_item.get("entity_type", None), |
|
"entity": entity_item.get("entity", None), |
|
"start": entity_item.get("start", None), |
|
"end": entity_item.get("end", None), |
|
}) |
|
raw_word_timestamps = raw_groundtruth.get("word_timestamp", []) |
|
processed_word_timestamps = [] |
|
for word_item in raw_word_timestamps: |
|
if isinstance(word_item, dict): |
|
processed_word_timestamps.append({ |
|
"word": word_item.get("word", None), |
|
"start": word_item.get("start", None), |
|
"end": word_item.get("end", None), |
|
}) |
|
example["groundtruth"] = { |
|
"entities": processed_entities, |
|
"word_timestamp": processed_word_timestamps, |
|
} |
|
|
|
elif self.config.name == "speaker_number_estimation": |
|
raw_groundtruth = data.get("groundtruth", None) |
|
processed_groundtruth = [] |
|
if raw_groundtruth is not None: |
|
if isinstance(raw_groundtruth, list): |
|
processed_groundtruth = [int(x) for x in raw_groundtruth if isinstance(x, (int, float))] |
|
elif isinstance(raw_groundtruth, (int, float)): |
|
processed_groundtruth = [int(raw_groundtruth)] |
|
|
|
example["groundtruth"] = processed_groundtruth |
|
|
|
elif self.config.name == "entire_duration": |
|
example["groundtruth"] = data.get("groundtruth", None) |
|
|
|
elif self.config.name == "event_duration": |
|
example["groundtruth"] = data.get("groundtruth", None) |
|
example["answer_type"] = data.get("answer_type", None) |
|
|
|
elif self.config.name == "emotion_ranking": |
|
example["type"] = data.get("type", None) |
|
example["correct_option"] = data.get("correct_option", None) |
|
example["option_A"] = data.get("option_A", None) |
|
example["option_B"] = data.get("option_B", None) |
|
example["option_C"] = data.get("option_C", None) |
|
example["option_D"] = data.get("option_D", None) |
|
example["option_E"] = data.get("option_E", None) |
|
example["correct_answer"] = data.get("correct_answer", None) |
|
|
|
elif self.config.name == "emotion_reasoning": |
|
example["type"] = data.get("type", None) |
|
example["correct_option"] = data.get("correct_option", None) |
|
example["option_A"] = data.get("option_A", None) |
|
example["option_B"] = data.get("option_B", None) |
|
example["option_C"] = data.get("option_C", None) |
|
example["option_D"] = data.get("option_D", None) |
|
example["correct_answer"] = data.get("correct_answer", None) |
|
|
|

                else:
                    raise ValueError(
                        f"Unknown config name: {self.config.name}. "
                        "This should not happen if BUILDER_CONFIGS and _info are consistent."
                    )

                yield id_, example

            except Exception as e:
                print(f"Error processing example {id_} in {filepath} for config {self.config.name}: {e}")