m_qalm / test_repo.py
import json
import os

import datasets

# Metadata and descriptions for the dataset
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {Test Repo Dataset},
author={huggingface, Inc.},
year={2020}
}
"""
_DESCRIPTION = """\
The Test Repo dataset includes multiple choice questions tailored for NLP research and testing.
"""
_HOMEPAGE = "https://huggingface.co/datasets/anand-s/test_repo"
_LICENSE = "Apache License 2.0"
# URLs for the data files of each dataset configuration
_URLS = {
"mcq_domain": "https://huggingface.co/datasets/anand-s/test_repo/resolve/main/train_mcq.zip",
}
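# For reference, each .jsonl file inside train_mcq.zip is expected to hold one JSON
# object per line with (at least) the fields read by _generate_examples below.
# The values shown here are illustrative only, not taken from the actual data:
#
#   {"prompt": "Answer the following question.",
#    "question": "Which of these is a transformer architecture?",
#    "options": ["BERT", "LSTM", "HMM", "CRF"],
#    "answer": "BERT",
#    "context": "",
#    "num_options": "4",
#    "question_type": "single_choice"}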
class TestRepo(datasets.GeneratorBasedBuilder):
    """Dataset for multiple choice questions from Test Repo."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="mcq_domain",
            version=VERSION,
            description="This configuration covers multiple choice questions.",
        ),
    ]

    DEFAULT_CONFIG_NAME = "mcq_domain"
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "prompt": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "options": datasets.Sequence(datasets.Value("string")),
                    "answer": datasets.Value("string"),
                    "context": datasets.Value("string"),  # assuming all records include a context field
                    "num_options": datasets.Value("string"),
                    "question_type": datasets.Value("string"),
                    "dataset_name": datasets.Value("string"),
                }
            ),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        # Download and extract the archive for the selected configuration.
        data_dir = dl_manager.download_and_extract(_URLS[self.config.name])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"directory": data_dir, "split": "train"},
            ),
        ]
    def _generate_examples(self, directory, split):
        # Yield one example per line of every .jsonl file in the extracted directory.
        key_idx = 0
        for filename in os.listdir(directory):
            filepath = os.path.join(directory, filename)
            if not filename.endswith(".jsonl"):
                continue
            with open(filepath, encoding="utf-8") as f:
                for row in f:
                    if not row.strip():  # skip blank lines
                        continue
                    data = json.loads(row)
                    yield key_idx, {
                        "prompt": data.get("prompt", ""),
                        "question": data["question"],
                        "options": data["options"],
                        "answer": data.get("answer", ""),
                        "context": data.get("context", ""),
                        "num_options": data.get("num_options", ""),
                        "question_type": data.get("question_type", ""),
                        # Name each example after the file it came from.
                        "dataset_name": filename.replace(".jsonl", ""),
                    }
                    key_idx += 1
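
# Minimal usage sketch (not part of the builder itself): assuming a version of the
# `datasets` library that still supports script-based builders, the dataset can be
# loaded from the Hub by its repo id; newer releases may additionally require
# trust_remote_code=True.
if __name__ == "__main__":
    ds = datasets.load_dataset("anand-s/test_repo", "mcq_domain", split="train")
    print(ds)
    print(ds[0])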