# M-QALM dataset loading script for the Hugging Face `datasets` library.
import json
import os
import datasets
import ast  # NOTE(review): `ast` appears unused anywhere in this file — candidate for removal.

# Short description shown on the dataset card / in DatasetInfo.
_DESCRIPTION = """\
The M-QALM Dataset Repository contains Multiple-Choice and Abstractive Questions for evaluating the performance of LLMs in the clinical and biomedical domain.
"""

# Canonical dataset repository on the Hugging Face Hub.
_HOMEPAGE = "https://huggingface.co/datasets/anand-s/m_qalm"

_LICENSE = "Apache License 2.0"

# Define URLs for different parts of the dataset if applicable
# One zip archive per (split, task-family) pair. The keys double as the
# builder-config names, so `_URLS[self.config.name]` resolves the archive.
_URLS = {
    "train_normal_mcqa": "https://huggingface.co/datasets/anand-s/m_qalm/resolve/main/train_normal_mcqa.zip",
    "val_normal_mcqa": "https://huggingface.co/datasets/anand-s/m_qalm/resolve/main/val_normal_mcqa.zip",
    "test_normal_mcqa": "https://huggingface.co/datasets/anand-s/m_qalm/resolve/main/test_normal_mcqa.zip",
    "train_context_mcqa": "https://huggingface.co/datasets/anand-s/m_qalm/resolve/main/train_context_mcqa.zip",
    "val_context_mcqa": "https://huggingface.co/datasets/anand-s/m_qalm/resolve/main/val_context_mcqa.zip",
    "test_context_mcqa": "https://huggingface.co/datasets/anand-s/m_qalm/resolve/main/test_context_mcqa.zip",
    "train_aqa": "https://huggingface.co/datasets/anand-s/m_qalm/resolve/main/train_aqa.zip",
    "val_aqa": "https://huggingface.co/datasets/anand-s/m_qalm/resolve/main/val_aqa.zip",
    "test_aqa": "https://huggingface.co/datasets/anand-s/m_qalm/resolve/main/test_aqa.zip",
}
class MQalm(datasets.GeneratorBasedBuilder):
    """Builder for the M-QALM clinical/biomedical QA dataset.

    Covers three task families, each with train/val/test splits:
      * ``*_normal_mcqa``  — multiple-choice QA,
      * ``*_context_mcqa`` — multiple-choice QA with retrieved context,
      * ``*_aqa``          — abstractive QA.

    Each builder config maps to exactly one split: the config name is
    simultaneously the key into ``_URLS`` and the split name.
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="train_normal_mcqa", version=VERSION, description="Train set MCQA"),
        datasets.BuilderConfig(name="val_normal_mcqa", version=VERSION, description="Val set MCQA"),
        datasets.BuilderConfig(name="test_normal_mcqa", version=VERSION, description="Test set MCQA"),
        datasets.BuilderConfig(name="train_context_mcqa", version=VERSION, description="Train set context MCQA"),
        datasets.BuilderConfig(name="val_context_mcqa", version=VERSION, description="Val set context MCQA"),
        datasets.BuilderConfig(name="test_context_mcqa", version=VERSION, description="Test set context MCQA"),
        datasets.BuilderConfig(name="train_aqa", version=VERSION, description="Train set AQA"),
        datasets.BuilderConfig(name="val_aqa", version=VERSION, description="Val set AQA"),
        datasets.BuilderConfig(name="test_aqa", version=VERSION, description="Test set AQA"),
    ]

    @staticmethod
    def _mcqa_feature_dict():
        """Feature fields shared by every MCQA config (fresh dict per call)."""
        return {
            "prompt": datasets.Value("string"),
            "question": datasets.Value("string"),
            "options": datasets.Sequence(datasets.Value("string")),
            "answer": datasets.Value("string"),
            # num_options is declared as a string, not an int — kept as in
            # the original schema for backward compatibility.
            "num_options": datasets.Value("string"),
            "question_type": datasets.Value("string"),
            "dataset_name": datasets.Value("string"),
        }

    @staticmethod
    def _few_shot_feature_dict(with_context=False):
        """Feature fields of a single few-shot exemplar."""
        fields = {
            "question": datasets.Value("string"),
            "answer": datasets.Value("string"),
            "options": datasets.Sequence(datasets.Value("string")),
        }
        if with_context:
            fields["context"] = datasets.Sequence(datasets.Value("string"))
        return fields

    def _info(self):
        """Return DatasetInfo with the feature schema for the active config."""
        features_dict = {
            "train_normal_mcqa": datasets.Features(self._mcqa_feature_dict()),
            "val_normal_mcqa": datasets.Features({
                **self._mcqa_feature_dict(),
                "few_shot_prompt": datasets.Sequence(
                    datasets.Features(self._few_shot_feature_dict())
                ),
            }),
            "test_normal_mcqa": datasets.Features({
                **self._mcqa_feature_dict(),
                "few_shot_prompt": datasets.Sequence(
                    datasets.Features(self._few_shot_feature_dict())
                ),
            }),
            "train_context_mcqa": datasets.Features({
                **self._mcqa_feature_dict(),
                "context": datasets.Sequence(datasets.Value("string")),
            }),
            "val_context_mcqa": datasets.Features({
                **self._mcqa_feature_dict(),
                "context": datasets.Sequence(datasets.Value("string")),
                # NOTE(review): declared as a plain list-of-dict ([{...}])
                # while test_context_mcqa uses Sequence(Features(...)); the
                # two forms produce different column layouts in `datasets`.
                # Preserved as-is so the published schema does not change —
                # confirm whether the asymmetry is intentional.
                "few_shot_prompt": [self._few_shot_feature_dict(with_context=True)],
            }),
            "test_context_mcqa": datasets.Features({
                **self._mcqa_feature_dict(),
                "context": datasets.Sequence(datasets.Value("string")),
                "few_shot_prompt": datasets.Sequence(
                    datasets.Features(self._few_shot_feature_dict(with_context=True))
                ),
            }),
            "train_aqa": datasets.Features({
                "question": datasets.Value("string"),
                # NOTE(review): train_aqa declares a single string answer
                # while val/test AQA declare a sequence of strings — kept as
                # in the original schema; verify against the published data.
                "answer": datasets.Value("string"),
                "prompt": datasets.Value("string"),
                "dataset_name": datasets.Value("string"),
            }),
            "val_aqa": datasets.Features({
                "question": datasets.Value("string"),
                "answer": datasets.Sequence(datasets.Value("string")),
                "prompt": datasets.Value("string"),
                "dataset_name": datasets.Value("string"),
            }),
            "test_aqa": datasets.Features({
                "question": datasets.Value("string"),
                "answer": datasets.Sequence(datasets.Value("string")),
                "prompt": datasets.Value("string"),
                "dataset_name": datasets.Value("string"),
            }),
        }
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features_dict[self.config.name],
            homepage=_HOMEPAGE,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download and extract the one archive belonging to this config.

        Each config represents a single split, so exactly one SplitGenerator
        is returned, named after the config.
        """
        data_dir = dl_manager.download_and_extract(_URLS[self.config.name])
        return [
            datasets.SplitGenerator(
                name=self.config.name,
                gen_kwargs={"directory": data_dir, "split": self.config.name},
            )
        ]

    @staticmethod
    def _build_example(data, split, dataset_name):
        """Project one raw JSON row onto the schema of *split*.

        Returns the example dict, or None for an unrecognized split name
        (mirrors the original silent fall-through).
        """
        if split in ("train_aqa", "val_aqa", "test_aqa"):
            return {
                "prompt": data["prompt"],
                "question": data["question"],
                "answer": data["answer"],
                "dataset_name": dataset_name,
            }
        if split not in (
            "train_normal_mcqa", "val_normal_mcqa", "test_normal_mcqa",
            "train_context_mcqa", "val_context_mcqa", "test_context_mcqa",
        ):
            return None
        # Fields common to every MCQA split.
        example = {
            "prompt": data["prompt"],
            "question": data["question"],
            "options": data["options"],
            "answer": data["answer"],
            "num_options": data["num_options"],
            "question_type": data["question_type"],
            "dataset_name": dataset_name,
        }
        if split in ("train_context_mcqa", "val_context_mcqa", "test_context_mcqa"):
            example["context"] = data["context"]
        if split in ("val_normal_mcqa", "test_normal_mcqa"):
            # Project exemplars down to the three declared fields, dropping
            # any extra keys present in the raw rows.
            example["few_shot_prompt"] = [
                {
                    "question": item["question"],
                    "answer": item["answer"],
                    "options": item["options"],
                }
                for item in data["few_shot_prompt"]
            ]
        elif split in ("val_context_mcqa", "test_context_mcqa"):
            # Passed through unprojected (unlike the normal-MCQA splits);
            # exemplars here additionally carry a "context" field.
            example["few_shot_prompt"] = data["few_shot_prompt"]
        return example

    def _generate_examples(self, directory, split):
        """Yield (key, example) pairs from every ``.jsonl`` file in *directory*.

        Files are visited in sorted order so that example keys and ordering
        are deterministic — ``os.listdir`` order is arbitrary and varies by
        filesystem, which previously made the generated dataset
        non-reproducible across machines.
        """
        key_idx = 0
        for filename in sorted(os.listdir(directory)):
            if not filename.endswith(".jsonl"):
                continue
            filepath = os.path.join(directory, filename)
            # The source dataset name is the file name sans extension;
            # hoisted out of the per-line loop (it is constant per file).
            dataset_name = filename.replace(".jsonl", "")
            with open(filepath, encoding="utf-8") as f:
                for line in f:
                    example = self._build_example(json.loads(line), split, dataset_name)
                    if example is not None:
                        yield key_idx, example
                        key_idx += 1