# Cloze_QA_Dataset_Wikitext2 / Sample_Generate_dataset_file.py
# Uploaded by Chaithra26 — commit ba61e47 (verified):
# "Create Sample_Generate_dataset_file.py"
"""
create_cloze_qa_dataset.py
-------------------------------------
Generate Cloze-style QA dataset from WikiText-2.
Each sentence produces one 'fill-in-the-blank' question
with a single correct answer.
Output: JSONL files for train / validation / test.
"""
from datasets import load_dataset
import re
import json
from pathlib import Path
import random
# Load WikiText-2 (raw, untokenized variant). load_dataset returns a
# DatasetDict with 'train' / 'validation' / 'test' splits, each exposing
# a "text" column of newline-delimited article fragments.
print("🔹 Loading WikiText-2 dataset ...")
dataset = load_dataset("wikitext", "wikitext-2-raw-v1")
# Output directories — every split is written as <output_dir>/<split>.jsonl.
output_dir = Path("cloze_qa_dataset")
output_dir.mkdir(exist_ok=True, parents=True)
def create_cloze_question(sentence: str):
    """
    Convert a sentence into a Cloze-style question by masking one entity/keyword.

    A candidate is any capitalized token of at least 3 letters (a crude
    proxy for a named entity). One candidate is chosen at random and its
    first whole-word occurrence is replaced with '____'.

    Returns (question, answer) or None if the sentence is unsuitable.
    """
    # Find capitalized words (possible entities). Filter out tokens shorter
    # than 3 chars *before* choosing, so a sentence is not discarded just
    # because the random pick landed on a short word while longer candidates exist.
    candidates = [w for w in re.findall(r"\b[A-Z][a-zA-Z]+\b", sentence) if len(w) >= 3]
    if not candidates:
        return None
    answer = random.choice(candidates)
    # Mask the first *whole-word* occurrence only. A plain str.replace would
    # also hit substrings embedded in other words (e.g. 'Paris' inside
    # 'unParis'), corrupting the question text.
    question = re.sub(rf"\b{re.escape(answer)}\b", "____", sentence, count=1)
    if question == sentence:
        return None
    return question.strip(), answer.strip()
def generate_qa_split(split_name, data):
    """
    Build Cloze QA pairs for every sentence of one dataset split and
    write them as JSON Lines to <output_dir>/<split_name>.jsonl.
    """
    output_path = output_dir / f"{split_name}.jsonl"
    n_written = 0
    with open(output_path, "w", encoding="utf-8") as out:
        for doc_id, text in enumerate(data["text"]):
            stripped = text.strip()
            if not stripped:
                continue  # skip blank corpus lines
            # Naive sentence split on terminal punctuation followed by spaces.
            for sent_id, sentence in enumerate(re.split(r'(?<=[.!?]) +', stripped)):
                pair = create_cloze_question(sentence)
                if not pair:
                    continue
                question, answer = pair
                out.write(json.dumps(
                    {
                        "doc_id": doc_id,
                        "sent_id": sent_id,
                        "title": None,
                        "question": question,
                        "answer": answer,
                    },
                    ensure_ascii=False,
                ) + "\n")
                n_written += 1
    print(f" Saved {n_written} QA pairs to {output_path}")
# Generate datasets: one JSONL file per WikiText-2 split.
for split_name in ("train", "validation", "test"):
    generate_qa_split(split_name, dataset[split_name])
print("\nAll splits processed and saved successfully")