Faquad-nli needs to be updated

#1
by nicholasKluge - opened

Hi Ruan!

As of datasets==4.0, loading scripts and trust_remote_code are no longer supported.

This breaks tools like lm-evaluation-harness-pt, which people working with Portuguese LLMs rely on to run evals.
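For context, this is roughly what now fails on datasets>=4.0, since the repo only ships a loading script (the exact error message may vary by version):

from datasets import load_dataset

# Fails on datasets>=4.0: script-based datasets and trust_remote_code
# are no longer supported, so load_dataset cannot build the dataset.
dataset = load_dataset("ruanchaves/faquad-nli")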

I made a script for you that converts the dataset to Parquet:

"""Convert FaQUAD-NLI to Parquet format"""

import json
import os
import pandas as pd
from datasets import Dataset, DatasetDict, DownloadManager

_URLS = {
    "data": "https://raw.githubusercontent.com/liafacom/faquad/6ad978f20672bb41625b3b71fbe4a88b893d0a86/data/dataset.json",
    "spans": "https://huggingface.co/datasets/ruanchaves/faquad-nli/raw/main/spans.csv"
}

def check_overlap(interval1, interval2):
    """Check for overlap between two integer intervals"""
    return not (interval1[1] < interval2[0] or interval2[1] < interval1[0])

def process_data(json_data, spans_data, split):
    """Process the data similar to the original script's _generate_examples method"""
    examples = {
        "document_index": [],
        "document_title": [],
        "paragraph_index": [],
        "question": [],
        "answer": [],
        "label": []
    }
    
    # Each span row identifies one sentence of a paragraph by character
    # offsets, plus the split it belongs to.
    for span_row in spans_data:
        if span_row["split"] != split:
            continue

        document_title = json_data["data"][
            span_row["document_index"]
        ]["title"]

        sentence = json_data["data"][
            span_row["document_index"]
        ]["paragraphs"][
            span_row["paragraph_index"]
        ]["context"][
            span_row["sentence_start_char"]:span_row["sentence_end_char"]
        ]
        sentence_interval = (span_row["sentence_start_char"], span_row["sentence_end_char"])

        for qas_row in json_data["data"][
            span_row["document_index"]
        ]["paragraphs"][
            span_row["paragraph_index"]
        ]["qas"]:
            question = qas_row["question"]
            question_spans = []
            for qas_answer in qas_row["answers"]:
                qas_answer_start_span = qas_answer["answer_start"]
                qas_answer_end_span = qas_answer["answer_start"] + len(qas_answer["text"])
                question_spans.append((qas_answer_start_span, qas_answer_end_span))
            
            # Label 1 if the sentence overlaps any annotated answer span
            # for this question, otherwise label 0.
            overlap_found = False
            for question_interval in question_spans:
                if check_overlap(sentence_interval, question_interval):
                    examples["document_index"].append(span_row["document_index"])
                    examples["document_title"].append(document_title)
                    examples["paragraph_index"].append(span_row["paragraph_index"])
                    examples["question"].append(question)
                    examples["answer"].append(sentence)
                    examples["label"].append(1)
                    overlap_found = True
                    break
            
            if not overlap_found:
                examples["document_index"].append(span_row["document_index"])
                examples["document_title"].append(document_title)
                examples["paragraph_index"].append(span_row["paragraph_index"])
                examples["question"].append(question)
                examples["answer"].append(sentence)
                examples["label"].append(0)
    
    return examples

def main():
    # Download the data
    download_manager = DownloadManager()
    downloaded_files = download_manager.download(_URLS)
    
    # Load the data
    with open(downloaded_files["data"], "r", encoding="utf-8") as f:
        json_data = json.load(f)
    
    spans = pd.read_csv(downloaded_files["spans"]).to_dict("records")
    
    # Create datasets for each split
    dataset_dict = DatasetDict()
    
    for split in ["train", "validation", "test"]:
        examples = process_data(json_data, spans, split)
        dataset_dict[split] = Dataset.from_dict(examples)
    
    # Save the dataset to disk (Arrow format); push_to_hub, below, uploads it as Parquet
    output_dir = "./faquad-nli-parquet"
    os.makedirs(output_dir, exist_ok=True)
    dataset_dict.save_to_disk(output_dir)
    
    print(f"Dataset saved to {output_dir}")
    print("Dataset statistics:")
    for split, dataset in dataset_dict.items():
        print(f"  {split}: {len(dataset)} examples")

if __name__ == "__main__":
    main()

Then push it to the Hub:

from datasets import load_from_disk

dataset = load_from_disk("./faquad-nli-parquet")
dataset.push_to_hub("ruanchaves/faquad-nli-parquet")

In the meantime, I made a working copy of your dataset (nicholasKluge/faquad-nli-parquet) so I can keep running evals. I'll delete it as soon as you update your dataset.
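Since the copy is stored as Parquet, it should load with plain load_dataset, no loading script or trust_remote_code needed:

from datasets import load_dataset

# Loads the Parquet copy directly, which works fine on datasets>=4.0.
dataset = load_dataset("nicholasKluge/faquad-nli-parquet")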
