import glob
import json
import os
from io import BytesIO

import ijson
import more_itertools
import pandas as pd
from tqdm.auto import tqdm

import datasets
from datasets import Dataset, DatasetDict, DatasetInfo, Features, Sequence, Value

logger = datasets.logging.get_logger(__name__)
_CITATION = """ """ |
|
|
|
|
|
_DESCRIPTION = """ """ |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|

def to_dict_element(el, cols):
    """Re-nest one flattened example.

    `cols` are flattened column names such as "positive_ctxs.title"; columns that
    share a parent prefix are zipped back together into a list of dicts under that
    parent key.
    """
    bucketed_fields = more_itertools.bucket(cols, key=lambda x: x.split(".")[0])
    final_dict = {}
    for parent_name in list(bucketed_fields):
        fields = [y.split(".")[-1] for y in list(bucketed_fields[parent_name])]
        if len(fields) == 1 and fields[0] == parent_name:
            # Top-level column with no nested children: copy it through unchanged.
            final_dict[parent_name] = el[fields[0]]
        else:
            # Zip the per-field lists back into one dict per context entry.
            parent_list = []
            zipped_fields = list(zip(*[el[f"{parent_name}.{child}"] for child in fields]))
            for x in zipped_fields:
                parent_list.append({k: v for k, v in zip(fields, x)})
            final_dict[parent_name] = parent_list
    return final_dict


def get_json_dataset(dataset):
    """Convert a `datasets.Dataset` into a list of nested JSON-style dicts."""
    flat_dataset = dataset.flatten()
    json_dataset = dataset_to_json(flat_dataset)
    return [to_dict_element(el, cols=flat_dataset.column_names) for el in json_dataset]


def dataset_to_json(dataset):
    """Serialize a dataset to JSON lines in memory and parse it back into dicts."""
    new_str = BytesIO()
    dataset.to_json(new_str)
    new_str.seek(0)
    return [json.loads(line.decode()) for line in new_str]
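

# A minimal sketch of what the helpers above do (the values below are hypothetical,
# not taken from the dataset):
#
#     el = {
#         "question": "who wrote hamlet",
#         "positive_ctxs.title": ["Hamlet"],
#         "positive_ctxs.text": ["Hamlet is a tragedy by William Shakespeare ..."],
#     }
#     cols = ["question", "positive_ctxs.title", "positive_ctxs.text"]
#     to_dict_element(el, cols)
#     # -> {"question": "who wrote hamlet",
#     #     "positive_ctxs": [{"title": "Hamlet",
#     #                        "text": "Hamlet is a tragedy by William Shakespeare ..."}]}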


class NatQuestionsConfig(datasets.BuilderConfig):
    """BuilderConfig for NatQuestionsDPR."""

    def __init__(self, features, retriever, feature_format, url, **kwargs):
        """BuilderConfig for NatQuestions.

        Args:
            features: `datasets.Features` describing the examples of this config.
            retriever: name of the retriever whose results are used ("dprnq" or "bm25").
            feature_format: "dpr" for reranking-style examples, "inference" for ranked contexts.
            url: mapping from config name to per-split download URLs.
            **kwargs: keyword arguments forwarded to super.
        """
        super(NatQuestionsConfig, self).__init__(**kwargs)
        self.features = features
        self.retriever = retriever
        self.feature_format = feature_format
        self.url = url


RETBM25_RERANKING_URLS = {
    split: f"https://dl.fbaipublicfiles.com/dpr/data/retriever/biencoder-nq-{split}.json.gz"
    for split in ["train", "dev"]
}

RETDPR_RERANKING_URLS = {
    split: f"https://dl.fbaipublicfiles.com/dpr/data/retriever/biencoder-nq-adv-hn-{split}.json.gz"
    for split in ["train"]
}

RETDPR_INF_URLS = {
    split: f"https://dl.fbaipublicfiles.com/dpr/data/retriever_results/single/nq-{split}.json.gz"
    for split in ["train", "dev", "test"]
}

RETBM25_INF_URLS = {
    split: f"https://www.cs.tau.ac.il/~ohadr/nq-{split}.json.gz" for split in ["dev", "test"]
}

RETBM25_RERANKING_features = Features(
    {
        "dataset": Value(dtype="string"),
        "qid": Value(dtype="string"),
        "question": Value(dtype="string"),
        "answers": Sequence(feature=Value(dtype="string")),
        "positive_ctxs": Sequence(
            feature={
                "title": Value(dtype="string"),
                "text": Value(dtype="string"),
                "score": Value(dtype="float32"),
                "passage_id": Value(dtype="string"),
            }
        ),
        "hard_negative_ctxs": Sequence(
            feature={
                "title": Value(dtype="string"),
                "text": Value(dtype="string"),
                "score": Value(dtype="float32"),
                "passage_id": Value(dtype="string"),
            }
        ),
    }
)

RETDPR_RERANKING_features = Features(
    {
        "qid": Value(dtype="string"),
        "question": Value(dtype="string"),
        "answers": Sequence(feature=Value(dtype="string")),
        "hard_negative_ctxs": Sequence(
            feature={
                "passage_id": Value(dtype="string"),
                "title": Value(dtype="string"),
                "text": Value(dtype="string"),
                "score": Value(dtype="string"),
            }
        ),
        "positive_ctxs": Sequence(
            feature={
                "title": Value(dtype="string"),
                "text": Value(dtype="string"),
                "score": Value(dtype="float32"),
                "passage_id": Value(dtype="string"),
            }
        ),
    }
)

RETDPR_INF_features = Features(
    {
        "question": Value(dtype="string"),
        "qid": Value(dtype="string"),
        "answers": Sequence(feature=Value(dtype="string")),
        "ctxs": Sequence(
            feature={
                "id": Value(dtype="string"),
                "title": Value(dtype="string"),
                "text": Value(dtype="string"),
                "score": Value(dtype="float32"),
            }
        ),
    }
)

URL_DICT = {"reranking_dprnq":RETDPR_RERANKING_URLS, |
|
|
"reranking_bm25":RETBM25_RERANKING_URLS, |
|
|
"inference_dprnq":RETDPR_INF_URLS} |
|
|
|
|
|

class NatQuestions(datasets.GeneratorBasedBuilder):
    """Natural Questions question/context pairs built from DPR and BM25 retrieval results."""

    BUILDER_CONFIGS = [
        NatQuestionsConfig(
            name="reranking_dprnq",
            version=datasets.Version("1.0.1", ""),
            description="NatQuestions dataset in DPR format with the dprnq retrieval results",
            features=RETDPR_RERANKING_features,
            retriever="dprnq",
            feature_format="dpr",
            url=URL_DICT,
        ),
        NatQuestionsConfig(
            name="reranking_bm25",
            version=datasets.Version("1.0.1", ""),
            description="NatQuestions dataset in DPR format with the bm25 retrieval results",
            features=RETBM25_RERANKING_features,
            retriever="bm25",
            feature_format="dpr",
            url=URL_DICT,
        ),
        NatQuestionsConfig(
            name="inference_dprnq",
            version=datasets.Version("1.0.1", ""),
            description="NatQuestions dataset in a format accepted by the inference model, performing reranking on the dprnq retrieval results",
            features=RETDPR_INF_features,
            retriever="dprnq",
            feature_format="inference",
            url=URL_DICT,
        ),
        NatQuestionsConfig(
            name="inference_bm25",
            version=datasets.Version("1.0.1", ""),
            description="NatQuestions dataset in a format accepted by the inference model, performing reranking on the bm25 retrieval results",
            features=RETDPR_INF_features,
            retriever="bm25",
            feature_format="inference",
            url=URL_DICT,
        ),
    ]

    def _info(self):
        self.features = self.config.features
        self.retriever = self.config.retriever
        self.feature_format = self.config.feature_format
        self.url = self.config.url
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=self.config.features,
            supervised_keys=None,
            homepage="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        logger.info("Download URLs: %s", self.url)
        if len(self.url) > 0:
            filepath = dl_manager.download_and_extract(self.url)
        else:
            filepath = ""

        result = []
        if "train" in filepath[self.info.config_name]:
            result.append(
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={"filepath": filepath, "split": "train"},
                )
            )
        # reranking_dprnq has no dev/test retriever files of its own; its validation and
        # test splits are built in _generate_examples from the inference_dprnq files.
        if "dev" in filepath[self.info.config_name] or self.info.config_name == "reranking_dprnq":
            result.append(
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={"filepath": filepath, "split": "dev"},
                )
            )
        if "test" in filepath[self.info.config_name] or self.info.config_name == "reranking_dprnq":
            result.append(
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={"filepath": filepath, "split": "test"},
                )
            )
        return result
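
    # Splits produced per config, given the URLs above: reranking_bm25 -> train/dev;
    # reranking_dprnq -> train, plus dev/test derived from the inference_dprnq files;
    # inference_dprnq -> train/dev/test; inference_bm25 -> dev/test.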

    def _prepare_split(self, split_generator, **kwargs):
        # Re-assert this config's features before writing, so the generated examples
        # are encoded with the config's schema.
        self.info.features = self.config.features
        super()._prepare_split(split_generator, **kwargs)

    def _generate_examples(self, filepath, split):
        if self.info.config_name == "reranking_dprnq" and split in ["dev", "test"]:
            # There are no DPR reranking files for these splits, so build them from the
            # dprnq inference (retrieval) results: a retrieved context counts as positive
            # if any reference answer string appears in its title or text.
            for i, dict_element in iter_dpr_examples(
                split, "inference_dprnq", filepath["inference_dprnq"][split]
            ):
                dict_element["positive_ctxs"] = []
                answers = dict_element["answers"]
                any_true = False
                for x in dict_element["ctxs"]:
                    x["passage_id"] = x.pop("id")
                    x["has_answer"] = False
                    for ans in answers:
                        if ans in x["title"] or ans in x["text"]:
                            x["has_answer"] = True
                            dict_element["positive_ctxs"].append(x)
                            any_true = True
                            break
                # Use as many hard negatives as there are positives, taken from the
                # top-ranked contexts that do not contain an answer.
                negative_candidates = [x for x in dict_element["ctxs"] if not x["has_answer"]]
                dict_element["hard_negative_ctxs"] = negative_candidates[: len(dict_element["positive_ctxs"])]
                for name in ["positive_ctxs", "hard_negative_ctxs"]:
                    for x in dict_element[name]:
                        x.pop("has_answer", None)
                # Only keep questions that have at least one positive context.
                if any_true:
                    dict_element.pop("ctxs")
                    yield i, dict_element
        else:
            yield from iter_dpr_examples(
                split, self.info.config_name, filepath[self.info.config_name][split]
            )


def iter_dpr_examples(split, config_name, object_path):
    """Stream (key, example) pairs from a DPR-style JSON file.

    Each file is one large JSON array, so it is parsed incrementally with ijson.
    Fields not covered by the dataset features are dropped, and a per-split
    question id is added.
    """
    count = 0
    with open(object_path) as f:
        items = ijson.items(f, "item")
        for element in items:
            element.pop("negative_ctxs", None)
            for name in ["positive_ctxs", "hard_negative_ctxs", "ctxs"]:
                for x in element.get(name, []):
                    x.pop("title_score", None)
                    x.pop("has_answer", None)
                    if "reranking" in config_name and "id" in x:
                        x["passage_id"] = x.pop("id")
            element["qid"] = f"{count}_{split}"
            yield count, element
            count += 1
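

# Illustrative usage (a sketch, not part of the original script): assuming this file
# is saved locally, e.g. as "nat_questions_dpr.py" (the filename is hypothetical),
# the configs defined above can be loaded with the standard `datasets` API:
#
#     from datasets import load_dataset
#
#     nq_bm25 = load_dataset("nat_questions_dpr.py", "reranking_bm25")
#     print(nq_bm25["train"][0]["question"])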