iohadrubin committed on
Commit 2fc81ec · 1 Parent(s): 6f9daf0

Create new file

Files changed (1)
  1. nq.py +359 -0
nq.py ADDED
@@ -0,0 +1,359 @@
+ import glob
+ import json
+ from io import BytesIO
+
+ import ijson
+ import more_itertools
+
+ import datasets
+ from datasets import Features, Sequence, Value
+
+ logger = datasets.logging.get_logger(__name__)
+ # _URL = "https://www.cs.tau.ac.il/~ohadr/NatQuestions.zip"
+
+ # RERANKING_URLS = {
+ #     "train": "https://dl.fbaipublicfiles.com/dpr/data/retriever/biencoder-nq-train.json.gz",
+ #     "validation": "https://dl.fbaipublicfiles.com/dpr/data/retriever/biencoder-nq-dev.json.gz",
+ #     # "test": "https://dl.fbaipublicfiles.com/dpr/data/retriever/nq-test.qa.csv",
+ # }
+
+ _CITATION = """ """
+
+ _DESCRIPTION = """ """
+
+ # def read_glob(paths):
+ #     paths = glob.glob(paths)
+ #     data = []
+ #     for path in paths:
+ #         with open(path) as f:
+ #             if path.endswith(".json"):
+ #                 data.extend(json.load(f))
+ #             elif path.endswith(".jsonl"):
+ #                 for line in f:
+ #                     data.append(json.loads(line))
+ #     return data
+
+ def to_dict_element(el, cols):
+     """Re-nest a flattened example: group "parent.child" columns back into lists of dicts."""
+     bucked_fields = more_itertools.bucket(cols, key=lambda x: x.split(".")[0])
+     final_dict = {}
+     for parent_name in list(bucked_fields):
+         fields = [y.split(".")[-1] for y in list(bucked_fields[parent_name])]
+         if len(fields) == 1 and fields[0] == parent_name:
+             # Top-level column with no nesting: copy it through unchanged.
+             final_dict[parent_name] = el[fields[0]]
+         else:
+             # Zip the per-child value lists back into a list of {child: value} dicts.
+             parent_list = []
+             zipped_fields = list(zip(*[el[f"{parent_name}.{child}"] for child in fields]))
+             for x in zipped_fields:
+                 parent_list.append({k: v for k, v in zip(fields, x)})
+             final_dict[parent_name] = parent_list
+     return final_dict
+
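+ # Illustrative sketch of what to_dict_element does (the toy inputs below are
+ # assumptions, not data from this dataset):
+ #     el = {"question": "q1", "ctxs.title": ["t1", "t2"], "ctxs.text": ["x1", "x2"]}
+ #     cols = ["question", "ctxs.title", "ctxs.text"]
+ #     to_dict_element(el, cols)
+ #     # -> {"question": "q1", "ctxs": [{"title": "t1", "text": "x1"},
+ #     #                                {"title": "t2", "text": "x2"}]}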
+
+ def get_json_dataset(dataset):
+     # Flatten nested features to "parent.child" columns, round-trip through
+     # JSON lines, then re-nest each record with to_dict_element.
+     flat_dataset = dataset.flatten()
+     json_dataset = dataset_to_json(flat_dataset)
+     return [to_dict_element(el, cols=flat_dataset.column_names) for el in json_dataset]
+
+
+ def dataset_to_json(dataset):
+     # Serialize the dataset to JSON lines in memory and parse each line back
+     # into a plain dict.
+     new_str = BytesIO()
+     dataset.to_json(new_str)
+     new_str.seek(0)
+     return [json.loads(line.decode()) for line in new_str]
+
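+ # Illustrative sketch of the in-memory round trip (toy data is an assumption):
+ #     ds = datasets.Dataset.from_dict({"question": ["q1"], "answer": ["a1"]})
+ #     dataset_to_json(ds)
+ #     # -> [{"question": "q1", "answer": "a1"}]
+ # get_json_dataset additionally re-nests flattened "parent.child" columns
+ # via to_dict_element.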
+
+ # inference_features = datasets.Features(
+ #     {
+ #         "source": Value(dtype="string"),
+ #         "meta": {
+ #             "question": Value(dtype="string"),
+ #             "text": Value(dtype="string"),
+ #             "title": Value(dtype="string"),
+ #             "qid": Value(dtype="string"),
+ #             "id": Value(dtype="string"),
+ #         },
+ #     }
+ # )
+
+
+ class NatQuestionsConfig(datasets.BuilderConfig):
+     """BuilderConfig for NatQuestions."""
+
+     def __init__(self, features, retriever, feature_format, url, **kwargs):
+         """BuilderConfig for NatQuestions.
+
+         Args:
+             features: `datasets.Features` describing this config's examples.
+             retriever: which retriever produced the contexts ("bm25" or "dprnq").
+             feature_format: "dpr" for reranking data, "inference" for inference data.
+             url: mapping from config name to its download URLs.
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super().__init__(**kwargs)
+         self.features = features
+         self.retriever = retriever
+         self.feature_format = feature_format
+         self.url = url
+
+
+ RETBM25_RERANKING_URLS = {
+     split: f"https://dl.fbaipublicfiles.com/dpr/data/retriever/biencoder-nq-{split}.json.gz"
+     for split in ["train", "dev"]
+ }
+ RETDPR_RERANKING_URLS = {
+     split: f"https://dl.fbaipublicfiles.com/dpr/data/retriever/biencoder-nq-adv-hn-{split}.json.gz"
+     for split in ["train"]
+ }
+
+
+ RETDPR_INF_URLS = {
+     split: f"https://dl.fbaipublicfiles.com/dpr/data/retriever_results/single/nq-{split}.json.gz"
+     for split in ["train", "dev", "test"]
+ }
+
+ RETBM25_INF_URLS = {
+     split: f"https://www.cs.tau.ac.il/~ohadr/nq-{split}.json.gz" for split in ["dev", "test"]
+ }
+ RETBM25_RERANKING_features = Features(
+     {
+         "dataset": Value(dtype="string"),
+         "qid": Value(dtype="string"),
+         "question": Value(dtype="string"),
+         "answers": Sequence(feature=Value(dtype="string")),
+         "positive_ctxs": Sequence(
+             feature={
+                 "title": Value(dtype="string"),
+                 "text": Value(dtype="string"),
+                 "score": Value(dtype="float32"),
+                 # 'title_score': Value(dtype='int32'),
+                 "passage_id": Value(dtype="string"),
+             }
+         ),
+         # 'negative_ctxs': Sequence(feature={'title': Value(dtype='string'),
+         #                                    'text': Value(dtype='string'),
+         #                                    'score': Value(dtype='float32'),
+         #                                    # 'title_score': Value(dtype='int32'),
+         #                                    'passage_id': Value(dtype='string')}),
+         "hard_negative_ctxs": Sequence(
+             feature={
+                 "title": Value(dtype="string"),
+                 "text": Value(dtype="string"),
+                 "score": Value(dtype="float32"),
+                 # 'title_score': Value(dtype='int32'),
+                 "passage_id": Value(dtype="string"),
+             }
+         ),
+     }
+ )
+
+ RETDPR_RERANKING_features = Features(
+     {
+         "qid": Value(dtype="string"),
+         "question": Value(dtype="string"),
+         "answers": Sequence(feature=Value(dtype="string")),
+         # 'negative_ctxs': Sequence(feature=[]),
+         "hard_negative_ctxs": Sequence(
+             feature={
+                 "passage_id": Value(dtype="string"),
+                 "title": Value(dtype="string"),
+                 "text": Value(dtype="string"),
+                 "score": Value(dtype="string"),
+                 # 'has_answer': Value(dtype='int32')
+             }
+         ),
+         "positive_ctxs": Sequence(
+             feature={
+                 "title": Value(dtype="string"),
+                 "text": Value(dtype="string"),
+                 "score": Value(dtype="float32"),
+                 # 'title_score': Value(dtype='int32'),
+                 # 'has_answer': Value(dtype='int32'),
+                 "passage_id": Value(dtype="string"),
+             }
+         ),
+     }
+ )
+
+
+ RETDPR_INF_features = Features(
+     {
+         "question": Value(dtype="string"),
+         "qid": Value(dtype="string"),
+         "answers": Sequence(feature=Value(dtype="string")),
+         "ctxs": Sequence(
+             feature={
+                 "id": Value(dtype="string"),
+                 "title": Value(dtype="string"),
+                 "text": Value(dtype="string"),
+                 "score": Value(dtype="float32"),
+                 # "has_answer": Value(dtype="int32"),
+             }
+         ),
+     }
+ )
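+ # Illustrative note: the raw DPR retrieval results key each context by "id",
+ # while the reranking features above expect "passage_id". new_method below
+ # renames the field for reranking configs, e.g. (toy value is an assumption):
+ #     {"id": "wiki:42", "title": ..., "text": ..., "score": ...}
+ #     ->  {"passage_id": "wiki:42", "title": ..., "text": ..., "score": ...}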
+ URL_DICT = {
+     "reranking_dprnq": RETDPR_RERANKING_URLS,
+     "reranking_bm25": RETBM25_RERANKING_URLS,
+     "inference_dprnq": RETDPR_INF_URLS,
+     # Without this entry, the inference_bm25 config below would fail the
+     # filepath[config_name] lookups in _split_generators.
+     "inference_bm25": RETBM25_INF_URLS,
+ }
+
+ class NatQuestions(datasets.GeneratorBasedBuilder):
+
+     BUILDER_CONFIGS = [
+         NatQuestionsConfig(
+             name="reranking_dprnq",
+             version=datasets.Version("1.0.1", ""),
+             description="NatQuestions dataset in DPR format with the dprnq retrieval results",
+             features=RETDPR_RERANKING_features,
+             retriever="dprnq",
+             feature_format="dpr",
+             url=URL_DICT,
+         ),
+         NatQuestionsConfig(
+             name="reranking_bm25",
+             version=datasets.Version("1.0.1", ""),
+             description="NatQuestions dataset in DPR format with the bm25 retrieval results",
+             features=RETBM25_RERANKING_features,
+             retriever="bm25",
+             feature_format="dpr",
+             url=URL_DICT,
+         ),
+         NatQuestionsConfig(
+             name="inference_dprnq",
+             version=datasets.Version("1.0.1", ""),
+             description="NatQuestions dataset in a format accepted by the inference model, for reranking the dprnq retrieval results",
+             features=RETDPR_INF_features,
+             retriever="dprnq",
+             feature_format="inference",
+             url=URL_DICT,
+         ),
+         NatQuestionsConfig(
+             name="inference_bm25",
+             version=datasets.Version("1.0.1", ""),
+             description="NatQuestions dataset in a format accepted by the inference model, for reranking the bm25 retrieval results",
+             features=RETDPR_INF_features,
+             retriever="bm25",
+             feature_format="inference",
+             url=URL_DICT,
+         ),
+     ]
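+     # Illustrative usage sketch; the Hub repo id below is an assumption, not
+     # confirmed by this commit:
+     #     from datasets import load_dataset
+     #     ds = load_dataset("iohadrubin/nq", "reranking_bm25")
+     #     print(ds["train"][0]["question"])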
+
+     def _info(self):
+         # Cache the config's settings on the builder for later use.
+         self.features = self.config.features
+         self.retriever = self.config.retriever
+         self.feature_format = self.config.feature_format
+         self.url = self.config.url
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=self.config.features,
+             supervised_keys=None,
+             homepage="",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         logger.info(self.url)
+         if len(self.url) > 0:
+             filepath = dl_manager.download_and_extract(self.url)
+         else:
+             filepath = ""
+         # filepath = "/home/joberant/home/ohadr/testbed/notebooks/NatQuestions_retrievers"
+
+         result = []
+         if "train" in filepath[self.info.config_name]:
+             result.append(
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     gen_kwargs={"filepath": filepath, "split": "train"},
+                 )
+             )
+         # reranking_dprnq has no dev/test files of its own; its validation and
+         # test splits are derived from the inference_dprnq retrieval results.
+         if "dev" in filepath[self.info.config_name] or self.info.config_name == "reranking_dprnq":
+             result.append(
+                 datasets.SplitGenerator(
+                     name=datasets.Split.VALIDATION,
+                     gen_kwargs={"filepath": filepath, "split": "dev"},
+                 )
+             )
+         if "test" in filepath[self.info.config_name] or self.info.config_name == "reranking_dprnq":
+             result.append(
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TEST,
+                     gen_kwargs={"filepath": filepath, "split": "test"},
+                 )
+             )
+
+         return result
+
+     def _prepare_split(self, split_generator, **kwargs):
+         self.info.features = self.config.features
+         super()._prepare_split(split_generator, **kwargs)
+
+     def _generate_examples(self, filepath, split):
+         if self.info.config_name == "reranking_dprnq" and split in ["dev", "test"]:
+             # Build DPR-style reranking examples on the fly from the
+             # inference_dprnq retrieval results: contexts containing an answer
+             # string become positives, the rest become hard negatives.
+             for i, dict_element in new_method(split, "inference_dprnq", f"{filepath['inference_dprnq'][split]}"):
+                 dict_element["positive_ctxs"] = []
+                 answers = dict_element["answers"]
+                 any_true = False
+                 for x in dict_element["ctxs"]:
+                     x["passage_id"] = x.pop("id")
+                     x["has_answer"] = False
+                     for ans in answers:
+                         if ans in x["title"] or ans in x["text"]:
+                             x["has_answer"] = True
+                             dict_element["positive_ctxs"].append(x)
+                             any_true = True
+                             break  # avoid appending the same context once per matching answer
+                 negative_candidates = [x for x in dict_element["ctxs"] if not x["has_answer"]]
+                 dict_element["hard_negative_ctxs"] = negative_candidates[: len(dict_element["positive_ctxs"])]
+                 for name in ["positive_ctxs", "hard_negative_ctxs"]:
+                     for x in dict_element[name]:
+                         x.pop("has_answer", None)
+                 if any_true:
+                     # Drop the raw ctxs list and keep only examples that have
+                     # at least one positive context.
+                     dict_element.pop("ctxs")
+                     yield i, dict_element
+         else:
+             yield from new_method(split, self.info.config_name, f"{filepath[self.info.config_name][split]}")
+
+
+ def new_method(split, config_name, object_path):
+     # Stream examples from a (possibly large) JSON array with ijson instead of
+     # loading the whole file into memory. Defined at module level (no self),
+     # matching the bare calls in _generate_examples above.
+     count = 0
+     with open(object_path, "rb") as f:
+         items = ijson.items(f, "item")
+         for element in items:
+             element.pop("negative_ctxs", None)
+             for name in ["positive_ctxs", "hard_negative_ctxs", "ctxs"]:
+                 for x in element.get(name, []):
+                     x.pop("title_score", None)
+                     x.pop("has_answer", None)
+                     if "reranking" in config_name and "id" in x:
+                         x["passage_id"] = x.pop("id")
+             element["qid"] = f"{count}_{split}"
+             yield count, element
+             count += 1
+
+
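+ # Illustrative sketch of the streaming pattern used above: ijson.items(f, "item")
+ # yields each element of a top-level JSON array without reading the whole file.
+ # The toy buffer below is an assumption for demonstration:
+ #     import io
+ #     buf = io.BytesIO(b'[{"question": "q1"}, {"question": "q2"}]')
+ #     for element in ijson.items(buf, "item"):
+ #         print(element["question"])  # q1, then q2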
+
+ # def single_inference_format_example(ctx, question, qid):
+ #     datum = {}
+ #     datum["source"] = f"Title: {ctx['meta']['title']}\nText: {ctx['meta']['content']}\nQuestion: {question}\n"
+ #     datum["meta"] = {}
+ #     datum["meta"]["question"] = question
+ #     datum["meta"]["qid"] = qid
+ #     datum["meta"]["title"] = ctx["meta"]["title"]
+ #     datum["meta"]["text"] = ctx["meta"]["content"]
+ #     datum["meta"]["id"] = ctx["id"]
+ #     return datum
+
+
+ # def inference_format_example(element):
+ #     return [
+ #         single_inference_format_example(ctx, element["proof"], element["pid"]) for ctx in element["query_res"]
+ #     ]
+
+
+ # def inference_example(example):