Modalities: Text
Formats: parquet
Languages: English
ArXiv: 1808.07042
Libraries: Datasets, pandas
baber committed (verified) · Commit 82e11af · 1 Parent(s): add010b

Delete coqa.py

Files changed (1)
  1. coqa.py +0 -244
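
With the loading script deleted, `load_dataset` no longer has to execute repository code (recent `datasets` releases require `trust_remote_code=True` for that); the parquet files noted under Formats are served directly instead. A quick sketch for inspecting what the repo now contains; the repo id below is a stand-in, not this dataset's confirmed path:

```python
from huggingface_hub import list_repo_files

# NOTE: "user/coqa" is a hypothetical id; substitute this dataset's real Hub path.
files = list_repo_files("user/coqa", repo_type="dataset")
print([f for f in files if f.endswith(".parquet")])
```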
coqa.py DELETED
@@ -1,244 +0,0 @@
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """CoQA dataset.
-
- This `CoQA` adds the "additional_answers" feature that's missing in the original
- datasets version:
- https://github.com/huggingface/datasets/blob/master/datasets/coqa/coqa.py
- """
-
-
- import json
-
- import datasets
-
-
- _CITATION = """\
- @misc{reddy2018coqa,
-     title={CoQA: A Conversational Question Answering Challenge},
-     author={Siva Reddy and Danqi Chen and Christopher D. Manning},
-     year={2018},
-     eprint={1808.07042},
-     archivePrefix={arXiv},
-     primaryClass={cs.CL}
- }
- """
-
- _DESCRIPTION = """\
- CoQA is a large-scale dataset for building Conversational Question Answering
- systems. The goal of the CoQA challenge is to measure the ability of machines to
- understand a text passage and answer a series of interconnected questions that
- appear in a conversation.
- """
-
- _HOMEPAGE = "https://stanfordnlp.github.io/coqa/"
-
- _LICENSE = "Different licenses depending on the content (see https://stanfordnlp.github.io/coqa/ for details)"
-
- _URLS = {
-     "train": "https://downloads.cs.stanford.edu/nlp/data/coqa/coqa-train-v1.0.json",
-     "validation": "https://downloads.cs.stanford.edu/nlp/data/coqa/coqa-dev-v1.0.json",
- }
-
- # `additional_answers` are not available in the train set so we fill them with
- # empty dicts of the same form.
- _EMPTY_ADDITIONAL_ANSWER = {
-     "0": [
-         {
-             "span_start": -1,
-             "span_end": -1,
-             "span_text": "",
-             "input_text": "",
-             "turn_id": -1,
-         }
-     ],
-     "1": [
-         {
-             "span_start": -1,
-             "span_end": -1,
-             "span_text": "",
-             "input_text": "",
-             "turn_id": -1,
-         }
-     ],
-     "2": [
-         {
-             "span_start": -1,
-             "span_end": -1,
-             "span_text": "",
-             "input_text": "",
-             "turn_id": -1,
-         }
-     ],
- }
-
-
- class Coqa(datasets.GeneratorBasedBuilder):
-     """CoQA is a large-scale dataset for building Conversational Question Answering systems."""
-
-     VERSION = datasets.Version("0.0.1")
-
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(
-             name="coqa", version=VERSION, description="The CoQA dataset."
-         ),
-     ]
-
-     def _info(self):
-         features = datasets.Features(
-             {
-                 "id": datasets.Value("string"),
-                 "source": datasets.Value("string"),
-                 "story": datasets.Value("string"),
-                 "questions": datasets.features.Sequence(
-                     {
-                         "input_text": datasets.Value("string"),
-                         "turn_id": datasets.Value("int32"),
-                     }
-                 ),
-                 "answers": datasets.features.Sequence(
-                     {
-                         "span_start": datasets.Value("int32"),
-                         "span_end": datasets.Value("int32"),
-                         "span_text": datasets.Value("string"),
-                         "input_text": datasets.Value("string"),
-                         "turn_id": datasets.Value("int32"),
-                     }
-                 ),
-                 "additional_answers": {
-                     "0": datasets.features.Sequence(
-                         {
-                             "span_start": datasets.Value("int32"),
-                             "span_end": datasets.Value("int32"),
-                             "span_text": datasets.Value("string"),
-                             "input_text": datasets.Value("string"),
-                             "turn_id": datasets.Value("int32"),
-                         }
-                     ),
-                     "1": datasets.features.Sequence(
-                         {
-                             "span_start": datasets.Value("int32"),
-                             "span_end": datasets.Value("int32"),
-                             "span_text": datasets.Value("string"),
-                             "input_text": datasets.Value("string"),
-                             "turn_id": datasets.Value("int32"),
-                         }
-                     ),
-                     "2": datasets.features.Sequence(
-                         {
-                             "span_start": datasets.Value("int32"),
-                             "span_end": datasets.Value("int32"),
-                             "span_text": datasets.Value("string"),
-                             "input_text": datasets.Value("string"),
-                             "turn_id": datasets.Value("int32"),
-                         }
-                     ),
-                 },
-             }
-         )
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         urls = {"train": _URLS["train"], "validation": _URLS["validation"]}
-         data_dirs = dl_manager.download_and_extract(urls)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath": data_dirs["train"],
-                     "split": datasets.Split.TRAIN,
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath": data_dirs["validation"],
-                     "split": datasets.Split.VALIDATION,
-                 },
-             ),
-         ]
-
-     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-     def _generate_examples(self, filepath, split):
-         with open(filepath, encoding="utf-8") as f:
-             data = json.load(f)
-             for row in data["data"]:
-                 id = row["id"]
-                 source = row["source"]
-                 story = row["story"]
-                 questions = [
-                     {"input_text": q["input_text"], "turn_id": q["turn_id"]}
-                     for q in row["questions"]
-                 ]
-                 answers = [
-                     {
-                         "span_start": a["span_start"],
-                         "span_end": a["span_end"],
-                         "span_text": a["span_text"],
-                         "input_text": a["input_text"],
-                         "turn_id": a["turn_id"],
-                     }
-                     for a in row["answers"]
-                 ]
-                 if split == datasets.Split.TRAIN:
-                     additional_answers = _EMPTY_ADDITIONAL_ANSWER
-                 else:
-                     additional_answers = {
-                         "0": [
-                             {
-                                 "span_start": a0["span_start"],
-                                 "span_end": a0["span_end"],
-                                 "span_text": a0["span_text"],
-                                 "input_text": a0["input_text"],
-                                 "turn_id": a0["turn_id"],
-                             }
-                             for a0 in row["additional_answers"]["0"]
-                         ],
-                         "1": [
-                             {
-                                 "span_start": a1["span_start"],
-                                 "span_end": a1["span_end"],
-                                 "span_text": a1["span_text"],
-                                 "input_text": a1["input_text"],
-                                 "turn_id": a1["turn_id"],
-                             }
-                             for a1 in row["additional_answers"]["1"]
-                         ],
-                         "2": [
-                             {
-                                 "span_start": a2["span_start"],
-                                 "span_end": a2["span_end"],
-                                 "span_text": a2["span_text"],
-                                 "input_text": a2["input_text"],
-                                 "turn_id": a2["turn_id"],
-                             }
-                             for a2 in row["additional_answers"]["2"]
-                         ],
-                     }
-                 yield row["id"], {
-                     "id": id,
-                     "story": story,
-                     "source": source,
-                     "questions": questions,
-                     "answers": answers,
-                     "additional_answers": additional_answers,
-                 }
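
After this deletion, the same data loads from the parquet export with no custom code path. A minimal sketch, assuming a stand-in repo id and the schema the deleted builder declared:

```python
from datasets import load_dataset

# NOTE: "user/coqa" is a hypothetical id; substitute this dataset's real Hub path.
ds = load_dataset("user/coqa")
print(ds)  # expected splits: train and validation

example = ds["validation"][0]
# `questions` was declared as a Sequence of dicts, so it comes back as a
# dict of parallel lists rather than a list of dicts:
print(example["questions"]["input_text"][0])
# The dev split carries real additional answers; train rows were filled with
# the _EMPTY_ADDITIONAL_ANSWER placeholder defined in the deleted script.
print(example["additional_answers"]["0"]["input_text"][0])
```

The same Sequence flattening applies to `answers` and to each annotator key ("0", "1", "2") under `additional_answers`.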