parquet-converter committed
Commit f667029 · 1 Parent(s): 904b9ce

Update parquet files

.gitattributes DELETED
@@ -1,29 +0,0 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bin.* filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zstandard filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
- dummy/checklist filter=lfs diff=lfs merge=lfs -text
- dummy/robust filter=lfs diff=lfs merge=lfs -text
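
Every deleted rule has the form `pattern filter=lfs diff=lfs merge=lfs -text`, which routes matching paths through Git LFS. A minimal sketch of how such suffix patterns select files, using `fnmatch` on a hypothetical subset of the rules (note `fnmatch` cannot express the gitignore-style `saved_model/**/*` rule; the file names below are illustrative only):

```python
from fnmatch import fnmatch

# A subset of the suffix patterns from the deleted .gitattributes.
LFS_PATTERNS = ["*.parquet", "*.tar.*", "*.zip", "*tfevents*"]

def is_lfs_tracked(path: str) -> bool:
    """Return True if the path matches any of the LFS suffix patterns."""
    return any(fnmatch(path, pat) for pat in LFS_PATTERNS)

# Hypothetical file names, just to exercise the patterns.
for name in ["robust/dureader-train.parquet", "dureader.py"]:
    print(name, "->", is_lfs_tracked(name))
```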
.gitignore DELETED
@@ -1,115 +0,0 @@
- .idea/
- .vscode/
- log/
- logs/
- testdata
-
- models/
- *.imp
- *.pyc
- .DS_Store
- __pycache__
- # Byte-compiled / optimized / DLL files
- __pycache__/
- *.py[cod]
- *$py.class
- *.ipynb
- # C extensions
- *.so
-
- # Distribution / packaging
- .Python
- build/
- develop-eggs/
- dist/
- downloads/
- eggs/
- .eggs/
- lib/
- lib64/
- parts/
- sdist/
- var/
- wheels/
- *.egg-info/
- .installed.cfg
- *.egg
- MANIFEST
- ifchange
- # PyInstaller
- # Usually these files are written by a python script from a template
- # before PyInstaller builds the exe, so as to inject date/other infos into it.
- *.manifest
- *.spec
-
- # Installer logs
- pip-log.txt
- pip-delete-this-directory.txt
-
- # Unit test / coverage reports
- htmlcov/
- .tox/
- .coverage
- .coverage.*
- .cache
- nosetests.xml
- coverage.xml
- *.cover
- .hypothesis/
- .pytest_cache/
-
- # Translations
- *.mo
- *.pot
-
- # Django stuff:
- *.log
- local_settings.py
- db.sqlite3
-
- # Flask stuff:
- instance/
- .webassets-cache
-
- # Scrapy stuff:
- .scrapy
-
- # Sphinx documentation
- docs/_build/
-
- # PyBuilder
- target/
-
- # Jupyter Notebook
- .ipynb_checkpoints
-
- # pyenv
- .python-version
-
- # celery beat schedule file
- celerybeat-schedule
-
- # SageMath parsed files
- *.sage.py
-
- # Environments
- .env
- .venv
- env/
- venv/
- ENV/
- env.bak/
- venv.bak/
-
- # Spyder project settings
- .spyderproject
- .spyproject
-
- # Rope project settings
- .ropeproject
-
- # mkdocs documentation
- /site
-
- # mypy
- .mypy_cache/
README.md DELETED
@@ -1,19 +0,0 @@
- # dureader
-
- The data comes from the Qianyan DuReader dataset; the original source is [Qianyan Dataset: Reading Comprehension](https://aistudio.baidu.com/aistudio/competition/detail/49/0/task-definition).
-
- > This dataset is for academic research use only. If this repository involves any infringement, it will be removed immediately.
-
- It currently contains the following two subsets:
-
- * DuReader-robust
- * DuReader-checklist
-
- ```python
- from datasets import load_dataset
-
- robust = load_dataset("luozhouyang/dureader", "robust")
-
- checklist = load_dataset("luozhouyang/dureader", "checklist")
-
- ```
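
After this commit the subsets ship as parquet files rather than being built by a loading script. A minimal sketch of reading one subset directly with the generic `parquet` builder from `datasets`, assuming the repo-relative file names added in this commit:

```python
from datasets import load_dataset

# Split-to-file mapping taken from the parquet files added in this commit.
data_files = {
    "train": "robust/dureader-train.parquet",
    "validation": "robust/dureader-validation.parquet",
    "test": "robust/dureader-test.parquet",
}

# Load the files with the generic parquet builder instead of dureader.py.
robust = load_dataset("parquet", data_files=data_files)
print(robust)
```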
dummy/checklist/1.0.0/dummy_data.zip → checklist/dureader-test.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c544e2556abcda13b207bcdde9370ab8fa04a2e6f4bc7c69137afa0c99463e70
- size 19408914
+ oid sha256:80ad1280c2fd3772f8e66bef94c5ab028703329caae75a3ae57bee645539018a
+ size 23851364
dummy/robust/1.0.0/dummy_data.zip → checklist/dureader-train.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6a0c92db271696a132031fbc7136281a88fa13a3831935ed65551faa4d097ce7
- size 21712594
+ oid sha256:abbf7c9122549893e7fbfa3bbeb081a26bd976d057c45907ba74821c1f62d76d
+ size 1738753
checklist/dureader-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:82ed685107bcf3ad63710b8e07d3aae75bc1ad0c6ee6c133697771a93202109e
+ size 633843
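
Each of these three-line files is a Git LFS pointer: the actual parquet blob lives in LFS storage and is identified by its SHA-256 digest (`oid`) and byte size. A minimal sketch of verifying a downloaded file against its pointer, using the values from the hunk above (the local path is hypothetical):

```python
import hashlib
import os

def verify_lfs_pointer(local_path: str, oid: str, size: int) -> bool:
    """Check a downloaded file against the oid/size from its LFS pointer."""
    if os.path.getsize(local_path) != size:
        return False
    sha = hashlib.sha256()
    with open(local_path, "rb") as f:
        # Hash in 1 MiB chunks to avoid loading the whole file into memory.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha.update(chunk)
    return sha.hexdigest() == oid

# oid/size from the pointer above; the local path is hypothetical.
ok = verify_lfs_pointer(
    "checklist/dureader-validation.parquet",
    oid="82ed685107bcf3ad63710b8e07d3aae75bc1ad0c6ee6c133697771a93202109e",
    size=633843,
)
print(ok)
```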
dataset_infos.json DELETED
@@ -1 +0,0 @@
- {"robust": {"description": "", "citation": "", "homepage": "", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "answer_start": {"dtype": "int32", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "du_reader", "config_name": "robust", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 12509537, "num_examples": 14520, "dataset_name": "du_reader"}, "validation": {"name": "validation", "num_bytes": 1228154, "num_examples": 1417, "dataset_name": "du_reader"}, "test": {"name": "test", "num_bytes": 46912731, "num_examples": 50000, "dataset_name": "du_reader"}}, "download_checksums": {"https://dataset-bj.cdn.bcebos.com/qianyan/dureader_robust-data.tar.gz": {"num_bytes": 20518631, "checksum": "99bed9ced8995df1c89b9789f890c27a13b4650a56b4d973907cc28da8bd9f0f"}}, "download_size": 20518631, "post_processing_size": null, "dataset_size": 60650422, "size_in_bytes": 81169053}, "checklist": {"description": "", "citation": "", "homepage": "", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "is_impossible": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "answer_start": {"dtype": "int32", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "type": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "du_reader", "config_name": "checklist", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2459480, "num_examples": 2999, "dataset_name": "du_reader"}, "validation": {"name": "validation", "num_bytes": 879626, "num_examples": 1130, "dataset_name": "du_reader"}, "test": {"name": "test", "num_bytes": 32421655, "num_examples": 49992, "dataset_name": "du_reader"}}, "download_checksums": {"https://dataset-bj.cdn.bcebos.com/qianyan/dureader_checklist-data.tar.gz": {"num_bytes": 18319191, "checksum": "223c370696b9f1e8c89d84b7935c8da88128004788ad80fb9c3830130461e6f3"}}, "download_size": 18319191, "post_processing_size": null, "dataset_size": 35760761, "size_in_bytes": 54079952}}
dummy/checklist/1.0.0/dummy_data.zip.lock DELETED
File without changes
dummy/robust/1.0.0/dummy_data.zip.lock DELETED
File without changes
dureader.py DELETED
@@ -1,229 +0,0 @@
- import json
- import os
-
- import datasets
- from datasets import DatasetInfo, DownloadManager
-
-
- class DuReaderConfig(datasets.BuilderConfig):
-     """Config for DuReader dataset"""
-
-     def __init__(self, name, data_url, **kwargs):
-         super().__init__(name=name, version=datasets.Version("1.0.0", ""), **kwargs)
-         self.data_url = data_url
-
-
- class DuReader(datasets.GeneratorBasedBuilder):
-     """DuReader dataset builder for the robust and checklist subsets."""
-
-     BUILDER_CONFIGS = [
-         DuReaderConfig(
-             name="robust",
-             data_url="https://dataset-bj.cdn.bcebos.com/qianyan/dureader_robust-data.tar.gz",
-         ),
-         DuReaderConfig(
-             name="checklist",
-             data_url="https://dataset-bj.cdn.bcebos.com/qianyan/dureader_checklist-data.tar.gz",
-         ),
-         # DuReaderConfig(
-         #     name="yesno",
-         #     data_url="https://dataset-bj.cdn.bcebos.com/qianyan/dureader_yesno-data.tar.gz",
-         # ),
-     ]
-
-     def _info(self) -> DatasetInfo:
-         if self.config.name == "robust":
-             features = {
-                 "id": datasets.Value("string"),
-                 "context": datasets.Value("string"),
-                 "question": datasets.Value("string"),
-                 "answers": datasets.Sequence(
-                     {
-                         "text": datasets.Value("string"),
-                         "answer_start": datasets.Value("int32"),
-                     }
-                 ),
-             }
-             return datasets.DatasetInfo(
-                 description="",
-                 citation="",
-                 homepage="",
-                 features=datasets.Features(features),
-             )
-         if self.config.name == "checklist":
-             features = {
-                 "id": datasets.Value("string"),
-                 "title": datasets.Value("string"),
-                 "context": datasets.Value("string"),
-                 "question": datasets.Value("string"),
-                 "is_impossible": datasets.Value("string"),
-                 "answers": datasets.Sequence(
-                     {
-                         "text": datasets.Value("string"),
-                         "answer_start": datasets.Value("int32"),
-                     }
-                 ),
-                 "type": datasets.Value("string"),
-             }
-             return datasets.DatasetInfo(
-                 description="",
-                 citation="",
-                 homepage="",
-                 features=datasets.Features(features),
-             )
-
-         return None
-
-     def _split_generators(self, dl_manager: DownloadManager):
-         """Split generators"""
-
-         def _build(train_files, valid_files, test_files):
-             train_split = datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={
-                     "data_file": train_files,
-                     "split": "train",
-                 },
-             )
-             valid_split = datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 gen_kwargs={
-                     "data_file": valid_files,
-                     "split": "dev",
-                 },
-             )
-             test_split = datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={
-                     "data_file": test_files,
-                     "split": "test",
-                 },
-             )
-             return [train_split, valid_split, test_split]
-
-         if self.config.name == "robust":
-             dl_dir = dl_manager.download_and_extract(self.config.data_url)
-             splits = _build(
-                 train_files=os.path.join(dl_dir, "dureader_robust-data", "train.json"),
-                 valid_files=os.path.join(dl_dir, "dureader_robust-data", "dev.json"),
-                 test_files=os.path.join(dl_dir, "dureader_robust-data", "test.json"),
-             )
-             return splits
-
-         if self.config.name == "checklist":
-             dl_dir = dl_manager.download_and_extract(self.config.data_url)
-             splits = _build(
-                 train_files=os.path.join(dl_dir, "dureader_checklist-data", "train.json"),
-                 valid_files=os.path.join(dl_dir, "dureader_checklist-data", "dev.json"),
-                 test_files=os.path.join(dl_dir, "dureader_checklist-data", "test.json"),
-             )
-             return splits
-
-         return []
-
-     def _generate_examples(self, data_file, split):
-         if self.config.name == "robust":
-             if split == "train" or split == "dev":
-                 return self._generate_robust_examples(data_file)
-             return self._generate_robust_test_examples(data_file)
-
-         if self.config.name == "checklist":
-             if split == "train" or split == "dev":
-                 return self._generate_checklist_examples(data_file)
-             return self._generate_checklist_test_examples(data_file)
-
-     def _generate_robust_examples(self, data_file):
-         with open(data_file, mode="rt", encoding="utf-8") as fin:
-             data = json.load(fin)["data"]
-             for d in data:
-                 for p in d["paragraphs"]:
-                     context = p["context"]
-                     for qa in p["qas"]:
-                         starts = [x["answer_start"] for x in qa["answers"]]
-                         answers = [x["text"] for x in qa["answers"]]
-                         example = {
-                             "id": qa["id"],
-                             "context": context,
-                             "question": qa["question"],
-                             "answers": {
-                                 "text": answers,
-                                 "answer_start": starts,
-                             },
-                         }
-                         yield example["id"], example
-
-     def _generate_robust_test_examples(self, data_file):
-         with open(data_file, mode="rt", encoding="utf-8") as fin:
-             data = json.load(fin)["data"]
-             for d in data:
-                 for p in d["paragraphs"]:
-                     context = p["context"]
-                     for qa in p["qas"]:
-                         qid = qa["id"]
-                         example = {
-                             "id": qid,
-                             "context": context,
-                             "question": qa["question"],
-                             "answers": {
-                                 "text": [],
-                                 "answer_start": [],
-                             },
-                         }
-                         yield example["id"], example
-
-     def _generate_checklist_examples(self, data_file):
-         with open(data_file, mode="rt", encoding="utf-8") as fin:
-             data = json.load(fin)["data"]
-             exist_ids = set()
-             for d in data:
-                 for p in d["paragraphs"]:
-                     title = p["title"].strip()
-                     context = p["context"].strip()
-                     for qa in p["qas"]:
-                         qid = qa["id"]
-                         # skip duplicate keys
-                         if qid in exist_ids:
-                             continue
-                         exist_ids.add(qid)
-                         starts = [x["answer_start"] for x in qa["answers"]]
-                         answers = [x["text"].strip() for x in qa["answers"]]
-                         example = {
-                             "id": qid,
-                             "title": title,
-                             "context": context,
-                             "question": qa["question"].strip(),
-                             "is_impossible": qa["is_impossible"],
-                             "answers": {
-                                 "text": answers,
-                                 "answer_start": starts,
-                             },
-                         }
-                         yield example["id"], example
-
-     def _generate_checklist_test_examples(self, data_file):
-         with open(data_file, mode="rt", encoding="utf-8") as fin:
-             data = json.load(fin)["data"]
-             exist_ids = set()
-             for d in data:
-                 for p in d["paragraphs"]:
-                     title = p["title"]
-                     context = p["context"]
-                     for qa in p["qas"]:
-                         qid = qa["id"]
-                         if qid in exist_ids:
-                             continue
-                         exist_ids.add(qid)
-                         example = {
-                             "id": qid,
-                             "title": title,
-                             "context": context,
-                             "question": qa["question"],
-                             "is_impossible": None,
-                             "answers": {
-                                 "text": [],
-                                 "answer_start": [],
-                             },
-                         }
-                         yield example["id"], example
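
The deleted generators all walk the same SQuAD-style layout: a top-level `data` list, each entry holding `paragraphs`, and each paragraph pairing one `context` with its `qas`. A minimal sketch of that traversal over a hypothetical record, mirroring what the `_generate_*_examples` methods did:

```python
import json

# Hypothetical record in the layout dureader.py expected from train.json/dev.json.
raw = json.loads("""
{
  "data": [
    {
      "paragraphs": [
        {
          "context": "...",
          "qas": [
            {
              "id": "q-0001",
              "question": "...",
              "answers": [{"text": "...", "answer_start": 0}]
            }
          ]
        }
      ]
    }
  ]
}
""")

# The same three-level nesting the generators iterated over.
for d in raw["data"]:
    for p in d["paragraphs"]:
        for qa in p["qas"]:
            print(qa["id"], len(qa["answers"]))
```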
robust/dureader-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5575b575c26b31a876ebe91f1e88a5ba69c5e963146be71816fc33cb4fd79762
+ size 18648671
robust/dureader-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4a223ef5cd117c0eeb8dd4e857c245d95787c6660b1ee6c034b297af41c2c728
+ size 9041044
robust/dureader-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:febdee20eab2ff0979a0e889f0d212742499f4c444345aef6836d3f67a2c078d
+ size 888421