Datasets: Commit 68d3d76 · Parent: 26b4712

Add wnli data files

Files changed:
- README.md (+15, -7)
- dataset_infos.json (+15, -29)
- wnli/test-00000-of-00001.parquet (+3, -0)
- wnli/train-00000-of-00001.parquet (+3, -0)
- wnli/validation-00000-of-00001.parquet (+3, -0)
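For context, a minimal sketch of how the new data files are consumed downstream, assuming the canonical `nyu-mll/glue` repo id on the Hub (this page does not name the repo) and that the abbreviated commit hash resolves as a `revision`:

```python
from datasets import load_dataset

# Assumptions: repo id "nyu-mll/glue"; if the short hash is not accepted,
# pass the full 40-character commit hash as `revision`.
wnli = load_dataset("nyu-mll/glue", "wnli", revision="68d3d76")

print(wnli)              # DatasetDict with train / validation / test splits
print(wnli["train"][0])  # keys: sentence1, sentence2, label, idx
```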
README.md CHANGED

@@ -332,17 +332,17 @@ dataset_info:
   - name: idx
     dtype: int32
   splits:
-  - name: test
-    num_bytes: 37992
-    num_examples: 146
   - name: train
-    num_bytes:
+    num_bytes: 107109
     num_examples: 635
   - name: validation
-    num_bytes:
+    num_bytes: 12162
     num_examples: 71
-  download_size:
-  dataset_size:
+  - name: test
+    num_bytes: 37889
+    num_examples: 146
+  download_size: 63522
+  dataset_size: 157160
 configs:
 - config_name: cola
   data_files:

@@ -424,6 +424,14 @@ configs:
     path: stsb/validation-*
   - split: test
     path: stsb/test-*
+- config_name: wnli
+  data_files:
+  - split: train
+    path: wnli/train-*
+  - split: validation
+    path: wnli/validation-*
+  - split: test
+    path: wnli/test-*
 train-eval-index:
 - config: cola
   task: text-classification
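The new `configs` entry above maps each wnli split to its parquet shard. A rough sketch of reading those shards directly with the generic `parquet` builder follows; the resolve URLs assume the `nyu-mll/glue` repo id, while the `wnli/*-00000-of-00001.parquet` paths are the ones added in this commit:

```python
from datasets import load_dataset

# Assumed base URL; the shard file names come from this commit.
base = "https://huggingface.co/datasets/nyu-mll/glue/resolve/main/wnli"

wnli = load_dataset(
    "parquet",
    data_files={
        "train": f"{base}/train-00000-of-00001.parquet",
        "validation": f"{base}/validation-00000-of-00001.parquet",
        "test": f"{base}/test-00000-of-00001.parquet",
    },
)

# Row counts should match the split metadata above: 635 / 71 / 146.
print({name: split.num_rows for name, split in wnli.items()})
```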
dataset_infos.json CHANGED

@@ -593,39 +593,32 @@
     },
     "wnli": {
         "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
-        "citation": "@inproceedings{levesque2012winograd,\n title={The winograd schema challenge},\n author={Levesque, Hector and Davis, Ernest and Morgenstern, Leora},\n booktitle={Thirteenth International Conference on the Principles of Knowledge Representation and Reasoning},\n year={2012}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n
+        "citation": "@inproceedings{levesque2012winograd,\n title={The winograd schema challenge},\n author={Levesque, Hector and Davis, Ernest and Morgenstern, Leora},\n booktitle={Thirteenth International Conference on the Principles of Knowledge Representation and Reasoning},\n year={2012}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n",
         "homepage": "https://cs.nyu.edu/faculty/davise/papers/WinogradSchemas/WS.html",
         "license": "",
         "features": {
             "sentence1": {
                 "dtype": "string",
-                "id": null,
                 "_type": "Value"
             },
             "sentence2": {
                 "dtype": "string",
-                "id": null,
                 "_type": "Value"
             },
             "label": {
-                "num_classes": 2,
                 "names": [
                     "not_entailment",
                     "entailment"
                 ],
-                "names_file": null,
-                "id": null,
                 "_type": "ClassLabel"
             },
             "idx": {
                 "dtype": "int32",
-                "id": null,
                 "_type": "Value"
             }
         },
-        "post_processed": null,
-        "supervised_keys": null,
         "builder_name": "glue",
+        "dataset_name": "glue",
         "config_name": "wnli",
         "version": {
             "version_str": "1.0.0",

@@ -635,35 +628,28 @@
             "patch": 0
         },
         "splits": {
-            "test": {
-                "name": "test",
-                "num_bytes": 37992,
-                "num_examples": 146,
-                "dataset_name": "glue"
-            },
             "train": {
                 "name": "train",
-                "num_bytes":
+                "num_bytes": 107109,
                 "num_examples": 635,
-                "dataset_name":
+                "dataset_name": null
             },
             "validation": {
                 "name": "validation",
-                "num_bytes":
+                "num_bytes": 12162,
                 "num_examples": 71,
-                "dataset_name":
-            }
-
-
-
-                "
-                "
+                "dataset_name": null
+            },
+            "test": {
+                "name": "test",
+                "num_bytes": 37889,
+                "num_examples": 146,
+                "dataset_name": null
             }
         },
-        "download_size":
-        "
-        "
-        "size_in_bytes": 186723
+        "download_size": 63522,
+        "dataset_size": 157160,
+        "size_in_bytes": 220682
     },
     "ax": {
         "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
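The updated bookkeeping is internally consistent: `dataset_size` is the sum of the per-split `num_bytes`, `size_in_bytes` adds `download_size` on top of it, and `download_size` matches the combined size of the three parquet files added below. A quick check:

```python
# Values copied from the updated metadata and the LFS pointers below.
split_num_bytes = {"train": 107109, "validation": 12162, "test": 37889}
parquet_sizes = {"train": 38835, "validation": 11067, "test": 13620}
download_size = 63522

dataset_size = sum(split_num_bytes.values())
assert dataset_size == 157160                        # "dataset_size"
assert dataset_size + download_size == 220682        # "size_in_bytes"
assert sum(parquet_sizes.values()) == download_size  # parquet shard bytes
```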
wnli/test-00000-of-00001.parquet ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:766d3754c46a80f3275cb81a32ee6b7b49176fa8c1ef85ea92a4a3676510b902
+size 13620
wnli/train-00000-of-00001.parquet ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:40f4c0c60db68addeda8e9cbe25e6344cd99d5bbb80125535994a9a3141ee0a9
+size 38835
wnli/validation-00000-of-00001.parquet ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:880037e45e03df868d5799ca21dc03f3a6378f0adf3c01c7bfc46b94fa61f1cb
+size 11067
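Each parquet file is stored via Git LFS, so a checkout without `git lfs pull` contains only the three-line pointer shown above (spec version, sha256 oid, byte size). A small sketch of reading such a pointer, assuming a local clone of the repository:

```python
from pathlib import Path

def read_lfs_pointer(path: str) -> dict:
    """Parse a Git LFS pointer file into its key/value fields."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        if line.strip():
            key, value = line.split(" ", 1)
            fields[key] = value
    fields["size"] = int(fields["size"])  # declared size of the real file
    return fields

# Example (hypothetical local path):
# read_lfs_pointer("wnli/train-00000-of-00001.parquet")
# -> {"version": "https://git-lfs.github.com/spec/v1",
#     "oid": "sha256:40f4c0c60db68addeda8e9cbe25e6344cd99d5bbb80125535994a9a3141ee0a9",
#     "size": 38835}
```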