upload the next config
README.md
CHANGED
@@ -73,6 +73,32 @@ dataset_info:
     num_examples: 131
   download_size: 139737042
   dataset_size: 148730932.64
+- config_name: explanation_from_pixels
+  features:
+  - name: image
+    dtype: image
+  - name: contest_number
+    dtype: int32
+  - name: caption_choices
+    dtype: string
+  - name: label
+    dtype: string
+  - name: n_tokens_label
+    dtype: int32
+  - name: instance_id
+    dtype: string
+  splits:
+  - name: train
+    num_bytes: 23039316.0
+    num_examples: 390
+  - name: validation
+    num_bytes: 7956182.0
+    num_examples: 130
+  - name: test
+    num_bytes: 6778892.0
+    num_examples: 131
+  download_size: 37552582
+  dataset_size: 37774390.0
 - config_name: matching
   features:
   - name: image
@@ -214,6 +240,14 @@ configs:
     path: explanation/validation-*
   - split: test
     path: explanation/test-*
+- config_name: explanation_from_pixels
+  data_files:
+  - split: train
+    path: explanation_from_pixels/train-*
+  - split: validation
+    path: explanation_from_pixels/validation-*
+  - split: test
+    path: explanation_from_pixels/test-*
 - config_name: matching
   data_files:
   - split: train
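The new configs: entry above maps the explanation_from_pixels config name to the parquet shards committed below, so the config becomes loadable by name. A minimal usage sketch with the datasets library follows; the Hub repo id is an assumption (it is not stated in this diff) and should be replaced with this repository's actual id.

# Minimal sketch of loading the config added by this commit.
from datasets import load_dataset

# NOTE: repo id is an assumption; substitute this repository's actual id.
ds = load_dataset("jmhessel/newyorker_caption_contest", "explanation_from_pixels")

# Split sizes should match the YAML metadata above: 390 / 130 / 131 examples.
print({split: len(ds[split]) for split in ds})
print(ds["train"][0]["contest_number"], ds["train"][0]["caption_choices"])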
dataset_infos.json
CHANGED
@@ -431,44 +431,34 @@
     "license": "",
     "features": {
       "image": {
-        "decode": true,
-        "id": null,
         "_type": "Image"
       },
       "contest_number": {
         "dtype": "int32",
-        "id": null,
         "_type": "Value"
       },
       "caption_choices": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "label": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "n_tokens_label": {
         "dtype": "int32",
-        "id": null,
         "_type": "Value"
       },
       "instance_id": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       }
     },
-    "post_processed": null,
-    "supervised_keys": null,
-    "task_templates": null,
     "builder_name": "newyorker_caption_contest",
+    "dataset_name": "newyorker_caption_contest",
     "config_name": "explanation_from_pixels",
     "version": {
       "version_str": "1.0.0",
-      "description": null,
       "major": 1,
       "minor": 0,
       "patch": 0
@@ -476,37 +466,26 @@
     "splits": {
       "train": {
         "name": "train",
-        "num_bytes": …
+        "num_bytes": 23039316.0,
         "num_examples": 390,
-        "dataset_name": …
+        "dataset_name": null
       },
       "validation": {
         "name": "validation",
-        "num_bytes": …
+        "num_bytes": 7956182.0,
         "num_examples": 130,
-        "dataset_name": …
+        "dataset_name": null
       },
       "test": {
         "name": "test",
-        "num_bytes": …
+        "num_bytes": 6778892.0,
         "num_examples": 131,
-        "dataset_name": …
-      }
-    },
-    "download_checksums": {
-      "https://storage.googleapis.com/ai2-jack-public/caption_contest_data_public/huggingface_hub/v1.0/explanation_from_pixels.zip": {
-        "num_bytes": 122918,
-        "checksum": "1c9c3a65bd255c7bccfbf394b266c781bb3762b4a318c5cff8f3da0bfeea75a7"
-      },
-      "https://storage.googleapis.com/ai2-jack-public/caption_contest_data_public/all_contest_images.zip": {
-        "num_bytes": 39523209,
-        "checksum": "741527b4ef7198d16cee42ae74eacbe239bcc7377f8b86811c27d627fdc77748"
+        "dataset_name": null
       }
     },
-    "download_size": …
-    "…
-    "…
-    "size_in_bytes": 40036689
+    "download_size": 37552582,
+    "dataset_size": 37774390.0,
+    "size_in_bytes": 75326972.0
   },
   "matching_1": {
     "description": "There are 3 caption contest tasks, described in the paper. In the Matching multiple choice task, models must recognize a caption written about a cartoon (vs. options that were not). In the Quality Ranking task, models must evaluate the quality\nof that caption by scoring it more highly than a lower quality option from the same contest. In the Explanation Generation task, models must explain why the joke is funny.\nYou are given a cartoon and 5 captions. Only one of the captions was truly written about the cartoon. You must select it.\n",
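The edit above fills in the computed shard sizes for explanation_from_pixels and drops fields the current datasets metadata format no longer records (per-feature "id", "download_checksums", and similar). A small sketch of how this metadata surfaces through the library, again assuming the same unconfirmed repo id:

# Sketch: inspecting the metadata recorded in dataset_infos.json without
# downloading any data. Repo id is an assumption, as above.
from datasets import load_dataset_builder

builder = load_dataset_builder("jmhessel/newyorker_caption_contest", "explanation_from_pixels")
info = builder.info

print(info.download_size)  # 37552582 in this commit
print(info.dataset_size)   # 37774390.0 in this commit
for name, split in info.splits.items():
    print(name, split.num_examples, split.num_bytes)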
explanation_from_pixels/test-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dd9fce4d99361e32141693c31c69bbdf24c8586a5bf20032edf5e6ccbc5261b5
+size 6737755
explanation_from_pixels/train-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:105a2658831acb69a14ef5d6a12f2d6287f83782f8359fef874dc6509a428440
+size 22913730
explanation_from_pixels/validation-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6911a6a888896cee33da996659f5fb108af079ca77927e2d79120c5269fb8587
+size 7901097
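The three files above are Git LFS pointer files; the parquet shards themselves live in LFS storage and are fetched on checkout or through the Hub API. A minimal sketch for reading one shard directly, assuming the same unconfirmed repo id and that huggingface_hub and pandas are installed:

# Sketch: download one of the new parquet shards and read it with pandas.
from huggingface_hub import hf_hub_download
import pandas as pd

path = hf_hub_download(
    repo_id="jmhessel/newyorker_caption_contest",  # assumption, not stated in this diff
    filename="explanation_from_pixels/train-00000-of-00001.parquet",
    repo_type="dataset",
)
df = pd.read_parquet(path)
print(len(df))               # expected 390 rows per the README metadata
print(df.columns.tolist())   # image, contest_number, caption_choices, label, ...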