{
"overview": {
"where": {
"has-leaderboard": "no",
"leaderboard-url": "N/A",
"leaderboard-description": "N/A",
"website": "[Website](http://abductivecommonsense.xyz/)",
"data-url": "[Google Storage](https://storage.googleapis.com/ai2-mosaic/public/abductive-commonsense-reasoning-iclr2020/anlg.zip)",
"paper-url": "[OpenReview](https://openreview.net/pdf?id=Byg1v1HKDB)",
"paper-bibtext": "```\n@inproceedings{\nBhagavatula2020Abductive,\ntitle={Abductive Commonsense Reasoning},\nauthor={Chandra Bhagavatula and Ronan Le Bras and Chaitanya Malaviya and Keisuke Sakaguchi and Ari Holtzman and Hannah Rashkin and Doug Downey and Wen-tau Yih and Yejin Choi},\nbooktitle={International Conference on Learning Representations},\nyear={2020},\nurl={https://openreview.net/forum?id=Byg1v1HKDB}\n}\n```",
"contact-name": "Chandra Bhagavatulla",
"contact-email": "[email protected]"
},
"languages": {
"is-multilingual": "no",
"license": "apache-2.0: Apache License 2.0",
"task-other": "N/A",
"language-names": [
"English"
],
"language-speakers": "Crowdworkers on the Amazon Mechanical Turk platform based in the U.S, Canada, U.K and Australia. ",
"intended-use": "To study the viability of language-based abductive reasoning. Training and evaluating models to generate a plausible hypothesis to explain two given observations.",
"license-other": "N/A",
"task": "Reasoning"
},
"credit": {
"organization-type": [
"industry"
],
"organization-names": "Allen Institute for AI",
"creators": "Chandra Bhagavatula (AI2), Ronan Le Bras (AI2), Chaitanya Malaviya (AI2), Keisuke Sakaguchi (AI2), Ari Holtzman (AI2, UW), Hannah Rashkin (AI2, UW), Doug Downey (AI2), Wen-tau Yih (AI2), Yejin Choi (AI2, UW)",
"funding": "Allen Institute for AI",
"gem-added-by": "Chandra Bhagavatula (AI2), Ronan LeBras (AI2), Aman Madaan (CMU), Nico Daheim (RWTH Aachen University)"
},
"structure": {
"data-fields": "- `observation_1`: A string describing an observation / event.\n- `observation_2`: A string describing an observation / event.\n- `label`: A string that plausibly explains why observation_1 and observation_2 might have happened.",
"structure-labels": "Explanations were authored by crowdworkers on the Amazon Mechanical Turk platform using a custom template designed by the creators of the dataset.",
"structure-example": "```\n{\n'gem_id': 'GEM-ART-validation-0',\n'observation_1': 'Stephen was at a party.',\n'observation_2': 'He checked it but it was completely broken.',\n'label': 'Stephen knocked over a vase while drunk.'\n}\n```",
"structure-splits": "- `train`: Consists of training instances. \n- `dev`: Consists of dev instances.\n- `test`: Consists of test instances.\n"
},
"what": {
"dataset": "Abductive reasoning is inference to the most plausible explanation. For example, if Jenny finds her house in a mess when she returns from work, and remembers that she left a window open, she can hypothesize that a thief broke into her house and caused the mess, as the most plausible explanation.\nThis data loader focuses on abductive NLG: a conditional English generation task for explaining given observations in natural language. "
}
},
"gem": {
"rationale": {
"contribution": "Abductive reasoning is a crucial capability of humans and ART is the first dataset curated to study language-based abductive reasoning.",
"sole-task-dataset": "no",
"distinction-description": "N/A",
"model-ability": "Whether models can reason abductively about a given pair of observations."
},
"curation": {
"has-additional-curation": "no",
"modification-types": [],
"modification-description": "N/A",
"has-additional-splits": "no",
"additional-splits-description": "N/A",
"additional-splits-capacicites": "N/A"
},
"starting": {
"research-pointers": "- [Paper](https://arxiv.org/abs/1908.05739)\n- [Code](https://github.com/allenai/abductive-commonsense-reasoning)"
}
},
"results": {
"results": {
"model-abilities": "Whether models can reason abductively about a given pair of observations.",
"metrics": [
"BLEU",
"BERT-Score",
"ROUGE"
],
"other-metrics-definitions": "N/A",
"has-previous-results": "no",
"current-evaluation": "N/A",
"previous-results": "N/A"
}
},
"curation": {
"original": {
"is-aggregated": "no",
"aggregated-sources": "N/A"
},
"language": {
"obtained": [
"Crowdsourced"
],
"found": [],
"crowdsourced": [
"Amazon Mechanical Turk"
],
"created": "N/A",
"machine-generated": "N/A",
"producers-description": "Language producers were English speakers in U.S., Canada, U.K and Australia.",
"topics": "No",
"validated": "validated by crowdworker",
"pre-processed": "N/A",
"is-filtered": "algorithmically",
"filtered-criteria": "Adversarial filtering algorithm as described in the [paper](https://arxiv.org/abs/1908.05739)"
},
"annotations": {
"origin": "automatically created",
"rater-number": "N/A",
"rater-qualifications": "N/A",
"rater-training-num": "N/A",
"rater-test-num": "N/A",
"rater-annotation-service-bool": "no",
"rater-annotation-service": [],
"values": "Each observation is associated with a list of COMET (https://arxiv.org/abs/1906.05317) inferences.",
"quality-control": "none",
"quality-control-details": "N/A"
},
"consent": {
"has-consent": "no",
"consent-policy": "N/A",
"consent-other": "N/A"
},
"pii": {
"has-pii": "no PII",
"no-pii-justification": "The dataset contains day-to-day events. It does not contain names, emails, addresses etc. ",
"pii-categories": [],
"is-pii-identified": "N/A",
"pii-identified-method": "N/A",
"is-pii-replaced": "N/A",
"pii-replaced-method": "N/A"
},
"maintenance": {
"has-maintenance": "no",
"description": "N/A",
"contact": "N/A",
"contestation-mechanism": "N/A",
"contestation-link": "N/A",
"contestation-description": "N/A"
}
},
"context": {
"previous": {
"is-deployed": "no",
"described-risks": "N/A",
"changes-from-observation": "N/A"
},
"underserved": {
"helps-underserved": "no",
"underserved-description": "N/A"
},
"biases": {
"has-biases": "no",
"bias-analyses": "N/A"
}
},
"considerations": {
"pii": {
"risks-description": "None"
},
"licenses": {
"dataset-restrictions": [
"public domain"
],
"dataset-restrictions-other": "N/A",
"data-copyright": [
"public domain"
],
"data-copyright-other": "N/A"
},
"limitations": {}
}
}