holylovenia committed
Commit 65ec6e8 · 1 Parent(s): 3f44203

Upload id_am2ico.py with huggingface_hub

Files changed (1):
1. id_am2ico.py  +184 -0
id_am2ico.py ADDED
@@ -0,0 +1,184 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+ import pandas as pd
+
+ from nusacrowd.utils import schemas
+ from nusacrowd.utils.configs import NusantaraConfig
+ from nusacrowd.utils.constants import Tasks
+
+ _CITATION = """\
+ @inproceedings{liu-etal-2021-am2ico,
+     title = "{AM}2i{C}o: Evaluating Word Meaning in Context across Low-Resource Languages with Adversarial Examples",
+     author = "Liu, Qianchu and
+       Ponti, Edoardo Maria and
+       McCarthy, Diana and
+       Vuli{\'c}, Ivan and
+       Korhonen, Anna",
+     booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing",
+     month = nov,
+     year = "2021",
+     address = "Online and Punta Cana, Dominican Republic",
+     publisher = "Association for Computational Linguistics",
+     url = "https://aclanthology.org/2021.emnlp-main.571",
+     doi = "10.18653/v1/2021.emnlp-main.571",
+     pages = "7151--7162",
+     abstract = "Capturing word meaning in context and distinguishing between correspondences and variations across languages is key to building successful multilingual and cross-lingual text representation models. However, existing multilingual evaluation datasets that evaluate lexical semantics {``}in-context{''} have various limitations. In particular, 1) their language coverage is restricted to high-resource languages and skewed in favor of only a few language families and areas, 2) a design that makes the task solvable via superficial cues, which results in artificially inflated (and sometimes super-human) performances of pretrained encoders, and 3) no support for cross-lingual evaluation. In order to address these gaps, we present AM2iCo (Adversarial and Multilingual Meaning in Context), a wide-coverage cross-lingual and multilingual evaluation set; it aims to faithfully assess the ability of state-of-the-art (SotA) representation models to understand the identity of word meaning in cross-lingual contexts for 14 language pairs. We conduct a series of experiments in a wide range of setups and demonstrate the challenging nature of AM2iCo. The results reveal that current SotA pretrained encoders substantially lag behind human performance, and the largest gaps are observed for low-resource languages and languages dissimilar to English.",
+ }
+ """
+
+ _LANGUAGES = ["ind", "eng"]
+ _LOCAL = False
+
+ _DATASETNAME = "id_am2ico"
+
+ _DESCRIPTION = """\
+ AM2iCo (Adversarial and Multilingual Meaning in Context) is a wide-coverage and carefully designed
+ cross-lingual and multilingual evaluation set; it aims to assess the ability of state-of-the-art
+ representation models to reason over cross-lingual lexical-level concept alignment in context
+ for 14 language pairs.
+
+ This dataset contains only the Indonesian-English language pair.
+ """
+
+ _HOMEPAGE = "https://github.com/cambridgeltl/AM2iCo"
+
+ _LICENSE = "CC-BY 4.0"
+
+ _URLS = {
+     _DATASETNAME: {
+         "train": "https://raw.githubusercontent.com/cambridgeltl/AM2iCo/master/data/id/train.tsv",
+         "dev": "https://raw.githubusercontent.com/cambridgeltl/AM2iCo/master/data/id/dev.tsv",
+         "test": "https://raw.githubusercontent.com/cambridgeltl/AM2iCo/master/data/id/test.tsv",
+     }
+ }
+
+ _SUPPORTED_TASKS = [Tasks.CONCEPT_ALIGNMENT_CLASSIFICATION]
+
+ _SOURCE_VERSION = "1.0.0"
+ _NUSANTARA_VERSION = "1.0.0"
+
+
+ class IdAm2ico(datasets.GeneratorBasedBuilder):
+     """AM2iCo (Adversarial and Multilingual Meaning in Context),
+     Indonesian-English language pair."""
+
+     label_classes = ["T", "F"]
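+     # Per the AM2iCo task definition, "T" marks a pair whose target words
+     # denote the same concept in context and "F" a pair whose target words do not.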
+
+     BUILDER_CONFIGS = (
+         NusantaraConfig(
+             name="id_am2ico_source",
+             version=_SOURCE_VERSION,
+             description="Indonesian AM2iCo source schema",
+             schema="source",
+             subset_id="id_am2ico",
+         ),
+         NusantaraConfig(
+             name="id_am2ico_nusantara_pairs",
+             version=_NUSANTARA_VERSION,
+             description="Indonesian AM2iCo Nusantara schema",
+             schema="nusantara_pairs",
+             subset_id="id_am2ico",
+         ),
+     )
+
+     DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "no": datasets.Value("string"),
+                     "context1": datasets.Value("string"),
+                     "context2": datasets.Value("string"),
+                     "label": datasets.Value("string"),
+                 }
+             )
+
+         elif self.config.schema == "nusantara_pairs":
+             features = schemas.pairs_features(self.label_classes)
+
+         else:
+             raise NotImplementedError(f"Schema '{self.config.schema}' is not defined.")
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(
+         self, dl_manager: datasets.DownloadManager
+     ) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+
+         urls = _URLS[self.config.subset_id]
+
+         data_paths = dl_manager.download(urls)
+
+         ret = [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"filepath": data_paths["train"]},
+             )
+         ]
+
+         if len(data_paths) > 1:
+             ret.extend(
+                 [
+                     datasets.SplitGenerator(
+                         name=datasets.Split.TEST,
+                         gen_kwargs={"filepath": data_paths["test"]},
+                     ),
+                     datasets.SplitGenerator(
+                         name=datasets.Split.VALIDATION,
+                         gen_kwargs={"filepath": data_paths["dev"]},
+                     ),
+                 ]
+             )
+
+         return ret
+
+     def _generate_examples(self, filepath: Path) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+
+         # Read the tab-separated split file.
+         df = pd.read_csv(filepath, sep="\t")
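+         # Each split is a headered TSV providing the columns used below:
+         # context1, context2 and label.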
+
+         if self.config.schema == "source":
+             for row in df.itertuples():
+                 ex = {
+                     "no": str(row.Index),
+                     "context1": str(row.context1).rstrip(),
+                     "context2": str(row.context2).rstrip(),
+                     "label": str(row.label).rstrip(),
+                 }
+                 yield row.Index, ex
+
+         elif self.config.schema == "nusantara_pairs":
+             for row in df.itertuples():
+                 ex = {
+                     "id": str(row.Index),
+                     "text_1": str(row.context1).rstrip(),
+                     "text_2": str(row.context2).rstrip(),
+                     "label": str(row.label).rstrip(),
+                 }
+                 yield row.Index, ex
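
For reference, a minimal usage sketch. It assumes the nusacrowd utilities imported above are installed and that the script is saved locally as id_am2ico.py; the local path and the printed split/index are illustrative, not part of the committed file:

import datasets

# Source schema: examples with fields "no", "context1", "context2", "label".
source = datasets.load_dataset("id_am2ico.py", name="id_am2ico_source")
print(source["train"][0])

# Nusantara pairs schema: examples with fields "id", "text_1", "text_2", "label".
pairs = datasets.load_dataset("id_am2ico.py", name="id_am2ico_nusantara_pairs")
print(pairs["validation"][0])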