Datasets: cjvt/
Modalities: Text
Languages: Slovenian
Libraries: Datasets
License: CC BY 4.0
File size: 5,964 bytes
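
A minimal sketch of loading this dataset with the Datasets library. The Hub repository id is assumed to be cjvt/si_nli (the name is truncated on this page, so verify it against the repository itself):

    from datasets import load_dataset

    # "public" is the default config; it downloads the data from CLARIN.SI
    sinli = load_dataset("cjvt/si_nli", "public")  # repo id assumed, see note above
    print(sinli["train"][0])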
"""SI-NLI is a Slovene natural language inference dataset."""


import csv
import logging
import os

import datasets


_CITATION = """\
@misc{sinli,
    title = {Slovene Natural Language Inference Dataset {SI}-{NLI}},
    author = {Klemen, Matej and {\v Z}agar, Ale{\v s} and {\v C}ibej, Jaka and Robnik-{\v S}ikonja, Marko},
    url = {http://hdl.handle.net/11356/1707},
    note = {Slovenian language resource repository {CLARIN}.{SI}},
    year = {2022}
}
"""

_DESCRIPTION = """\
SI-NLI (Slovene Natural Language Inference Dataset) contains 5,937 human-created Slovene sentence pairs 
(premise and hypothesis) that are manually labeled with the labels "entailment", "contradiction", and "neutral". 
The dataset was created using sentences that appear in the Slovenian reference corpus ccKres. 
Annotators were tasked to modify the hypothesis in a candidate pair in a way that reflects one of the labels. 
The dataset is balanced since the annotators created three modifications (entailment, contradiction, neutral) 
for each candidate sentence pair.
"""

_HOMEPAGE = "http://hdl.handle.net/11356/1707"

_LICENSE = "Creative Commons - Attribution 4.0 International (CC BY 4.0)"

_URLS = {
    "si-nli": "https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1707/SI-NLI.zip"
}

# Placeholder value for fields that are absent in a given split (e.g. labels in the public test set)
NA_STR = ""
# The raw files encode annotations as single letters; map them to full label names
UNIFIED_LABELS = {"E": "entailment", "N": "neutral", "C": "contradiction"}
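# For example, UNIFIED_LABELS.get("E", "E") returns "entailment", while any value
# outside the map (including the empty NA_STR) passes through unchanged via the default.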


class SINLI(datasets.GeneratorBasedBuilder):
    """SI-NLI is a Slovene natural language inference dataset."""

    VERSION = datasets.Version("1.0.1")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="public", version=VERSION,
                               description="Load the publicly available dataset (without test labels)."),
        datasets.BuilderConfig(name="private", version=VERSION,
                               description="Load the privately available dataset by manuallly providing the path to the data."),
    ]

    DEFAULT_CONFIG_NAME = "public"

    def _info(self):
        features = datasets.Features({
            "pair_id": datasets.Value("string"),
            "premise": datasets.Value("string"),
            "hypothesis": datasets.Value("string"),
            "annotation1": datasets.Value("string"),
            "annotator1_id": datasets.Value("string"),
            "annotation2": datasets.Value("string"),
            "annotator2_id": datasets.Value("string"),
            "annotation3": datasets.Value("string"),
            "annotator3_id": datasets.Value("string"),
            "annotation_final": datasets.Value("string"),
            "label": datasets.Value("string")
        })

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION
        )

    def _split_generators(self, dl_manager):
        split_prefix = ""
        if self.config.name == "public":
            urls = _URLS["si-nli"]
            data_dir = dl_manager.download_and_extract(urls)
        else:
            # `data_dir` must contain a SI-NLI folder with train.tsv, dev.tsv and test.tsv inside.
            # The user provides it via `load_dataset(..., data_dir=...)`.
            if dl_manager.manual_dir is None or not os.path.exists(dl_manager.manual_dir):
                logging.warning("data_dir does not point to a valid directory")

            data_dir = dl_manager.manual_dir

            if data_dir is None:
                # No directory was given: fall back to empty "dummy" splits so that
                # the os.path.join calls below still work and _generate_examples yields nothing.
                data_dir = ""
                split_prefix = "dummy_"

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "file_path": os.path.join(data_dir, "SI-NLI", "train.tsv"),
                    "split": f"{split_prefix}train"
                }
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "file_path": os.path.join(data_dir, "SI-NLI", "dev.tsv"),
                    "split": f"{split_prefix}dev"
                }
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "file_path": os.path.join(data_dir, "SI-NLI", "test.tsv"),
                    "split": f"{split_prefix}test"
                }
            )
        ]

    def _generate_examples(self, file_path, split):
        # Dummy splits stand in for missing private data: yield no examples at all.
        if split.startswith("dummy"):
            return

        with open(file_path, encoding="utf-8") as f:
            reader = csv.reader(f, delimiter="\t", quotechar='"')
            header = next(reader)

            for i, row in enumerate(reader):
                pair_id = annotation1 = annotator1_id = annotation2 = annotator2_id = annotation3 = annotator3_id = \
                    annotation_final = label = NA_STR

                # Public test set only contains the premise and the hypothesis
                if len(row) == 2:
                    premise, hypothesis = row
                # Public train/validation set and private test set contain additional annotation data;
                # the underscores skip columns that this loader does not expose as features.
                else:
                    pair_id, premise, hypothesis, annotation1, _, annotator1_id, annotation2, _, annotator2_id, \
                    annotation3, _, annotator3_id, annotation_final, label = row

                yield i, {
                    "pair_id": pair_id,
                    "premise": premise, "hypothesis": hypothesis,
                    "annotation1": UNIFIED_LABELS.get(annotation1, annotation1), "annotator1_id": annotator1_id,
                    "annotation2": UNIFIED_LABELS.get(annotation2, annotation2), "annotator2_id": annotator2_id,
                    "annotation3": UNIFIED_LABELS.get(annotation3, annotation3), "annotator3_id": annotator3_id,
                    "annotation_final": UNIFIED_LABELS.get(annotation_final, annotation_final),
                    "label": label
                }
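

if __name__ == "__main__":
    # Minimal usage sketch, assuming this script is saved locally as `si_nli.py`.
    # The "public" config downloads SI-NLI from CLARIN.SI automatically; the
    # "private" config instead reads labeled TSVs from a user-supplied folder.
    from datasets import load_dataset

    sinli = load_dataset("si_nli.py", "public")
    print(sinli)                 # split names and sizes
    print(sinli["train"][0])     # one premise/hypothesis pair with its annotations

    # Private variant (hypothetical path; the folder must contain
    # SI-NLI/train.tsv, SI-NLI/dev.tsv and SI-NLI/test.tsv):
    # sinli = load_dataset("si_nli.py", "private", data_dir="/path/to/data")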