"""This module defines a HuggingFace dataset builder for the QT30 dataset used in the DialAM-2024
shared task. See http://dialam.arg.tech/ for more information about the DialAM-2024 shared task.

Unfortunately, there are some nodesets that are not suitable for conversion to documents. These nodesets are
excluded from the dataset. The following nodesets are excluded:
- excluded by the organizers (23): 24255, 24807, 24808, 24809, 24903, 24905, 24992, 25045, 25441, 25442,
    25443, 25444, 25445, 25452, 25461, 25462, 25463, 25465, 25468, 25472, 25473, 25474, 25475
- excluded because of warning (6): "Could not align I-node (dummy-L-node was selected)": 21083, 18888,
    23701, 18484, 17938, 19319
- excluded because of error "could not determine direction of RA-nodes ... because there is no TA
    relation between any combination of anchoring I-nodes!" (26): 25411, 25510, 25516, 25901, 25902,
    25904, 25906, 25907, 25936, 25937, 25938, 25940, 26066, 26067, 26068, 26087, 17964, 18459, 19091,
    19146, 19149, 19757, 19761, 19908, 21449, 23749
- excluded because of error "S-node arguments are not unique!" (7): 25552, 19165, 22969, 21342, 25400,
    21681, 23710
- excluded because of error "direction of RA-node 587841 is ambiguous!" (16): 19059, 19217, 19878, 20479,
    20507, 20510, 20766, 20844, 20888, 20992, 21401, 21477, 21588, 23114, 23766, 23891
- excluded because of error "I-node texts are not unique!" (1): 19911
- still problematic (19): 19897, 18321, 18877, 18874, 19174, 23552, 23799, 23517, 20729, 25691, 21023,
    23144, 23120, 23560, 23892, 23959, 19173, 19918, 25511
"""
import glob
import json
import logging
import os

import datasets
from datasets import Features, GeneratorBasedBuilder

logger = logging.getLogger(__name__)

DATA_URL = "http://dialam.arg.tech/res/files/dataset.zip"
SUBDIR = "dataset"
NODESET_BLACKLIST = [
    "24255",
    "24807",
    "24808",
    "24809",
    "24903",
    "24905",
    "24992",
    "25045",
    "25441",
    "25442",
    "25443",
    "25444",
    "25445",
    "25452",
    "25461",
    "25462",
    "25463",
    "25465",
    "25468",
    "25472",
    "25473",
    "25474",
    "25475",
    "21083",
    "18888",
    "23701",
    "18484",
    "17938",
    "19319",
    "25411",
    "25510",
    "25516",
    "25901",
    "25902",
    "25904",
    "25906",
    "25907",
    "25936",
    "25937",
    "25938",
    "25940",
    "26066",
    "26067",
    "26068",
    "26087",
    "17964",
    "18459",
    "19091",
    "19146",
    "19149",
    "19757",
    "19761",
    "19908",
    "21449",
    "23749",
    "25552",
    "19165",
    "22969",
    "21342",
    "25400",
    "21681",
    "23710",
    "19059",
    "19217",
    "19878",
    "20479",
    "20507",
    "20510",
    "20766",
    "20844",
    "20888",
    "20992",
    "21401",
    "21477",
    "21588",
    "23114",
    "23766",
    "23891",
    "19911",
    "19897",
    "18321",
    "18877",
    "18874",
    "19174",
    "23552",
    "23799",
    "23517",
    "20729",
    "25691",
    "21023",
    "23144",
    "23120",
    "23560",
    "23892",
    "23959",
    "19173",
    "19918",
    "25511",
]


def is_blacklisted(nodeset_filename: str) -> bool:
    """Check whether a nodeset file is excluded from the dataset (see module docstring)."""
    nodeset_id = get_node_id_from_filename(nodeset_filename)
    return nodeset_id in NODESET_BLACKLIST


def get_node_id_from_filename(filename: str) -> str:
    """Get the ID of a nodeset from a filename."""

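    # e.g. ".../nodeset12345.json" -> "12345"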
    return filename.split("nodeset")[1].split(".json")[0]


class DialAM2024(GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="dialam_2024",
            version=datasets.Version("1.0.0"),
            description="DialAM-2024 dataset",
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            features=Features(
                {
                    "id": datasets.Value("string"),
                    "nodes": datasets.Sequence(
                        {
                            "nodeID": datasets.Value("string"),
                            "text": datasets.Value("string"),
                            "type": datasets.Value("string"),
                            "timestamp": datasets.Value("string"),
                            # Since optional fields are not supported in HuggingFace datasets, we exclude the
                            # scheme and schemeID fields from the dataset. Note that the scheme field has the
                            # same value as the text field where it is present.
                            # "scheme": datasets.Value("string"),
                            # "schemeID": datasets.Value("string"),
                        }
                    ),
                    "edges": datasets.Sequence(
                        {
                            "edgeID": datasets.Value("string"),
                            "fromID": datasets.Value("string"),
                            "toID": datasets.Value("string"),
                            "formEdgeID": datasets.Value("string"),
                        }
                    ),
                    "locutions": datasets.Sequence(
                        {
                            "nodeID": datasets.Value("string"),
                            "personID": datasets.Value("string"),
                            "timestamp": datasets.Value("string"),
                            "start": datasets.Value("string"),
                            "end": datasets.Value("string"),
                            "source": datasets.Value("string"),
                        }
                    ),
                }
            )
        )

    def _split_generators(self, dl_manager):
        """Download and extract the data (or use the manually provided directory via
        `dl_manager.manual_dir`) and create a single train split from the nodeset files."""
        if dl_manager.manual_dir is None:
            data_dir = os.path.join(dl_manager.download_and_extract(DATA_URL), SUBDIR)
        else:
            # make absolute path of the manual_dir
            data_dir = os.path.abspath(dl_manager.manual_dir)
        # collect all json files in the data_dir with glob
        file_names = glob.glob(os.path.join(data_dir, "*.json"))
        # filter out blacklisted nodesets and sort to get deterministic order
        file_names_filtered = sorted([fn for fn in file_names if not is_blacklisted(fn)])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"file_names": file_names_filtered},
            )
        ]

    def _generate_examples(self, file_names):
        idx = 0
        for file_name in file_names:
            with open(file_name, encoding="utf-8", errors=None) as f:
                data = json.load(f)
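            # use the nodeset ID extracted from the file name as the example ID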
            data["id"] = get_node_id_from_filename(file_name)
            # delete optional node fields: scheme, schemeID
            for node in data["nodes"]:
                node.pop("scheme", None)
                node.pop("schemeID", None)

            yield idx, data
            idx += 1
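

# Minimal usage sketch (illustrative only; not used by the builder itself). It assumes a
# version of the `datasets` library that still supports dataset loading scripts. The
# `data_dir` path in the commented-out call is a placeholder and is only needed when the
# archive from DATA_URL has been downloaded and extracted manually.
if __name__ == "__main__":
    from datasets import load_dataset

    # download dataset.zip from DATA_URL and build the single "train" split
    ds = load_dataset(__file__, name="dialam_2024", split="train")
    print(ds)

    # alternatively, build from a local copy of the nodeset JSON files:
    # ds = load_dataset(__file__, name="dialam_2024", split="train", data_dir="path/to/dataset")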