Matej Klemen
committed on
Commit
·
b630229
1
Parent(s):
28e4327
Add first version of Sopomenke 1.0
Browse files- dataset_infos.json +1 -0
- slo_thesaurus.py +127 -0
dataset_infos.json
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
{"default": {"description": "This is an automatically created Slovene thesaurus from Slovene data available in a comprehensive \nEnglish\u2013Slovenian dictionary, a monolingual dictionary, and a corpus. A network analysis on the bilingual dictionary \nword co-occurrence graph was used, together with additional information from the distributional thesaurus data \navailable as part of the Sketch Engine tool and extracted from the 1.2 billion word Gigafida corpus and the \nmonolingual dictionary.\n", "citation": "@article{krek2017translation,\n title={From translation equivalents to synonyms: creation of a Slovene thesaurus using word co-occurrence network analysis},\n author={Krek, Simon and Laskowski, Cyprian and Robnik-{\u000b{S}}ikonja, Marko},\n journal={Proceedings of eLex},\n pages={93--109},\n year={2017}\n}\n", "homepage": "http://hdl.handle.net/11356/1166", "license": "Creative Commons - Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)", "features": {"id_headword": {"dtype": "string", "id": null, "_type": "Value"}, "headword": {"dtype": "string", "id": null, "_type": "Value"}, "groups_core": [{"id_words": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "words": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "scores": {"feature": {"dtype": "float32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}], "groups_near": [{"id_words": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "words": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "scores": {"feature": {"dtype": "float32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}]}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "slo_thesaurus", "config_name": 
"default", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 18723255, "num_examples": 105473, "dataset_name": "slo_thesaurus"}}, "download_checksums": {"https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1166/CJVT_Thesaurus-v1.0.zip": {"num_bytes": 5680952, "checksum": "3432cef5f272ff5c33a7984fdede34d53cc5dbf1b075e45011946a7b7f244c4d"}}, "download_size": 5680952, "post_processing_size": null, "dataset_size": 18723255, "size_in_bytes": 24404207}}
|
slo_thesaurus.py
ADDED
@@ -0,0 +1,127 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
""" An automatically created Slovene thesaurus. """
|
2 |
+
import logging
|
3 |
+
import xml.etree.ElementTree as ET
|
4 |
+
import os
|
5 |
+
|
6 |
+
import datasets
|
7 |
+
|
8 |
+
|
9 |
+
_CITATION = """\
|
10 |
+
@article{krek2017translation,
|
11 |
+
title={From translation equivalents to synonyms: creation of a Slovene thesaurus using word co-occurrence network analysis},
|
12 |
+
author={Krek, Simon and Laskowski, Cyprian and Robnik-{\v{S}}ikonja, Marko},
|
13 |
+
journal={Proceedings of eLex},
|
14 |
+
pages={93--109},
|
15 |
+
year={2017}
|
16 |
+
}
|
17 |
+
"""
|
18 |
+
|
19 |
+
_DESCRIPTION = """\
|
20 |
+
This is an automatically created Slovene thesaurus from Slovene data available in a comprehensive
|
21 |
+
English–Slovenian dictionary, a monolingual dictionary, and a corpus. A network analysis on the bilingual dictionary
|
22 |
+
word co-occurrence graph was used, together with additional information from the distributional thesaurus data
|
23 |
+
available as part of the Sketch Engine tool and extracted from the 1.2 billion word Gigafida corpus and the
|
24 |
+
monolingual dictionary.
|
25 |
+
"""
|
26 |
+
|
27 |
+
_HOMEPAGE = "http://hdl.handle.net/11356/1166"
|
28 |
+
|
29 |
+
_LICENSE = "Creative Commons - Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)"
|
30 |
+
|
31 |
+
_URLS = {
|
32 |
+
"slo_thesaurus": "https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1166/CJVT_Thesaurus-v1.0.zip",
|
33 |
+
}
|
34 |
+
|
35 |
+
|
36 |
+
class SloThesaurus(datasets.GeneratorBasedBuilder):
    """An automatically created Slovene thesaurus."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        # Each entry: a headword plus two lists of synonym groups — "core"
        # (strong synonym candidates) and "near" (looser matches).  A group
        # holds parallel sequences of word ids, surface forms and scores.
        def group_feature():
            # Fresh dict per call so the two group lists don't share state.
            return {
                "id_words": datasets.Sequence(datasets.Value("string")),
                "words": datasets.Sequence(datasets.Value("string")),
                "scores": datasets.Sequence(datasets.Value("float32")),
            }

        features = datasets.Features(
            {
                "id_headword": datasets.Value("string"),
                "headword": datasets.Value("string"),
                "groups_core": [group_feature()],
                "groups_near": [group_feature()],
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Single TRAIN split: the resource ships as one XML file in the zip.
        urls = _URLS["slo_thesaurus"]
        data_dir = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"file_path": os.path.join(data_dir, "CJVT_Thesaurus-v1.0.xml")},
            )
        ]

    @staticmethod
    def _parse_groups(groups_el):
        """Parse the <group> children of `groups_el` into dicts of parallel lists.

        `groups_el` is a <groups_core> or <groups_near> element (or None when
        the entry has no such section, in which case an empty list is
        returned).  Each <candidate> contributes its word id, surface form
        and similarity score.
        """
        parsed = []
        if groups_el is None:
            return parsed
        for group in groups_el.iterfind("group"):
            parsed_group = {"id_words": [], "words": [], "scores": []}
            for candidate in group.iterfind("candidate"):
                candidate_s = candidate.find("s")
                parsed_group["id_words"].append(candidate_s.attrib["id"])
                parsed_group["words"].append(candidate_s.text.strip())
                parsed_group["scores"].append(float(candidate.attrib["score"]))
            parsed.append(parsed_group)
        return parsed

    def _generate_examples(self, file_path):
        """Yield (index, example) pairs parsed from the thesaurus XML file."""
        root = ET.parse(file_path).getroot()

        for idx_entry, curr_entry in enumerate(root.iterfind(".//entry")):
            head_word = curr_entry.find("headword")
            if head_word is None:
                # Should not happen in the published data; emit a placeholder
                # rather than dropping the entry silently.
                logging.warning("<headword> is missing for an entry, which should likely not happen. "
                                "Please open an issue on the dataset repository if you are seeing this.")
                head_word = {"text": "UNK_headword", "id": "NA_id"}
            else:
                head_word = {"text": head_word.text.strip(), "id": head_word.attrib["id"]}

            # Core and near groups share the exact same XML shape, so both
            # sections go through the same helper.
            yield idx_entry, {
                "id_headword": head_word["id"],
                "headword": head_word["text"],
                "groups_core": self._parse_groups(curr_entry.find("groups_core")),
                "groups_near": self._parse_groups(curr_entry.find("groups_near")),
            }
|