# wsd_plwordnet_glex / wsd_plwordnet_glex.py
# Provenance (from HF Hub viewer): "Update wsd_plwordnet_glex.py (#4)" by ajanz, revision 938888a
import json
import datasets
# Human-readable summary attached to the DatasetInfo of every configuration.
_DESCRIPTION = "definitions, usage examples and lemma candidates datasets from PlWn"
class WsdPlwordnetGlexConfig(datasets.BuilderConfig):
    """BuilderConfig for the wsd_plwordnet_glex dataset.

    Args:
        url: Name of the data file in the remote repository.
        data_dir: Repository-relative path to the data file.
        **kwargs: Forwarded to ``datasets.BuilderConfig`` (``name``,
            ``description``, ...).
    """

    def __init__(self, url, data_dir, **kwargs):
        # Every configuration of this dataset shares the same version.
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.data_dir = data_dir
        self.url = url
class WsdPlwordnetGlex(datasets.GeneratorBasedBuilder):
    """WSD PLWordnet GLEX dataset builder.

    Exposes three configurations:

    * ``definitions``      -- sense definitions, JSONL (one record per line)
    * ``usage_examples``   -- annotated usage examples, JSONL
    * ``lemma_candidates`` -- single JSON object mapping lemma -> candidate list
    """

    BUILDER_CONFIGS = [
        WsdPlwordnetGlexConfig(
            name="definitions",
            data_dir="data/definitions.jsonl",
            url="definitions.jsonl",
            description="Definitions from PLWordnet",
        ),
        WsdPlwordnetGlexConfig(
            name="usage_examples",
            data_dir="data/usage_examples.jsonl",
            url="usage_examples.jsonl",
            description="Usage examples from PLWordnet",
        ),
        WsdPlwordnetGlexConfig(
            name="lemma_candidates",
            data_dir="data/lemma_candidates.json",
            url="lemma_candidates.json",
            description="Lemma candidates from PLWordnet",
        ),
    ]

    @staticmethod
    def _token_features():
        """Token annotation layout shared by ``definitions`` and ``usage_examples``."""
        return datasets.features.Sequence(
            {
                "index": datasets.Value("int32"),
                # (start, end) character offsets of the token in the text.
                "position": datasets.features.Sequence(
                    length=2,
                    feature=datasets.Value("int32"),
                ),
                "orth": datasets.Value("string"),
                "lemma": datasets.Value("string"),
                "pos": datasets.Value("string"),
                "ctag": datasets.Value("string"),
            }
        )

    def _info(self):
        """Return DatasetInfo with the feature schema for the active config."""
        if self.config.name == "usage_examples":
            features = datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "tokens": self._token_features(),
                    "phrases": datasets.features.Sequence(datasets.Value("string")),
                    # Word-sense annotations linking tokens to PLWordnet /
                    # Princeton WordNet / BabelNet identifiers.
                    "wsd": datasets.features.Sequence(
                        {
                            "index": datasets.Value("int32"),
                            "pl_sense": datasets.Value("string"),
                            "plWN_lex_id": datasets.Value("string"),
                            "plWN_syn_id": datasets.Value("string"),
                            "plWN_lex_legacy_id": datasets.Value("string"),
                            "plWN_syn_legacy_id": datasets.Value("string"),
                            "PWN_syn_id": datasets.Value("string"),
                            "bn_syn_id": datasets.Value("string"),
                            "mapping_relation": datasets.Value("string"),
                        }
                    ),
                    "context_file": datasets.Value("string"),
                }
            )
        elif self.config.name == "definitions":
            features = datasets.Features(
                {
                    "pl_sense": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "tokens": self._token_features(),
                    "phrases": datasets.features.Sequence(datasets.Value("string")),
                    "plWN_lex_id": datasets.Value("string"),
                    "plWN_syn_id": datasets.Value("string"),
                    "wsd_indices": datasets.features.Sequence(datasets.Value("int32")),
                }
            )
        elif self.config.name == "lemma_candidates":
            features = datasets.Features(
                {
                    "lemma": datasets.Value("string"),
                    "candidates": datasets.features.Sequence(datasets.Value("string")),
                }
            )
        else:
            # Unknown config name: keep the original fallback of empty features.
            features = datasets.Features()
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
        )

    def _split_generators(self, dl_manager):
        """Download the config's data file; everything lands in a single TRAIN split."""
        # NOTE(review): downloads self.config.data_dir; self.config.url is unused
        # here — presumably kept for reference. Confirm before removing.
        dl_dir = dl_manager.download_and_extract(self.config.data_dir)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"path": dl_dir},
            ),
        ]

    def _generate_examples(self, path):
        """Yield ``(key, example)`` pairs from the downloaded data file.

        JSONL configs are parsed line by line; ``lemma_candidates`` is one
        JSON object whose items become individual examples.
        """
        with open(path, encoding="utf-8") as f:
            if self.config.name == "usage_examples":
                for k, line in enumerate(f):
                    data = json.loads(line)
                    yield k, {
                        "text": data["text"],
                        "tokens": data["tokens"],
                        "phrases": data["phrases"],
                        "wsd": data["wsd"],
                        "context_file": data["context_file"],
                    }
            elif self.config.name == "definitions":
                for k, line in enumerate(f):
                    data = json.loads(line)
                    yield k, {
                        "pl_sense": data["pl_sense"],
                        "text": data["text"],
                        "tokens": data["tokens"],
                        "phrases": data["phrases"],
                        "plWN_lex_id": data["plWN_lex_id"],
                        "plWN_syn_id": data["plWN_syn_id"],
                        "wsd_indices": data["wsd_indices"],
                    }
            elif self.config.name == "lemma_candidates":
                data = json.load(f)
                for ind, (lemma, candidates) in enumerate(data.items()):
                    yield ind, {"lemma": lemma, "candidates": candidates}