Rodrigo1771 committed · verified
Commit 1509bdf · 1 Parent(s): 2947c19

Upload cantemist-fasttext-8-ner.py with huggingface_hub

Files changed (1)
  1. cantemist-fasttext-8-ner.py +112 -0
cantemist-fasttext-8-ner.py ADDED
@@ -0,0 +1,112 @@
+ # Loading script for the Cantemist NER dataset.
+ import datasets
+
+
+ logger = datasets.logging.get_logger(__name__)
+
+
+ _CITATION = """\
+ @inproceedings{miranda2020named,
+     title={Named entity recognition, concept normalization and clinical coding: Overview of the cantemist track for cancer text mining in spanish, corpus, guidelines, methods and results},
+     author={Miranda-Escalada, A and Farr{\'e}, E and Krallinger, M},
+     booktitle={Proceedings of the Iberian Languages Evaluation Forum (IberLEF 2020), CEUR Workshop Proceedings},
+     year={2020}
+ }"""
+
+ _DESCRIPTION = """\
+ https://temu.bsc.es/cantemist/
+ """
+
+ _URL = "https://huggingface.co/datasets/Rodrigo1771/cantemist-fasttext-8-ner/resolve/main/"
+ _TRAINING_FILE = "train.conll"
+ _DEV_FILE = "dev.conll"
+ _TEST_FILE = "test.conll"
+
+ class CantemistNerConfig(datasets.BuilderConfig):
+     """BuilderConfig for Cantemist Ner dataset"""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig for CantemistNer.
+
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(CantemistNerConfig, self).__init__(**kwargs)
+
+
+ class CantemistNer(datasets.GeneratorBasedBuilder):
+     """Cantemist Ner dataset."""
+
+     BUILDER_CONFIGS = [
+         CantemistNerConfig(
+             name="CantemistNer",
+             version=datasets.Version("1.0.0"),
+             description="CantemistNer dataset"),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "tokens": datasets.Sequence(datasets.Value("string")),
+                     "ner_tags": datasets.Sequence(
+                         datasets.features.ClassLabel(
+                             names=[
+                                 "O",
+                                 "B-MORFOLOGIA_NEOPLASIA",
+                                 "I-MORFOLOGIA_NEOPLASIA",
+                             ]
+                         )
+                     ),
+                 }
+             ),
+             supervised_keys=None,
+             homepage="https://temu.bsc.es/cantemist/",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         urls_to_download = {
+             "train": f"{_URL}{_TRAINING_FILE}",
+             "dev": f"{_URL}{_DEV_FILE}",
+             "test": f"{_URL}{_TEST_FILE}",
+         }
+         downloaded_files = dl_manager.download_and_extract(urls_to_download)
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
+             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
+         ]
+
+     def _generate_examples(self, filepath):
+         logger.info("⏳ Generating examples from = %s", filepath)
+         with open(filepath, encoding="utf-8") as f:
+             guid = 0
+             tokens = []
+             ner_tags = []
+             for line in f:
+                 if line.startswith("-DOCSTART-") or line == "" or line == "\n":
+                     if tokens:
+                         yield guid, {
+                             "id": str(guid),
+                             "tokens": tokens,
+                             "ner_tags": ner_tags,
+                         }
+                         guid += 1
+                         tokens = []
+                         ner_tags = []
+                 else:
+                     # Cantemist tokens are tab separated
+                     splits = line.split("\t")
+                     tokens.append(splits[0])
+                     ner_tags.append(splits[-1].rstrip())
+             # last example
+             yield guid, {
+                 "id": str(guid),
+                 "tokens": tokens,
+                 "ner_tags": ner_tags,
+             }
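
For reference, a minimal usage sketch (not part of the commit): assuming this script and the train.conll / dev.conll / test.conll files referenced by _URL live in the public Rodrigo1771/cantemist-fasttext-8-ner dataset repository, the splits can be loaded with the datasets library as below. The .conll files are expected to hold one tab-separated token/tag pair per line with blank lines between sentences, since _generate_examples takes splits[0] as the token, splits[-1] as the tag, and flushes an example on each empty line. Depending on the installed datasets version, passing trust_remote_code=True may be required to run a script-based dataset.

# Usage sketch: assumes the dataset repo above is public and hosts this script plus the .conll files.
from datasets import load_dataset

dataset = load_dataset("Rodrigo1771/cantemist-fasttext-8-ner")  # add trust_remote_code=True on newer `datasets` releases

# Label names come from the ClassLabel declared in _info().
label_names = dataset["train"].features["ner_tags"].feature.names
print(label_names)  # ['O', 'B-MORFOLOGIA_NEOPLASIA', 'I-MORFOLOGIA_NEOPLASIA']

# One example: parallel lists of tokens and integer tag ids.
example = dataset["train"][0]
print(list(zip(example["tokens"], (label_names[i] for i in example["ner_tags"])))[:10])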