amsa02 committed on
Commit d33c368 · verified
1 Parent(s): d1d7214

Upload botanical_ner.py

Files changed (1)
  1. botanical_ner.py +151 -0
botanical_ner.py ADDED
@@ -0,0 +1,151 @@
+ # botanical_ner.py
+ # Author: Amir Safari
+ # @date: 2025-08-30
+ import datasets
+ from pathlib import Path
+ import logging
+
+ _CITATION = """\
+ @mastersthesis{meraner2019grasping,
+     title={Grasping the Nettle: Neural Entity Recognition for Scientific and Vernacular Plant Names},
+     author={Meraner, Isabel},
+     year={2019},
+     school={Institute of Computational Linguistics, University of Zurich},
+     note={Available at: https://github.com/IsabelMeraner/BotanicalNER}
+ }
+ """
+
+ _DESCRIPTION = """\
+ BotanicalNER is a Named Entity Recognition dataset for scientific and vernacular plant names in German and English.
+ The dataset was created for a master's thesis project at the University of Zurich focusing on identifying and
+ disambiguating plant names across multiple text genres to extract and preserve (ethno-)botanical knowledge.
+ """
+
+ _HOMEPAGE = "https://github.com/IsabelMeraner/BotanicalNER"
+ _LICENSE = "GPL-3.0"
+ _URL = "https://github.com/IsabelMeraner/BotanicalNER/archive/refs/heads/master.zip"
+
+ _NER_TAGS = ["O", "B-Scientific", "I-Scientific", "B-Vernacular", "I-Vernacular"]
+
+
+ _FILE_PATHS = {
+     "de": {
+         "train": [
+             "RESOURCES/corpora/training corpora/de/plantblog_corpus_de.tok.pos.iob.txt",
+             "RESOURCES/corpora/training corpora/de/wiki_abstractcorpus_de.tok.pos.iob.txt",
+             "RESOURCES/corpora/training corpora/de/TextBerg_subcorpus_de.tok.pos.iob.txt",
+             "RESOURCES/corpora/training corpora/de/botlit_corpus_de.tok.pos.iob.txt",
+         ],
+         "test": ["RESOURCES/corpora/gold_standard/de/combined.test.fold1GOLD_de.txt"],
+         "fungi": ["RESOURCES/corpora/gold_standard/de/test_fungi_de.tok.pos.iobGOLD.txt"],
+     },
+     "en": {
+         "train": [
+             "RESOURCES/corpora/training corpora/en/plantblog_corpus_en.tok.pos.iob.txt",
+             "RESOURCES/corpora/training corpora/en/wiki_abstractcorpus_en.tok.pos.iob.txt",
+             "RESOURCES/corpora/training corpora/en/TextBerg_subcorpus_en.tok.pos.iob.txt",
+             "RESOURCES/corpora/training corpora/en/botlit_corpus_en.tok.pos.iob.txt",
+         ],
+         "test": ["RESOURCES/corpora/gold_standard/en/combined.test.fold1GOLD_en.txt"],
+         "fungi": ["RESOURCES/corpora/gold_standard/en/test_fungi_en.tok.pos.iobGOLD.txt"],
+     },
+ }
+
+ class BotanicalNERConfig(datasets.BuilderConfig):
+     """BuilderConfig for BotanicalNER"""
+     def __init__(self, language="de", **kwargs):
+         super(BotanicalNERConfig, self).__init__(**kwargs)
+         self.language = language
+
+ class BotanicalNER(datasets.GeneratorBasedBuilder):
+     """BotanicalNER dataset for plant name NER in German and English"""
+
+     VERSION = datasets.Version("1.0.0")
+
+     BUILDER_CONFIGS = [
+         BotanicalNERConfig(name="de", language="de", version=VERSION, description="German BotanicalNER dataset"),
+         BotanicalNERConfig(name="en", language="en", version=VERSION, description="English BotanicalNER dataset"),
+     ]
+
+     DEFAULT_CONFIG_NAME = "de"
+
+     def _info(self):
+         features = datasets.Features({
+             "id": datasets.Value("string"),
+             "tokens": datasets.Sequence(datasets.Value("string")),
+             "pos_tags": datasets.Sequence(datasets.Value("string")),  # Kept as string as the tag set is very large
+             "ner_tags": datasets.Sequence(datasets.ClassLabel(names=_NER_TAGS)),
+         })
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         # Download and extract the single zip file
+         data_dir = dl_manager.download_and_extract(_URL)
+         base_path = Path(data_dir) / "BotanicalNER-master"
+         language = self.config.language
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"filepaths": [base_path / f for f in _FILE_PATHS[language]["train"]]},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"filepaths": [base_path / f for f in _FILE_PATHS[language]["test"]]},
+             ),
+             datasets.SplitGenerator(
+                 name="fungi",
+                 gen_kwargs={"filepaths": [base_path / f for f in _FILE_PATHS[language]["fungi"]]},
+             ),
+         ]
+
+     def _generate_examples(self, filepaths):
+         """Yields examples from the dataset files."""
+         guid = 0
+         for filepath in filepaths:
+             logging.info(f"Generating examples from {filepath}")
+             with open(filepath, encoding="utf-8") as f:
+                 tokens = []
+                 pos_tags = []
+                 ner_tags = []
+                 for line in f:
+                     line = line.strip()
+                     if not line or line.startswith("-DOCSTART-"):
+                         if tokens:
+                             yield guid, {
+                                 "id": str(guid),
+                                 "tokens": tokens,
+                                 "pos_tags": pos_tags,
+                                 "ner_tags": ner_tags,
+                             }
+                             guid += 1
+                             tokens = []
+                             pos_tags = []
+                             ner_tags = []
+                     else:
+                         parts = line.split("\t")
+                         # The files consistently have 3 columns: token, pos, ner
+                         if len(parts) == 3:
+                             tokens.append(parts[0])
+                             pos_tags.append(parts[1])
+                             ner_tags.append(parts[2])
+                         else:
+                             logging.warning(f"Skipping malformed line in {filepath}: '{line}'")
+
+                 # Yield the last sentence if the file does not end with a newline
+                 if tokens:
+                     yield guid, {
+                         "id": str(guid),
+                         "tokens": tokens,
+                         "pos_tags": pos_tags,
+                         "ner_tags": ner_tags,
+                     }
+                     guid += 1
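For reference, a minimal usage sketch (not part of the committed file): it assumes the script is saved locally as botanical_ner.py, and that the installed datasets release still supports script-based loaders (newer versions require trust_remote_code=True, and some drop script support entirely). The config names are "de" and "en", and the available splits are "train", "test", and "fungi".

from datasets import load_dataset

# Hypothetical local path to this script; adjust as needed.
ds = load_dataset("./botanical_ner.py", name="de", trust_remote_code=True)

print(ds)                    # DatasetDict with train / test / fungi splits
sample = ds["train"][0]
print(sample["tokens"])      # token list for the first sentence
print(sample["ner_tags"])    # NER labels as integer class ids

# Map class ids back to tag strings via the ClassLabel feature.
tag_names = ds["train"].features["ner_tags"].feature.names
print([tag_names[i] for i in sample["ner_tags"]])

Note that pos_tags are plain strings (no fixed label set), whereas ner_tags are ClassLabel ids over the five IOB tags defined in _NER_TAGS.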