holylovenia committed
Commit
50e5f7f
1 Parent(s): 8487b9f

Upload tlunified_ner.py with huggingface_hub
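A minimal sketch of the upload step the commit message describes, using the standard `huggingface_hub` client API; the `repo_id` below is a placeholder assumption, not something recorded in this commit:

```python
# Hypothetical reconstruction of the upload step; repo_id is a placeholder,
# not taken from the commit metadata.
from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="tlunified_ner.py",
    path_in_repo="tlunified_ner.py",
    repo_id="<namespace>/<repo>",  # placeholder: the target dataset repo
    repo_type="dataset",
    commit_message="Upload tlunified_ner.py with huggingface_hub",
)
```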

Files changed (1)
  1. tlunified_ner.py +149 -0
tlunified_ner.py ADDED
@@ -0,0 +1,149 @@
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+ from datasets.download.download_manager import DownloadManager
+
+ from seacrowd.utils import schemas
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import Licenses, Tasks
+
+ _CITATION = """
+ @inproceedings{miranda-2023-developing,
+     title = "Developing a Named Entity Recognition Dataset for Tagalog",
+     author = "Miranda, Lester James Validad",
+     booktitle = "Proceedings of the First Workshop for Southeast Asian Language Processing (SEALP)",
+     month = nov,
+     year = 2023,
+     address = "Online",
+     publisher = "Association for Computational Linguistics",
+ }
+ """
+
+ _LOCAL = False
+ _LANGUAGES = ["tgl"]
+ _DATASETNAME = "tlunified_ner"
+ _DESCRIPTION = """\
+ This dataset contains the annotated TLUnified corpora from Cruz and Cheng
+ (2021). It is a curated sample of around 7,000 documents for the named entity
+ recognition (NER) task. The majority of the corpus are news reports in Tagalog,
+ resembling the domain of the original CoNLL 2003. There are three entity types:
+ Person (PER), Organization (ORG), and Location (LOC).
+ """
+
+ _HOMEPAGE = "https://huggingface.co/ljvmiranda921/tlunified-ner"
+ _LICENSE = Licenses.GPL_3_0.value
+ _URLS = {
+     "train": "https://huggingface.co/datasets/ljvmiranda921/tlunified-ner/resolve/main/corpus/iob/train.iob",
+     "dev": "https://huggingface.co/datasets/ljvmiranda921/tlunified-ner/resolve/main/corpus/iob/dev.iob",
+     "test": "https://huggingface.co/datasets/ljvmiranda921/tlunified-ner/resolve/main/corpus/iob/test.iob",
+ }
+
+ _SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION]
+ _SOURCE_VERSION = "1.0.0"
+ _SEACROWD_VERSION = "2024.06.20"
+
+
+ class TLUnifiedNERDataset(datasets.GeneratorBasedBuilder):
+     """Tagalog Named Entity Recognition dataset from https://huggingface.co/ljvmiranda921/tlunified-ner"""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+
+     SEACROWD_SCHEMA_NAME = "seq_label"
+     LABEL_CLASSES = ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
+
+     BUILDER_CONFIGS = [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_source",
+             version=SOURCE_VERSION,
+             description=f"{_DATASETNAME} source schema",
+             schema="source",
+             subset_id=_DATASETNAME,
+         ),
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_seacrowd_{SEACROWD_SCHEMA_NAME}",
+             version=SEACROWD_VERSION,
+             description=f"{_DATASETNAME} SEACrowd schema",
+             schema=f"seacrowd_{SEACROWD_SCHEMA_NAME}",
+             subset_id=_DATASETNAME,
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "tokens": datasets.Sequence(datasets.Value("string")),
+                     "ner_tags": datasets.Sequence(datasets.features.ClassLabel(names=self.LABEL_CLASSES)),
+                 }
+             )
+         elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
+             features = schemas.seq_label_features(self.LABEL_CLASSES)
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         data_files = {
+             "train": Path(dl_manager.download_and_extract(_URLS["train"])),
+             "dev": Path(dl_manager.download_and_extract(_URLS["dev"])),
+             "test": Path(dl_manager.download_and_extract(_URLS["test"])),
+         }
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"filepath": data_files["train"], "split": "train"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"filepath": data_files["dev"], "split": "dev"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"filepath": data_files["test"], "split": "test"},
+             ),
+         ]
+
+     def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
+         """Yield examples as (key, example) tuples."""
+         # The only difference between the source schema and the seacrowd seq_label
+         # schema is the dictionary keys; the parsing logic is identical.
+         label_key = "ner_tags" if self.config.schema == "source" else "labels"
+         with open(filepath, encoding="utf-8") as f:
+             guid = 0
+             tokens = []
+             ner_tags = []
+             for line in f:
+                 if line.startswith("-DOCSTART-") or line == "" or line == "\n":
+                     if tokens:
+                         yield guid, {
+                             "id": str(guid),
+                             "tokens": tokens,
+                             label_key: ner_tags,
+                         }
+                         guid += 1
+                         tokens = []
+                         ner_tags = []
+                 else:
+                     # TLUnified-NER .iob files are tab-separated: token \t tag
+                     token, ner_tag = line.split("\t")
+                     tokens.append(token)
+                     ner_tags.append(ner_tag.rstrip())
+             # Last example
+             if tokens:
+                 yield guid, {
+                     "id": str(guid),
+                     "tokens": tokens,
+                     label_key: ner_tags,
+                 }
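For context, a short usage sketch (not part of the committed file) showing how the two configs defined in `BUILDER_CONFIGS` could be loaded with `datasets.load_dataset`; the local script path is an assumption, and recent versions of `datasets` may also require `trust_remote_code=True`:

```python
# Usage sketch, assuming tlunified_ner.py is saved locally and the seacrowd
# package it imports is installed. Config names come from BUILDER_CONFIGS.
import datasets

# Source schema: examples carry "id", "tokens", and "ner_tags".
source = datasets.load_dataset("tlunified_ner.py", name="tlunified_ner_source")

# SEACrowd seq_label schema: same content, with the label column named "labels".
seacrowd = datasets.load_dataset("tlunified_ner.py", name="tlunified_ner_seacrowd_seq_label")

print(source["train"][0]["tokens"])
print(source["train"][0]["ner_tags"])
```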