holylovenia committed on
Commit
a3ad0b2
1 Parent(s): 8502758

Upload jadi_ide.py with huggingface_hub

Files changed (1)
  1. jadi_ide.py +130 -0
jadi_ide.py ADDED
@@ -0,0 +1,130 @@
+from pathlib import Path
+from typing import Dict, List, Tuple
+
+import datasets
+import pandas as pd
+
+from nusacrowd.utils import schemas
+from nusacrowd.utils.configs import NusantaraConfig
+from nusacrowd.utils.constants import Tasks
+
+_CITATION = """\
+@article{hidayatullah2020attention,
+  title={Attention-based cnn-bilstm for dialect identification on javanese text},
+  author={Hidayatullah, Ahmad Fathan and Cahyaningtyas, Siwi and Pamungkas, Rheza Daffa},
+  journal={Kinetik: Game Technology, Information System, Computer Network, Computing, Electronics, and Control},
+  pages={317--324},
+  year={2020}
+}
+"""
+
+_LANGUAGES = ["ind"]  # We follow ISO639-3 language code (https://iso639-3.sil.org/code_tables/639/data)
+_LOCAL = False
+
+_DATASETNAME = "jadi_ide"
+
+_DESCRIPTION = """\
+The JaDi-Ide dataset is a Twitter dataset for Javanese dialect identification, containing 16,498
+data samples. The dialect is classified into `Standard Javanese`, `Ngapak Javanese`, and `East
+Javanese` dialects.
+"""
+
+_HOMEPAGE = "https://github.com/fathanick/Javanese-Dialect-Identification-from-Twitter-Data"
+_LICENSE = "Unknown"
+_URLS = {
+    _DATASETNAME: "https://github.com/fathanick/Javanese-Dialect-Identification-from-Twitter-Data/raw/main/Update 16K_Dataset.xlsx",
+}
+# TODO check supported tasks
+_SUPPORTED_TASKS = [Tasks.EMOTION_CLASSIFICATION]
+_SOURCE_VERSION = "1.0.0"
+_NUSANTARA_VERSION = "1.0.0"
+
+
+class JaDi_Ide(datasets.GeneratorBasedBuilder):
+    """The JaDi-Ide dataset is a Twitter dataset for Javanese dialect identification, containing 16,498
+    data samples."""
+
+    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+    NUSANTARA_VERSION = datasets.Version(_NUSANTARA_VERSION)
+
+    BUILDER_CONFIGS = [
+        NusantaraConfig(
+            name="jadi_ide_source",
+            version=SOURCE_VERSION,
+            description="JaDi-Ide source schema",
+            schema="source",
+            subset_id="jadi_ide",
+        ),
+        NusantaraConfig(
+            name="jadi_ide_nusantara_text",
+            version=NUSANTARA_VERSION,
+            description="JaDi-Ide Nusantara schema",
+            schema="nusantara_text",
+            subset_id="jadi_ide",
+        ),
+    ]
+
+    DEFAULT_CONFIG_NAME = "jadi_ide_source"
+
+    def _info(self) -> datasets.DatasetInfo:
+        if self.config.schema == "source":
+            features = datasets.Features(
+                {
+                    "id": datasets.Value("string"),
+                    "text": datasets.Value("string"),
+                    "label": datasets.Value("string"),
+                }
+            )
+        elif self.config.schema == "nusantara_text":
+            features = schemas.text_features(["Jawa Timur", "Jawa Standar", "Jawa Ngapak"])
+
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,
+            homepage=_HOMEPAGE,
+            license=_LICENSE,
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+        """Returns SplitGenerators."""
+        # Dataset does not have predetermined split, putting all as TRAIN
+        urls = _URLS[_DATASETNAME]
+        base_dir = Path(dl_manager.download(urls))
+        data_files = {"train": base_dir}
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "filepath": data_files["train"],
+                    "split": "train",
+                },
+            ),
+        ]
+
+    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
+        """Yields examples as (key, example) tuples."""
+        df = pd.read_excel(filepath)
+        df.columns = ["id", "text", "label"]
+
+        if self.config.schema == "source":
+            for idx, row in enumerate(df.itertuples()):
+                ex = {
+                    "id": str(idx),
+                    "text": row.text,
+                    "label": row.label,
+                }
+                yield idx, ex
+
+        elif self.config.schema == "nusantara_text":
+            for idx, row in enumerate(df.itertuples()):
+                ex = {
+                    "id": str(idx),
+                    "text": row.text,
+                    "label": row.label,
+                }
+                yield idx, ex
+        else:
+            raise ValueError(f"Invalid config: {self.config.name}")
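
A minimal usage sketch for the uploaded loader (illustrative, not part of the committed file): it assumes the script is saved locally as jadi_ide.py, that the nusacrowd package (providing schemas, NusantaraConfig, Tasks) plus pandas and openpyxl are installed, and that a datasets version with loader-script support is used. The config name "jadi_ide_source" follows BUILDER_CONFIGS above.

    import datasets

    # Load the source-schema config of the loader script from this commit.
    # "jadi_ide.py" is a hypothetical local path to the uploaded script.
    dset = datasets.load_dataset("jadi_ide.py", name="jadi_ide_source")

    # The whole dataset lands in the "train" split, as set up in _split_generators.
    print(dset["train"][0])  # expected keys: "id", "text", "label"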