Datasets: CZLC
Modalities: Text · Formats: json · Languages: Czech · Libraries: Datasets, pandas
mfajcik committed (verified) · Commit 5c85698 · Parent: 0948037

Upload 3 files
Files changed (3)
  1. convert_czner.py +133 -0
  2. test.jsonl +0 -0
  3. train.jsonl +0 -0
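
The convert_czner.py script below flattens the CNEC 2.0 corpus (fewshot-goes-multilingual/cs_czech-named-entity-corpus_2.0) into flat text-plus-answers records, grouping same-type entities that co-occur in one text window. Each line written to train.jsonl / test.jsonl is a JSON object with the shape sketched here; the field names come from the dict built at the end of proc_dataset, while the values are invented for illustration:

record = {
    "label": "...",      # entity category (category_str from the source corpus)
    "answers": ["..."],  # unique surface forms of that category found in the text window
    "text": "...",       # document text, cut to a window around the entity if the document is long
}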
convert_czner.py ADDED
@@ -0,0 +1,133 @@
import os
import random

from datasets import load_dataset


def whitespace_tokenize_with_offsets(text):
    tokens = []
    start_tok_offsets = []
    end_tok_offsets = []
    current_token = ""
    current_token_start = None

    for i, char in enumerate(text):
        if char.isspace():
            if current_token:
                tokens.append(current_token)
                start_tok_offsets.append(current_token_start)
                end_tok_offsets.append(i)
                current_token = ""
                current_token_start = None
        else:
            if current_token == "":
                current_token_start = i
            current_token += char

    # Add the last token if there is one
    if current_token:
        tokens.append(current_token)
        start_tok_offsets.append(current_token_start)
        end_tok_offsets.append(len(text))

    return tokens, start_tok_offsets, end_tok_offsets


def proc_dataset(dataset, max_text_length=420):
    r = []
    for doc in dataset:
        text = doc["text"]
        covered_entities = set()
        for ent_id, entity in enumerate(doc["entities"]):
            if ent_id in covered_entities:
                continue
            target_text = text
            if len(text) > max_text_length:
                tokens, start_tok_offsets, end_tok_offsets = whitespace_tokenize_with_offsets(text)
                entity_start = entity["start"]
                entity_end = entity["end"]

                # Find the token indices that correspond to the entity
                entity_start_idx = None
                entity_end_idx = None
                for idx, (start, end) in enumerate(zip(start_tok_offsets, end_tok_offsets)):
                    if start <= entity_start < end:
                        entity_start_idx = idx
                    if start < entity_end <= end:
                        entity_end_idx = idx
                        break

                if entity_start_idx is None or entity_end_idx is None:
                    continue

                allowed_tokens = max_text_length - len(tokens[entity_start_idx:entity_end_idx + 1]) - 20
                before_tokens = random.randint(0, int(allowed_tokens * 0.8))
                after_tokens = allowed_tokens - before_tokens

                # Determine the start and end indices for the new text segment
                if entity_start_idx - before_tokens < 0:
                    after_tokens += -(entity_start_idx - before_tokens)
                elif entity_end_idx + after_tokens + 1 >= len(tokens):
                    before_tokens += entity_end_idx + after_tokens + 1 - len(tokens)
                start_idx = max(0, entity_start_idx - before_tokens)
                end_idx = min(len(tokens), entity_end_idx + after_tokens + 1)

                # Ensure the first 20 tokens are included if possible
                initial_text = ""
                if start_idx > 20:
                    initial_text = text[:end_tok_offsets[20]] + "... "

                # Use offsets to extract the original text
                start_offset = start_tok_offsets[start_idx]
                end_offset = end_tok_offsets[end_idx - 1]

                target_text = initial_text + text[start_offset:end_offset]

            # if target text contains more entities of the same type, add them to the answers and covered entities
            this_answer_entities = [ent_id]
            answers = [entity["content"]]
            for ent_id2, entity2 in enumerate(doc["entities"]):
                if ent_id2 == ent_id:
                    continue
                # check type
                if entity2["category_str"] == entity["category_str"]:
                    # check if the entity string occurs in the target text
                    if entity2["content"] in target_text:
                        this_answer_entities.append(ent_id2)
                        answers.append(entity2["content"])

            covered_entities.update(this_answer_entities)

            r.append({
                "label": entity["category_str"],
                "answers": list(set(answers)),
                "text": target_text,
            })
    return r


d = load_dataset("fewshot-goes-multilingual/cs_czech-named-entity-corpus_2.0")
train = list(d['train'])
random.shuffle(train)
new_dataset_train = proc_dataset(train[3000:])
dataset_test_ftrain = proc_dataset(train[:3000])
dataset_val = proc_dataset(d['validation'])
dataset_test = proc_dataset(d['test'])

# merge splits
new_dataset_test = dataset_test_ftrain + dataset_val + dataset_test
random.shuffle(new_dataset_test)

# save using jsonlines in .data/hf_datasets/ner_court_decisions
os.makedirs(".data/hf_dataset/czner_2.0", exist_ok=True)
import jsonlines

# print dataset lengths
print("train", len(new_dataset_train))
print("test", len(new_dataset_test))

with jsonlines.open(".data/hf_dataset/czner_2.0/train.jsonl", "w") as f:
    f.write_all(new_dataset_train)
with jsonlines.open(".data/hf_dataset/czner_2.0/test.jsonl", "w") as f:
    f.write_all(new_dataset_test)
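
A note on the windowing above: whitespace_tokenize_with_offsets pairs each token with character offsets into the original string, which is what lets proc_dataset slice a character-accurate window around an entity. A minimal self-contained sketch of the same offset bookkeeping using re.finditer (the sentence is made up; for plain whitespace-separated text the spans agree with what the function in this file returns):

import re

text = "Karel IV. byl cesky kral a rimsky cisar."
tokens, starts, ends = [], [], []
for m in re.finditer(r"\S+", text):  # one match per whitespace-separated token
    tokens.append(m.group())
    starts.append(m.start())  # character offset where the token begins
    ends.append(m.end())      # character offset just past the token

# Every token can be recovered from the original string through its offsets,
# the invariant the script relies on when it does text[start_offset:end_offset].
assert all(text[s:e] == t for t, s, e in zip(tokens, starts, ends))
print(tokens)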
test.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
train.jsonl ADDED
The diff for this file is too large to render. See raw diff
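
The uploaded train.jsonl and test.jsonl can be read back with the jsonlines package or through the generic JSON loader in datasets. A minimal sketch, assuming local copies of the two files in the working directory (the exact repository id is not shown in this view, so no hub path is hard-coded):

from datasets import load_dataset

# Load the two JSONL splits produced by convert_czner.py.
ds = load_dataset("json", data_files={"train": "train.jsonl", "test": "test.jsonl"})
print(ds["train"][0]["label"], ds["train"][0]["answers"])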