Datasets:
Tasks:
Token Classification
Formats:
parquet
Sub-tasks:
named-entity-recognition
Languages:
Arabic
Size:
10K - 100K
License:
File size: 1,114 Bytes
Commit: 1e63bb3
import json, re, pathlib, unicodedata, itertools
# --- span-text heuristics --------------------------------------------------
# Any character in the Arabic Unicode block (U+0600-U+06FF).
AR = re.compile(r'[\u0600-\u06FF]')
# Common emoji code-point ranges; presence of any marks a span as noisy.
EMOJI = re.compile('['
                   '\U0001F600-\U0001F64F'  # emoticons
                   '\U0001F300-\U0001F5FF'  # symbols & pictographs
                   '\U0001F680-\U0001F6FF'  # transport & map symbols
                   '\U0001F1E0-\U0001F1FF'  # flags
                   ']', flags=re.UNICODE)


def is_flagged(txt):
    """Return True when *txt* looks like noise rather than Arabic text.

    A span is flagged when it contains an emoji, an '@' or '#'
    (mention/hashtag marker), or when at least 80% of its characters
    fall outside the Arabic Unicode block. Empty strings are never
    flagged.
    """
    # '@'/'#' membership via `in` on the string is a single C-level scan,
    # replacing the original per-character generator.
    if EMOJI.search(txt) or '@' in txt or '#' in txt:
        return True
    non_ar = sum(1 for c in txt if not AR.match(c))
    # bool(...) fixes the original, which evaluated to the *int* 0 for an
    # empty string instead of False (inconsistent return type).
    return bool(txt) and non_ar / len(txt) >= 0.8
# --- scan every split and report the share of flagged spans ----------------
files = ['train.jsonl', 'validation.jsonl', 'test.jsonl']
tot = flagged = 0   # spans seen / spans flagged as noise
examples = []       # flagged span texts, capped at 2000 entries

for fn in files:
    # Explicit UTF-8: the data is Arabic, and read_text() otherwise uses
    # the platform default encoding.
    content = pathlib.Path(fn).read_text(encoding='utf-8')
    # Skip blank lines (e.g. a trailing newline) so json.loads doesn't
    # crash on an empty string.
    for row in (json.loads(ln) for ln in content.splitlines() if ln.strip()):
        for sp in row['spans']:
            # Prefer the span's own text; fall back to slicing the parent
            # sentence by character offsets.
            txt = sp.get('text') or row['text'][sp['start']:sp['end']]
            tot += 1
            if is_flagged(txt):
                flagged += 1
                if len(examples) < 2000:
                    examples.append(txt)

# Guard the ratio: tot == 0 (all splits empty) raised ZeroDivisionError
# in the original.
ratio = flagged / tot if tot else 0.0
print(f"{flagged}/{tot} spans flagged ({ratio:.2%})")
print("sample:", examples)