Tasks: Token Classification
Formats: parquet
Sub-tasks: named-entity-recognition
Languages: Arabic
Size: 10K - 100K
License:
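
The script below runs a quick quality pass over the entity spans: it flags any span that contains emoji, @/# characters, or is at least 80% non-Arabic, then reports the flagged fraction across the train, validation, and test splits.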
import json
import pathlib
import re

# Helper regexes: the Arabic Unicode block and a few common emoji ranges.
AR = re.compile(r'[\u0600-\u06FF]')         # Arabic block
EMOJI = re.compile('['
                   '\U0001F600-\U0001F64F'  # emoticons
                   '\U0001F300-\U0001F5FF'  # symbols & pictographs
                   '\U0001F680-\U0001F6FF'  # transport & map symbols
                   '\U0001F1E0-\U0001F1FF'  # flags (regional indicators)
                   ']')                     # re.UNICODE is the default for str patterns

def is_flagged(txt):
    """Flag spans with emoji, @/# characters, or >= 80% non-Arabic characters.

    Note that digits, punctuation, and whitespace all count as non-Arabic here.
    """
    if EMOJI.search(txt) or any(c in '@#' for c in txt):
        return True
    non_ar = sum(1 for c in txt if not AR.match(c))
    return bool(txt) and non_ar / len(txt) >= 0.8

files = ['train.jsonl', 'validation.jsonl', 'test.jsonl']
tot = flagged = 0
examples = []
for fn in files:
    for row in map(json.loads, pathlib.Path(fn).read_text(encoding='utf-8').splitlines()):
        for sp in row['spans']:
            # Prefer the stored span text; fall back to slicing the raw sentence.
            txt = sp.get('text') or row['text'][sp['start']:sp['end']]
            tot += 1
            if is_flagged(txt):
                flagged += 1
                if len(examples) < 2000:  # keep up to 2000 flagged spans for inspection
                    examples.append(txt)

print(f"{flagged}/{tot} spans flagged ({flagged/tot:.2%})")
print("sample:", examples[:20])  # show only the first 20 collected examples