ShamNER / sanity_check_split.py
Updated split philosophy; relaxed the prune_overlap threshold to 0.50 for better F1; regenerated splits.
# sanity_check_split.py (overwrite previous)
from pathlib import Path
import json, re, unicodedata, collections
from datasets import load_dataset
from itertools import chain
# ---------- helpers ----------------------------------------------------------
AR_DIACRITICS_RE = re.compile(r"[\u0610-\u061A\u064B-\u065F\u06D6-\u06ED]")  # Arabic diacritics (tashkil)
AL_PREFIX_RE = re.compile(r"^ال(?=[\u0621-\u064A])")  # leading definite article "ال" before a letter
MULTISPACE_RE = re.compile(r"\s+")  # runs of whitespace
def norm(txt):
    """Normalise an Arabic string: strip diacritics, drop a leading definite
    article, NFKC-fold + lowercase, and collapse whitespace."""
    t = AR_DIACRITICS_RE.sub("", txt)
    t = AL_PREFIX_RE.sub("", t)
    t = unicodedata.normalize("NFKC", t).lower()
    return MULTISPACE_RE.sub(" ", t).strip()
def read_jsonl(p):
    """Yield one parsed JSON object per line of a UTF-8 .jsonl file."""
    with open(p, encoding="utf-8") as fh:
        for line in fh:
            yield json.loads(line)
def span_strings(row):
    """Yield the normalised surface string of every span in a row, falling
    back to the sentence slice when the span carries no "text" field."""
    sent = row["text"]
    for sp in row["spans"]:
        raw = sp.get("text") or sent[sp["start"]: sp["end"]]
        if raw:
            yield norm(raw)
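# Illustrative check of norm(); the example string is ours, not from the
# dataset. Diacritics and the leading "ال" are stripped, so the inflected
# and bare forms collapse to the same key.
assert norm("الْكِتَاب") == norm("كتاب")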
# ---------- 1. size check ----------------------------------------------------
splits = {"train": "train.jsonl",
          "validation": "validation.jsonl",
          "test": "test.jsonl"}
sizes = {k: sum(1 for _ in read_jsonl(Path(v))) for k, v in splits.items()}
print("Sentence counts:", sizes)
# ---------- 2. doc leakage ---------------------------------------------------
seen = {}
dups = []
for split, path in splits.items():
    for row in read_jsonl(Path(path)):
        key = (row["doc_name"], row["round"])  # one bundle = document + annotation round
        if key in seen and seen[key] != split:
            dups.append((key, seen[key], split))
        seen[key] = split
print("Document bundles in >1 split:", len(dups))
# ---------- 3. span novelty --------------------------------------------------
train_spans = set(chain.from_iterable(span_strings(r) for r in read_jsonl(Path("train.jsonl"))))
overlaps = collections.Counter()
for split in ["validation", "test"]:
    for row in read_jsonl(Path(f"{split}.jsonl")):
        if any(n in train_spans for n in span_strings(row)):
            overlaps[split] += 1
print("Sentences in dev/test with SEEN spans:", dict(overlaps))
# ---------- 4. HF Datasets smoke-load ---------------------------------------
ds = load_dataset("parquet",
                  data_files={"train": "train.parquet",
                              "validation": "validation.parquet",
                              "test": "test.parquet"},
                  split=None)
print("load_dataset OK:", {k: len(v) for k, v in ds.items()})