"""Script to generate splits for benchmarking text embedding clustering.
Based on data from GermEval 2019 Shared Task on Hierarchical Tesk Classification (https://www.inf.uni-hamburg.de/en/inst/ab/lt/resources/data/germeval-2019-hmc.html)."""
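
# Usage: python extract_data.py <path to extracted "data" folder>
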
import os
import random
import sys
from collections import Counter

import jsonlines
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup

random.seed(42)

# path to "data" folder, can be retrieved from here: https://www.inf.uni-hamburg.de/en/inst/ab/lt/resources/data/germeval-2019-hmc/germeval2019t1-public-data-final.zip
DATA_PATH = sys.argv[1]
INCLUDE_BODY = True  # True: combine title and article body (p2p); False: only the title (s2s)
NUM_SPLITS = 10
SPLIT_RANGE = np.array([0.1, 1.0])  # min/max fraction of the data sampled per split


def get_samples(soup, include_body=INCLUDE_BODY):
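    """Parse all <book> elements and return a DataFrame with columns
    ["sentences", "d0_label", "d1_label"].

    A book is kept only if it has a non-empty title and body, exactly one
    top-level topic (d=0), and one or two second-level topics (d=1).
    """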
    d1_counter = Counter([d1.string for d1 in soup.find_all("topic", {"d": 1})])
    samples = []
    for book in soup.find_all("book"):
        if book.title.string is None or book.body.string is None:
            continue
        d0_topics = list(set([d.string for d in book.find_all("topic", {"d": 0})]))
        d1_topics = list(set([d.string for d in book.find_all("topic", {"d": 1})]))
        if len(d0_topics) != 1:
            continue
        if len(d1_topics) < 1 or len(d1_topics) > 2:
            continue
        d0_label = d0_topics[0]
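        # if a book has two d1 topics, keep the less frequent one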
        d1_label = sorted(d1_topics, key=lambda x: d1_counter[x])[0]
        text = book.title.string
        if include_body:
            text += "\n" + book.body.string
        samples.append([text, d0_label, d1_label])
    return pd.DataFrame(samples, columns=["sentences", "d0_label", "d1_label"])


def get_split(frame, label="d0_label", split_range=SPLIT_RANGE):
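    """Sample one clustering split with the given label column.

    The number of sampled rows is drawn uniformly between split_range[0]
    and split_range[1], expressed as fractions of the full frame.
    """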
    samples = random.randint(*(split_range * len(frame)).astype(int))
    return (
        frame.sample(samples)[["sentences", label]]
        .rename(columns={label: "labels"})[["sentences", "labels"]]
        .to_dict("list")
    )


def write_sets(name, sets):
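    """Write the list of splits to `name`, one JSON object per line."""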
    with jsonlines.open(name, "w") as f_out:
        f_out.write_all(sets)

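# read all three GermEval splits and parse them jointly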
train = open(os.path.join(DATA_PATH, "blurbs_train.txt"), encoding="utf-8").read()
dev = open(os.path.join(DATA_PATH, "blurbs_dev.txt"), encoding="utf-8").read()
test = open(os.path.join(DATA_PATH, "blurbs_test.txt"), encoding="utf-8").read()
soup = BeautifulSoup(train + "\n\n" + dev + "\n\n" + test, "html.parser")
samples = get_samples(soup)
sets = []
# coarse clustering
for _ in range(NUM_SPLITS):
    sets.append(get_split(samples))

# fine-grained clustering within each top-level category (d0)
for d0 in samples["d0_label"].unique():
    sets.append(
        (samples[samples.d0_label == d0])
        .rename(columns={"d1_label": "labels"})[["sentences", "labels"]]
        .to_dict("list")
    )

# fine-grained clustering on random subsets
for _ in range(NUM_SPLITS):
    sets.append(get_split(samples, label="d1_label"))

write_sets("test.jsonl", sets)