import csv
import os
import re
from datetime import datetime
from glob import glob

from bs4 import BeautifulSoup

BASE_DIR = "reuters21578/"


def clean_date(date_str):
    """
    Extract a timestamp of the form "19-OCT-1987 01:51:51.69" from
    date_str and return it as an ISO 8601 string, or None if no
    timestamp is found.
    """
    pattern = r"(\d{2}-[A-Z]{3}-\d{4} \d{2}:\d{2}:\d{2}\.\d+)"
    match = re.search(pattern, date_str.strip())

    if match:
        date_str = match.group(1)
        return datetime.strptime(date_str, "%d-%b-%Y %H:%M:%S.%f").isoformat()
    return None


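# Illustration of clean_date (hypothetical inputs, not read from the corpus):
#   clean_date("19-OCT-1987 01:51:51.69")  ->  "1987-10-19T01:51:51.690000"
#   clean_date("no timestamp here")        ->  None

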
def clean_text(text):
    """
    Re-wrap a <BODY> text into paragraphs. In the corpus, a paragraph's
    first line is indented with spaces and its continuation lines are
    flush left, so an indented line starts a new paragraph and a
    flush-left line is joined onto the previous one.
    """
    lines = text.split("\n")
    cleaned_lines = []

    cleaned_lines.append(lines[0])

    for line in lines[1:]:
        # Skip blank lines.
        if not line.strip():
            continue

        if line[0] == " ":
            # Indented line: start of a new paragraph.
            cleaned_lines.append(line)
        else:
            # Flush-left line: continuation of the current paragraph.
            cleaned_lines[-1] += " " + line

    # Drop the final paragraph, which is the "Reuter" sign-off.
    return "\n\n".join(cleaned_lines[:-1])


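# Illustration of clean_text with a hypothetical, abridged body:
#   clean_text("    Exporters said\nprices rose.\n    Dealers agreed.\n Reuter")
#   ->  "    Exporters said prices rose.\n\n    Dealers agreed."

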
def parse_sgm(fname):
    """
    Parse one .sgm file and split its documents into the TRAIN and TEST
    sides of the Lewis split.
    """
    with open(fname, "r", encoding="ISO-8859-15") as f:
        contents = f.read()

    # html.parser lowercases tag names, so <REUTERS> is matched as "reuters".
    soup = BeautifulSoup(contents, "html.parser")
    rows_train = []
    rows_test = []

    for meta in soup.find_all("reuters"):
        data = parse_document(meta)

        if data["attr__lewissplit"] == "TRAIN":
            rows_train.append(data)
        elif data["attr__lewissplit"] == "TEST":
            rows_test.append(data)

    return rows_train, rows_test


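# For reference, each <REUTERS> element has roughly this shape (abridged;
# attribute and category values are illustrative):
#
#   <REUTERS TOPICS="YES" LEWISSPLIT="TRAIN" CGISPLIT="TRAINING-SET"
#            OLDID="5544" NEWID="1">
#     <DATE>26-FEB-1987 15:01:01.79</DATE>
#     <TOPICS><D>cocoa</D></TOPICS>
#     <PLACES><D>el-salvador</D></PLACES>
#     <PEOPLE></PEOPLE>
#     <ORGS></ORGS>
#     <EXCHANGES></EXCHANGES>
#     <COMPANIES></COMPANIES>
#     <TEXT>
#       <TITLE>...</TITLE>
#       <DATELINE>...</DATELINE>
#       <BODY>...</BODY>
#     </TEXT>
#   </REUTERS>

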
def parse_document(meta):
    """
    Flatten one <REUTERS> element into a dict of attributes, category
    lists, and text fields.
    """
    date = meta.find("date").text

    # Category tags hold their entries as <D> children.
    topics = [topic.text for topic in meta.find("topics").find_all("d")]
    places = [place.text for place in meta.find("places").find_all("d")]
    people = [person.text for person in meta.find("people").find_all("d")]
    orgs = [org.text for org in meta.find("orgs").find_all("d")]
    exchanges = [exchange.text for exchange in meta.find("exchanges").find_all("d")]
    companies = [company.text for company in meta.find("companies").find_all("d")]

    # <TITLE>, <DATELINE>, and <BODY> are optional, so each may be None.
    text = meta.find("text")
    text_title = text.find("title")
    text_dateline = text.find("dateline")
    text_body = text.find("body")

    return {
        "attr__topics": meta.attrs["topics"],
        "attr__lewissplit": meta.attrs["lewissplit"],
        "attr__cgisplit": meta.attrs["cgisplit"],
        "attr__oldid": int(meta.attrs["oldid"]),
        "attr__newid": int(meta.attrs["newid"]),
        "date": clean_date(date),
        "topics": topics,
        "places": places,
        "people": people,
        "orgs": orgs,
        "exchanges": exchanges,
        "companies": companies,
        "text__type": text.attrs.get("type"),
        "text__title": text_title.text if text_title else None,
        "text__dateline": text_dateline.text if text_dateline else None,
        "text__body": text_body.text if text_body else None,
        "text": clean_text(text_body.text) if text_body else None,
    }


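# Note: csv.DictWriter stringifies non-string values, so the list-valued
# fields (topics, places, ...) are written as their Python repr, e.g.
# "['cocoa']", and consumers must parse them back (e.g. ast.literal_eval).

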
def save_csv(rows, fname):
    """
    Save the processed data into a CSV file.
    """
    if not rows:
        return

    # newline="" stops the csv module from writing blank lines on Windows.
    with open(fname, "w", encoding="utf8", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=rows[0].keys())
        writer.writeheader()
        writer.writerows(rows)


def run():
    rows_train, rows_test = [], []

    # Sort the file list so the output row order is deterministic.
    for fname in sorted(glob(os.path.join(BASE_DIR, "*.sgm"))):
        train, test = parse_sgm(fname)
        rows_train.extend(train)
        rows_test.extend(test)

    save_csv(rows_train, "train.csv")
    save_csv(rows_test, "test.csv")


if __name__ == "__main__":
    run()