import csv
import os
import re
from datetime import datetime
from glob import glob

from bs4 import BeautifulSoup

# Directory containing the extracted Reuters-21578 *.sgm files.
BASE_DIR = "reuters21578/"


def clean_date(date_str):
    """
    Parse a Reuters DATE field, e.g. "19-OCT-1987 01:51:51.69",
    into an ISO-8601 string. Returns None if no date is found.
    """
    pattern = r"(\d{2}-[A-Z]{3}-\d{4} \d{2}:\d{2}:\d{2}\.\d+)"
    match = re.search(pattern, date_str.strip())
    if match:
        date_str = match.group(1)
        return datetime.strptime(date_str, "%d-%b-%Y %H:%M:%S.%f").isoformat()
    return None
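
# Sanity check (hedged: output is what strptime/isoformat produce on CPython;
# the ".69" fraction is padded out to microseconds):
# >>> clean_date("19-OCT-1987 01:51:51.69")
# '1987-10-19T01:51:51.690000'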


def clean_text(text):
    """
    Re-join hard-wrapped body text into paragraphs. In this corpus a line
    starting with a space opens a new paragraph; any other non-empty line
    continues the previous one.
    """
    lines = text.split("\n")
    cleaned_lines = [lines[0]]
    for line in lines[1:]:
        # Ignore empty lines
        if not line.strip():
            continue
        if line[0] == " ":
            cleaned_lines.append(line)
        else:
            cleaned_lines[-1] += " " + line
    return "\n\n".join(cleaned_lines[:-1])  # The last line is always "REUTER"


def parse_sgm(fname):
    with open(fname, "r", encoding="ISO-8859-15") as f:
        contents = f.read()
    soup = BeautifulSoup(contents, "html.parser")
    rows_train = []
    rows_test = []
    for meta in soup.find_all("reuters"):
        data = parse_document(meta)
        if data["attr__lewissplit"] == "TRAIN":
            rows_train.append(data)
        elif data["attr__lewissplit"] == "TEST":
            rows_test.append(data)
        # Documents with LEWISSPLIT="NOT-USED" are dropped.
    return rows_train, rows_test
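
# Usage sketch (hedged: assumes the standard distribution's file naming,
# reut2-000.sgm through reut2-021.sgm):
# >>> train, test = parse_sgm("reuters21578/reut2-000.sgm")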


def parse_document(meta):
    """Flatten one <REUTERS> element into a single dict."""
    date = meta.find("date").text
    # Each category tag wraps its entries in <D> elements.
    topics = [topic.text for topic in meta.find("topics").find_all("d")]
    places = [place.text for place in meta.find("places").find_all("d")]
    people = [person.text for person in meta.find("people").find_all("d")]
    orgs = [org.text for org in meta.find("orgs").find_all("d")]
    exchanges = [exchange.text for exchange in meta.find("exchanges").find_all("d")]
    companies = [company.text for company in meta.find("companies").find_all("d")]
    text = meta.find("text")
    text_title = text.find("title")
    text_dateline = text.find("dateline")
    text_body = text.find("body")
    return {
        "attr__topics": meta.attrs["topics"],
        "attr__lewissplit": meta.attrs["lewissplit"],
        "attr__cgisplit": meta.attrs["cgisplit"],
        "attr__oldid": int(meta.attrs["oldid"]),
        "attr__newid": int(meta.attrs["newid"]),
        "date": clean_date(date),
        "topics": topics,
        "places": places,
        "people": people,
        "orgs": orgs,
        "exchanges": exchanges,
        "companies": companies,
        "text__type": text.attrs.get("type"),
        "text__title": text_title.text if text_title else None,
        "text__dateline": text_dateline.text if text_dateline else None,
        "text__body": text_body.text if text_body else None,
        "text": clean_text(text_body.text) if text_body else None,
    }
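
# The resulting row is flat and CSV-friendly, e.g. (hedged, values illustrative):
# {"attr__lewissplit": "TRAIN", "attr__newid": 1, "topics": ["cocoa"],
#  "text__title": "BAHIA COCOA REVIEW", ...}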


def save_csv(rows, fname):
    """
    Save the processed data into a CSV file.
    """
    if not rows:  # nothing to write; avoids indexing an empty list below
        return
    # newline="" is required so the csv module does not emit blank rows on Windows.
    with open(fname, "w", encoding="utf8", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=rows[0].keys())
        writer.writeheader()
        writer.writerows(rows)
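
# Note (assumption about downstream use): DictWriter stringifies the list
# columns, so "topics" is written as e.g. "['cocoa']"; a consumer can recover
# the list with ast.literal_eval:
# >>> import ast
# >>> ast.literal_eval("['cocoa']")
# ['cocoa']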


def run():
    rows_train, rows_test = [], []
    # sorted() keeps the row order deterministic across runs; glob alone
    # returns files in arbitrary order.
    for fname in sorted(glob(os.path.join(BASE_DIR, "*.sgm"))):
        train, test = parse_sgm(fname)
        rows_train.extend(train)
        rows_test.extend(test)
    save_csv(rows_train, "train.csv")
    save_csv(rows_test, "test.csv")


if __name__ == "__main__":
    run()