Upload 6 files

- describe.py +123 -0
- downloads.sh +25 -0
- extract.py +46 -0
- raw.zip +3 -0
- requirements.txt +6 -0
- utils.py +954 -0
describe.py
ADDED
@@ -0,0 +1,123 @@
import logging
import pandas as pd
from pathlib import Path
from utils import DataLoader, SCAPlotter, TextProcessor, TopicModeling, DATA_ANALYSIS_PATH

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

logging.info('Initialising the data loader, plotter, text processor and topic modeler')
dl = DataLoader()
plotter = SCAPlotter()
text_processor = TextProcessor(dl)
topic_modeler = TopicModeling()

# plot case distribution
logging.info('Plotting the case distribution on all data')
plotter.plot_case_distribution(dl.load_data('all'))

# get the data with summaries
logging.info('Loading the data with summaries only for further analysis.')
df = dl.load_data('with_summaries')

# prepare the text
logging.info('Preparing the text: dropping duplicates, removing null values, etc.')
df = text_processor.prepare_text(df, target_columns=['input', 'output'])

# get all stats
logging.info('Getting all stats for the text and summary')
stats_file = DATA_ANALYSIS_PATH / 'data_with_stats.csv'
if stats_file.exists():
    stats = pd.read_csv(stats_file)
    df = pd.concat([df, stats], axis=1)

stats = df.copy()
df = text_processor.get_all_stats(df)

if df.equals(stats):
    logging.info('Data and stats are the same. All stats are calculated up to date.')
else:
    stats = df.drop(columns=['text', 'summary'])
    stats.to_csv(stats_file, index=False)
    logging.info(f'Data with stats saved to {stats_file}')
del stats

logging.info('Plotting the summary vs judgment length')
plotter.plot_summary_vs_judgment_length(df)

logging.info('Plotting the summary and judgment stats')
plotter.plot_length_distribution(df, columns=['text_sent_count', 'text_word_count', 'text_char_count'], file_name='judgment_stats')
plotter.plot_length_distribution(df, columns=['text_sent_density', 'text_word_density'], file_name='judgment_density_stats')

plotter.plot_length_distribution(df, columns=['sum_sent_count', 'sum_word_count', 'sum_char_count'], file_name='summary_stats')
plotter.plot_length_distribution(df, columns=['sum_sent_density', 'sum_word_density'], file_name='summary_density_stats')

# get the pos tags
logging.info('Getting the POS tags for the text and summary')
columns = ['ADJ', 'ADP', 'ADV', 'CONJ', 'DET', 'NOUN', 'NUM', 'PRT', 'PRON', 'VERB', '.', 'X']

# plot the pos tags
logging.info('Plotting the POS tags for the text and summary')
postags = ['ADJ', 'ADP', 'ADV', 'CONJ', 'DET', 'NOUN']

df_text = df[[f'text_{p}' for p in postags]]
df_text.columns = [p for p in postags]
plotter.plot_length_distribution(df_text, columns=postags, plot_boxplots=False, file_name='judgment_pos_tags')

df_summary = df[[f'sum_{p}' for p in postags]]
df_summary.columns = [p for p in postags]
plotter.plot_length_distribution(df_summary, columns=postags, plot_boxplots=False, file_name='summary_pos_tags')

del df_text, df_summary

# print some unknown words
logging.info('Printing some unknown words')
print('Unknown words: ', df['text_unknown_words'].values[5])

# plot unknown words stats in text and summary
logging.info('Plotting the unknown words stats')
unknown_words_columns = ['text_unknown_count', 'sum_unknown_count']
plotter.plot_length_distribution(df, columns=unknown_words_columns, file_name='unknown_words_stats')

# plot puncs and stopwords
logging.info('Plotting the punctuation and stopwords stats')
target_columns = ['text_stopw_count', 'sum_stopw_count', 'text_punc_count', 'sum_punc_count']
plotter.plot_length_distribution(df, columns=target_columns, file_name='punc_stopw_and_punc_stats')

# clean the data for topic modeling
logging.info('Cleaning the text and summary for topic modeling')
cleaned_text, cleaned_summary = text_processor.remove_stopwords(df, target_columns=['text', 'summary'])

plotter.plot_wordcloud(cleaned_text, file_name='judgment_wordcloud')
plotter.plot_wordcloud(cleaned_summary, file_name='summary_wordcloud')

# Visualise the 20 most common words in the judgment
logging.info('Visualising the 20 most common words in the judgment')
tf, tf_feature_names = text_processor.get_vectorizer_features(cleaned_text)
plotter.plot_most_common_words(tf, tf_feature_names, file_name='judgment_most_common_words')

# # perform lda analysis, this takes a lot of time
# logging.info('Performing LDA analysis on the judgment')
# topic_modeler.perform_lda_analysis(cleaned_text, tf_vectorizer, file_name='judgment_lda_analysis')

# Visualise the 20 most common words in the summary
logging.info('Visualising the 20 most common words in the summary')
tf, tf_feature_names = text_processor.get_vectorizer_features(cleaned_summary)
plotter.plot_most_common_words(tf, tf_feature_names, file_name='summary_most_common_words')

# # perform lda analysis, this takes a lot of time
# logging.info('Performing LDA analysis on the summary')
# topic_modeler.perform_lda_analysis(cleaned_summary, tf_vectorizer, file_name='summary_lda_analysis')

# perform bertopic analysis
logging.info('Performing BERTopic analysis on the judgment and summary')
topic_modeler.perform_bertopic_analysis(cleaned_text=cleaned_text, cleaned_summary=cleaned_summary, output_path='bertopic/')
judgment_model, _ = topic_modeler.perform_bertopic_analysis(cleaned_text=cleaned_text, save_topic_info=False, output_path='bertopic/judgments/')
summary_model, _ = topic_modeler.perform_bertopic_analysis(cleaned_summary=cleaned_summary, save_topic_info=False, output_path='bertopic/summaries/')

# calculate topic overlap
logging.info('Calculating the topic overlap between the judgment and summary')
overlap_matrix = topic_modeler.calculate_overlap_matrix(judgment_model, summary_model)

# plot the overlap matrix
logging.info('Plotting the overlap matrix')
plotter.plot_overlap_heatmap(overlap_matrix, file_name='overlap_matrix')
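Note: the commented-out LDA calls above reference a tf_vectorizer that this script never creates (get_vectorizer_features returns the term matrix and feature names, not the vectorizer itself). If those lines are re-enabled, a vectorizer has to be built first; a minimal sketch, assuming the same CountVectorizer settings that TextProcessor.get_vectorizer_features uses by default:

# Minimal sketch (assumption): construct the vectorizer the commented-out LDA calls expect,
# mirroring the defaults of TextProcessor.get_vectorizer_features in utils.py.
from sklearn.feature_extraction.text import CountVectorizer

tf_vectorizer = CountVectorizer(max_df=0.9, min_df=25, max_features=5000)
# perform_lda_analysis fits the vectorizer internally and saves topics/plots under FIGURES_DIR.
lda_model = topic_modeler.perform_lda_analysis(cleaned_text, tf_vectorizer, file_name='judgment_lda_analysis')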
downloads.sh
ADDED
@@ -0,0 +1,25 @@
#!/bin/bash

DATA_DIR=../data
URL=https://nlp.stanford.edu/data/glove.6B.zip
ZIP_FILE=$DATA_DIR/glove.6B.zip
UNZIPPED_FILE=$DATA_DIR/glove.6B.100d.txt

mkdir -p $DATA_DIR

if [ -f $UNZIPPED_FILE ]; then
    echo "Files already unzipped in $DATA_DIR. Skipping download and extraction."
else
    if [ ! -f $ZIP_FILE ]; then
        echo "Downloading $URL..."
        wget -N $URL -O $ZIP_FILE
    else
        echo "Zip file already exists at $ZIP_FILE. Skipping download."
    fi

    echo "Unzipping $ZIP_FILE to $DATA_DIR..."
    unzip -o $ZIP_FILE -d $DATA_DIR

    echo "Removing zip file $ZIP_FILE..."
    rm $ZIP_FILE
fi
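Note: the unzipped glove.6B.100d.txt ends up in ../data, which is exactly where GLOVE_EMBEDDINGS_FILE in utils.py points. A small pre-flight check before running describe.py might look like this (a sketch, not part of the upload):

# Pre-flight check sketch: GloveVectorizer in utils.py reads this exact file.
from pathlib import Path

glove = Path('../data/glove.6B.100d.txt')
if not glove.exists():
    raise SystemExit('GloVe embeddings missing - run downloads.sh first.')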
extract.py
ADDED
@@ -0,0 +1,46 @@
import os
import pandas as pd
from pathlib import Path
from tqdm import tqdm
from utils import FileManager, PDFExtractor

FileManager.unzip_data('../data/raw.zip', '../data')

# Each raw sub-corpus is walked separately; judgments without media summaries
# simply have one column fewer.
directories = {
    "with_summaries": {
        "path": Path('../data/raw/with_summaries'),
        "columns": ['id', 'type', 'year', 'main_judgement', 'media_summary'],
        "has_summary": True
    },
    "without_summaries": {
        "path": Path('../data/raw/without_summaries'),
        "columns": ['id', 'type', 'year', 'main_judgement'],
        "has_summary": False
    }
}

output_dir = Path('../data/processed')
output_dir.mkdir(parents=True, exist_ok=True)

for dir_key, dir_info in directories.items():
    data = []
    pdir = dir_info["path"]

    for root, dirs, files in tqdm(os.walk(pdir)):
        if not files:
            continue
        try:
            # The case id, type and year are encoded in the directory structure.
            details = Path(root).parts
            record = [
                details[-1].split('-')[0],
                details[3],
                details[4].split('-')[-1]
            ]
            record.append(PDFExtractor.extract_text(f'{root}/main-judgement.pdf'))
            if dir_info["has_summary"]:
                record.append(PDFExtractor.extract_text(f'{root}/media-summary.pdf'))

            data.append(record)
        except Exception as e:
            print(f"Skipping {root} due to error: {e}")
            continue

    df = pd.DataFrame(data, columns=dir_info["columns"])
    df.to_csv(output_dir / f'judgments_{dir_key}.tsv', sep='\t', index=False)
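Note: this script writes ../data/processed/judgments_with_summaries.tsv and judgments_without_summaries.tsv, while DataLoader in utils.py reads with_summaries/train.tsv, dev.tsv and test.tsv plus without_summaries/all_data.tsv. The splitting step is presumably performed elsewhere in the project; the sketch below only illustrates the layout DataLoader expects, and the split ratios are an assumption:

# Sketch of a possible follow-up step (assumption: the real splits are produced elsewhere).
import pandas as pd
from pathlib import Path
from sklearn.model_selection import train_test_split

processed = Path('../data/processed')

# without_summaries -> a single all_data.tsv
df_all = pd.read_csv(processed / 'judgments_without_summaries.tsv', sep='\t')
(processed / 'without_summaries').mkdir(parents=True, exist_ok=True)
df_all.to_csv(processed / 'without_summaries' / 'all_data.tsv', sep='\t', index=False)

# with_summaries -> train/dev/test splits (80/10/10 here, purely illustrative)
df = pd.read_csv(processed / 'judgments_with_summaries.tsv', sep='\t')
train, rest = train_test_split(df, test_size=0.2, random_state=0)
dev, test = train_test_split(rest, test_size=0.5, random_state=0)
out = processed / 'with_summaries'
out.mkdir(parents=True, exist_ok=True)
for name, split in [('train', train), ('dev', dev), ('test', test)]:
    split.to_csv(out / f'{name}.tsv', sep='\t', index=False)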
raw.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:eee3d7ba9f95645da5d59306d6e96f17f1f835b09472765e302145c891630138
size 1155703723
requirements.txt
ADDED
@@ -0,0 +1,6 @@
nltk
gensim
PyMuPDF
bertopic
pyLDAvis
wordcloud
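Note: describe.py, extract.py and utils.py also import pandas, numpy, scipy, scikit-learn, seaborn, matplotlib and tqdm directly. Most of these arrive transitively via bertopic and nltk, but pinning them explicitly would make the environment more reproducible; a possible extended list (an assumption, not part of this upload):

nltk
gensim
PyMuPDF
bertopic
pyLDAvis
wordcloud
pandas
numpy
scipy
scikit-learn
seaborn
matplotlib
tqdm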
utils.py
ADDED
@@ -0,0 +1,954 @@
import fitz
import random
import logging
import zipfile
import re, string
import unicodedata
import numpy as np
import pandas as pd
import seaborn as sns
from tqdm import tqdm
from scipy import stats
from pathlib import Path
import matplotlib.pyplot as plt
from collections import Counter

import nltk
from nltk.tokenize import word_tokenize, sent_tokenize

import warnings
warnings.simplefilter("ignore", DeprecationWarning)

import pickle
import pyLDAvis
import pyLDAvis.lda_model as lda

from bertopic import BERTopic
from wordcloud import WordCloud
from sklearn.feature_extraction.text import CountVectorizer

from sklearn.decomposition import LatentDirichletAllocation as LDA

nltk.download('stopwords')
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
nltk.download('universal_tagset')

tqdm.pandas()
plt.rcParams["font.family"] = "Tahoma"
sns.set_theme(style="whitegrid", font="Tahoma")
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

HOME_DIR = Path("..")

EXTRACTED_DATA_DIR = HOME_DIR / "data"
RAW_DATA_DIR = EXTRACTED_DATA_DIR / "raw"
PROCESSED_DATA_DIR = EXTRACTED_DATA_DIR / "processed"
GLOVE_EMBEDDINGS_FILE = EXTRACTED_DATA_DIR / "glove.6B.100d.txt"

DATA_ANALYSIS_PATH = HOME_DIR / "data_analysis"
FIGURES_DIR = DATA_ANALYSIS_PATH / "plots"

FIGURES_DIR.mkdir(parents=True, exist_ok=True)
POST_TAGS = ['ADJ', 'ADP', 'ADV', 'CONJ', 'DET', 'NOUN', 'NUM', 'PRT', 'PRON', 'VERB', '.', 'X']


class FileManager:
    """Handles file operations, including zipping and unzipping archives and saving text to files."""

    @staticmethod
    def unzip_data(zip_path, extract_to):
        """
        Unzips a ZIP file to a specified directory.

        Parameters:
        - zip_path (str or Path): Path to the ZIP file.
        - extract_to (str or Path): Target directory to extract files to.

        Raises:
        - FileNotFoundError: If the ZIP file does not exist.
        - RuntimeError: If the file is not a valid ZIP archive.
        """
        zip_file = Path(zip_path)
        extract_to = Path(extract_to)
        if not zip_file.exists():
            raise FileNotFoundError(f"ZIP file not found: {zip_file}")

        target_dir = extract_to / zip_file.stem
        if target_dir.exists():
            logging.info(f"Directory already exists: {target_dir}")
            return

        try:
            with zipfile.ZipFile(zip_file, 'r') as zip_ref:
                zip_ref.extractall(target_dir)
            logging.info(f"Extracted {zip_file} to {target_dir}")
        except zipfile.BadZipFile as e:
            raise RuntimeError(f"Invalid ZIP file: {zip_file}") from e

    @staticmethod
    def save_text(text, file_path):
        """
        Saves text to a file.

        Parameters:
        - text (str): Text to save.
        - file_path (str or Path): Target file path.

        Raises:
        - IOError: If writing to the file fails.
        """
        file_path = Path(file_path)
        file_path.parent.mkdir(parents=True, exist_ok=True)
        try:
            with open(file_path, 'w', encoding='utf-8') as file:
                file.write(text)
            logging.info(f"Saved text to {file_path}")
        except IOError as e:
            logging.error(f"Failed to save text to {file_path}: {e}")
            raise


class PDFExtractor:
    """Extracts and cleans text from PDF documents."""

    @staticmethod
    def extract_text(pdf_path):
        """
        Extracts and processes text from a PDF file.

        Parameters:
        - pdf_path (str or Path): Path to the PDF file.

        Returns:
        - str: Cleaned and processed text.

        Raises:
        - FileNotFoundError: If the PDF file does not exist.
        - RuntimeError: If the PDF cannot be opened.
        """
        pdf_path = Path(pdf_path)

        if not pdf_path.exists():
            logging.error(f"PDF file not found: {pdf_path}")
            raise FileNotFoundError(f"PDF file not found: {pdf_path}")

        try:
            doc = fitz.open(pdf_path)
            # Flatten the cleaned sentences of every page into a single list so that
            # paragraph markers can be merged into the preceding sentence.
            text_lines = [
                line
                for page in doc
                for line in PDFExtractor._clean_line(page.get_text("text"))
            ]
            doc.close()
            return '\n'.join(PDFExtractor._combine_paragraphs(text_lines))
        except Exception as e:
            logging.error(f"Error extracting text from {pdf_path}: {e}")
            raise RuntimeError(f"Error extracting text from {pdf_path}: {e}")

    @staticmethod
    def _clean_line(text):
        """
        Cleans a block of text by removing unwanted content.

        Parameters:
        - text (str): The text to clean.

        Returns:
        - list: List of cleaned sentences.
        """
        paragraphs = [line.strip() for line in sent_tokenize(text)]
        return [p for p in paragraphs if not PDFExtractor._is_numeric_string(p)]

    @staticmethod
    def _combine_paragraphs(lines):
        """
        Combines lines into paragraphs based on paragraph markers.

        Parameters:
        - lines (list of str): List of text lines.

        Returns:
        - list: Combined paragraphs.
        """
        combined = []
        for line in lines:
            if PDFExtractor._is_paragraph_marker(line):
                if combined:
                    combined[-1] += f' {line}'
                else:
                    combined.append(line)
            else:
                combined.append(line)
        return combined

    @staticmethod
    def _is_numeric_string(string):
        """
        Checks if a string is numeric and less than 1000.

        Parameters:
        - string (str): The string to check.

        Returns:
        - bool: True if numeric and less than 1000, otherwise False.
        """
        return string.isdigit() and int(string) < 1000

    @staticmethod
    def _is_paragraph_marker(line):
        """
        Determines if a line is a paragraph marker.

        Parameters:
        - line (str): The line to check.

        Returns:
        - bool: True if it matches paragraph marker criteria, otherwise False.
        """
        return line.startswith("[") and line.endswith("]") and line[1:-1].isdigit()


class DataLoader:
    """Loads and processes TSV data files into DataFrames."""

    def __init__(self, base_dir=PROCESSED_DATA_DIR, file_extension="tsv"):
        """
        Initialize the DataLoader.

        Parameters:
        - base_dir (Path): Base directory containing the processed data.
        - file_extension (str): Extension of data files to read (default: 'tsv').
        """
        self.base_dir = Path(base_dir)
        self.file_extension = file_extension

    def load_data(self, data_type, column_name=None):
        """
        Load data based on the specified type.

        Parameters:
        - data_type (str): One of ['with_summaries', 'without_summaries', 'all'].
        - column_name (str, optional): If given, return only this column.

        Returns:
        - pd.DataFrame: Concatenated DataFrame with a 'split' column.
        """
        paths = {
            'with_summaries': [self.base_dir / "with_summaries" / f"{split}.{self.file_extension}" for split in ['train', 'dev', 'test']],
            'without_summaries': [self.base_dir / "without_summaries" / f"all_data.{self.file_extension}"],
            'all': [self.base_dir / "without_summaries" / f"all_data.{self.file_extension}"] +
                   [self.base_dir / "with_summaries" / f"{split}.{self.file_extension}" for split in ['train', 'dev', 'test']]
        }

        if data_type not in paths:
            raise ValueError(f"Invalid data type specified: {data_type}. Expected one of {list(paths.keys())}.")

        valid_paths = [path for path in paths[data_type] if path.exists()]
        missing_paths = [path for path in paths[data_type] if not path.exists()]

        if missing_paths:
            logging.warning(f"Missing files: {missing_paths}")

        if not valid_paths:
            raise FileNotFoundError("No valid data files found to load.")

        if column_name:
            return self._read_files(valid_paths)[column_name]

        return self._read_files(valid_paths)

    @staticmethod
    def _read_files(paths):
        """
        Read and concatenate data files into a single DataFrame.

        Parameters:
        - paths (list of Path): Paths to the files to read.

        Returns:
        - pd.DataFrame: Combined DataFrame with a 'split' column.
        """
        df_list = []
        for path in paths:
            logging.info(f"Loading file: {path}")
            try:
                df = pd.read_csv(path, sep='\t')
                df['split'] = path.stem
                df_list.append(df)
            except Exception as e:
                logging.error(f"Failed to read {path}: {e}")

        return pd.concat(df_list, ignore_index=True) if df_list else pd.DataFrame()


class GloveVectorizer:
    """
    Maps words to GloVe embeddings and computes sentence embeddings
    by averaging word vectors.
    """

    def __init__(self, embedding_file):
        """
        Initializes the vectorizer with GloVe embeddings.

        Args:
            embedding_file (str): Path to the GloVe embedding file.
        """
        self.word2vec = {}
        self.embedding = []
        self.idx2word = []

        logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(message)s")

        try:
            logging.info("Loading word vectors...")
            with open(embedding_file, encoding='utf-8') as f:
                for line in f:
                    values = line.split()
                    word = values[0]
                    vec = np.asarray(values[1:], dtype='float32')
                    self.word2vec[word] = vec
                    self.embedding.append(vec)
                    self.idx2word.append(word)

            self.embedding = np.array(self.embedding)
            self.word2idx = {word: idx for idx, word in enumerate(self.idx2word)}
            self.V, self.D = self.embedding.shape
            logging.info(f"Found {len(self.word2vec)} word vectors.")
        except FileNotFoundError:
            logging.error(f"Embedding file '{embedding_file}' not found.")
            raise FileNotFoundError(f"Embedding file '{embedding_file}' not found.")
        except Exception as e:
            logging.error(f"Error loading embeddings: {e}")
            raise RuntimeError(f"Error loading embeddings: {e}")

    def fit(self, data):
        """Placeholder for potential future implementation."""
        pass

    def get_vocabulary(self):
        """
        Returns the vocabulary of the embeddings.

        Returns:
            list: A list of all words in the GloVe vocabulary.
        """
        return list(self.word2vec.keys())

    def transform(self, data, return_unknowns=False):
        """
        Transforms a list of sentences into mean GloVe embeddings.

        Args:
            data (list of str): Sentences to transform.
            return_unknowns (bool): If True, also return unknown words.

        Returns:
            np.ndarray: Mean GloVe embeddings for each sentence.
            list: (Optional) List of unknown words for each sentence.
        """
        X = np.zeros((len(data), self.D))
        unknown_words = []
        emptycount = 0

        for n, sentence in enumerate(data):
            tokens = sentence.lower().split()
            vecs = []
            unknowns = []

            for word in tokens:
                if word in self.word2vec:
                    vecs.append(self.word2vec[word])
                else:
                    unknowns.append(word)

            if vecs:
                vecs = np.array(vecs)
                X[n] = vecs.mean(axis=0)
            else:
                emptycount += 1

            if return_unknowns:
                unknown_words.append(unknowns)

        if emptycount > 0:
            print(f"Warning: {emptycount} sentences had no known words.")

        return (X, unknown_words) if return_unknowns else X

    def fit_transform(self, data, return_unknowns=False):
        """
        Fits and transforms the data.

        Args:
            data (list of str): Sentences to transform.
            return_unknowns (bool): If True, also return unknown words.

        Returns:
            np.ndarray: Mean GloVe embeddings for each sentence.
            list: (Optional) List of unknown words for each sentence.
        """
        self.fit(data)
        return self.transform(data, return_unknowns)


class TextProcessor:
    """Processes text data for analysis and visualization."""

    def __init__(self, data_loader):
        self.data_loader = data_loader

    @staticmethod
    def tokenize_stats(df, col_name, tokenize_type):
        tokenizer = sent_tokenize if tokenize_type == 'sent' else word_tokenize
        stats = df[col_name].dropna().apply(lambda x: len(tokenizer(x)))
        return stats

    @staticmethod
    def get_punctuation():
        return string.punctuation

    @staticmethod
    def get_stopwords(language='english'):
        return set(nltk.corpus.stopwords.words(language))

    @staticmethod
    def unicode_to_ascii(s):
        return ''.join(c for c in unicodedata.normalize('NFD', s)
                       if unicodedata.category(c) != 'Mn')

    @staticmethod
    def count_stopwords(text, stopwords):
        word_tokens = word_tokenize(text)
        stopwords_x = [w for w in word_tokens if w in stopwords]
        return len(stopwords_x)

    @staticmethod
    def replace_punctuation(text, punctuation):
        table = str.maketrans(punctuation, ' ' * len(punctuation))
        return text.translate(table)

    @staticmethod
    def get_unknown_words(text, vocab):
        tokens = word_tokenize(text)
        unknown = [t for t in tokens if t not in vocab.word2vec]
        return unknown

    @staticmethod
    def get_pos_tags(sentences, columns, data_type, tagset='universal'):
        '''Extract part-of-speech tag counts for a collection of sentences.

        Input:
        - sentences: iterable of str, sentences to tag
        - columns: list of str, POS tags to keep as output columns
        - data_type: str, prefix for the output columns ('text' or 'sum')
        - tagset: str, NLTK tagset to use
        '''
        tags = []
        columns = [f'{data_type}_{c}' for c in columns]
        for sent in tqdm(sentences):
            pos_tags = Counter([j for _, j in nltk.pos_tag(word_tokenize(sent), tagset=tagset)])
            pos_tags = {f'{data_type}_{k}': v for k, v in dict(pos_tags).items()}
            tags.append(pos_tags)

        return pd.DataFrame(tags, columns=columns).fillna(0)

    def remove_stopwords(self, df, target_columns=None):
        '''Apply basic cleaning to the given columns for a word-level analysis.

        Input:
        - df: DataFrame containing the columns to clean
        - target_columns: list of column names to clean
        Output:
        - list of cleaned texts, one list per target column
        '''
        def clean_text(text, stopwords):
            text = text.lower()
            pattern = r'[^a-zA-Z\s]'
            text = re.sub(pattern, '', text)

            tokens = nltk.word_tokenize(text)
            tokens = [token.strip() for token in tokens]
            text = ' '.join([token for token in tokens if token not in stopwords])
            return text

        if target_columns:
            logging.info(f"Removing stopwords for columns: {target_columns}")
            stopwords = self.get_stopwords()
            cleaned_text = []
            for col in target_columns:
                cleaned_text.append(df[col].progress_apply(lambda x: clean_text(x, stopwords)).tolist())
            return cleaned_text

    def prepare_text(self, df, target_columns=None, drop_duplicates=True, drop_na=True):
        if target_columns and len(target_columns) == 2:
            logging.info(f"Preparing text data for columns: {target_columns}")
            try:
                df = df[target_columns]
            except KeyError as e:
                logging.error(f"Invalid columns specified: {e}")
                raise ValueError(f"Invalid columns specified: {e}")
            if drop_duplicates:
                df.drop_duplicates(subset=target_columns[0], inplace=True)
                logging.info(f"Dropped duplicates, new shape: {df.shape}")
            if drop_na:
                df.dropna(inplace=True)
                logging.info(f"Dropped NA values, new shape: {df.shape}")
            df.reset_index(drop=True, inplace=True)
            df.columns = ['text', 'summary']
            logging.info("Renamed columns to 'text' and 'summary'")

            logging.info("Cleaning unicode characters and extra spaces...")
            df['text'] = df['text'].apply(lambda x: self.unicode_to_ascii(x.strip()))
            df['summary'] = df['summary'].apply(lambda x: self.unicode_to_ascii(x.strip()))

            logging.info(f"Data prepared, new shape: {df.shape}")

            return df
        else:
            logging.error("Invalid columns or number of target columns specified.")
            raise ValueError('No target columns specified, or invalid number of columns.')

    def get_vectorizer_features(self, texts, max_df=0.9, min_df=25, max_features=5000):
        tf_vectorizer = CountVectorizer(max_df=max_df, min_df=min_df, max_features=max_features)
        tf = tf_vectorizer.fit_transform(texts)
        tf_feature_names = tf_vectorizer.get_feature_names_out()
        return tf, tf_feature_names

    def get_all_stats(self, df):
        """
        Generate and add statistical metrics for text and summary columns in a DataFrame.

        Parameters:
            df (pd.DataFrame): Input DataFrame containing 'text' and 'summary' columns.

        Returns:
            pd.DataFrame: DataFrame with added statistical columns.
        """
        punc = self.get_punctuation()
        stopwords = self.get_stopwords()
        vocab = GloveVectorizer(GLOVE_EMBEDDINGS_FILE)

        def add_stat_column(column_name, compute_func, *args, **kwargs):
            if column_name not in df.columns:
                logging.info(f"Calculating {column_name}...")
                df[column_name] = compute_func(*args, **kwargs)
            else:
                logging.info(f"{column_name} already present in stats, skipping...")

        logging.info("Calculating text statistics (sentences, tokens, characters, etc.)...")
        add_stat_column('text_sent_count', self.tokenize_stats, df, 'text', 'sent')
        add_stat_column('text_word_count', self.tokenize_stats, df, 'text', 'word')
        add_stat_column('text_char_count', lambda x: x['text'].progress_apply(lambda t: len(t.replace(" ", ""))), df)
        add_stat_column('text_sent_density', lambda x: x['text_sent_count'] / (x['text_word_count'] + 1), df)
        add_stat_column('text_word_density', lambda x: x['text_word_count'] / (x['text_char_count'] + 1), df)
        add_stat_column('text_punc_count', lambda x: x['text'].progress_apply(lambda t: sum(1 for char in t if char in punc)), df)
        add_stat_column('text_stopw_count', lambda x: x['text'].progress_apply(lambda t: self.count_stopwords(t, stopwords)), df)
        add_stat_column('text_unknown_words', lambda x: x['text'].progress_apply(lambda t: self.get_unknown_words(self.replace_punctuation(t.lower(), string.punctuation), vocab)), df)
        add_stat_column('text_unknown_count', lambda x: x['text_unknown_words'].progress_apply(lambda t: len(t) if isinstance(t, list) else 0), df)

        logging.info("Calculating summary statistics (sentences, tokens, characters, etc.)...")
        add_stat_column('sum_sent_count', self.tokenize_stats, df, 'summary', 'sent')
        add_stat_column('sum_word_count', self.tokenize_stats, df, 'summary', 'word')
        add_stat_column('sum_char_count', lambda x: x['summary'].progress_apply(lambda t: len(t.replace(" ", ""))), df)
        add_stat_column('sum_sent_density', lambda x: x['sum_sent_count'] / (x['sum_word_count'] + 1), df)
        add_stat_column('sum_word_density', lambda x: x['sum_word_count'] / (x['sum_char_count'] + 1), df)
        add_stat_column('sum_punc_count', lambda x: x['summary'].progress_apply(lambda t: sum(1 for char in t if char in punc)), df)
        add_stat_column('sum_stopw_count', lambda x: x['summary'].progress_apply(lambda t: self.count_stopwords(t, stopwords)), df)
        add_stat_column('sum_unknown_words', lambda x: x['summary'].progress_apply(lambda t: self.get_unknown_words(self.replace_punctuation(t.lower(), string.punctuation), vocab)), df)
        add_stat_column('sum_unknown_count', lambda x: x['sum_unknown_words'].progress_apply(lambda t: len(t) if isinstance(t, list) else 0), df)

        logging.info("Adding POS tags for text and summary...")
        text_columns = [f'text_{p}' for p in POST_TAGS]
        if not all(col in df.columns for col in text_columns):
            df = pd.concat([df, self.get_pos_tags(df['text'], POST_TAGS, 'text')], axis=1)
        else:
            logging.info("Text POS tags already present in stats, skipping...")
        sum_columns = [f'sum_{p}' for p in POST_TAGS]
        if not all(col in df.columns for col in sum_columns):
            df = pd.concat([df, self.get_pos_tags(df['summary'], POST_TAGS, 'sum')], axis=1)
        else:
            logging.info("Summary POS tags already present in stats, skipping...")

        logging.info("All statistics have been calculated successfully.")
        return df


class SCAPlotter:
    """Generates plots for data visualization."""

    def __init__(self):
        self.labels_dict = {
            'sum_word_count': 'Word Count of Summaries', 'text_word_count': 'Word Count of Judgments',
            'sum_char_count': 'Character Count of Summaries', 'text_char_count': 'Character Count of Judgments',
            'sum_word_density': 'Word Density of Summaries', 'text_word_density': 'Word Density of Judgments',
            'sum_punc_count': 'Punctuation Count of Summaries', 'text_punc_count': 'Punctuation Count of Judgments',
            'text_sent_count': 'Sentence Count of Judgments', 'sum_sent_count': 'Sentence Count of Summaries',
            'text_sent_density': 'Sentence Density of Judgments', 'sum_sent_density': 'Sentence Density of Summaries',
            'text_stopw_count': 'Stopwords Count of Judgments', 'sum_stopw_count': 'Stopwords Count of Summaries',
            'ADJ': 'adjective', 'ADP': 'adposition', 'ADV': 'adverb', 'CONJ': 'conjunction',
            'DET': 'determiner', 'NOUN': 'noun', 'text_unknown_count': 'Unknown words in Judgments',
            'sum_unknown_count': 'Unknown words in Summaries'
        }

        logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(message)s")

    def plot_case_distribution(self, df):
        plt.figure(figsize=(7.5, 6))
        sns.countplot(data=df, x='type', hue='type', palette='muted', width=0.5)
        plt.ylabel('Number of Cases')
        plt.xlabel('Case Type')
        plt.xticks(rotation=0)
        plt.savefig(FIGURES_DIR / 'number_of_cases_by_type.png')
        plt.close()

    def plot_summary_vs_judgment_length(self, df):
        slope, intercept, _, _, _ = stats.linregress(df['text_word_count'], df['sum_word_count'])
        plt.figure(figsize=(7.5, 6))
        sns.scatterplot(x='text_word_count', y='sum_word_count', data=df, s=10, label='Data', color="dodgerblue")

        plt.xlabel('Judgment Length')
        plt.ylabel('Summary Length')
        plt.plot(df['text_word_count'], intercept + slope * df['text_word_count'], 'b', label=f'Best Fit: y = {slope:.2f}x + {intercept:.2f}')
        self._add_capacity_shading(df['text_word_count'], df['sum_word_count'])
        plt.legend()
        plt.savefig(FIGURES_DIR / 'data_summary_lengths.png')

        plt.close()

    def plot_length_distribution(self, df, columns, plot_histogram=True, plot_boxplots=True, file_name='stats'):
        if plot_histogram or plot_boxplots:
            if plot_histogram:
                self._plot_histograms(
                    df,
                    np.array([columns]),
                    self.labels_dict,
                    show_kde=False,
                    output_path=FIGURES_DIR / f'{file_name}_histograms.png'
                )
            if plot_boxplots:
                self._plot_boxplots(
                    df,
                    np.array([columns]),
                    self.labels_dict,
                    output_path=FIGURES_DIR / f'{file_name}_boxplots.png'
                )
        else:
            raise ValueError('No plots selected to be generated.')

    def plot_most_common_words(self, count_data, words, figsize=(15, 7), no_words=20, file_name=None, show_plot=False):
        """
        Draw a barplot showing the most common words in the data.

        Parameters:
        - count_data (sparse matrix): Document-term matrix containing word occurrences.
        - words (array-like): Feature names of the fitted vectorizer.
        - figsize (tuple): Figure size for the plot.
        - no_words (int): Number of most common words to display.
        - file_name (str, optional): File name (without extension) used to save the plot under FIGURES_DIR.
        - show_plot (bool): Whether to display the plot.
        """
        total_counts = np.zeros(len(words))
        for t in count_data:
            total_counts += t.toarray()[0]

        count_dict = sorted(zip(words, total_counts), key=lambda x: x[1], reverse=True)[:no_words]
        words = [w[0] for w in count_dict]
        counts = [w[1] for w in count_dict]
        x_pos = np.arange(len(words))

        plt.figure(figsize=figsize)
        plt.subplot(title=f'{no_words} most common words')
        sns.set_context("notebook", font_scale=1.25, rc={"lines.linewidth": 2.5})
        sns.barplot(x=x_pos, y=counts, palette='husl')
        plt.xticks(x_pos, words, rotation=45)
        plt.ylabel('Frequency')
        plt.tight_layout()
        if file_name:
            plt.savefig(FIGURES_DIR / f'{file_name}.png')
        if show_plot:
            plt.show()
        plt.close()

    def plot_bertopic_visualizations(self, model, output_path):
        """
        Generate and save BERTopic visualizations.
        """
        fig = model.visualize_barchart(top_n_topics=12)
        fig.write_html(output_path / "topic_barchart.html")

        hierarchical_fig = model.visualize_hierarchy()
        hierarchical_fig.write_html(output_path / "topic_hierarchy.html")

        heatmap_fig = model.visualize_heatmap()
        heatmap_fig.write_html(output_path / "topic_heatmap.html")

        word_cloud_fig = model.visualize_topics()
        word_cloud_fig.write_html(output_path / "topic_wordcloud.html")

    def plot_overlap_heatmap(self, overlap_matrix, file_name=None):
        """
        Plot a heatmap for the overlap matrix.

        Parameters:
            overlap_matrix (np.array): Overlap matrix between judgment and summary topics.
            file_name (str): File name (without extension) used to save the heatmap under FIGURES_DIR.
        """
        plt.figure(figsize=(12, 8))
        sns.heatmap(overlap_matrix, annot=False, cmap="coolwarm", cbar=True)
        plt.title("Topic Overlap Between Judgments and Summaries")
        plt.xlabel("Summary Topics")
        plt.ylabel("Judgment Topics")
        plt.savefig(FIGURES_DIR / f'{file_name}.png')
        plt.close()

    def plot_wordcloud(self, texts, background_color="white", max_words=1000, contour_width=3, contour_color='steelblue', file_name='wordcloud'):
        long_string = ','.join(texts)
        wordcloud = WordCloud(background_color=background_color, max_words=max_words, contour_width=contour_width, contour_color=contour_color)
        wordcloud.generate(long_string)
        wordcloud.to_image()
        wordcloud.to_file(FIGURES_DIR / f'{file_name}.png')

    def plot_lda_results(self, lda_model, tf, tf_vectorizer, file_name='lda_topics'):
        LDAvis_prepared = lda.prepare(lda_model, tf, tf_vectorizer)

        with open(FIGURES_DIR / f'{file_name}.pkl', 'wb') as f:
            pickle.dump(LDAvis_prepared, f)

        with open(FIGURES_DIR / f'{file_name}.pkl', 'rb') as f:
            LDAvis_prepared = pickle.load(f)

        pyLDAvis.save_html(LDAvis_prepared, str(FIGURES_DIR / f'{file_name}.html'))

    @staticmethod
    def _plot_boxplots(data, plot_vars, labels, figsize=(15, 5), output_path=None, show_plot=False):
        """
        Plot boxplots for the specified variables with appropriate labels.

        Parameters:
        - data (pd.DataFrame): The data points to plot.
        - plot_vars (array-like): A (1, x) or (n, m) array containing column names to plot.
        - labels (dict): A dictionary mapping column names to their respective labels.
        - figsize (tuple): The size of the figure (default: (15, 5)).
        - output_path (str, optional): File path to save the plot.
        - show_plot (bool, optional): Whether to display the plot.

        Returns:
        - None
        """
        plot_vars = np.atleast_2d(plot_vars)
        nrows, ncols = plot_vars.shape

        fig, axes = plt.subplots(nrows, ncols, figsize=figsize, squeeze=False)

        for i in range(nrows):
            for j in range(ncols):
                var = plot_vars[i, j]
                ax = axes[i, j]

                if var is not None:
                    ax.set_title(labels.get(var, var))
                    ax.grid(True)
                    ax.tick_params(
                        axis='x',
                        which='both',
                        bottom=False,
                        top=False,
                        labelbottom=False
                    )
                    if var in data.columns:
                        ax.boxplot(data[var])
                    else:
                        ax.set_visible(False)
                else:
                    ax.set_visible(False)

        fig.tight_layout()

        if output_path:
            plt.savefig(output_path)
        if show_plot:
            plt.show()
        plt.close()

    @staticmethod
    def _plot_histograms(data, plot_vars, labels, figsize=(15, 5), show_kde=False, output_path=None, show_plot=False):
        '''Plot histograms of the variables in plot_vars.

        Input:
        - data: a dataframe containing the data points to plot
        - plot_vars: a (1, x) array containing the columns to plot
        - labels: a dictionary mapping the column names to their labels
        - figsize: a tuple indicating the size of the figure
        - show_kde: a boolean indicating if the KDE should be shown
        - output_path: a string indicating the path to save the file
        '''
        fig, axes = plt.subplots(1, plot_vars.shape[1], figsize=figsize, sharey=False, dpi=100)

        if plot_vars.shape[1] == 1:
            axes = [axes]

        for i in range(plot_vars.shape[1]):
            color = (random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1))

            sns.histplot(
                data[plot_vars[0, i]],
                color=color,
                ax=axes[i],
                bins=50,
                kde=show_kde,
            )

            x_label = plot_vars[0, i].replace('sent', 'sentence')
            axes[i].set_xlabel(' '.join([l.capitalize() for l in x_label.split('_')[1:]]))
            axes[i].set_ylabel('Frequency')

            axes[i].set_title(labels[plot_vars[0, i]])

        fig.tight_layout()
        if output_path:
            plt.savefig(output_path)
        if show_plot:
            plt.show()
        plt.close()

    @staticmethod
    def _add_capacity_shading(input_stats, output_stats):
        model_input_length, model_output_length = 16384, 1024
        plt.gca().add_patch(
            plt.Rectangle((0, 0), model_input_length, max(output_stats) + 50,
                          color='red', alpha=0.3, linestyle='--', linewidth=1.5,
                          label=f"Judgments accommodated: {len([x for x in input_stats if x < model_input_length]):,}")
        )
        plt.gca().add_patch(
            plt.Rectangle((0, 0), max(input_stats) + 400, model_output_length,
                          color='green', alpha=0.3, linestyle='-', linewidth=1.5,
                          label=f"Summaries accommodated: {len([y for y in output_stats if y < model_output_length]):,}")
        )


class TopicModeling:
    """
    Class to perform topic modeling using LDA and BERTopic (which relies on UMAP and HDBSCAN internally).
    """

    def __init__(self):
        self.plotter = SCAPlotter()

        logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(message)s")

    def perform_lda_analysis(self, texts, tf_vectorizer, no_top_words=8, n_components=10, max_iter=500, random_state=0, learning_method='online', file_name='lda_topics'):
        """
        Perform LDA topic modeling and save top words per topic.

        Parameters:
            texts (list of str): Input texts for LDA.
            tf_vectorizer (TfidfVectorizer or CountVectorizer): Vectorizer for text processing.
            no_top_words (int): Number of top words to display per topic.
            n_components (int): Number of topics.
            max_iter (int): Maximum number of iterations.
            random_state (int): Random state for reproducibility.
            learning_method (str): Learning method for LDA ('batch' or 'online').
            file_name (str): Name of the file to save topics.

        Returns:
            lda_model (LDA): Fitted LDA model.
        """
        logging.info("Vectorizing text data...")
        tf = tf_vectorizer.fit_transform(texts)

        logging.info("Fitting LDA model...")
        lda_model = LDA(
            n_components=n_components,
            learning_method=learning_method,
            max_iter=max_iter,
            random_state=random_state
        ).fit(tf)

        words = tf_vectorizer.get_feature_names_out()

        with open(FIGURES_DIR / f'{file_name}.txt', 'w') as f:
            for topic_idx, topic in enumerate(lda_model.components_):
                f.write(f"\nTopic #{topic_idx}:\n")
                f.write(" ".join([words[i] for i in topic.argsort()[:-no_top_words - 1:-1]]) + "\n")

        self.plotter.plot_lda_results(lda_model, tf, tf_vectorizer, file_name)
        return lda_model

    def perform_bertopic_analysis(self, cleaned_text=None, cleaned_summary=None, output_path='bertopic', save_topic_info=True):
        """
        Perform BERTopic modeling and generate plots.

        Parameters:
            cleaned_text (list of str): List of cleaned text strings.
            cleaned_summary (list of str): List of cleaned summary strings.
            output_path (str): Directory path to save results.
            save_topic_info (bool): Save topic information as a CSV file.

        Returns:
            model (BERTopic): Trained BERTopic model.
            topic_info (pd.DataFrame): DataFrame containing topic information.
        """
        if cleaned_text is None and cleaned_summary is None:
            logging.error("No cleaned text or summary data provided.")
            raise ValueError("Please provide cleaned text and/or summary data.")

        if cleaned_text and cleaned_summary:
            logging.info('merging text and summary data...')
        elif cleaned_text:
            logging.info('using only text data...')
        elif cleaned_summary:
            logging.info('using only summary data...')

        combined_texts = (cleaned_text or []) + (cleaned_summary or [])

        logging.info("Initializing and fitting BERTopic model...")
        model = BERTopic()
        model.fit_transform(combined_texts)

        topic_info = None
        topic_info_path = FIGURES_DIR / output_path
        topic_info_path.mkdir(parents=True, exist_ok=True)

        if save_topic_info:
            logging.info("Saving topic information to CSV file...")
            topic_info = model.get_topic_info()
            topic_info.to_csv(topic_info_path / "topic_info.csv", index=False)

        logging.info("Generating BERTopic visualizations...")
        self.plotter.plot_bertopic_visualizations(model, topic_info_path)

        return model, topic_info

    def calculate_overlap_matrix(self, judgment_model, summary_model, top_n=12):
        """
        Calculate the overlap matrix between judgment and summary topics.

        Args:
            judgment_model: The model containing judgment topics.
            summary_model: The model containing summary topics.
            top_n (int): The number of top topics to consider.

        Returns:
            np.ndarray: Overlap matrix between judgment and summary topics.
        """
        logging.info("Getting topic information from judgment and summary models.")

        # Get topic information
        judgment_topics = judgment_model.get_topic_info()['Topic'][:top_n].values
        summary_topics = summary_model.get_topic_info()['Topic'][:top_n].values

        logging.info("Initializing overlap matrix.")
        # Initialize overlap matrix
        overlap_matrix = np.zeros((top_n, top_n))

        for i, j_topic_id in enumerate(judgment_topics):
            if j_topic_id == -1:  # Skip outliers
                logging.info(f"Skipping outlier topic in judgment model at index {i}.")
                continue
            logging.info(f"Processing judgment topic {j_topic_id} at index {i}.")
            j_terms = {term for term, _ in judgment_model.get_topic(j_topic_id)}
            for j, s_topic_id in enumerate(summary_topics):
                if s_topic_id == -1:  # Skip outliers
                    logging.info(f"Skipping outlier topic in summary model at index {j}.")
                    continue
                logging.info(f"Processing summary topic {s_topic_id} at index {j}.")
                s_terms = {term for term, _ in summary_model.get_topic(s_topic_id)}
                # Calculate Jaccard similarity
                overlap_matrix[i, j] = len(j_terms & s_terms) / len(j_terms | s_terms)
                logging.info(f"Calculated Jaccard similarity for judgment topic {j_topic_id} and summary topic {s_topic_id}: {overlap_matrix[i, j]}")

        logging.info("Overlap matrix calculation complete.")
        return overlap_matrix
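Note: GloveVectorizer is only used indirectly above, via TextProcessor.get_all_stats. For standalone use, a minimal sketch (the sample sentence is purely illustrative; glove.6B.100d gives 100-dimensional vectors):

# Usage sketch for GloveVectorizer; GLOVE_EMBEDDINGS_FILE points at ../data/glove.6B.100d.txt.
from utils import GloveVectorizer, GLOVE_EMBEDDINGS_FILE

vec = GloveVectorizer(GLOVE_EMBEDDINGS_FILE)
X, unknown = vec.transform(['the court dismissed the appeal with costs'], return_unknowns=True)
print(X.shape)   # (1, 100): one mean embedding per input sentence
print(unknown)   # out-of-vocabulary tokens per sentence, e.g. [[]]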