"""dataset_statistics.py: statistics for the UTS2017_Bank dataset.

Computes and reports statistics for the three subsets of this Vietnamese
banking dataset: classification, sentiment, and aspect-sentiment.
"""

import json
import statistics as stats
import sys
from collections import Counter
from io import StringIO
from pathlib import Path

def load_jsonl(file_path):
    """Load a JSONL file and return a list of parsed records."""
    items = []
    with open(file_path, 'r', encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            if line:  # skip blank lines defensively
                items.append(json.loads(line))
    return items
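

# Assumed record shapes per subset (inferred from the field accesses in the
# analysis functions below, not from a published schema):
#   classification:    {"text": "...", "label": "..."}
#   sentiment:         {"text": "...", "sentiment": "..."}
#   aspect_sentiment:  {"text": "...",
#                       "aspects": [{"aspect": "...", "sentiment": "..."}]}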


def calculate_text_statistics(items):
    """Calculate word- and character-level length statistics for the 'text' field."""
    text_lengths = [len(item['text'].split()) for item in items]
    char_lengths = [len(item['text']) for item in items]
    return {
        'avg_words': stats.mean(text_lengths),
        'min_words': min(text_lengths),
        'max_words': max(text_lengths),
        'median_words': stats.median(text_lengths),
        'avg_chars': stats.mean(char_lengths),
        'min_chars': min(char_lengths),
        'max_chars': max(char_lengths),
        'median_chars': stats.median(char_lengths),
    }
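
# Quick sanity check with a hypothetical single-record input (illustrative):
#   calculate_text_statistics([{'text': 'giao dich nhanh'}])
#   -> avg/min/max/median words = 3; avg/min/max/median chars = 15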


def analyze_classification_subset():
    """Analyze classification subset statistics."""
    print("\n" + "=" * 60)
    print("CLASSIFICATION SUBSET ANALYSIS")
    print("=" * 60)
    for split in ['train', 'test']:
        file_path = Path(f'data/classification/{split}.jsonl')
        items = load_jsonl(file_path)
        print(f"\n{split.upper()} Split:")
        print(f"  Total examples: {len(items)}")

        # Label distribution
        label_counter = Counter(item['label'] for item in items)
        print("\n  Label Distribution:")
        for label, count in label_counter.most_common():
            percentage = (count / len(items)) * 100
            print(f"    {label:20s}: {count:4d} ({percentage:5.1f}%)")

        # Text statistics
        text_stats = calculate_text_statistics(items)
        print("\n  Text Statistics:")
        print(f"    Words per text - Avg: {text_stats['avg_words']:.1f}, "
              f"Min: {text_stats['min_words']}, Max: {text_stats['max_words']}, "
              f"Median: {text_stats['median_words']:.1f}")
        print(f"    Chars per text - Avg: {text_stats['avg_chars']:.1f}, "
              f"Min: {text_stats['min_chars']}, Max: {text_stats['max_chars']}, "
              f"Median: {text_stats['median_chars']:.1f}")


def analyze_sentiment_subset():
    """Analyze sentiment subset statistics."""
    print("\n" + "=" * 60)
    print("SENTIMENT SUBSET ANALYSIS")
    print("=" * 60)
    for split in ['train', 'test']:
        file_path = Path(f'data/sentiment/{split}.jsonl')
        items = load_jsonl(file_path)
        print(f"\n{split.upper()} Split:")
        print(f"  Total examples: {len(items)}")

        # Sentiment distribution
        sentiment_counter = Counter(item['sentiment'] for item in items)
        print("\n  Sentiment Distribution:")
        for sentiment, count in sentiment_counter.most_common():
            percentage = (count / len(items)) * 100
            print(f"    {sentiment:10s}: {count:4d} ({percentage:5.1f}%)")

        # Text statistics (word-level only for this subset)
        text_stats = calculate_text_statistics(items)
        print("\n  Text Statistics:")
        print(f"    Words per text - Avg: {text_stats['avg_words']:.1f}, "
              f"Min: {text_stats['min_words']}, Max: {text_stats['max_words']}, "
              f"Median: {text_stats['median_words']:.1f}")


def analyze_aspect_sentiment_subset():
    """Analyze aspect-sentiment subset statistics."""
    print("\n" + "=" * 60)
    print("ASPECT-SENTIMENT SUBSET ANALYSIS")
    print("=" * 60)
    for split in ['train', 'test']:
        file_path = Path(f'data/aspect_sentiment/{split}.jsonl')
        items = load_jsonl(file_path)
        print(f"\n{split.upper()} Split:")
        print(f"  Total examples: {len(items)}")

        # Multi-aspect analysis
        single_aspect = sum(1 for item in items if len(item['aspects']) == 1)
        multi_aspect = sum(1 for item in items if len(item['aspects']) > 1)
        max_aspects = max(len(item['aspects']) for item in items)
        print("\n  Aspect Coverage:")
        print(f"    Single aspect: {single_aspect} ({(single_aspect / len(items)) * 100:.1f}%)")
        print(f"    Multi-aspect: {multi_aspect} ({(multi_aspect / len(items)) * 100:.1f}%)")
        print(f"    Max aspects per example: {max_aspects}")

        # Aspect-sentiment pair distribution
        aspect_sentiment_pairs = []
        for item in items:
            for asp in item['aspects']:
                aspect_sentiment_pairs.append(f"{asp['aspect']}#{asp['sentiment']}")
        pair_counter = Counter(aspect_sentiment_pairs)
        print("\n  Top 10 Aspect-Sentiment Pairs:")
        for pair, count in pair_counter.most_common(10):
            aspect, sentiment = pair.split('#')
            percentage = (count / len(aspect_sentiment_pairs)) * 100
            print(f"    {aspect:20s} + {sentiment:8s}: {count:4d} ({percentage:5.1f}%)")

        # Overall aspect distribution
        aspect_counter = Counter()
        sentiment_by_aspect = {}
        for item in items:
            for asp in item['aspects']:
                aspect = asp['aspect']
                sentiment = asp['sentiment']
                aspect_counter[aspect] += 1
                if aspect not in sentiment_by_aspect:
                    sentiment_by_aspect[aspect] = Counter()
                sentiment_by_aspect[aspect][sentiment] += 1
        total_mentions = sum(aspect_counter.values())  # hoisted out of the loop
        print("\n  Aspect Distribution with Sentiment Breakdown:")
        for aspect, count in aspect_counter.most_common():
            percentage = (count / total_mentions) * 100
            print(f"\n    {aspect:20s}: {count:4d} ({percentage:5.1f}%)")
            # Sentiment breakdown for this aspect
            sentiments = sentiment_by_aspect[aspect]
            total_aspect = sum(sentiments.values())
            for sentiment in ['positive', 'negative', 'neutral']:
                if sentiment in sentiments:
                    sent_count = sentiments[sentiment]
                    sent_pct = (sent_count / total_aspect) * 100
                    print(f"      - {sentiment:8s}: {sent_count:3d} ({sent_pct:5.1f}%)")


def generate_summary_statistics():
    """Generate overall summary statistics."""
    print("\n" + "=" * 60)
    print("DATASET SUMMARY")
    print("=" * 60)
    # Split sizes are read from the classification subset; the sentiment and
    # aspect-sentiment subsets are assumed to share the same train/test splits.
    total_train = len(load_jsonl('data/classification/train.jsonl'))
    total_test = len(load_jsonl('data/classification/test.jsonl'))
    print("\nTotal Dataset Size:")
    print(f"  Train: {total_train} examples")
    print(f"  Test:  {total_test} examples")
    print(f"  Total: {total_train + total_test} examples")
    print(f"  Train/Test Ratio: {total_train / total_test:.2f}:1")

    # Available subsets
    print("\nAvailable Subsets:")
    print("  1. Classification: Text → Label (14 banking aspect categories)")
    print("  2. Sentiment: Text → Sentiment (positive/negative/neutral)")
    print("  3. Aspect-Sentiment: Text → Multiple (Aspect, Sentiment) pairs")

    # Data format
    print("\nData Format:")
    print("  - All subsets use JSONL format")
    print("  - UTF-8 encoding")
    print("  - Vietnamese language text")

    # Use cases
    print("\nRecommended Use Cases:")
    print("  - Classification: Banking domain text classification")
    print("  - Sentiment: Customer feedback sentiment analysis")
    print("  - Aspect-Sentiment: Fine-grained aspect-based sentiment analysis")


def save_statistics_report():
    """Save statistics to a markdown file."""
    # Capture everything the analysis functions print by swapping sys.stdout
    # for an in-memory buffer; restore it even if an analysis raises.
    old_stdout = sys.stdout
    sys.stdout = buffer = StringIO()
    try:
        generate_summary_statistics()
        analyze_classification_subset()
        analyze_sentiment_subset()
        analyze_aspect_sentiment_subset()
        output = buffer.getvalue()
    finally:
        sys.stdout = old_stdout

    # Save to file
    with open('statistics_report.md', 'w', encoding='utf-8') as f:
        f.write("# UTS2017_Bank Dataset Statistics Report\n\n")
        f.write("```\n")
        f.write(output)
        f.write("```\n")
    print("Statistics report saved to statistics_report.md")


if __name__ == "__main__":
    generate_summary_statistics()
    analyze_classification_subset()
    analyze_sentiment_subset()
    analyze_aspect_sentiment_subset()
    print("\n" + "=" * 60)
    save_statistics_report()
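
# Usage: run from the dataset root so the relative data/ paths resolve:
#   python dataset_statistics.py
# Writes statistics_report.md to the current working directory.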