Dataset: undertheseanlp/UTS2017_Bank
Tasks: Text Classification
Modalities: Text
Formats: json
Languages: Vietnamese
Size: 1K - 10K
DOI:
License:
Vu Anh committed · commit 21fc01c
1 Parent(s): 6b29edc
Refactor all scripts - simple and elegant code
PREPROCESS.PY (145→98 lines):
- Extracted helper functions for clarity
- Removed verbose output and debugging
- Cleaner data structure handling
- Focused, readable main() function
STATS.PY (227→81 lines):
- Condensed from detailed analysis to essential stats
- Single function for subset statistics
- Clean, emoji-enhanced output format
- Removed redundant statistical calculations
CLEANUP:
- Removed old statistics_report.md file
- Fixed all ruff linting issues
- Added shebangs for direct execution
- Consistent code style across all scripts
All scripts now follow the same simple, elegant pattern.
- preprocess.py +79 -126
- statistics_report.md +0 -262
- stats.py +49 -205
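
For orientation before the diffs: the refactored preprocess.py writes three JSONL subsets, and the record shapes below follow from its new code. A sketch with placeholder values (the Vietnamese text is invented, not a dataset row):

```python
# Shapes of the records preprocess.py writes, one JSON object per line.
# The Vietnamese text is an invented placeholder, not a dataset example.
classification_rec = {"text": "Dịch vụ rất tốt", "label": "CUSTOMER_SUPPORT"}
sentiment_rec = {"text": "Dịch vụ rất tốt", "sentiment": "positive"}
aspect_rec = {
    "text": "Dịch vụ rất tốt",
    "aspects": [{"aspect": "CUSTOMER_SUPPORT", "sentiment": "positive"}],
}
```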
preprocess.py
CHANGED
@@ -1,145 +1,98 @@

Removed: the old 145-line version, a single `process_banking_data(input_file, output_dir)` function that parsed labels, wrote each subset with inline mkdir/open calls and per-file "Saved ... examples to ..." prints, and finished with an "=== Statistics ===" dump of label distribution, sentiment distribution, and multi-aspect counts; only fragments of it survive in this render. The new 98-line file:

```python
#!/usr/bin/env python3
"""Process raw banking data into three task-specific subsets."""

import json
import re
from pathlib import Path


def extract_labels_and_text(line):
    """Extract labels, sentiments, and clean text from a labeled line."""
    pattern = r"__label__([A-Z_]+)#(positive|negative|neutral)"
    matches = re.findall(pattern, line)
    text = re.sub(r"__label__[A-Z_]+#(positive|negative|neutral)\s*", "", line).strip()

    if not matches or not text:
        return None

    aspects = [m[0] for m in matches]
    sentiments = [m[1] for m in matches]
    return aspects, sentiments, text


def get_overall_sentiment(sentiments):
    """Get overall sentiment from multiple sentiments."""
    if len(set(sentiments)) == 1:
        return sentiments[0]

    # Use most common sentiment
    counts = {}
    for s in sentiments:
        counts[s] = counts.get(s, 0) + 1
    return max(counts, key=counts.get)


def save_subset(data, output_path):
    """Save data to JSONL file."""
    output_path.parent.mkdir(parents=True, exist_ok=True)
    with open(output_path, "w", encoding="utf-8") as f:
        for item in data:
            f.write(json.dumps(item, ensure_ascii=False) + "\n")


def process_file(input_file, output_dir):
    """Process a single input file into three subsets."""
    data = {"classification": [], "sentiment": [], "aspect_sentiment": []}
    split_name = "train" if "train" in str(input_file) else "test"

    with open(input_file, encoding="utf-8") as f:
        for line in f:
            result = extract_labels_and_text(line.strip())
            if not result:
                continue

            aspects, sentiments, text = result

            # Classification subset
            data["classification"].append({
                "text": text,
                "label": aspects[0]
            })

            # Sentiment subset
            data["sentiment"].append({
                "text": text,
                "sentiment": get_overall_sentiment(sentiments)
            })

            # Aspect-sentiment subset
            aspect_pairs = [
                {"aspect": aspect, "sentiment": sentiment}
                for aspect, sentiment in zip(aspects, sentiments, strict=False)
            ]
            data["aspect_sentiment"].append({
                "text": text,
                "aspects": aspect_pairs
            })

    # Save all subsets
    output_dir = Path(output_dir)
    for subset_name, subset_data in data.items():
        output_path = output_dir / subset_name / f"{split_name}.jsonl"
        save_subset(subset_data, output_path)
        print(f"✅ {subset_name}/{split_name}.jsonl: {len(subset_data)} examples")


def main():
    """Process raw data into task-specific subsets."""
    print("🔄 Processing banking data...")

    process_file("raw_data/train.txt", "data")
    process_file("raw_data/test.txt", "data")

    print("\n🎉 Processing complete!")
    print("💡 Run 'python validate.py' to test the dataset")


if __name__ == "__main__":
    main()
```
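A minimal usage sketch of the new helpers on one raw fastText-style line, assuming they are imported from the file above; the Vietnamese sentence is an invented placeholder:

```python
from preprocess import extract_labels_and_text, get_overall_sentiment

# Hypothetical raw line in the __label__ASPECT#sentiment format the regex
# expects; real inputs live in raw_data/train.txt and raw_data/test.txt.
line = "__label__CUSTOMER_SUPPORT#positive __label__CARD#negative Nhân viên tốt nhưng thẻ hay lỗi"

aspects, sentiments, text = extract_labels_and_text(line)
print(aspects)     # ['CUSTOMER_SUPPORT', 'CARD']
print(sentiments)  # ['positive', 'negative']
print(text)        # 'Nhân viên tốt nhưng thẻ hay lỗi'

# The sentiment subset takes the most common sentiment; on a 1-1 tie like
# this, max() returns the first key it saw, here 'positive'.
print(get_overall_sentiment(sentiments))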
statistics_report.md
DELETED
@@ -1,262 +0,0 @@

Removed file content (262 lines):

# UTS2017_Bank Dataset Statistics Report

```
============================================================
DATASET SUMMARY
============================================================

Total Dataset Size:
  Train: 1977 examples
  Test: 494 examples
  Total: 2471 examples
  Train/Test Ratio: 4.00:1

Available Subsets:
  1. Classification: Text → Label (14 banking aspect categories)
  2. Sentiment: Text → Sentiment (positive/negative/neutral)
  3. Aspect-Sentiment: Text → Multiple (Aspect, Sentiment) pairs

Data Format:
  - All subsets use JSONL format
  - UTF-8 encoding
  - Vietnamese language text

Recommended Use Cases:
  - Classification: Banking domain text classification
  - Sentiment: Customer feedback sentiment analysis
  - Aspect-Sentiment: Fine-grained aspect-based sentiment analysis

============================================================
CLASSIFICATION SUBSET ANALYSIS
============================================================

TRAIN Split:
  Total examples: 1977

  Label Distribution:
    CUSTOMER_SUPPORT    :  774 ( 39.2%)
    TRADEMARK           :  699 ( 35.4%)
    LOAN                :   74 (  3.7%)
    INTERNET_BANKING    :   70 (  3.5%)
    OTHER               :   69 (  3.5%)
    CARD                :   66 (  3.3%)
    INTEREST_RATE       :   60 (  3.0%)
    PROMOTION           :   53 (  2.7%)
    DISCOUNT            :   41 (  2.1%)
    MONEY_TRANSFER      :   34 (  1.7%)
    PAYMENT             :   15 (  0.8%)
    SAVING              :   13 (  0.7%)
    ACCOUNT             :    5 (  0.3%)
    SECURITY            :    4 (  0.2%)

  Text Statistics:
    Words per text - Avg: 23.8, Min: 1, Max: 816, Median: 14.0
    Chars per text - Avg: 106.3, Min: 3, Max: 3787, Median: 62.0

TEST Split:
  Total examples: 494

  Label Distribution:
    CUSTOMER_SUPPORT    :  338 ( 68.4%)
    CARD                :   44 (  8.9%)
    TRADEMARK           :   41 (  8.3%)
    INTERNET_BANKING    :   32 (  6.5%)
    OTHER               :   12 (  2.4%)
    PROMOTION           :    9 (  1.8%)
    INTEREST_RATE       :    6 (  1.2%)
    LOAN                :    3 (  0.6%)
    SAVING              :    3 (  0.6%)
    MONEY_TRANSFER      :    2 (  0.4%)
    DISCOUNT            :    2 (  0.4%)
    PAYMENT             :    2 (  0.4%)

  Text Statistics:
    Words per text - Avg: 30.6, Min: 1, Max: 411, Median: 18.0
    Chars per text - Avg: 134.8, Min: 4, Max: 1800, Median: 80.0

============================================================
SENTIMENT SUBSET ANALYSIS
============================================================

TRAIN Split:
  Total examples: 1977

  Sentiment Distribution:
    positive  : 1211 ( 61.3%)
    negative  :  743 ( 37.6%)
    neutral   :   23 (  1.2%)

  Text Statistics:
    Words per text - Avg: 23.8, Min: 1, Max: 816, Median: 14.0

TEST Split:
  Total examples: 494

  Sentiment Distribution:
    negative  :  301 ( 60.9%)
    positive  :  185 ( 37.4%)
    neutral   :    8 (  1.6%)

  Text Statistics:
    Words per text - Avg: 30.6, Min: 1, Max: 411, Median: 18.0

============================================================
ASPECT-SENTIMENT SUBSET ANALYSIS
============================================================

TRAIN Split:
  Total examples: 1977

  Aspect Coverage:
    Single aspect: 1948 (98.5%)
    Multi-aspect: 29 (1.5%)
    Max aspects per example: 4

  Top 10 Aspect-Sentiment Pairs:
    TRADEMARK            + positive:  652 ( 32.5%)
    CUSTOMER_SUPPORT     + positive:  409 ( 20.4%)
    CUSTOMER_SUPPORT     + negative:  370 ( 18.4%)
    INTEREST_RATE        + negative:   63 (  3.1%)
    LOAN                 + negative:   61 (  3.0%)
    INTERNET_BANKING     + negative:   57 (  2.8%)
    CARD                 + negative:   54 (  2.7%)
    TRADEMARK            + negative:   47 (  2.3%)
    OTHER                + negative:   35 (  1.7%)
    PROMOTION            + positive:   33 (  1.6%)

  Aspect Distribution with Sentiment Breakdown:

    CUSTOMER_SUPPORT    :  784 ( 39.0%)
      - positive: 409 ( 52.2%)
      - negative: 370 ( 47.2%)
      - neutral :   5 (  0.6%)

    TRADEMARK           :  699 ( 34.8%)
      - positive: 652 ( 93.3%)
      - negative:  47 (  6.7%)

    INTERNET_BANKING    :   79 (  3.9%)
      - positive:  20 ( 25.3%)
      - negative:  57 ( 72.2%)
      - neutral :   2 (  2.5%)

    LOAN                :   74 (  3.7%)
      - positive:  13 ( 17.6%)
      - negative:  61 ( 82.4%)

    OTHER               :   69 (  3.4%)
      - positive:  30 ( 43.5%)
      - negative:  35 ( 50.7%)
      - neutral :   4 (  5.8%)

    INTEREST_RATE       :   68 (  3.4%)
      - positive:   4 (  5.9%)
      - negative:  63 ( 92.6%)
      - neutral :   1 (  1.5%)

    CARD                :   67 (  3.3%)
      - positive:  12 ( 17.9%)
      - negative:  54 ( 80.6%)
      - neutral :   1 (  1.5%)

    PROMOTION           :   53 (  2.6%)
      - positive:  33 ( 62.3%)
      - negative:  17 ( 32.1%)
      - neutral :   3 (  5.7%)

    DISCOUNT            :   42 (  2.1%)
      - positive:  20 ( 47.6%)
      - negative:  18 ( 42.9%)
      - neutral :   4 (  9.5%)

    MONEY_TRANSFER      :   36 (  1.8%)
      - positive:   5 ( 13.9%)
      - negative:  31 ( 86.1%)

    PAYMENT             :   15 (  0.7%)
      - positive:  11 ( 73.3%)
      - negative:   4 ( 26.7%)

    SAVING              :   13 (  0.6%)
      - positive:   6 ( 46.2%)
      - negative:   6 ( 46.2%)
      - neutral :   1 (  7.7%)

    ACCOUNT             :    5 (  0.2%)
      - negative:   5 (100.0%)

    SECURITY            :    5 (  0.2%)
      - positive:   1 ( 20.0%)
      - negative:   2 ( 40.0%)
      - neutral :   2 ( 40.0%)

TEST Split:
  Total examples: 494

  Aspect Coverage:
    Single aspect: 492 (99.6%)
    Multi-aspect: 2 (0.4%)
    Max aspects per example: 3

  Top 10 Aspect-Sentiment Pairs:
    CUSTOMER_SUPPORT     + negative:  215 ( 43.3%)
    CUSTOMER_SUPPORT     + positive:  122 ( 24.5%)
    CARD                 + negative:   37 (  7.4%)
    TRADEMARK            + positive:   35 (  7.0%)
    INTERNET_BANKING     + negative:   27 (  5.4%)
    PROMOTION            + positive:    7 (  1.4%)
    TRADEMARK            + negative:    6 (  1.2%)
    OTHER                + positive:    6 (  1.2%)
    INTERNET_BANKING     + positive:    6 (  1.2%)
    INTEREST_RATE        + negative:    6 (  1.2%)

  Aspect Distribution with Sentiment Breakdown:

    CUSTOMER_SUPPORT    :  339 ( 68.2%)
      - positive: 122 ( 36.0%)
      - negative: 215 ( 63.4%)
      - neutral :   2 (  0.6%)

    CARD                :   44 (  8.9%)
      - positive:   5 ( 11.4%)
      - negative:  37 ( 84.1%)
      - neutral :   2 (  4.5%)

    TRADEMARK           :   41 (  8.2%)
      - positive:  35 ( 85.4%)
      - negative:   6 ( 14.6%)

    INTERNET_BANKING    :   33 (  6.6%)
      - positive:   6 ( 18.2%)
      - negative:  27 ( 81.8%)

    OTHER               :   12 (  2.4%)
      - positive:   6 ( 50.0%)
      - negative:   3 ( 25.0%)
      - neutral :   3 ( 25.0%)

    PROMOTION           :    9 (  1.8%)
      - positive:   7 ( 77.8%)
      - negative:   1 ( 11.1%)
      - neutral :   1 ( 11.1%)

    INTEREST_RATE       :    6 (  1.2%)
      - negative:   6 (100.0%)

    LOAN                :    3 (  0.6%)
      - negative:   3 (100.0%)

    MONEY_TRANSFER      :    3 (  0.6%)
      - negative:   3 (100.0%)

    SAVING              :    3 (  0.6%)
      - positive:   3 (100.0%)

    DISCOUNT            :    2 (  0.4%)
      - negative:   2 (100.0%)

    PAYMENT             :    2 (  0.4%)
      - positive:   1 ( 50.0%)
      - negative:   1 ( 50.0%)
```
stats.py
CHANGED
@@ -1,3 +1,6 @@ and @@ -6,232 +9,73 @@ (full-file rewrite)

Removed: the old 227-line version: `load_jsonl()` with an explicit accumulator loop, a `calculate_text_statistics()` helper tracking both word and character lengths, separate `analyze_classification_subset()`, `analyze_sentiment_subset()`, and `analyze_aspect_sentiment_subset()` functions printing full per-label, per-sentiment, and per-aspect breakdowns, `generate_summary_statistics()`, and a `save_statistics_report()` helper that captured stdout into statistics_report.md; only fragments survive in this render. The new 81-line file:

```python
#!/usr/bin/env python3
"""Generate statistics for the UTS2017_Bank dataset."""

import json
import statistics as stats
from collections import Counter
from pathlib import Path


def load_jsonl(file_path):
    """Load JSONL file and return list of items."""
    with open(file_path, encoding="utf-8") as f:
        return [json.loads(line.strip()) for line in f]


def text_stats(items):
    """Calculate text length statistics."""
    word_counts = [len(item["text"].split()) for item in items]
    return {
        "avg": stats.mean(word_counts),
        "min": min(word_counts),
        "max": max(word_counts),
        "median": stats.median(word_counts),
    }


def print_subset_stats(subset_name, emoji):
    """Print statistics for a dataset subset."""
    print(f"\n{emoji} {subset_name.upper()} SUBSET")
    print("-" * 40)

    for split in ["train", "test"]:
        file_path = Path(f"data/{subset_name}/{split}.jsonl")
        items = load_jsonl(file_path)

        print(f"\n{split.capitalize()}: {len(items)} examples")

        # Text statistics
        text_data = text_stats(items)
        print(f"  Words: avg={text_data['avg']:.1f}, range={text_data['min']}-{text_data['max']}")

        # Subset-specific stats
        if subset_name == "classification":
            labels = Counter(item["label"] for item in items)
            print(f"  Top labels: {', '.join(f'{k}({v})' for k, v in labels.most_common(3))}")

        elif subset_name == "sentiment":
            sentiments = Counter(item["sentiment"] for item in items)
            print(f"  Sentiments: {', '.join(f'{k}({v})' for k, v in sentiments.most_common())}")

        elif subset_name == "aspect_sentiment":
            multi_aspect = sum(1 for item in items if len(item["aspects"]) > 1)
            print(f"  Multi-aspect: {multi_aspect}/{len(items)} examples")


def main():
    """Generate and display dataset statistics."""
    print("📊 UTS2017_Bank Dataset Statistics")
    print("=" * 50)

    # Overall stats
    train_items = load_jsonl("data/classification/train.jsonl")
    test_items = load_jsonl("data/classification/test.jsonl")
    total = len(train_items) + len(test_items)

    print(f"\n📈 OVERALL: {total} examples ({len(train_items)} train, {len(test_items)} test)")

    # Subset statistics
    print_subset_stats("classification", "🏷️")
    print_subset_stats("sentiment", "😊")
    print_subset_stats("aspect_sentiment", "🎯")

    # Available configurations
    print("\n💡 USAGE:")
    print("  load_dataset('undertheseanlp/UTS2017_Bank', 'classification')")
    print("  load_dataset('undertheseanlp/UTS2017_Bank', 'sentiment')")
    print("  load_dataset('undertheseanlp/UTS2017_Bank', 'aspect_sentiment')")


if __name__ == "__main__":
    main()
```
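
The usage lines main() prints, as a runnable sketch (assuming the Hugging Face `datasets` library and that the repo exposes the three configurations the script names):

```python
from datasets import load_dataset

# Any of the three configs stats.py lists: classification, sentiment,
# aspect_sentiment.
ds = load_dataset("undertheseanlp/UTS2017_Bank", "classification")
print(ds)              # DatasetDict with train and test splits
print(ds["train"][0])  # e.g. {'text': ..., 'label': ...}
```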