import json
import re
from collections import Counter
from pathlib import Path

# fastText-style label token, e.g. "__label__CARD_FEES#negative".
# Group 1 = aspect, group 2 = sentiment.
_LABEL_RE = re.compile(r'__label__([A-Z_]+)#(positive|negative|neutral)')
# Same token (plus trailing whitespace) for stripping labels out of the text.
_LABEL_STRIP_RE = re.compile(r'__label__[A-Z_]+#(?:positive|negative|neutral)\s*')


def _write_jsonl(path, records):
    """Write *records* to *path* as one JSON object per line (UTF-8),
    creating parent directories as needed."""
    path.parent.mkdir(parents=True, exist_ok=True)
    with open(path, 'w', encoding='utf-8') as f:
        for item in records:
            f.write(json.dumps(item, ensure_ascii=False) + '\n')


def process_banking_data(input_file, output_dir):
    """
    Process banking data into three subsets:
    1. Classification: text -> label (main aspect only)
    2. Sentiment: text -> sentiment (overall sentiment)
    3. Aspect-Sentiment: text -> aspect, sentiment pairs

    Each input line looks like:
        __label__ASPECT#sentiment [__label__ASPECT#sentiment ...] free text

    Output (under *output_dir*, split inferred from the input filename):
        classification/<split>.jsonl   {"text", "label"}      (first aspect)
        sentiment/<split>.jsonl        {"text", "sentiment"}  (majority vote)
        aspect_sentiment/<split>.jsonl {"text", "aspects": [{"aspect", "sentiment"}, ...]}

    Args:
        input_file: path to the raw labelled text file.
        output_dir: directory that will contain the three subset folders.
    """
    classification_data = []
    sentiment_data = []
    aspect_sentiment_data = []

    with open(input_file, 'r', encoding='utf-8') as f:
        for line_num, line in enumerate(f, 1):
            line = line.strip()
            if not line:
                continue

            # Extract (aspect, sentiment) label pairs, then strip the label
            # tokens; whatever remains is the utterance text.
            matches = _LABEL_RE.findall(line)
            text = _LABEL_STRIP_RE.sub('', line).strip()

            if not text or not matches:
                print(f"Skipping line {line_num}: No valid data found")
                continue

            aspects = [m[0] for m in matches]
            sentiments = [m[1] for m in matches]

            # 1. Classification subset (first aspect as main label).
            classification_data.append({
                "text": text,
                "label": aspects[0]
            })

            # 2. Sentiment-only subset: most frequent sentiment on the line.
            # Counter.most_common is stable, so ties resolve to the
            # first-seen sentiment (same result as the original
            # max-over-insertion-order logic).
            overall_sentiment = Counter(sentiments).most_common(1)[0][0]
            sentiment_data.append({
                "text": text,
                "sentiment": overall_sentiment
            })

            # 3. Aspect-sentiment subset: one pair per label on the line.
            aspect_sentiment_data.append({
                "text": text,
                "aspects": [
                    {"aspect": aspect, "sentiment": sentiment}
                    for aspect, sentiment in zip(aspects, sentiments)
                ],
            })

    # Save the three subsets.
    output_dir = Path(output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)

    # Determine if this is train or test based on input filename.
    split = "train" if "train" in str(input_file) else "test"

    classification_file = output_dir / "classification" / f"{split}.jsonl"
    _write_jsonl(classification_file, classification_data)
    print(f"Saved {len(classification_data)} classification examples to {classification_file}")

    sentiment_file = output_dir / "sentiment" / f"{split}.jsonl"
    _write_jsonl(sentiment_file, sentiment_data)
    print(f"Saved {len(sentiment_data)} sentiment examples to {sentiment_file}")

    aspect_sentiment_file = output_dir / "aspect_sentiment" / f"{split}.jsonl"
    _write_jsonl(aspect_sentiment_file, aspect_sentiment_data)
    print(f"Saved {len(aspect_sentiment_data)} aspect-sentiment examples to {aspect_sentiment_file}")

    # Print statistics.
    print("\n=== Statistics ===")
    print(f"Total examples processed: {len(classification_data)}")

    # Label distribution (descending by count; Counter.most_common matches
    # the original sorted(..., reverse=True) ordering).
    label_counts = Counter(item['label'] for item in classification_data)
    print("\nLabel distribution:")
    for label, count in label_counts.most_common():
        print(f" {label}: {count}")

    # Sentiment distribution.
    sentiment_counts = Counter(item['sentiment'] for item in sentiment_data)
    print("\nSentiment distribution:")
    for sentiment, count in sentiment_counts.most_common():
        print(f" {sentiment}: {count}")

    # Multi-aspect examples.
    multi_aspect_count = sum(
        1 for item in aspect_sentiment_data if len(item['aspects']) > 1
    )
    print(f"\nExamples with multiple aspects: {multi_aspect_count}")


if __name__ == "__main__":
    # Process train data
    print("Processing train data...")
    process_banking_data("raw_data/train.txt", "data")

    # Process test data
    print("\n" + "=" * 50)
    print("Processing test data...")
    process_banking_data("raw_data/test.txt", "data")