Vu Anh committed · Commit 0702fa5 · 1 Parent(s): 5fb8014

Simplify validation script - only test HuggingFace Hub loading


- Removed file structure validation
- Removed JSONL format validation
- Removed data content validation
- Removed data consistency checks
- Kept only the HuggingFace Hub loading test (see the sketch below)
- Result: a much cleaner, more focused validation script
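
For reference, the retained check boils down to a loop of roughly this shape (a minimal sketch: the actual loop body sits between the two diff hunks and is not shown on this page, and the config list is inferred from the removed "Available Configurations" summary code):

    from datasets import load_dataset

    # Sketch of the retained Hub-loading check. The three config names are
    # taken from the removed summary code; the per-split printing is an
    # assumption, since the real loop body is elided between the hunks below.
    for config in ["classification", "sentiment", "aspect_sentiment"]:
        try:
            dataset = load_dataset("undertheseanlp/UTS2017_Bank", config)
            for split in dataset:
                print(f"✓ {config}/{split}: {len(dataset[split])} examples")
        except Exception as e:
            print(f" ❌ Failed to load {config} from Hub: {e}")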

Files changed (1)

validate.py  +7 -263
validate.py CHANGED
@@ -1,172 +1,15 @@
 """
-Validation script to verify the UTS2017_Bank dataset is properly created and loadable.
+Validation script to verify the UTS2017_Bank dataset can be loaded from HuggingFace Hub.
 """
 
-import json
-from pathlib import Path
-
 from datasets import load_dataset
 
 
-def validate_file_structure():
-    """Validate that all required files and directories exist."""
-    print("=" * 60)
-    print("VALIDATING FILE STRUCTURE")
-    print("=" * 60)
-
-    required_files = [
-        "data/classification/train.jsonl",
-        "data/classification/test.jsonl",
-        "data/sentiment/train.jsonl",
-        "data/sentiment/test.jsonl",
-        "data/aspect_sentiment/train.jsonl",
-        "data/aspect_sentiment/test.jsonl",
-        "raw_data/train.txt",
-        "raw_data/test.txt",
-        "README.md",
-        "preprocess.py",
-        "stats.py"
-    ]
-
-    missing_files = []
-    for file_path in required_files:
-        if not Path(file_path).exists():
-            missing_files.append(file_path)
-        else:
-            print(f"✓ {file_path}")
-
-    if missing_files:
-        print(f"\n❌ Missing files: {missing_files}")
-        return False
-    else:
-        print("\n✅ All required files present")
-        return True
-
-
-def validate_jsonl_format(file_path):
-    """Validate JSONL file format and return basic stats."""
-    try:
-        with open(file_path, encoding="utf-8") as f:
-            items = []
-            for line_num, line in enumerate(f, 1):
-                try:
-                    item = json.loads(line.strip())
-                    items.append(item)
-                except json.JSONDecodeError as e:
-                    print(f"❌ JSON error in {file_path} line {line_num}: {e}")
-                    return None
-
-        return items
-    except Exception as e:
-        print(f"❌ Error reading {file_path}: {e}")
-        return None
-
-
-def validate_data_content():
-    """Validate the content and format of each dataset subset."""
-    print("\n" + "=" * 60)
-    print("VALIDATING DATA CONTENT")
-    print("=" * 60)
-
-    # Classification subset validation
-    print("\n📊 Classification Subset:")
-    for split in ["train", "test"]:
-        file_path = f"data/classification/{split}.jsonl"
-        items = validate_jsonl_format(file_path)
-
-        if items is None:
-            continue
-
-        print(f" {split}: {len(items)} examples")
-
-        # Check required fields
-        if items:
-            required_fields = ["text", "label"]
-            sample = items[0]
-            missing_fields = [f for f in required_fields if f not in sample]
-
-            if missing_fields:
-                print(f" ❌ Missing fields: {missing_fields}")
-            else:
-                print(" ✓ Required fields present")
-
-            # Check label variety
-            labels = {item["label"] for item in items}
-            print(f" ✓ {len(labels)} unique labels")
-
-    # Sentiment subset validation
-    print("\n😊 Sentiment Subset:")
-    for split in ["train", "test"]:
-        file_path = f"data/sentiment/{split}.jsonl"
-        items = validate_jsonl_format(file_path)
-
-        if items is None:
-            continue
-
-        print(f" {split}: {len(items)} examples")
-
-        # Check required fields
-        if items:
-            required_fields = ["text", "sentiment"]
-            sample = items[0]
-            missing_fields = [f for f in required_fields if f not in sample]
-
-            if missing_fields:
-                print(f" ❌ Missing fields: {missing_fields}")
-            else:
-                print(" ✓ Required fields present")
-
-            # Check sentiment values
-            sentiments = {item["sentiment"] for item in items}
-            expected_sentiments = {"positive", "negative", "neutral"}
-            if sentiments.issubset(expected_sentiments):
-                print(f" ✓ Valid sentiments: {sentiments}")
-            else:
-                print(f" ❌ Unexpected sentiments: {sentiments - expected_sentiments}")
-
-    # Aspect-sentiment subset validation
-    print("\n🎯 Aspect-Sentiment Subset:")
-    for split in ["train", "test"]:
-        file_path = f"data/aspect_sentiment/{split}.jsonl"
-        items = validate_jsonl_format(file_path)
-
-        if items is None:
-            continue
-
-        print(f" {split}: {len(items)} examples")
-
-        # Check required fields
-        if items:
-            required_fields = ["text", "aspects"]
-            sample = items[0]
-            missing_fields = [f for f in required_fields if f not in sample]
-
-            if missing_fields:
-                print(f" ❌ Missing fields: {missing_fields}")
-            else:
-                print(" ✓ Required fields present")
-
-            # Check aspects structure
-            if "aspects" in sample and isinstance(sample["aspects"], list):
-                if sample["aspects"]:
-                    aspect_sample = sample["aspects"][0]
-                    aspect_fields = ["aspect", "sentiment"]
-                    missing_aspect_fields = [f for f in aspect_fields if f not in aspect_sample]
-
-                    if missing_aspect_fields:
-                        print(f" ❌ Missing aspect fields: {missing_aspect_fields}")
-                    else:
-                        print(" ✓ Aspect structure valid")
-
-            # Count multi-aspect examples
-            multi_aspect = sum(1 for item in items if len(item["aspects"]) > 1)
-            print(f" ✓ Multi-aspect examples: {multi_aspect}/{len(items)}")
-
-
 def validate_huggingface_loading():
     """Validate that the dataset can be loaded from HuggingFace Hub."""
 
-    print("\n" + "=" * 60)
+    print("🔍 UTS2017_Bank Dataset Validation")
+    print("=" * 60)
     print("VALIDATING HUGGINGFACE HUB LOADING")
     print("=" * 60)
 
@@ -187,111 +30,12 @@ def validate_huggingface_loading():
     except Exception as e:
         print(f" ❌ Failed to load {config} from Hub: {e}")
 
-
-def validate_data_consistency():
-    """Validate data consistency across subsets."""
-    print("\n" + "=" * 60)
-    print("VALIDATING DATA CONSISTENCY")
-    print("=" * 60)
-
-    # Load all classification data to get baseline
-    classification_train = validate_jsonl_format("data/classification/train.jsonl")
-    classification_test = validate_jsonl_format("data/classification/test.jsonl")
-
-    sentiment_train = validate_jsonl_format("data/sentiment/train.jsonl")
-    sentiment_test = validate_jsonl_format("data/sentiment/test.jsonl")
-
-    aspect_train = validate_jsonl_format("data/aspect_sentiment/train.jsonl")
-    aspect_test = validate_jsonl_format("data/aspect_sentiment/test.jsonl")
-
-    if all([classification_train, classification_test, sentiment_train,
-            sentiment_test, aspect_train, aspect_test]):
-
-        # Check example counts consistency
-        train_counts = [len(classification_train), len(sentiment_train), len(aspect_train)]
-        test_counts = [len(classification_test), len(sentiment_test), len(aspect_test)]
-
-        if len(set(train_counts)) == 1:
-            print(f"✓ Train counts consistent: {train_counts[0]} examples")
-        else:
-            print(f"❌ Train counts inconsistent: {train_counts}")
-
-        if len(set(test_counts)) == 1:
-            print(f"✓ Test counts consistent: {test_counts[0]} examples")
-        else:
-            print(f"❌ Test counts inconsistent: {test_counts}")
-
-        # Check text consistency (first few examples)
-        print("\n🔍 Checking text consistency across subsets:")
-        for i in range(min(3, len(classification_train))):
-            clf_text = classification_train[i]["text"]
-            sent_text = sentiment_train[i]["text"]
-            asp_text = aspect_train[i]["text"]
-
-            if clf_text == sent_text == asp_text:
-                print(f" ✓ Example {i+1}: Text consistent")
-            else:
-                print(f" ❌ Example {i+1}: Text inconsistent")
-
-
-def generate_validation_summary():
-    """Generate a summary of dataset statistics."""
     print("\n" + "=" * 60)
-    print("DATASET SUMMARY")
+    print(" VALIDATION COMPLETE")
     print("=" * 60)
-
-    # Count total examples
-    total_train = 0
-    total_test = 0
-
-    classification_train = validate_jsonl_format("data/classification/train.jsonl")
-    classification_test = validate_jsonl_format("data/classification/test.jsonl")
-
-    if classification_train and classification_test:
-        total_train = len(classification_train)
-        total_test = len(classification_test)
-
-    print(f"📊 Total Examples: {total_train + total_test}")
-    print(f" - Train: {total_train}")
-    print(f" - Test: {total_test}")
-    print(f" - Ratio: {total_train/total_test:.2f}:1")
-
-    # Count unique labels
-    labels = {item["label"] for item in classification_train + classification_test}
-    print(f"🏷️ Unique Labels: {len(labels)}")
-
-    # Most common labels
-    from collections import Counter
-    label_counts = Counter(item["label"] for item in classification_train + classification_test)
-    print("📈 Top 3 Labels:")
-    for label, count in label_counts.most_common(3):
-        print(f" - {label}: {count}")
-
-    print("\n📁 Available Configurations:")
-    print(" - classification: Banking aspect classification")
-    print(" - sentiment: Sentiment analysis")
-    print(" - aspect_sentiment: Aspect-based sentiment analysis")
+    print("🎉 Dataset can be loaded from HuggingFace Hub!")
+    print("💡 Usage: load_dataset('undertheseanlp/UTS2017_Bank', config_name)")
 
 
 if __name__ == "__main__":
-    print("🔍 UTS2017_Bank Dataset Validation")
-    print("=" * 60)
-
-    # Run all validations
-    file_structure_ok = validate_file_structure()
-
-    if file_structure_ok:
-        validate_data_content()
-        validate_data_consistency()
-        validate_huggingface_loading()
-        generate_validation_summary()
-
-        print("\n" + "=" * 60)
-        print("✅ VALIDATION COMPLETE")
-        print("=" * 60)
-        print("🎉 Dataset appears to be properly created and formatted!")
-        print("💡 You can now use: load_dataset('undertheseanlp/UTS2017_Bank', config_name)")
-
-    else:
-        print("\n❌ VALIDATION FAILED")
-        print("Please check the missing files and run preprocessing again.")
+    validate_huggingface_loading()
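
With Hub loading verified, a consumer can run the same kind of quick check in a few lines (a hypothetical snippet; the field names "text" and "label" are taken from the removed content-validation code, not from this diff):

    from datasets import load_dataset

    # Load one configuration and peek at the first training example.
    ds = load_dataset("undertheseanlp/UTS2017_Bank", "classification")
    print(ds)                       # splits and row counts
    print(ds["train"][0]["text"])   # example text
    print(ds["train"][0]["label"])  # its label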