Commit · e3e057f
Parent(s): a090310
draft

Files changed: semantic-dedupe.py (+22 -24)

semantic-dedupe.py CHANGED
@@ -14,6 +14,9 @@ Semantic deduplication for Hugging Face datasets using SemHash.
 This script removes duplicate or near-duplicate text samples from datasets based on
 semantic similarity, helping to clean training data and prevent train/test leakage.
 
+SemHash is CPU-optimized and uses Model2Vec embeddings that are 500x faster on CPU
+than traditional transformers. No GPU required!
+
 Example usage:
     # Basic deduplication
     uv run semantic-dedupe.py username/dataset text username/dataset-deduped
@@ -22,8 +25,8 @@ Example usage:
     uv run semantic-dedupe.py username/dataset text username/dataset-deduped \\
         --threshold 0.85 --max-samples 1000
 
-    # Using HF Jobs
-    hf jobs uv run --flavor
+    # Using HF Jobs (CPU is sufficient)
+    hf jobs uv run --flavor cpu-4x-xlarge \\
         -e HF_TOKEN=$(python3 -c "from huggingface_hub import get_token; print(get_token())") \\
         https://huggingface.co/datasets/uv-scripts/deduplication/raw/main/semantic-dedupe.py \\
         username/dataset text username/dataset-deduped
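For reference, the docstring change above rests on Model2Vec static embeddings, which is why a CPU flavor is enough for the HF Jobs example. Below is a minimal sketch of scoring near-duplicates with such embeddings; it is not part of the script, and the model name, sample texts, and the 0.85 threshold interpretation are illustrative assumptions.

import numpy as np
from model2vec import StaticModel

# Small static-embedding model from minishlab; runs on CPU.
# (Assumed here as an example; the script itself does not pin a model.)
model = StaticModel.from_pretrained("minishlab/potion-base-8M")

texts = [
    "The cat sat on the mat.",
    "A cat was sitting on the mat.",
    "Quarterly revenue grew by 12%.",
]
emb = model.encode(texts)                               # (3, dim) numpy array, computed on CPU
emb = emb / np.linalg.norm(emb, axis=1, keepdims=True)  # normalize for cosine similarity
sim = emb @ emb.T
print(f"near-duplicate pair score: {sim[0, 1]:.3f}")    # high score -> duplicate candidate at ~0.85
print(f"unrelated pair score:      {sim[0, 2]:.3f}")    # low score  -> kept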
@@ -202,44 +205,39 @@ def main():
         print(f"Available columns: {', '.join(dataset.column_names)}")
         sys.exit(1)
 
+    # Convert dataset to records (preserves all columns)
+    print("Converting dataset to records...")
+    records = [dict(row) for row in dataset]
+
     # Initialize SemHash
-    print("Initializing SemHash...")
-    semhash = SemHash(
-        batch_size=args.batch_size,
-        show_progress=True,
-    )
+    print("Initializing SemHash (CPU-optimized)...")
+    semhash = SemHash.from_records(records=records, columns=[args.column])
 
     # Perform deduplication
     print(f"Performing {args.method} deduplication on '{args.column}' column...")
 
     if args.method == "duplicates":
-        result = semhash.
-            dataset,
-            text_column=args.column,
-            threshold=args.threshold,
-        )
+        result = semhash.self_deduplicate(threshold=args.threshold)
     elif args.method == "outliers":
-        result = semhash.
-            dataset,
-            text_column=args.column,
-            threshold=args.threshold,
-        )
+        result = semhash.self_filter_outliers()
     elif args.method == "representatives":
-        result = semhash.
-            dataset,
-            text_column=args.column,
-            threshold=args.threshold,
-        )
+        result = semhash.self_find_representative()
     else:
         raise ValueError(f"Unknown method: {args.method}")
 
-
+    # Get deduplicated records (all columns preserved)
+    deduplicated_records = result.selected
+
+    # Convert back to HF Dataset
+    result_dataset = Dataset.from_list(deduplicated_records)
+    deduped_size = len(result_dataset)
 
     # Print statistics
     print(f"\nDeduplication complete!")
     print(f"Original size: {original_size:,}")
     print(f"Deduplicated size: {deduped_size:,}")
     print(f"Removed: {original_size - deduped_size:,} ({((original_size - deduped_size) / original_size) * 100:.1f}%)")
+    print("\nNote: SemHash processes ~20,000 sentences/second on CPU")
 
     # Create dataset card
     card = create_dataset_card(
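The hunk above moves the script onto SemHash's record-based API: the dataset is flattened to plain dicts, deduplicated, and rebuilt as a Dataset. A self-contained sketch of that round trip, following the calls shown in the diff; the repo id, column name, and threshold are placeholders:

from datasets import Dataset, load_dataset
from semhash import SemHash

ds = load_dataset("username/dataset", split="train")  # placeholder repo id
records = [dict(row) for row in ds]                    # keep all columns, not just the text column

semhash = SemHash.from_records(records=records, columns=["text"])
result = semhash.self_deduplicate(threshold=0.85)      # drop near-duplicates within the dataset

deduped = Dataset.from_list(result.selected)           # selected records -> HF Dataset
print(f"{len(ds):,} -> {len(deduped):,} rows")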
@@ -253,7 +251,7 @@ def main():
 
     # Push to hub
     print(f"\nPushing to hub: {args.output_repo}")
-
+    result_dataset.push_to_hub(
         args.output_repo,
         private=args.private,
         commit_message=f"Deduplicated using {args.method} method",
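The push in this last hunk uses the standard datasets API on the rebuilt Dataset. A rough end-to-end check could look like the following; the repo id and row content are placeholders, and a valid HF_TOKEN is assumed to be configured:

from datasets import Dataset, load_dataset

# Mirrors the call in the hunk above with illustrative values.
result_dataset = Dataset.from_list([{"text": "example row"}])
result_dataset.push_to_hub(
    "username/dataset-deduped",          # placeholder repo id
    private=True,
    commit_message="Deduplicated using duplicates method",
)

# Reload to confirm the upload looks as expected.
check = load_dataset("username/dataset-deduped", split="train")
print(check.num_rows, check.column_names)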