# =================================================================================== #
# ImageNet CLIP Feature Extraction - Download-First Strategy
# Author: AbstractPhil
#
# Description: Extracts CLIP features for ImageNet from a source repo of your choice.
# Formatted for Colab - reads HF_TOKEN from Colab secrets via userdata.get('HF_TOKEN').
# Should run as-is without hassle, but it is fairly time-consuming.
#
# License: MIT
# =================================================================================== #
import os, datetime, time
from typing import Dict, Generator
import torch
import torch.nn.functional as F
from datasets import Dataset, Features, Value, Sequence
from transformers import CLIPModel
from huggingface_hub import create_repo
from google.colab import userdata
# Set your HF_TOKEN here.
HF_TOKEN = userdata.get('HF_TOKEN') # or read from os.environ, or whatever source you prefer
if HF_TOKEN: # guard: os.environ values must be strings, so skip when the secret is unset
    os.environ["HF_TOKEN"] = HF_TOKEN
import torchvision.transforms.functional as TF
from torch.utils.data import DataLoader
# Configuration for ImageNet-scale processing
CONFIG = {
"device": "cuda" if torch.cuda.is_available() else "cpu",
"batch_size": 256, # A100 can handle much larger batches
"generator_chunk_size": 5000, # Process and yield in chunks
"prefetch_factor": 16, # DataLoader prefetch
"persistent_workers": True, # Keep workers alive
"num_workers": 2, # Parallel data loading
"image_size": 224,
"vector_dim": 768,
"normalize_on_gpu": True,
"clip_mean": (0.48145466, 0.4578275, 0.40821073),
"clip_std": (0.26862954, 0.26130258, 0.27577711),
# Memory management for ImageNet scale
"max_memory_gb": 64, # Adjust based on available RAM
"memory_cleanup_interval": 10000, # Clean memory every N images
# Output configuration
"upload_to_hub": False, # set to true if you wish to upload to your repo
"repo_id": "", #"AbstractPhil/imagenet-clip-features", # change this to your HF repo, you can't upload to mine.
"generator_version": "2.0.0", # Must be x.y.z format
# Download-first strategy (optimized for multiple models)
"download_first": True, # Download entire dataset before processing
"cache_dir": "./imagenet_cache", # Where to cache downloaded data
"keep_dataset_in_memory": False, # False to save RAM
"imagenet_repo": "benjamin-paine/imagenet-1k-256x256",
}
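# --------------------------------------------------------------------------- #
# Optional helper (a minimal sketch, not part of the original pipeline): scale
# the batch size by detected GPU memory before building the extractor. The
# 40 GB reference point and the linear scaling are assumptions, not benchmarks.
# --------------------------------------------------------------------------- #
def suggest_batch_size(default: int = CONFIG["batch_size"]) -> int:
    """Rough heuristic: scale the default batch size by available GPU memory."""
    if not torch.cuda.is_available():
        return 32 # conservative CPU fallback
    total_gb = torch.cuda.get_device_properties(0).total_memory / 1e9
    # e.g. an 80 GB A100 doubles the 40 GB baseline batch size
    return max(64, int(default * total_gb / 40))
# Usage (optional): CONFIG["batch_size"] = suggest_batch_size()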
# Extended list of CLIP models to process
CLIP_MODELS = [
# OpenAI CLIP models
#{"repo_id": "openai/clip-vit-base-patch32", "short_name": "clip_vit_b32", "dim": 512},
# {"repo_id": "openai/clip-vit-base-patch16", "short_name": "clip_vit_b16", "dim": 512},
#{"repo_id": "laion/CLIP-ViT-B-32-laion2B-s34B-b79K", "short_name": "clip_vit_laion_b32", "dim": 512},
#{"repo_id": "openai/clip-vit-large-patch14", "short_name": "clip_vit_l14", "dim": 768},
#{"repo_id": "openai/clip-vit-large-patch14-336", "short_name": "clip_vit_l14_336", "dim": 768},
# LAION CLIP models (if you want to add them)
{"repo_id": "laion/CLIP-ViT-H-14-laion2B-s32B-b79K", "short_name": "clip_vit_laion_h14", "dim": 1024},
#{"repo_id": "laion/CLIP-ViT-g-14-laion2B-s12B-b42K", "short_name": "clip_vit_laion_g14", "dim": 1024},
# {"repo_id": "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k", "short_name": "clip_vit_laion_bigg14", "dim": 1280},
# You can add more models here
]
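# --------------------------------------------------------------------------- #
# Optional sanity check (a hedged sketch, not in the original script): confirm
# each declared "dim" matches the model's actual projection_dim before spending
# hours on extraction. Only the lightweight config file is downloaded.
# --------------------------------------------------------------------------- #
def verify_model_dims(models=CLIP_MODELS):
    from transformers import CLIPConfig
    for m in models:
        actual = CLIPConfig.from_pretrained(m["repo_id"]).projection_dim
        status = "OK" if actual == m["dim"] else f"MISMATCH (actual {actual})"
        print(f"[🔎] {m['short_name']}: declared dim {m['dim']} -> {status}")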
TARGET_SPLITS = ["train", "validation", "test"] # note: ImageNet-1k test labels are withheld, so test rows typically carry label = -1
class ImageNetClipFeatureExtractor:
"""
Production-ready CLIP feature extractor optimized for processing multiple models.
Uses download-first strategy for maximum throughput.
"""
def __init__(self, config: dict):
self.cfg = config
self.device = torch.device(config["device"])
self._setup_preprocessing()
self.hf_token = os.environ.get("HF_TOKEN") or userdata.get('HF_TOKEN')
self.datasets_cache = {} # Cache loaded datasets
def _setup_preprocessing(self):
self._mean = torch.tensor(self.cfg["clip_mean"]).view(1, 3, 1, 1)
self._std = torch.tensor(self.cfg["clip_std"]).view(1, 3, 1, 1)
def _download_datasets(self):
"""
Pre-download all datasets once before processing any models.
This is called once and datasets are reused for all models.
"""
from datasets import load_dataset
print("=" * 60)
print("📥 DOWNLOADING IMAGENET DATASET")
print("=" * 60)
for split in TARGET_SPLITS:
if split not in self.datasets_cache:
print(f"\n[⏬] Downloading {split} split to {self.cfg['cache_dir']}...")
start_time = time.time()
dataset = load_dataset(
self.cfg["imagenet_repo"], # configurable: point this at your own ImageNet mirror
split=split,
cache_dir=self.cfg["cache_dir"],
keep_in_memory=self.cfg["keep_dataset_in_memory"],
num_proc=None # single-process download; set an integer to parallelize
)
download_time = time.time() - start_time
print(f"[✅] Downloaded {len(dataset)} {split} images in {download_time/60:.1f} minutes")
if download_time > 0:
print(f"[📊] Download speed: {len(dataset)/download_time:.1f} images/sec")
self.datasets_cache[split] = dataset
print("\n[✅] All datasets downloaded and cached!")
print("=" * 60)
def _gpu_preprocess(self, images: torch.Tensor) -> torch.Tensor:
"""Memory-efficient GPU preprocessing."""
if images.dtype != torch.float32:
images = images.float()
# Handle both 0-1 and 0-255 ranges
if images.max() > 1.5:
images = images / 255.0
# Resize if needed
if images.shape[-1] != self.cfg["image_size"]:
images = F.interpolate(
images,
size=(self.cfg["image_size"], self.cfg["image_size"]),
mode="bilinear",
align_corners=False
)
# Normalize
if self.cfg["normalize_on_gpu"]:
mean = self._mean.to(images.device, dtype=images.dtype)
std = self._std.to(images.device, dtype=images.dtype)
images = (images - mean) / std
return images
def _collate_fn(self, batch):
"""Custom collate function for DataLoader."""
import hashlib
images = []
labels = []
image_ids = []
for item in batch:
image = item['image']
if image.mode != 'RGB':
image = image.convert('RGB')
# Convert to tensor [3, H, W]
image_tensor = TF.to_tensor(image)
# Generate SHA256 hash of the image
image_bytes = image.tobytes()
sha256_hash = hashlib.sha256(image_bytes).hexdigest()
images.append(image_tensor)
labels.append(item.get('label', -1))
image_ids.append(sha256_hash)
return {
'images': torch.stack(images),
'labels': labels,
'image_ids': image_ids
}
def _imagenet_generator_optimized(self, split: str, model_id: str) -> Generator[Dict, None, None]:
"""
Optimized generator using pre-downloaded data and DataLoader for parallel loading.
"""
# Use cached dataset
dataset = self.datasets_cache[split]
# Create DataLoader for efficient parallel loading
dataloader = DataLoader(
dataset,
batch_size=self.cfg["batch_size"],
shuffle=False, # Keep order for reproducibility
num_workers=self.cfg["num_workers"],
prefetch_factor=self.cfg["prefetch_factor"],
persistent_workers=self.cfg["persistent_workers"],
collate_fn=self._collate_fn,
pin_memory=True # Faster GPU transfer
)
# Load CLIP model
print(f"\n[🤖] Loading {model_id}")
model = CLIPModel.from_pretrained(model_id).to(self.device)
model.eval()
# Setup for chunked processing
chunk_buffer = []
timestamp = datetime.datetime.now(datetime.timezone.utc)
images_processed = 0
start_time = time.time()
last_print_time = start_time
print_interval = 10 # Print progress every 10 seconds
try:
with torch.no_grad():
for batch_idx, batch in enumerate(dataloader):
# Move batch to GPU
image_batch = batch['images'].to(self.device, non_blocking=True)
labels = batch['labels']
image_ids = batch['image_ids']
# Preprocess on GPU
image_batch = self._gpu_preprocess(image_batch)
# Extract features
features = model.get_image_features(pixel_values=image_batch)
features = features / features.norm(dim=-1, keepdim=True)
# Create records
for img_id, label, feature_vec in zip(image_ids, labels, features):
chunk_buffer.append({
"image_id": img_id, # Now using SHA256 hash
"label": int(label),
"clip_model": model_id,
"clip_features": feature_vec.detach().cpu().float().numpy().tolist(),
"vector_dim": features.shape[-1],
"timestamp": timestamp,
})
images_processed += len(image_ids)
# Print progress at regular time intervals
current_time = time.time()
if current_time - last_print_time >= print_interval:
elapsed = current_time - start_time
speed = images_processed / elapsed
eta = (len(dataset) - images_processed) / speed
print(f"[⚡] Progress: {images_processed}/{len(dataset)} "
f"({100*images_processed/len(dataset):.1f}%) | "
f"Speed: {speed:.1f} img/sec | "
f"ETA: {eta/60:.1f} min")
last_print_time = current_time
# Yield chunk when it reaches configured size
if len(chunk_buffer) >= self.cfg["generator_chunk_size"]:
print(f"[📦] Yielding chunk of {len(chunk_buffer)} features | "
f"Progress: {images_processed}/{len(dataset)} "
f"({100*images_processed/len(dataset):.1f}%)")
yield from chunk_buffer
chunk_buffer = []
# Clean up GPU memory whenever we cross the configured interval
# (images_processed advances in batch-sized steps, so it rarely lands
# on an exact multiple of the interval; check the crossing instead)
if images_processed % self.cfg["memory_cleanup_interval"] < len(image_ids):
    torch.cuda.empty_cache()
# Yield remaining chunk buffer
if chunk_buffer:
print(f"[📦] Final chunk of {len(chunk_buffer)} features")
yield from chunk_buffer
# Final stats
total_time = time.time() - start_time
print(f"\n[✅] Processed {images_processed} images in {total_time/60:.1f} minutes")
print(f"[📊] Average speed: {images_processed/total_time:.1f} images/sec")
finally:
del model
torch.cuda.empty_cache()
def extract_and_upload(self, model_config: dict, split: str = "train"):
"""
Extract features using optimized generator and upload to HuggingFace.
Returns the dataset if upload fails for retry purposes.
"""
model_id = model_config["repo_id"]
short_name = model_config["short_name"]
print("\n" + "=" * 60)
print(f"⚙️ PROCESSING: {short_name} - {split}")
print("=" * 60)
# Define dataset features
features = Features({
"image_id": Value("string"),
"label": Value("int32"),
"clip_model": Value("string"),
"clip_features": Sequence(Value("float32")),
"vector_dim": Value("int32"),
"timestamp": Value("timestamp[ns]"),
})
# Suppress the "Generating split" progress bar
import sys
import io
old_stderr = sys.stderr
sys.stderr = io.StringIO()
try:
# Create dataset from generator
dataset = Dataset.from_generator(
lambda: self._imagenet_generator_optimized(split, model_id),
features=features,
writer_batch_size=self.cfg["generator_chunk_size"],
split=split
)
except Exception as e:
raise Exception(e)
#finally:
# # Restore stderr
# sys.stderr = old_stderr
# return
# Add metadata
dataset.info.description = f"CLIP features for ImageNet-1k 256x256 {split} using {model_id}"
dataset.info.version = self.cfg["generator_version"]
# Save to disk before upload (safety backup)
temp_path = f"./temp_dataset_{short_name}_{split}"
print(f"[💾] Saving dataset to {temp_path} for safety...")
dataset.save_to_disk(temp_path)
# Upload to HuggingFace
split_name = f"{short_name}_{split}"
print(f"\n[📤] Uploading {split_name} to {self.cfg['repo_id']}")
try:
dataset.push_to_hub(
self.cfg["repo_id"],
split=split_name,
token=self.hf_token,
commit_message=f"Add {split_name} CLIP features",
max_shard_size="500MB"
)
print(f"[✅] Successfully uploaded {split_name}")
# Clean up temp file on success
import shutil
shutil.rmtree(temp_path, ignore_errors=True)
return None
except Exception as e:
print(f"[❌] Upload failed for {split_name}: {e}")
print(f"[💡] Dataset saved at {temp_path} - you can retry upload with:")
print(f" from datasets import load_from_disk")
print(f" dataset = load_from_disk('{temp_path}')")
print(f" dataset.push_to_hub('{self.cfg['repo_id']}', split='{split_name}', ...)")
return dataset # Return dataset for potential retry
def extract_all_models(self, models_to_process=None):
"""
Extract features for all models and splits.
Args:
models_to_process: List of model configs to process (default: all)
"""
# Ensure repo exists
if self.hf_token:
try:
create_repo(self.cfg["repo_id"], repo_type="dataset", exist_ok=True, token=self.hf_token)
print(f"[✅] Repository ready: {self.cfg['repo_id']}")
except Exception as e:
print(f"[⚠️] Repo creation warning: {e}")
# Download all data first (once for all models)
self._download_datasets()
# Process specified models or all
models = models_to_process or CLIP_MODELS
total_combinations = len(models) * len(TARGET_SPLITS)
print("\n" + "=" * 60)
print(f"📋 PROCESSING PLAN: {len(models)} models × {len(TARGET_SPLITS)} splits = {total_combinations} tasks")
print("=" * 60)
# Keep track of failed uploads for retry
failed_uploads = []
for i, model_config in enumerate(models, 1):
print(f"\n[{i}/{len(models)}] Model: {model_config['short_name']}")
for split in TARGET_SPLITS:
try:
dataset = self.extract_and_upload(model_config, split)
if dataset is not None:
# Upload failed but we have the dataset
failed_uploads.append({
'model': model_config['short_name'],
'split': split,
'dataset': dataset,
'path': f"./temp_dataset_{model_config['short_name']}_{split}"
})
except Exception as e:
print(f"[❌] Failed {model_config['short_name']} {split}: {e}")
continue
# Cleanup between models
torch.cuda.empty_cache()
print("\n" + "=" * 60)
if failed_uploads:
print(f"⚠️ PROCESSING COMPLETE WITH {len(failed_uploads)} FAILED UPLOADS")
print("\nFailed uploads saved to disk:")
for failure in failed_uploads:
print(f" - {failure['model']}_{failure['split']}: {failure['path']}")
print("\nYou can retry these uploads after fixing the issue.")
else:
print("🎉 ALL PROCESSING COMPLETE!")
print("=" * 60)
return failed_uploads # Return list of failed uploads for retry
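# --------------------------------------------------------------------------- #
# Retry helper (a minimal sketch, not part of the original script): re-attempt
# the uploads that extract_all_models() returned as failed, loading the on-disk
# backups written by extract_and_upload. Name and signature are illustrative.
# --------------------------------------------------------------------------- #
def retry_failed_uploads(failed_uploads, repo_id, token):
    from datasets import load_from_disk
    still_failed = []
    for failure in failed_uploads:
        split_name = f"{failure['model']}_{failure['split']}"
        try:
            dataset = load_from_disk(failure['path'])
            dataset.push_to_hub(
                repo_id,
                split=split_name,
                token=token,
                commit_message=f"Add {split_name} CLIP features (retry)",
                max_shard_size="500MB"
            )
            print(f"[✅] Retried and uploaded {split_name}")
        except Exception as e:
            print(f"[❌] Retry failed for {split_name}: {e}")
            still_failed.append(failure)
    return still_failed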
# ============================================================
# Utility Functions
# ============================================================
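# --------------------------------------------------------------------------- #
# Smoke test (a hedged sketch, not in the original): exercise the GPU
# preprocessing path on random uint8 data to confirm range handling, resizing,
# and normalization before committing to a full run.
# --------------------------------------------------------------------------- #
def preprocess_self_test(extractor: "ImageNetClipFeatureExtractor"):
    dummy = torch.randint(0, 256, (2, 3, 256, 256), dtype=torch.uint8)
    out = extractor._gpu_preprocess(dummy.to(extractor.device))
    expected = (2, 3, extractor.cfg["image_size"], extractor.cfg["image_size"])
    assert tuple(out.shape) == expected, f"unexpected shape {tuple(out.shape)}"
    print(f"[🧪] Preprocess OK: shape {tuple(out.shape)}, mean {out.mean().item():.3f}")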
def estimate_processing_time(num_models=len(CLIP_MODELS)):
"""
Estimate total processing time for all models.
"""
print("=" * 60)
print("⏱️ TIME ESTIMATES")
print("=" * 60)
# Dataset sizes
train_size = 1_281_167
val_size = 50_000
total_images = train_size + val_size
# Time estimates
download_time_min = 60 # minutes
download_time_max = 120
# Processing speeds (images/sec)
speed_min = 800
speed_max = 1200
print(f"\n📊 Dataset sizes:")
print(f" - Train: {train_size:,} images")
print(f" - Validation: {val_size:,} images")
print(f" - Total per model: {total_images:,} images")
print(f"\n⏬ Download time (one-time):")
print(f" - Estimated: {download_time_min}-{download_time_max} minutes")
print(f"\n🚀 Processing speed:")
print(f" - Expected: {speed_min}-{speed_max} images/sec")
# Per model
time_per_model_min = total_images / speed_max / 60
time_per_model_max = total_images / speed_min / 60
print(f"\n⏱️ Per model:")
print(f" - Processing time: {time_per_model_min:.1f}-{time_per_model_max:.1f} minutes")
# Total
total_min = download_time_min + (num_models * time_per_model_min)
total_max = download_time_max + (num_models * time_per_model_max)
print(f"\n🎯 Total for {num_models} models:")
print(f" - Total time: {total_min:.1f}-{total_max:.1f} minutes")
print(f" - Or: {total_min/60:.1f}-{total_max/60:.1f} hours")
print("\n💡 Tips:")
print(" - Processing is GPU-bound, so better GPUs = faster")
print(" - A100/H100 can use batch_size=1024+ for more speed")
print(" - Multiple GPUs can process different models in parallel")
print("=" * 60)
# ============================================================
# Main Execution
# ============================================================
"""
Main execution for multi-model ImageNet CLIP feature extraction.
"""
# Show time estimates
estimate_processing_time()
# Confirm settings
print(f"\n🔧 Current configuration:")
print(f" - Batch size: {CONFIG['batch_size']}")
print(f" - Chunk size: {CONFIG['generator_chunk_size']}")
print(f" - Workers: {CONFIG['num_workers']}")
print(f" - Models to process: {len(CLIP_MODELS)}")
# Run extraction
extractor = ImageNetClipFeatureExtractor(CONFIG)
# For a quick test, process just the first model:
# failed_uploads = extractor.extract_all_models(models_to_process=CLIP_MODELS[:1])
failed_uploads = extractor.extract_all_models() # Process all models; returns any failed uploads
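# If any uploads failed, the datasets remain on disk; retry with the sketch
# helper defined above (retry_failed_uploads is illustrative, not original):
# still_failed = retry_failed_uploads(failed_uploads, CONFIG["repo_id"], extractor.hf_token)
#
# Consuming the output later (split naming follows extract_and_upload,
# i.e. f"{short_name}_{split}"):
# from datasets import load_dataset
# feats = load_dataset(CONFIG["repo_id"], split="clip_vit_laion_h14_validation")
# x = torch.tensor(feats[0]["clip_features"]) # unit-norm CLIP embedding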