import os, json, datetime, time
from pathlib import Path
from typing import Dict, List, Union, Optional, Generator

import torch
import torch.nn.functional as F
import torchvision.transforms.functional as TF
from torch.utils.data import DataLoader

from datasets import Dataset, DatasetDict, Features, Value, Sequence
from transformers import CLIPModel
from huggingface_hub import HfApi, HfFolder, create_repo
from google.colab import userdata

# Colab-only: read the Hugging Face token from the Colab secrets manager and
# expose it to downstream libraries via the environment.
HF_TOKEN = userdata.get('HF_TOKEN')
os.environ["HF_TOKEN"] = HF_TOKEN

CONFIG = {
    # Compute and data loading
    "device": "cuda" if torch.cuda.is_available() else "cpu",
    "batch_size": 256,
    "generator_chunk_size": 5000,
    "prefetch_factor": 16,
    "persistent_workers": True,
    "num_workers": 2,

    # Preprocessing (CLIP input size and normalization constants)
    "image_size": 224,
    "vector_dim": 768,
    "normalize_on_gpu": True,
    "clip_mean": (0.48145466, 0.4578275, 0.40821073),
    "clip_std": (0.26862954, 0.26130258, 0.27577711),

    # Memory management
    "max_memory_gb": 64,
    "memory_cleanup_interval": 10000,

    # Hub upload (repo_id must be set to your target dataset repo before running)
    "upload_to_hub": False,
    "repo_id": "",
    "generator_version": "2.0.0",

    # Dataset download and caching
    "download_first": True,
    "cache_dir": "./imagenet_cache",
    "keep_dataset_in_memory": False,
    "imagenet_repo": "benjamin-paine/imagenet-1k-256x256",
}

CLIP_MODELS = [
    {"repo_id": "laion/CLIP-ViT-H-14-laion2B-s32B-b79K", "short_name": "clip_vit_laion_h14", "dim": 1024},
]

TARGET_SPLITS = ["train", "validation", "test"]

class ImageNetClipFeatureExtractor:
    """
    Production-ready CLIP feature extractor optimized for processing multiple models.
    Uses a download-first strategy for maximum throughput: the ImageNet splits are
    downloaded and cached once, then reused for every model.
    """

    def __init__(self, config: dict):
        self.cfg = config
        self.device = torch.device(config["device"])
        self._setup_preprocessing()
        self.hf_token = os.environ.get("HF_TOKEN") or userdata.get('HF_TOKEN')
        self.datasets_cache = {}

    def _setup_preprocessing(self):
        # CLIP normalization constants, shaped for broadcasting over NCHW batches.
        self._mean = torch.tensor(self.cfg["clip_mean"]).view(1, 3, 1, 1)
        self._std = torch.tensor(self.cfg["clip_std"]).view(1, 3, 1, 1)

    def _download_datasets(self):
        """
        Pre-download all target splits once before processing any models.
        Called a single time; the cached datasets are reused for every model.
        """
        from datasets import load_dataset

        print("=" * 60)
        print("📥 DOWNLOADING IMAGENET DATASET")
        print("=" * 60)

        for split in TARGET_SPLITS:
            if split not in self.datasets_cache:
                print(f"\n[⏬] Downloading {split} split to {self.cfg['cache_dir']}...")
                start_time = time.time()

                dataset = load_dataset(
                    self.cfg["imagenet_repo"],
                    split=split,
                    cache_dir=self.cfg["cache_dir"],
                    keep_in_memory=self.cfg["keep_dataset_in_memory"],
                    num_proc=None,
                )

                download_time = time.time() - start_time
                print(f"[✅] Downloaded {len(dataset)} {split} images in {download_time/60:.1f} minutes")
                if download_time > 0:
                    print(f"[📊] Download speed: {len(dataset)/download_time:.1f} images/sec")

                self.datasets_cache[split] = dataset

        print("\n[✅] All datasets downloaded and cached!")
        print("=" * 60)

    def _gpu_preprocess(self, images: torch.Tensor) -> torch.Tensor:
        """Memory-efficient GPU preprocessing: scale, resize, and normalize a batch."""
        if images.dtype != torch.float32:
            images = images.float()

        # Scale to [0, 1] if the batch still looks like 0-255 pixel values.
        if images.max() > 1.5:
            images = images / 255.0

        # Resize to the CLIP input resolution if needed.
        if images.shape[-1] != self.cfg["image_size"]:
            images = F.interpolate(
                images,
                size=(self.cfg["image_size"], self.cfg["image_size"]),
                mode="bilinear",
                align_corners=False,
            )

        # Apply CLIP mean/std normalization on the GPU.
        if self.cfg["normalize_on_gpu"]:
            mean = self._mean.to(images.device, dtype=images.dtype)
            std = self._std.to(images.device, dtype=images.dtype)
            images = (images - mean) / std

        return images

    def _collate_fn(self, batch):
        """Custom collate function: stack image tensors and derive stable image IDs."""
        import hashlib

        images = []
        labels = []
        image_ids = []

        for item in batch:
            image = item['image']
            if image.mode != 'RGB':
                image = image.convert('RGB')

            image_tensor = TF.to_tensor(image)

            # Use a SHA-256 hash of the raw pixel bytes as a deterministic image ID.
            sha256_hash = hashlib.sha256(image.tobytes()).hexdigest()

            images.append(image_tensor)
            labels.append(item.get('label', -1))
            image_ids.append(sha256_hash)

        return {
            'images': torch.stack(images),
            'labels': labels,
            'image_ids': image_ids,
        }

    def _imagenet_generator_optimized(self, split: str, model_id: str) -> Generator[Dict, None, None]:
        """
        Optimized generator that streams CLIP features for one split and one model,
        using the pre-downloaded dataset and a DataLoader for parallel image loading.
        """
        dataset = self.datasets_cache[split]

        dataloader = DataLoader(
            dataset,
            batch_size=self.cfg["batch_size"],
            shuffle=False,
            num_workers=self.cfg["num_workers"],
            prefetch_factor=self.cfg["prefetch_factor"],
            persistent_workers=self.cfg["persistent_workers"],
            collate_fn=self._collate_fn,
            pin_memory=True,
        )

        print(f"\n[🤖] Loading {model_id}")
        model = CLIPModel.from_pretrained(model_id).to(self.device)
        model.eval()

        chunk_buffer = []
        timestamp = datetime.datetime.now(datetime.timezone.utc)
        images_processed = 0
        start_time = time.time()
        last_print_time = start_time
        print_interval = 10  # seconds between progress reports

        try:
            with torch.no_grad():
                for batch_idx, batch in enumerate(dataloader):
                    image_batch = batch['images'].to(self.device, non_blocking=True)
                    labels = batch['labels']
                    image_ids = batch['image_ids']

                    # Preprocess on the GPU, then extract and L2-normalize CLIP features.
                    image_batch = self._gpu_preprocess(image_batch)
                    features = model.get_image_features(pixel_values=image_batch)
                    features = features / features.norm(dim=-1, keepdim=True)

                    for img_id, label, feature_vec in zip(image_ids, labels, features):
                        chunk_buffer.append({
                            "image_id": img_id,
                            "label": int(label),
                            "clip_model": model_id,
                            "clip_features": feature_vec.detach().cpu().float().numpy().tolist(),
                            "vector_dim": features.shape[-1],
                            "timestamp": timestamp,
                        })

                    images_processed += len(image_ids)

                    # Periodic progress report.
                    current_time = time.time()
                    if current_time - last_print_time >= print_interval:
                        elapsed = current_time - start_time
                        speed = images_processed / elapsed
                        eta = (len(dataset) - images_processed) / speed
                        print(f"[⚡] Progress: {images_processed}/{len(dataset)} "
                              f"({100*images_processed/len(dataset):.1f}%) | "
                              f"Speed: {speed:.1f} img/sec | "
                              f"ETA: {eta/60:.1f} min")
                        last_print_time = current_time

                    # Flush the buffer to the dataset writer once it reaches the chunk size.
                    if len(chunk_buffer) >= self.cfg["generator_chunk_size"]:
                        print(f"[📦] Yielding chunk of {len(chunk_buffer)} features | "
                              f"Progress: {images_processed}/{len(dataset)} "
                              f"({100*images_processed/len(dataset):.1f}%)")
                        yield from chunk_buffer
                        chunk_buffer = []

                    # Periodic GPU memory cleanup. images_processed grows in batch-size
                    # steps, so test whether it just crossed a cleanup boundary.
                    if images_processed % self.cfg["memory_cleanup_interval"] < len(image_ids):
                        torch.cuda.empty_cache()

            if chunk_buffer:
                print(f"[📦] Final chunk of {len(chunk_buffer)} features")
                yield from chunk_buffer

            total_time = time.time() - start_time
            print(f"\n[✅] Processed {images_processed} images in {total_time/60:.1f} minutes")
            print(f"[📊] Average speed: {images_processed/total_time:.1f} images/sec")

        finally:
            del model
            torch.cuda.empty_cache()

    def extract_and_upload(self, model_config: dict, split: str = "train"):
        """
        Extract features using the optimized generator and upload them to the Hugging Face Hub.
        Returns the dataset if the upload fails (for retry purposes); returns None on success.
        """
        model_id = model_config["repo_id"]
        short_name = model_config["short_name"]

        print("\n" + "=" * 60)
        print(f"⚙️ PROCESSING: {short_name} - {split}")
        print("=" * 60)

        features = Features({
            "image_id": Value("string"),
            "label": Value("int32"),
            "clip_model": Value("string"),
            "clip_features": Sequence(Value("float32")),
            "vector_dim": Value("int32"),
            "timestamp": Value("timestamp[ns]"),
        })

        # Temporarily silence stderr while the generator runs, and restore it afterwards.
        import sys
        import io
        old_stderr = sys.stderr
        sys.stderr = io.StringIO()

        try:
            dataset = Dataset.from_generator(
                lambda: self._imagenet_generator_optimized(split, model_id),
                features=features,
                writer_batch_size=self.cfg["generator_chunk_size"],
                split=split,
            )
        finally:
            sys.stderr = old_stderr

        dataset.info.description = f"CLIP features for ImageNet-1k 256x256 {split} using {model_id}"
        dataset.info.version = self.cfg["generator_version"]

        # Persist a local copy before uploading, so a failed upload can be retried from disk.
        temp_path = f"./temp_dataset_{short_name}_{split}"
        print(f"[💾] Saving dataset to {temp_path} for safety...")
        dataset.save_to_disk(temp_path)

        split_name = f"{short_name}_{split}"

        print(f"\n[📤] Uploading {split_name} to {self.cfg['repo_id']}")
        try:
            dataset.push_to_hub(
                self.cfg["repo_id"],
                split=split_name,
                token=self.hf_token,
                commit_message=f"Add {split_name} CLIP features",
                max_shard_size="500MB",
            )
            print(f"[✅] Successfully uploaded {split_name}")

            # Upload succeeded, so the local safety copy is no longer needed.
            import shutil
            shutil.rmtree(temp_path, ignore_errors=True)
            return None

        except Exception as e:
            print(f"[❌] Upload failed for {split_name}: {e}")
            print(f"[💡] Dataset saved at {temp_path} - you can retry the upload with:")
            print(f"    from datasets import load_from_disk")
            print(f"    dataset = load_from_disk('{temp_path}')")
            print(f"    dataset.push_to_hub('{self.cfg['repo_id']}', split='{split_name}', ...)")
            return dataset

    def extract_all_models(self, models_to_process=None):
        """
        Extract features for all models and splits.

        Args:
            models_to_process: List of model configs to process (default: all CLIP_MODELS)

        Returns:
            A list describing any uploads that failed and were kept on disk.
        """
        # Make sure the target dataset repository exists before any uploads.
        if self.hf_token:
            try:
                create_repo(self.cfg["repo_id"], repo_type="dataset", exist_ok=True, token=self.hf_token)
                print(f"[✅] Repository ready: {self.cfg['repo_id']}")
            except Exception as e:
                print(f"[⚠️] Repo creation warning: {e}")

        # Download and cache every split once, up front.
        self._download_datasets()

        models = models_to_process or CLIP_MODELS
        total_combinations = len(models) * len(TARGET_SPLITS)

        print("\n" + "=" * 60)
        print(f"📋 PROCESSING PLAN: {len(models)} models × {len(TARGET_SPLITS)} splits = {total_combinations} tasks")
        print("=" * 60)

        failed_uploads = []

        for i, model_config in enumerate(models, 1):
            print(f"\n[{i}/{len(models)}] Model: {model_config['short_name']}")

            for split in TARGET_SPLITS:
                try:
                    dataset = self.extract_and_upload(model_config, split)
                    if dataset is not None:
                        # Upload failed; the dataset was returned and is also saved on disk.
                        failed_uploads.append({
                            'model': model_config['short_name'],
                            'split': split,
                            'dataset': dataset,
                            'path': f"./temp_dataset_{model_config['short_name']}_{split}"
                        })
                except Exception as e:
                    print(f"[❌] Failed {model_config['short_name']} {split}: {e}")
                    continue

            # Free GPU memory between models.
            torch.cuda.empty_cache()

        print("\n" + "=" * 60)
        if failed_uploads:
            print(f"⚠️ PROCESSING COMPLETE WITH {len(failed_uploads)} FAILED UPLOADS")
            print("\nFailed uploads saved to disk:")
            for failure in failed_uploads:
                print(f"  - {failure['model']}_{failure['split']}: {failure['path']}")
            print("\nYou can retry these uploads after fixing the issue.")
        else:
            print("🎉 ALL PROCESSING COMPLETE!")
        print("=" * 60)

        return failed_uploads

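# The failed-upload messages above suggest retrying the push by hand. The helper below
# is a minimal sketch of that retry loop, reusing the temp_dataset_<model>_<split>
# folders written by extract_and_upload(); the function name and signature are
# illustrative, not part of the original pipeline.
def retry_failed_uploads(failed_uploads, repo_id, token=None):
    """Retry pushing datasets that were saved to disk after a failed upload."""
    from datasets import load_from_disk

    still_failing = []
    for failure in failed_uploads:
        split_name = f"{failure['model']}_{failure['split']}"
        try:
            # Reload the locally saved dataset and push it under the same split name.
            dataset = load_from_disk(failure['path'])
            dataset.push_to_hub(
                repo_id,
                split=split_name,
                token=token,
                commit_message=f"Retry upload of {split_name} CLIP features",
                max_shard_size="500MB",
            )
            print(f"[✅] Retried and uploaded {split_name}")
        except Exception as e:
            print(f"[❌] Retry failed for {split_name}: {e}")
            still_failing.append(failure)
    return still_failing
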
def estimate_processing_time(num_models=len(CLIP_MODELS)):
    """
    Estimate the total processing time for all models.
    """
    print("=" * 60)
    print("⏱️ TIME ESTIMATES")
    print("=" * 60)

    # ImageNet-1k split sizes (train/validation/test, matching TARGET_SPLITS).
    train_size = 1_281_167
    val_size = 50_000
    test_size = 100_000
    total_images = train_size + val_size + test_size

    # Rough one-time download window, in minutes.
    download_time_min = 60
    download_time_max = 120

    # Expected extraction throughput, in images/sec.
    speed_min = 800
    speed_max = 1200

    print(f"\n📊 Dataset sizes:")
    print(f"  - Train: {train_size:,} images")
    print(f"  - Validation: {val_size:,} images")
    print(f"  - Test: {test_size:,} images")
    print(f"  - Total per model: {total_images:,} images")

    print(f"\n⏬ Download time (one-time):")
    print(f"  - Estimated: {download_time_min}-{download_time_max} minutes")

    print(f"\n🚀 Processing speed:")
    print(f"  - Expected: {speed_min}-{speed_max} images/sec")

    time_per_model_min = total_images / speed_max / 60
    time_per_model_max = total_images / speed_min / 60

    print(f"\n⏱️ Per model:")
    print(f"  - Processing time: {time_per_model_min:.1f}-{time_per_model_max:.1f} minutes")

    total_min = download_time_min + (num_models * time_per_model_min)
    total_max = download_time_max + (num_models * time_per_model_max)

    print(f"\n🎯 Total for {num_models} models:")
    print(f"  - Total time: {total_min:.1f}-{total_max:.1f} minutes")
    print(f"  - Or: {total_min/60:.1f}-{total_max/60:.1f} hours")

    print("\n💡 Tips:")
    print("  - Processing is GPU-bound, so better GPUs = faster")
    print("  - A100/H100 can use batch_size=1024+ for more speed")
    print("  - Multiple GPUs can process different models in parallel")
    print("=" * 60)

"""
Main execution for multi-model ImageNet CLIP feature extraction.
"""

estimate_processing_time()

print(f"\n🔧 Current configuration:")
print(f"  - Batch size: {CONFIG['batch_size']}")
print(f"  - Chunk size: {CONFIG['generator_chunk_size']}")
print(f"  - Workers: {CONFIG['num_workers']}")
print(f"  - Models to process: {len(CLIP_MODELS)}")

extractor = ImageNetClipFeatureExtractor(CONFIG)
extractor.extract_all_models()
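
# Downstream usage sketch: once a split such as "clip_vit_laion_h14_validation" has
# been pushed, the features can be streamed back from the Hub and stacked into
# tensors. The repo_id and split name below are placeholders; substitute whatever you
# configured in CONFIG["repo_id"] and whichever model/split you actually uploaded.
#
# from datasets import load_dataset
#
# feats = load_dataset("your-username/imagenet-clip-features",
#                      split="clip_vit_laion_h14_validation")
# vectors = torch.tensor(feats["clip_features"])   # shape: (num_images, dim)
# labels = torch.tensor(feats["label"])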