# hqclip/debug.py
import gradio as gr
import open_clip
import torch
import numpy as np
import pandas as pd
from PIL import Image
import matplotlib.pyplot as plt
import io
# --- 1. Model Initialization (run only once) ---
print("Loading HQ-CLIP model...")
model_hq, _, preprocess_hq = open_clip.create_model_and_transforms('hf-hub:zhixiangwei/hqclip-openai-large-ft-vlm1b')
tokenizer_hq = open_clip.get_tokenizer('hf-hub:zhixiangwei/hqclip-openai-large-ft-vlm1b')
print("HQ-CLIP model loaded.")
print("Loading standard OpenAI CLIP model...")
model_openai, _, preprocess_openai = open_clip.create_model_and_transforms('ViT-L-14-quickgelu', pretrained='openai')
tokenizer_openai = open_clip.get_tokenizer('ViT-L-14-quickgelu')  # tokenizer depends on the architecture, not the pretrained tag
print("OpenAI CLIP model loaded.")
device = "cuda" if torch.cuda.is_available() else "cpu"
device='cpu'
model_hq.to(device)
model_openai.to(device)
print(f"Models moved to {device}.")
# Sanity check: tokenize two prompts with a context length of 77 (expected shape: [2, 77])
print(tokenizer_openai(['cat', 'dog'], 77).shape)
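
# --- 2. Zero-shot comparison (minimal illustrative sketch) ---
# A sketch of how the two loaded models could be compared on the same inputs.
# The image path "debug.jpg" and the prompt list below are placeholder
# assumptions, not files or values defined elsewhere in this repo.
def zero_shot_scores(model, preprocess, tokenizer, image_path, prompts):
    # Preprocess one image and tokenize the candidate captions.
    image = preprocess(Image.open(image_path).convert("RGB")).unsqueeze(0).to(device)
    text = tokenizer(prompts).to(device)
    with torch.no_grad():
        image_features = model.encode_image(image)
        text_features = model.encode_text(text)
        # Normalize so the dot product is a cosine similarity.
        image_features /= image_features.norm(dim=-1, keepdim=True)
        text_features /= text_features.norm(dim=-1, keepdim=True)
        probs = (100.0 * image_features @ text_features.T).softmax(dim=-1)
    return probs.squeeze(0).tolist()

# Example usage (uncomment once a test image is available):
# prompts = ["a photo of a cat", "a photo of a dog"]
# print("HQ-CLIP:", zero_shot_scores(model_hq, preprocess_hq, tokenizer_hq, "debug.jpg", prompts))
# print("OpenAI CLIP:", zero_shot_scores(model_openai, preprocess_openai, tokenizer_openai, "debug.jpg", prompts))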