CLIC Collection
Models and dataset for the work: "Advancing Compositional Awareness in CLIP with Efficient Fine-Tuning"
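Example usage (zero-shot image classification with open_clip):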
import torch
from PIL import Image
from urllib.request import urlopen
import open_clip

# Load the fine-tuned CLIC model and its image preprocessing transform
model, _, image_processor = open_clip.create_model_and_transforms('hf-hub:nmndeep/CLIC-CLIPS-ViT-L-14-224-CogVLM')

# Download and preprocess an example image
image = image_processor(Image.open(urlopen(
    'https://images.pexels.com/photos/869258/pexels-photo-869258.jpeg?auto=compress&cs=tinysrgb&w=1260&h=750&dpr=1'))).unsqueeze(0)
model.eval()
# CLIPS clone available on our GitHub
from clips import get_tokenizer
tokenizer = get_tokenizer('hf-hub:UCSC-VLAA/ViT-L-14-CLIPS-224-Recap-DataComp-1B', context_length=model.context_length)
texts = ["a diagram", "a dog", "a cat", "snow"]
text = tokenizer(texts)

with torch.no_grad(), torch.autocast("cuda"):
    # Encode image and text, then L2-normalize the embeddings
    image_features = model.encode_image(image)
    text_features = model.encode_text(text)
    image_features /= image_features.norm(dim=-1, keepdim=True)
    text_features /= text_features.norm(dim=-1, keepdim=True)

    # Scaled cosine similarities, converted to probabilities over the text prompts
    text_probs = (100.0 * image_features @ text_features.T).softmax(dim=-1)

idx = torch.argmax(text_probs)
print("Output label:", texts[idx])
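
The same pieces extend to batches of images: preprocess each image, stack the results, and encode them together. A minimal sketch of this, reusing model, image_processor, and tokenizer from above (the classify_images helper and the example file names are illustrative, not part of the released API):

# Illustrative batch helper: classify a list of PIL images against candidate text labels.
def classify_images(pil_images, labels):
    with torch.no_grad(), torch.autocast("cuda"):
        # Encode and L2-normalize the candidate prompts
        text_feats = model.encode_text(tokenizer(labels))
        text_feats /= text_feats.norm(dim=-1, keepdim=True)
        # Preprocess each image and stack into a [B, 3, 224, 224] batch
        batch = torch.stack([image_processor(img) for img in pil_images])
        img_feats = model.encode_image(batch)
        img_feats /= img_feats.norm(dim=-1, keepdim=True)
        # Scaled cosine similarities -> probabilities over the labels, one row per image
        probs = (100.0 * img_feats @ text_feats.T).softmax(dim=-1)
    return [labels[i] for i in probs.argmax(dim=-1).tolist()]

# e.g. classify_images([Image.open("photo1.jpg"), Image.open("photo2.jpg")], ["a dog", "a cat"])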