import gradio as gr
import open_clip
import numpy as np
import torch
import pandas as pd
import os
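
# Load two ViT-B-32 checkpoints from local files: the stock OpenCLIP weights
# and the debiased variant. Both stay in eval mode; inference only.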
open_clip_model, _, preprocess = open_clip.create_model_and_transforms(
    'ViT-B-32',
    pretrained='./open_clip_pytorch_model.bin')
debiased_model, _, _ = open_clip.create_model_and_transforms(
    'ViT-B-32',
    pretrained='./debiased_openclip.pt')
open_clip_model.eval()
debiased_model.eval()
tokenizer = open_clip.get_tokenizer('ViT-B-32')
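

# Cosine-similarity score between image and text embeddings: both sides are
# L2-normalized, negative similarities are clipped to zero, and w is an
# optional scaling weight -- the same recipe as the CLIPScore metric (w=1 here).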
def get_clip_scores(images, candidates, w=1):
    images = images / np.sqrt(np.sum(images**2, axis=1, keepdims=True))
    candidates = candidates / np.sqrt(np.sum(candidates**2, axis=1, keepdims=True))
    per = w * np.clip(np.sum(images * candidates, axis=1), 0, None)
    return per
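

# Score one image against two candidate captions with both models and return
# a 4-row DataFrame that feeds the bar plot in the UI below.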
def predict(text1, text2, input_img):
    with torch.no_grad():
        image = preprocess(input_img)
        image = image.unsqueeze(0)
        image_features = open_clip_model.encode_image(image)
        debiased_image_features = debiased_model.encode_image(image)
        texts = tokenizer([text1])
        texts2 = tokenizer([text2])
        text_features = open_clip_model.encode_text(texts)
        debiased_text_features = debiased_model.encode_text(texts)
        score = get_clip_scores(image_features.numpy(), text_features.numpy())
        debiased_score = get_clip_scores(debiased_image_features.numpy(),
                                         debiased_text_features.numpy())
        text_features2 = open_clip_model.encode_text(texts2)
        debiased_text_features2 = debiased_model.encode_text(texts2)
        score2 = get_clip_scores(image_features.numpy(), text_features2.numpy())
        debiased_score2 = get_clip_scores(debiased_image_features.numpy(),
                                          debiased_text_features2.numpy())
        data = {'label': ["OpenCLIP for text1", "Debiased CLIP for text1",
                          "OpenCLIP for text2", "Debiased CLIP for text2"],
                'score': [score[0], debiased_score[0],
                          score2[0], debiased_score2[0]]}
        return pd.DataFrame.from_dict(data)
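

# Gradio Blocks UI: an image upload plus two free-text captions in, a
# horizontal bar chart of the four scores out.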
with gr.Blocks() as demo:
    gr.Markdown("# Parrot Bias in CLIP!! (Both CLIP models are ViT-B-32)")
    with gr.Row():
        im = gr.Image(label="Select Image",
                      sources=['upload', 'webcam'],
                      type="pil",
                      height=450)
    with gr.Row():
        txt_1 = gr.Textbox(label="Input Text")
        txt_2 = gr.Textbox(label="Input Text 2")
    bar = gr.BarPlot(x="label", y="score",
                     title="CLIP Score and Debiased Score",
                     vertical=False, x_title=None)
    btn = gr.Button(value="Submit")
    btn.click(predict, inputs=[txt_1, txt_2, im], outputs=[bar])
gr.Markdown("## Examples (from https://joaanna.github.io/disentangling_spelling_in_clip/)") | |
    gr.Examples(
        [["A mug cup", "An iPad",
          os.path.join(os.path.dirname(__file__), "examples/IMG_2938.jpg")],
         ["A hat", "bad",
          os.path.join(os.path.dirname(__file__), "examples/IMG_3066.jpg")]],
        [txt_1, txt_2, im],
        fn=predict,
        outputs=bar,
        cache_examples=True,
    )
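
# share=True requests a temporary public URL from Gradio; show_api=False
# hides the auto-generated API documentation page.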
if __name__ == "__main__":
    demo.launch(show_api=False, share=True)