import requests

from typing import List, Optional, cast, TypeVar
from abc import ABC, abstractmethod

import torch
import torch.nn.functional as F
from torch import Tensor
from torch.utils.data import DataLoader

from tqdm import tqdm
from PIL import Image

from datasets import Dataset
from torch.utils.data import Dataset as TorchDataset

from transformers import AutoProcessor, PaliGemmaForConditionalGeneration, Qwen2VLForConditionalGeneration
from qwen_vl_utils import process_vision_info


T = TypeVar("T")

class ListDataset(TorchDataset[T]):
    """A torch Dataset that simply wraps a Python list."""

    def __init__(self, elements: List[T]):
        self.elements = elements

    def __len__(self) -> int:
        return len(self.elements)

    def __getitem__(self, idx: int) -> T:
        return self.elements[idx]

def get_torch_device(device: str = "auto") -> str:
    """
    Return the device (string) to be used by PyTorch.

    `device` defaults to "auto", which resolves to:
    - "cuda" if available
    - else "mps" if available
    - else "cpu"
    """
    if device == "auto":
        if torch.cuda.is_available():
            device = "cuda"
        elif torch.backends.mps.is_available():
            device = "mps"
        else:
            device = "cpu"

    return device

class ImageConverter:
    """Resolves image file names to RGB PIL images held in an in-memory corpus."""

    def __init__(self, image_corpus, images_mapping):
        self.image_corpus = image_corpus
        self.images_mapping = images_mapping

    def transform_func(self, example):
        if 'image' in example:
            if isinstance(example['image'], str):
                # Single example: look up the corpus record and convert to RGB.
                record = self.image_corpus[self.images_mapping[example['image']]]
                example['image'] = record['image'].convert("RGB")
            elif isinstance(example['image'], list):
                # Batched example (as produced by `datasets.set_transform`).
                example['image'] = [
                    self.image_corpus[self.images_mapping[el]]['image'].convert("RGB")
                    for el in example['image']
                ]
        return example

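# Note on the expected corpus layout (inferred from the indexing above):
# `image_corpus` behaves like a `datasets.Dataset` whose records expose a
# "file_name" string and a PIL "image", and `images_mapping` maps each file
# name to its integer row index in that corpus.
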
class CustomRetriever(ABC):
    """
    Abstract base class for custom dense-embedding retrievers.
    """

    def __init__(self, model_name_or_path, device: str = "auto"):
        super().__init__()
        self.device = get_torch_device(device)
        # Pixel bounds forwarded to the Qwen2-VL image processor.
        self.min_pixels = 1 * 28 * 28
        self.max_pixels = 2560 * 28 * 28
        self.processor = AutoProcessor.from_pretrained(
            model_name_or_path, min_pixels=self.min_pixels, max_pixels=self.max_pixels
        )
        # Pad on the left so that last-token pooling sees real content.
        self.processor.tokenizer.padding_side = "left"
        self.document_prefix = "What is shown in this image?"
        self.query_prefix = "Query:"
        self.pooling = "last"

    @property
    def use_visual_embedding(self) -> bool:
        return True

    @abstractmethod
    def process_images(self, images: List[Image.Image], **kwargs):
        pass

    @abstractmethod
    def process_queries(self, queries: List[str], **kwargs):
        pass

    def forward_queries(self, queries, batch_size: int, **kwargs) -> Tensor:
        dataloader = DataLoader(
            dataset=ListDataset[str](queries),
            batch_size=batch_size,
            shuffle=False,
            collate_fn=self.process_queries,
            num_workers=32,  # tune to the host machine
        )

        qs = []
        for batch_query in tqdm(dataloader, desc="Forward pass queries..."):
            # Autocast on the device actually in use rather than hard-coding "cuda".
            with torch.no_grad(), torch.autocast(device_type=self.device.split(":")[0]):
                batch_query = {k: v.to(self.device) for k, v in batch_query.items()}
                embeddings_query = self.model(**batch_query, output_hidden_states=True).hidden_states[-1]
                embeds = self.pool(
                    last_hidden_states=embeddings_query,
                    attention_mask=batch_query["attention_mask"],
                    pool_type=self.pooling,
                )
                embeds = F.normalize(embeds, dim=-1)
                qs.append(embeds.contiguous())

        return torch.cat(qs, dim=0).cpu()

    def forward_documents(self, documents: List[str], batch_size: int, **kwargs) -> Tensor:
        dataset = Dataset.from_dict({"image": documents})
        if self.imageconverter is not None:
            dataset.set_transform(self.imageconverter.transform_func)
        dataloader = DataLoader(
            dataset=dataset,
            batch_size=batch_size,
            shuffle=False,
            collate_fn=self.process_images,
            num_workers=32,  # tune to the host machine
        )

        ds = []
        for batch_doc in tqdm(dataloader, desc="Forward pass documents..."):
            with torch.no_grad(), torch.autocast(device_type=self.device.split(":")[0]):
                batch_doc = {k: v.to(self.device) for k, v in batch_doc.items()}
                embeddings_doc = self.model(**batch_doc, output_hidden_states=True).hidden_states[-1]
                embeds = self.pool(
                    last_hidden_states=embeddings_doc,
                    attention_mask=batch_doc["attention_mask"],
                    pool_type=self.pooling,
                )
                embeds = F.normalize(embeds, dim=-1)
                ds.append(embeds.contiguous())

        return torch.cat(ds, dim=0).cpu()

    def pool(self, last_hidden_states: Tensor, attention_mask: Tensor, pool_type: str) -> Tensor:
        # Zero out hidden states at padding positions before pooling.
        last_hidden = last_hidden_states.masked_fill(~attention_mask[..., None].bool(), 0.0)

        if pool_type == "avg":
            emb = last_hidden.sum(dim=1) / attention_mask.sum(dim=1)[..., None]
        elif pool_type == "weighted_avg":
            # Assumes `attention_mask` already carries per-token weights.
            emb = last_hidden.sum(dim=1)
        elif pool_type == "cls":
            emb = last_hidden[:, 0]
        elif pool_type == "last":
            # With left padding, every sequence ends at the final position.
            left_padding = (attention_mask[:, -1].sum() == attention_mask.shape[0])
            if left_padding:
                emb = last_hidden[:, -1]
            else:
                # Right padding: gather each sequence's last non-padding token.
                sequence_lengths = attention_mask.sum(dim=1) - 1
                batch_size = last_hidden.shape[0]
                emb = last_hidden[torch.arange(batch_size, device=last_hidden.device), sequence_lengths]
        else:
            raise ValueError(f"pool_type {pool_type} not supported")

        return emb

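# Illustration of "last" pooling: with left padding every row of the attention
# mask ends in 1 (e.g. [[0, 1, 1, 1], [1, 1, 1, 1]]), so the fast path
# `last_hidden[:, -1]` is taken. With right padding (e.g. [[1, 1, 1, 0]]) the
# last real token of each row sits at index `attention_mask.sum(dim=1) - 1`,
# which is what the gather branch selects. Mixed left/right padding within one
# batch is not handled, which is why the processor pads on the left.
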
class DSERetriever(CustomRetriever):
    def __init__(self, model_name_or_path, device: str = "auto", images=None):
        super().__init__(model_name_or_path, device)
        model = Qwen2VLForConditionalGeneration.from_pretrained(
            model_name_or_path,
            attn_implementation="flash_attention_2",  # requires the flash-attn package
            torch_dtype=torch.bfloat16,
            device_map=self.device,
        ).eval()
        self.model = model
        self.q_max_length = 512
        self.p_max_length = 10240
        self.set_resize = False
        self.resized_height = 760
        self.resized_width = 760
        self.imageconverter = None
        if images:
            images_mapping = {name: i for i, name in enumerate(images['file_name'])}
            self.imageconverter = ImageConverter(image_corpus=images, images_mapping=images_mapping)

    def process_images(self, documents, **kwargs):
        if isinstance(documents, dict):
            images = documents["image"]
        elif isinstance(documents, list):
            images = [pair['image'] for pair in documents]
        else:
            raise ValueError("The documents need to be a dict or a list of dicts")

        input_texts = []
        doc_messages = []
        for doc_image in images:
            message = [
                {
                    'role': 'user',
                    'content': [
                        {'type': 'image', 'image': doc_image, 'resized_height': self.resized_height, 'resized_width': self.resized_width}
                        if self.set_resize
                        else {'type': 'image', 'image': doc_image},
                        {'type': 'text', 'text': self.document_prefix},
                    ],
                }
            ]
            doc_messages.append(message)
            doc_text = self.processor.apply_chat_template(message, tokenize=False, add_generation_prompt=True) + "<|endoftext|>"
            input_texts.append(doc_text)

        image_inputs, video_inputs = process_vision_info(doc_messages)
        doc_batch_dict = self.processor(
            text=input_texts,
            images=image_inputs,
            videos=video_inputs,
            truncation=True,
            max_length=self.p_max_length,
            padding='longest',
            return_tensors='pt',
        )
        return doc_batch_dict

    def process_queries(self, queries: List[str], **kwargs):
        query_messages = []
        for query in queries:
            message = [
                {
                    'role': 'user',
                    'content': [
                        # A dummy image keeps queries on the same
                        # vision-language template as documents.
                        {'type': 'image', 'image': Image.new('RGB', (28, 28)), 'resized_height': 1, 'resized_width': 1},
                        {'type': 'text', 'text': f'{self.query_prefix} {query}'},
                    ],
                }
            ]
            query_messages.append(message)
        query_texts = [
            x + "<|endoftext|>"
            for x in self.processor.apply_chat_template(query_messages, tokenize=False, add_generation_prompt=True)
        ]
        image_inputs, video_inputs = process_vision_info(query_messages)
        query_batch_dict = self.processor(
            text=query_texts,
            images=image_inputs,
            videos=video_inputs,
            truncation=True,
            max_length=self.q_max_length,  # mirrors `p_max_length` on the document side
            padding='longest',
            return_tensors='pt',
        )
        return query_batch_dict

    def encode_queries(self, queries: List[str], batch_size: int = 16, **kwargs):
        return self.forward_queries(queries, batch_size=batch_size)

    def encode_corpus(self, corpus, batch_size: int = 16, **kwargs):
        return self.forward_documents([el['image_id'] for el in corpus], batch_size=batch_size)
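

# Minimal usage sketch. Everything below is illustrative: the checkpoint path,
# corpus path, and column names are placeholders, not values defined anywhere
# in this file. The corpus is assumed to follow the layout ImageConverter
# expects ("file_name" and "image" columns).
if __name__ == "__main__":
    from datasets import load_dataset

    corpus = load_dataset("path/to/image_corpus", split="train")  # placeholder
    retriever = DSERetriever("path/to/dse-qwen2-checkpoint", images=corpus)  # placeholder

    query_embs = retriever.encode_queries(["What is shown on the chart?"], batch_size=8)
    doc_embs = retriever.encode_corpus(
        [{"image_id": name} for name in corpus["file_name"]], batch_size=8
    )
    # Embeddings are L2-normalized, so the dot product equals cosine similarity.
    scores = query_embs @ doc_embs.T
    print(scores.shape)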