nv-bschifferer committed · Commit 2f5bf7b · Parent: 1bd8bbc
README.md CHANGED
@@ -25,6 +25,7 @@ license:
  task_ids:
  - document-retrieval
  tags:
+ - text
  - image
  configs:
  - config_name: queries-ar
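
Each language in the dataset has its own set of configs (for example queries-ar above), which the evaluation example added in this commit loads per language via datasets.load_dataset. A minimal, illustrative sketch using the config and split names that appear in the files below:

from datasets import load_dataset

# Illustrative only: load the Arabic query config referenced in the README
queries_ar = load_dataset("nvidia/miracl-vision", "queries-ar", split="default")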
eval_example/embedding_eval.py ADDED
@@ -0,0 +1,55 @@
+ import os
+ import argparse
+
+ from beir.retrieval.evaluation import EvaluateRetrieval
+ from beir.retrieval.search.dense import DenseRetrievalExactSearch as DRES
+
+ from utils import load_data
+
+ import torch
+
+ from visual_embedding_model import DSERetriever
+
+ def get_args():
+     parser = argparse.ArgumentParser()
+     parser.add_argument(
+         '--dataset',
+         type=str,
+         help='Dataset name that is passed to the datasets.load_dataset function',
+         default='nvidia/miracl-vision'
+     )
+     parser.add_argument(
+         '--language',
+         type=str,
+         help='Language to evaluate',
+         default='sw'
+     )
+     return parser.parse_args()
+
+ if __name__ == '__main__':
+     args = get_args()
+     tracker = None
+
+     queries, corpus, qrels, images = load_data(  # queries, corpus and qrels in BEIR format
+         args.dataset,
+         args.language
+     )
+     model = DSERetriever(
+         model_name_or_path='MrLight/dse-qwen2-2b-mrl-v1',
+         images=images
+     )
+     dres_model = DRES(
+         model,
+         corpus_chunk_size=250000,
+         batch_size=8
+     )
+     retriever = EvaluateRetrieval(
+         dres_model,
+         score_function='dot',  # embeddings are L2-normalized, so dot product equals cosine similarity
+         k_values=[1, 5, 10, 100]
+     )
+
+     results = retriever.retrieve(corpus, queries)
+
+     ndcg, map_, recall, precision = retriever.evaluate(qrels, results, retriever.k_values, ignore_identical_ids=True)
+     print(ndcg, map_, recall, precision)
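
Assuming beir, transformers, qwen-vl-utils, flash-attn and a CUDA GPU are available, the script can be run from the eval_example directory; the flag values below are simply the defaults defined in get_args:

python embedding_eval.py --dataset nvidia/miracl-vision --language sw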
eval_example/utils.py ADDED
@@ -0,0 +1,35 @@
+ from datasets import load_dataset
+
+ def hf_beir_queries(queries):
+     queries_beir = {}
+     for query in queries:
+         queries_beir[query['_id']] = query['text']  # BEIR expects {query_id: query_text}
+     return queries_beir
+
+ def hf_beir_corpus(corpus):
+     corpus_beir = {}
+     for doc in corpus:
+         corpus_beir[doc['_id']] = doc  # BEIR expects {doc_id: document row}
+     return corpus_beir
+
+ def hf_beir_qrels(qrels):
+     qrels_beir = {}
+     for el in qrels:
+         if str(el['query-id']) in qrels_beir:
+             qrels_beir[str(el['query-id'])][str(el['corpus-id'])] = el['score']
+         else:
+             qrels_beir[str(el['query-id'])] = {str(el['corpus-id']): el['score']}
+     return qrels_beir
+
+ def load_data(
+     path,
+     lang
+ ):
+     queries = load_dataset(path, 'queries-' + str(lang), split='default')
+     queries = hf_beir_queries(queries)
+     corpus = load_dataset(path, 'corpus-' + str(lang), split='default')
+     corpus = hf_beir_corpus(corpus)
+     qrels = load_dataset(path, 'qrels-' + str(lang), split='default')
+     qrels = hf_beir_qrels(qrels)
+     images = load_dataset(path, 'images-' + str(lang), split='default')
+     return queries, corpus, qrels, images
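
For reference, the helpers above return the standard in-memory BEIR structures. An illustrative sketch with made-up ids, text and score; only the _id and image_id fields are actually referenced by the code in this commit:

queries = {"q1": "What is the capital of Kenya?"}            # {query_id: query_text}
corpus = {"d7": {"_id": "d7", "image_id": "d7_page0.png"}}   # {doc_id: corpus row}
qrels = {"q1": {"d7": 1}}                                    # {query_id: {doc_id: relevance}}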
eval_example/visual_embedding_model.py ADDED
@@ -0,0 +1,278 @@
+ import requests
+ from typing import List, Optional, cast, TypeVar
+ from abc import ABC, abstractmethod
+
+ import torch
+ import torch.nn.functional as F
+ from torch import Tensor
+ from torch.utils.data import DataLoader
+
+ from tqdm import tqdm
+ from PIL import Image
+
+ from datasets import Dataset
+ from torch.utils.data import Dataset as TorchDataset
+
+ from transformers import AutoProcessor, PaliGemmaForConditionalGeneration, Qwen2VLForConditionalGeneration
+ from qwen_vl_utils import process_vision_info
+
+ T = TypeVar("T")
+ class ListDataset(TorchDataset[T]):  # thin torch Dataset wrapper around a Python list
+     def __init__(self, elements: List[T]):
+         self.elements = elements
+
+     def __len__(self) -> int:
+         return len(self.elements)
+
+     def __getitem__(self, idx: int) -> T:
+         return self.elements[idx]
+
+ def get_torch_device(device: str = "auto") -> str:
+     """
+     Returns the device (string) to be used by PyTorch.
+
+     `device` arg defaults to "auto" which will use:
+     - "cuda" if available
+     - else "mps" if available
+     - else "cpu".
+     """
+
+     if device == "auto":
+         if torch.cuda.is_available():
+             device = "cuda"
+         elif torch.backends.mps.is_available():  # for Apple Silicon
+             device = "mps"
+         else:
+             device = "cpu"
+
+     return device
+
+ class ImageConverter:  # resolves image file names to the PIL images stored in the images split
+
+     def __init__(self, image_corpus, images_mapping):
+         self.image_corpus = image_corpus
+         self.images_mapping = images_mapping
+
+     def transform_func(self, example):
+         if 'image' in example:
+             if isinstance(example['image'], str):
+                 example['image'] = self.image_corpus[self.images_mapping[example['image']]]
+             if isinstance(example['image'], list):
+                 converted_images = []
+                 for el in example['image']:
+                     converted_images.append(self.image_corpus[self.images_mapping[el]]['image'].convert("RGB"))
+                 example['image'] = converted_images
+         return example
+
+ class CustomRetriever(ABC):
+     """
+     Custom model (dense embeddings).
+     """
+
+     def __init__(self, model_name_or_path, device: str = "auto"):
+         super().__init__()
+         self.device = get_torch_device(device)
+         self.min_pixels = 1 * 28 * 28
+         self.max_pixels = 2560 * 28 * 28
+         self.processor = AutoProcessor.from_pretrained(model_name_or_path, min_pixels=self.min_pixels, max_pixels=self.max_pixels)
+         self.processor.padding_side = "left"
+         self.document_prefix = "What is shown in this image?"
+         self.query_prefix = "Query:"
+         self.pooling = "last"  # use the last non-padded token as the embedding (DSE-style pooling)
+
+     @property
+     def use_visual_embedding(self) -> bool:
+         return True
+
+     @abstractmethod
+     def process_images(self, images: List[Image.Image], **kwargs):
+         pass
+
+     @abstractmethod
+     def process_queries(self, queries: List[str], **kwargs):
+         pass
+
+     def forward_queries(self, queries, batch_size: int, **kwargs) -> List[torch.Tensor]:
+         dataloader = DataLoader(
+             dataset=ListDataset[str](queries),
+             batch_size=batch_size,
+             shuffle=False,
+             collate_fn=self.process_queries,
+             num_workers=32
+         )
+
+         qs = []
+         for batch_query in tqdm(dataloader, desc="Forward pass queries..."):
+             with torch.no_grad():
+                 with torch.autocast(device_type="cuda"):
+                     batch_query = {k: v.to(self.device) for k, v in batch_query.items()}
+                     embeddings_query = self.model(**batch_query, output_hidden_states=True).hidden_states[-1]
+
+                     embeds = self.pool(
+                         last_hidden_states=embeddings_query,
+                         attention_mask=batch_query["attention_mask"],
+                         pool_type=self.pooling,
+                     )
+                     embeds = F.normalize(embeds, dim=-1)  # L2-normalize so dot product equals cosine similarity
+
+                     qs.append(embeds.contiguous())
+
+
+         return torch.cat(qs, dim=0).cpu()
+
+     def forward_documents(self, documents: List[str], batch_size: int, **kwargs) -> List[torch.Tensor]:
+         dataset = Dataset.from_dict({"image": documents})
+         if self.imageconverter:
+             dataset.set_transform(self.imageconverter.transform_func)
+         dataloader = DataLoader(
+             dataset=dataset,
+             batch_size=batch_size,
+             shuffle=False,
+             collate_fn=self.process_images,
+             num_workers=32
+         )
+
+         ds = []
+         for batch_doc in tqdm(dataloader, desc="Forward pass documents..."):
+             with torch.no_grad():
+                 with torch.autocast(device_type="cuda"):
+                     batch_doc = {k: v.to(self.device) for k, v in batch_doc.items()}
+                     embeddings_doc = self.model(**batch_doc, output_hidden_states=True).hidden_states[-1]
+                     embeds = self.pool(
+                         last_hidden_states=embeddings_doc,
+                         attention_mask=batch_doc["attention_mask"],
+                         pool_type=self.pooling,
+                     )
+                     embeds = F.normalize(embeds, dim=-1)
+
+                     ds.append(embeds.contiguous())
+
+         return torch.cat(ds, dim=0).cpu()
+
+     def pool(self, last_hidden_states: Tensor,
+              attention_mask: Tensor,
+              pool_type: str) -> Tensor:
+         last_hidden = last_hidden_states.masked_fill(~attention_mask[..., None].bool(), 0.0)
+
+         if pool_type == "avg":
+             emb = last_hidden.sum(dim=1) / attention_mask.sum(dim=1)[..., None]
+         elif pool_type == "weighted_avg":
+             emb = last_hidden.sum(dim=1)
+         elif pool_type == "cls":
+             emb = last_hidden[:, 0]
+         elif pool_type == "last":
+             left_padding = (attention_mask[:, -1].sum() == attention_mask.shape[0])
+             if left_padding:
+                 emb = last_hidden[:, -1]
+             else:
+                 sequence_lengths = attention_mask.sum(dim=1) - 1
+                 batch_size = last_hidden.shape[0]
+                 emb = last_hidden[torch.arange(batch_size, device=last_hidden.device), sequence_lengths]
+         else:
+             raise ValueError(f"pool_type {pool_type} not supported")
+
+         return emb
+
+ class DSERetriever(CustomRetriever):  # DSE retriever built on Qwen2-VL (e.g. MrLight/dse-qwen2-2b-mrl-v1)
+     def __init__(self, model_name_or_path, device: str = "auto", images=None):
+         super().__init__(model_name_or_path, device)
+         model = Qwen2VLForConditionalGeneration.from_pretrained(
+             model_name_or_path,
+             attn_implementation="flash_attention_2",
+             torch_dtype=torch.bfloat16,
+             device_map='cuda'
+         ).eval()
+         model.padding_side = "left"
+         self.model = model
+         self.q_max_length = 512
+         self.p_max_length = 10240
+         self.set_resize = False
+         self.resized_height = 760
+         self.resized_width = 760
+         self.imageconverter = None
+         if images:
+             images_mapping = {}  # map file_name -> row index in the images split
+             for i, e in enumerate(images['file_name']):
+                 images_mapping[e] = i
+             self.imageconverter = ImageConverter(image_corpus=images, images_mapping=images_mapping)
+
+     def process_images(self, documents, **kwargs):
+         if isinstance(documents, dict):
+             images = documents["image"]
+             assert isinstance(images, list)
+         elif isinstance(documents, list):
+             images = [pair['image'] for pair in documents]
+         else:
+             raise ValueError("The documents need to be a dict or a list of dicts")
+
+         input_texts = []
+         doc_messages = []
+         doc_texts = [self.document_prefix] * len(images)
+         for doc_text, doc_image in zip(doc_texts, images):
+             message = [
+                 {
+                     'role': 'user',
+                     'content': [
+                         {'type': 'image', 'image': doc_image, 'resized_height': self.resized_height, 'resized_width': self.resized_width} if self.set_resize else {'type': 'image', 'image': doc_image},
+                         {'type': 'text', 'text': 'What is shown in this image?'}
+                     ]
+                 }
+             ]
+             doc_messages.append(message)
+             doc_text = self.processor.apply_chat_template(message, tokenize=False, add_generation_prompt=True) + "<|endoftext|>"
+             input_texts.append(doc_text)
+
+         images, videos = process_vision_info(doc_messages)
+         doc_batch_dict = self.processor(
+             text=input_texts,
+             images=images,
+             videos=videos,
+             truncation=True,
+             max_length=self.p_max_length,
+             padding='longest',
+             return_tensors='pt'
+         )
+         return doc_batch_dict
+
+     def process_queries(self, queries: List[str], **kwargs):
+         query_messages = []
+         for query in queries:
+             message = [
+                 {
+                     'role': 'user',
+                     'content': [
+                         {'type': 'image', 'image': Image.new('RGB', (28, 28)), 'resized_height': 1, 'resized_width': 1},  # need a dummy image
+                         {'type': 'text', 'text': f'Query: {query}'},
+                     ]
+                 }
+             ]
+             query_messages.append(message)
+         query_texts = [
+             x + "<|endoftext|>" for x in self.processor.apply_chat_template(query_messages, tokenize=False, add_generation_prompt=True)
+         ]
+         images, videos = process_vision_info(query_messages)
+         query_batch_dict = self.processor(
+             text=query_texts,
+             images=images,
+             videos=videos,
+             padding='longest',
+             return_tensors='pt'
+         )
+         return query_batch_dict
+
+     def encode_queries(
+         self,
+         queries: List[str],
+         batch_size: int = 16,
+         **kwargs
+     ):
+         return self.forward_queries(queries, batch_size=batch_size)
+
+     def encode_corpus(
+         self,
+         corpus,
+         batch_size: int = 16,
+         **kwargs
+     ):
+
+         return self.forward_documents([el['image_id'] for el in corpus], batch_size=batch_size)
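
The retriever can also be used outside the BEIR harness. A minimal sketch, assuming the same dependencies and a CUDA GPU as above (the model is loaded with device_map='cuda'); the slicing and batch sizes are only for illustration:

from utils import load_data
from visual_embedding_model import DSERetriever

queries, corpus, qrels, images = load_data('nvidia/miracl-vision', 'sw')
model = DSERetriever(model_name_or_path='MrLight/dse-qwen2-2b-mrl-v1', images=images)

# Embed a couple of queries and corpus entries; embeddings are L2-normalized,
# so the dot product below is a cosine-similarity score matrix.
q_emb = model.encode_queries(list(queries.values())[:2], batch_size=2)
d_emb = model.encode_corpus(list(corpus.values())[:2], batch_size=2)
print(q_emb @ d_emb.T)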