Update README.md
README.md CHANGED
@@ -46,9 +46,9 @@ from transformers import CLIPModel, CLIPProcessor
 from PIL import Image
 import torch

-# Load model
-model = CLIPModel.from_pretrained("
-
+# Load fine-tuned model and processor
+model = CLIPModel.from_pretrained("kshitij3188/PHOENIX-patent-retrieval")
+processor = CLIPProcessor.from_pretrained("kshitij3188/PHOENIX-patent-retrieval")

 model.eval()
 ```
@@ -58,12 +58,16 @@ model.eval()
 ```python
 from torchvision import transforms

-def
+def extract_image_embedding(image_path):
     image = Image.open(image_path).convert("RGB")
-    inputs =
+    inputs = processor(images=image, return_tensors="pt")
     with torch.no_grad():
-
-    return
+        embedding = model.get_image_features(**inputs).squeeze()
+    return embedding
+
+# Example
+embedding = extract_image_embedding("some_patent_image.png")
+print("🔍 Image embedding shape:", embedding.shape)
 ```

 You can now compare cosine similarity between embeddings to retrieve similar patent drawings.
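The closing note mentions cosine-similarity retrieval but the commit does not show that step. Here is a minimal sketch of it, reusing the `extract_image_embedding` helper from the diff above; the two image paths are hypothetical placeholders, and `torch.nn.functional.cosine_similarity` is one straightforward way to compare the embeddings.

```python
import torch.nn.functional as F

# Hypothetical example: compare two patent drawings (placeholder paths).
emb_a = extract_image_embedding("patent_drawing_a.png")
emb_b = extract_image_embedding("patent_drawing_b.png")

# Cosine similarity between the two 1-D embedding vectors;
# unsqueeze adds a batch dimension so the comparison runs over dim=1.
score = F.cosine_similarity(emb_a.unsqueeze(0), emb_b.unsqueeze(0)).item()
print(f"Cosine similarity: {score:.4f}")
```

For retrieval over a collection, the same idea extends to precomputing embeddings for all drawings, stacking them into one tensor, and ranking by cosine similarity against a query embedding.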