kshitij3188 committed
Commit e0c3eab · verified · 1 Parent(s): b73d810

Update README.md

Files changed (1)
  1. README.md +11 -7
README.md CHANGED
@@ -46,9 +46,9 @@ from transformers import CLIPModel, CLIPProcessor
 from PIL import Image
 import torch
 
-# Load model + feature extractor
-model = CLIPModel.from_pretrained("your-username/PHOENIX-patent-retrieval")
-feature_extractor = CLIPProcessor.from_pretrained("your-username/PHOENIX-patent-retrieval")
+# Load fine-tuned model and processor
+model = CLIPModel.from_pretrained("kshitij3188/PHOENIX-patent-retrieval")
+processor = CLIPProcessor.from_pretrained("kshitij3188/PHOENIX-patent-retrieval")
 
 model.eval()
 ```
@@ -58,12 +58,16 @@ model.eval()
 ```python
 from torchvision import transforms
 
-def extract_embedding(image_path):
+def extract_image_embedding(image_path):
     image = Image.open(image_path).convert("RGB")
-    inputs = feature_extractor(images=image, return_tensors="pt")
+    inputs = processor(images=image, return_tensors="pt")
     with torch.no_grad():
-        outputs = model(**inputs)
-    return outputs.last_hidden_state[:, 0, :].squeeze()
+        embedding = model.get_image_features(**inputs).squeeze()
+    return embedding
+
+# Example
+embedding = extract_image_embedding("some_patent_image.png")
+print("🔍 Image embedding shape:", embedding.shape)
 ```
 
 You can now compare cosine similarity between embeddings to retrieve similar patent drawings.
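
For reference, a minimal sketch of that comparison step, assuming the `extract_image_embedding` helper from the updated README has already been defined; the file names below are placeholders, not files from the repository:

```python
import torch.nn.functional as F

# Hypothetical example: compare two patent drawings by cosine similarity.
# "query.png" and "candidate.png" are placeholder file names.
query_emb = extract_image_embedding("query.png")
candidate_emb = extract_image_embedding("candidate.png")

# cosine_similarity compares along dim=1 by default, so add a batch dimension
# to the 1-D embedding vectors before comparing.
similarity = F.cosine_similarity(query_emb.unsqueeze(0), candidate_emb.unsqueeze(0)).item()
print(f"Cosine similarity: {similarity:.4f}")
```

Higher scores indicate more visually similar drawings; ranking a corpus of candidate embeddings by this score yields the retrieval results the README describes.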