Spaces:
Sleeping
Sleeping
Create imageCaptioning.py (#3)
Browse files- Create imageCaptioning.py (2b996da8d9dc6b4c3f1b546d7c965e53a6936607)
Co-authored-by: Khan <[email protected]>
src/models/imageCaptioning.py
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from PIL import Image
|
| 2 |
+
from transformers import BlipProcessor , BlipForConditionalGeneration
|
| 3 |
+
import torch
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
# Load the BLIP image-captioning processor and model once at import time so
# every generateCaption() call reuses them instead of reloading the weights.
# NOTE(review): "src/models/Caption" is resolved relative to the current
# working directory — presumably the repository root; confirm before running
# from elsewhere or packaging.
processor = BlipProcessor.from_pretrained("src/models/Caption")
model = BlipForConditionalGeneration.from_pretrained("src/models/Caption")
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def generateCaption(image_path):
    """Generate a natural-language caption for the image at *image_path*.

    Args:
        image_path: Path (str or os.PathLike) to an image file readable by PIL.

    Returns:
        str: The caption decoded from the BLIP model's generated token ids.

    Raises:
        FileNotFoundError: If *image_path* does not exist.
        PIL.UnidentifiedImageError: If the file is not a recognizable image.
    """
    # Context manager closes the underlying file handle (PIL opens lazily and
    # would otherwise keep it open); convert() so grayscale/RGBA inputs are
    # accepted by the processor uniformly.
    with Image.open(image_path) as img:
        image = img.convert("RGB")
    inputs = processor(images=image, return_tensors="pt")
    # Inference only — disable autograd graph construction to save memory/time.
    with torch.no_grad():
        output = model.generate(**inputs)
    caption = processor.decode(output[0], skip_special_tokens=True)
    return caption
|