enable the usage on huggingface-inference-toolkit
#9 · opened by luquiT4

handler.py (ADDED, +74 -0)
```python
import base64
import io
from typing import Dict, Any

import torch
from PIL import Image
from transformers import AutoProcessor, VisionEncoderDecoderModel


class EndpointHandler:
    def __init__(self, path=""):
        # Load processor and model from the provided path or model ID
        self.processor = AutoProcessor.from_pretrained(path or "bytedance/Dolphin")
        self.model = VisionEncoderDecoderModel.from_pretrained(path or "bytedance/Dolphin")

        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model.to(self.device)
        self.model.eval()

        # Half precision for speed, but only on GPU: fp16 is poorly
        # supported on CPU and would make generate() fail or crawl
        self.dtype = torch.float16 if self.device.type == "cuda" else torch.float32
        if self.dtype == torch.float16:
            self.model = self.model.half()

        self.tokenizer = self.processor.tokenizer

    def decode_base64_image(self, image_base64: str) -> Image.Image:
        image_bytes = base64.b64decode(image_base64)
        return Image.open(io.BytesIO(image_bytes)).convert("RGB")

    def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
        # Check for image input
        if "inputs" not in data:
            return {"error": "No inputs provided"}

        image_input = data["inputs"]

        # Support both base64 image strings and raw images (Hugging Face supports both)
        if isinstance(image_input, str):
            try:
                image = self.decode_base64_image(image_input)
            except Exception as e:
                return {"error": f"Invalid base64 image: {e}"}
        else:
            image = image_input  # Assume a PIL-compatible image

        # Optional custom prompt (default: text reading)
        prompt = data.get("prompt", "Read text in the image.")
        full_prompt = f"<s>{prompt} <Answer/>"

        # Preprocess inputs, matching the model's dtype and device
        inputs = self.processor(image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(self.device, dtype=self.dtype)

        prompt_ids = self.tokenizer(
            full_prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids.to(self.device)
        decoder_attention_mask = torch.ones_like(prompt_ids)

        # Inference (greedy decoding)
        outputs = self.model.generate(
            pixel_values=pixel_values,
            decoder_input_ids=prompt_ids,
            decoder_attention_mask=decoder_attention_mask,
            min_length=1,
            max_length=4096,
            pad_token_id=self.tokenizer.pad_token_id,
            eos_token_id=self.tokenizer.eos_token_id,
            use_cache=True,
            bad_words_ids=[[self.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
            do_sample=False,
            num_beams=1,
        )

        sequence = self.tokenizer.batch_decode(outputs.sequences, skip_special_tokens=False)[0]
        # Strip the prompt and special tokens from the decoded sequence
        generated_text = (
            sequence.replace(full_prompt, "").replace("<pad>", "").replace("</s>", "").strip()
        )

        return {"text": generated_text}
```
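For reference, a minimal sketch of how an endpoint running this handler could be called, assuming the JSON contract defined above (`inputs` as a base64-encoded image string, optional `prompt`). The endpoint URL, token, and `sample.png` filename are placeholders, not values from this PR:

```python
import base64

import requests

# Placeholders: substitute your real endpoint URL, access token, and image file
ENDPOINT_URL = "https://<your-endpoint>.endpoints.huggingface.cloud"
HF_TOKEN = "hf_..."

# Encode the image as base64, matching the handler's string input path
with open("sample.png", "rb") as f:
    image_b64 = base64.b64encode(f.read()).decode("utf-8")

response = requests.post(
    ENDPOINT_URL,
    headers={
        "Authorization": f"Bearer {HF_TOKEN}",
        "Content-Type": "application/json",
    },
    json={"inputs": image_b64, "prompt": "Read text in the image."},
)
print(response.json())  # expected shape: {"text": "..."}
```

The same payload also works for a local smoke test, e.g. `EndpointHandler("bytedance/Dolphin")({"inputs": image_b64})`, since the handler's `__call__` takes the parsed JSON dict directly.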