SwordElucidator committed on
Commit
1aa27bb
1 Parent(s): 8d79b07

Create handler.py

Browse files
Files changed (1) hide show
  1. handler.py +43 -0
handler.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import base64
2
+ from io import BytesIO
3
+ from typing import Any, List, Dict
4
+
5
+ from PIL import Image
6
+ from transformers import AutoTokenizer, AutoModel
7
+
8
+
9
class EndpointHandler():
    """Custom Hugging Face Inference Endpoints handler for
    MiniCPM-Llama3-V-2.5, a vision-language chat model.

    Loads the model and tokenizer once at startup, then answers
    image + question (or multi-turn ``msgs``) chat requests.
    """

    def __init__(self, path=""):
        # NOTE(review): `path` (the local checkout HF Endpoints pass in) is
        # ignored and the model is pulled from the Hub by name — confirm
        # this is intentional, otherwise use `path or model_name`.
        model_name = "SwordElucidator/MiniCPM-Llama3-V-2_5"
        # trust_remote_code is required: MiniCPM ships custom modeling code.
        model = AutoModel.from_pretrained(model_name, trust_remote_code=True)
        tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
        model.eval()  # inference only; disables dropout etc.
        self.model = model
        self.tokenizer = tokenizer

    def __call__(self, data: Any) -> Any:
        """Run one chat turn against the model.

        Expected payload::

            {
                "inputs": {
                    "image": <base64-encoded image bytes/str>,   # required
                    "question": <str>,                           # or "msgs"
                    "msgs": [{"role": ..., "content": ...}, ...]
                },
                "parameters": {"temperature": <float>}           # optional
            }

        Returns:
            The model's reply as produced by ``model.chat`` (a string, not
            the ``List[List[Dict[str, float]]]`` the original annotation
            claimed).

        Raises:
            ValueError: if ``image`` is missing, or neither ``question``
                nor ``msgs`` is provided.
        """
        inputs = data.pop("inputs", data)

        image = inputs.pop("image", None)  # base64-encoded image
        question = inputs.pop("question", None)
        msgs = inputs.pop("msgs", None)

        parameters = data.pop("parameters", {})

        # Fail fast with a clear message instead of an opaque TypeError
        # from b64decode(None) / an unhelpful model-side error.
        if image is None:
            raise ValueError("'image' (base64-encoded) is required")
        if not msgs and not question:
            raise ValueError("either 'question' or 'msgs' must be provided")

        image = Image.open(BytesIO(base64.b64decode(image)))

        if not msgs:
            # Single-turn convenience: wrap the question as one user message.
            msgs = [{'role': 'user', 'content': question}]

        res = self.model.chat(
            image=image,
            msgs=msgs,
            tokenizer=self.tokenizer,
            sampling=True,  # if sampling=False, beam_search will be used by default
            temperature=parameters.get('temperature', 0.7),
            # system_prompt='' # pass system_prompt if needed
        )

        return res