atifss commited on
Commit
e3f4c1f
·
1 Parent(s): b91f221

Update handler.py

Browse files
Files changed (1) hide show
  1. handler.py +42 -0
handler.py CHANGED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
 + ## Create a file named `handler.py` under the repository's "Files and versions" tab, then paste the code below into it.
2
+
3
+ ```python
4
+
5
+ from typing import Any, Dict, List
6
+
7
+ import torch
8
+ import transformers
9
+ from transformers import AutoModelForCausalLM, AutoTokenizer
10
+
11
# Pick the compute dtype for the model. bfloat16 is preferred on Ampere or
# newer GPUs (compute capability >= 8, e.g. A100/H100); older GPUs fall back
# to float16. Guard against hosts with no CUDA device so importing this
# module does not crash (torch.cuda.get_device_capability raises without a GPU).
if torch.cuda.is_available():
    dtype = torch.bfloat16 if torch.cuda.get_device_capability()[0] >= 8 else torch.float16
else:
    dtype = torch.float32
12
+
13
+
14
class EndpointHandler:
    """Custom handler for Hugging Face Inference Endpoints.

    Loads a causal language model (8-bit quantized) and its tokenizer once at
    startup, then serves text-generation requests through a transformers
    pipeline with a shared, pre-configured GenerationConfig.
    """

    def __init__(self, path=""):
        """Load the tokenizer and model from *path* and build the pipeline.

        path: directory (or model id) of the model repository; Inference
              Endpoints passes the repository path here at deploy time.
        """
        tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
        # NOTE(review): load_in_8bit=True requires the bitsandbytes package
        # and is a deprecated kwarg in recent transformers versions (use
        # quantization_config instead); it is also combined with torch_dtype
        # here — confirm this combination is intended for the target image.
        model = AutoModelForCausalLM.from_pretrained(
            path,
            return_dict=True,
            device_map="auto",
            load_in_8bit=True,
            torch_dtype=dtype,
            trust_remote_code=True,
        )

        # Generation defaults shared by every request handled by this endpoint.
        generation_config = model.generation_config
        generation_config.max_new_tokens = 256
        # NOTE(review): temperature only takes effect when sampling is enabled
        # (do_sample=True); under the default greedy decoding it is ignored —
        # confirm whether sampling was intended here.
        generation_config.temperature = 0.9
        generation_config.num_return_sequences = 1
        # Use EOS as the pad token so generation works for models that define
        # no dedicated pad token.
        generation_config.pad_token_id = tokenizer.eos_token_id
        generation_config.eos_token_id = tokenizer.eos_token_id
        self.generation_config = generation_config

        self.pipeline = transformers.pipeline(
            "text-generation", model=model, tokenizer=tokenizer
        )

    def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """Handle one inference request.

        data: request payload; the prompt is read from data["inputs"]. If the
              key is absent the whole dict is passed to the pipeline as the
              prompt (presumably an upstream error — verify against callers).

        Returns the pipeline output (typically a list of dicts with a
        "generated_text" key; note the annotated Dict return type may not
        match what the pipeline actually returns).
        """
        prompt = data.pop("inputs", data)
        result = self.pipeline(prompt, generation_config=self.generation_config)
        return result
42
+ ```