tahsinhasem committed
Commit d236bfb · verified · 1 Parent(s): 9886add

Update main.py

Files changed (1): main.py (+2 -1)
main.py CHANGED
@@ -9,6 +9,7 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
 #Load pre-trained tokenizer and model (Works)
 model_name = "gpt2"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
+tokenizer.pad_token = tokenizer.eos_token
 model = AutoModelForCausalLM.from_pretrained(
     model_name,
     device_map="auto",
@@ -52,7 +53,7 @@ async def generate_text(item: Item):
 
     # logging.info("Response generated")
 
-    inputs = tokenizer(prompt, return_tensors="pt", padding=True, return_attention_mask=True).to(model.device)
+    inputs = tokenizer(prompt, return_tensors="pt", padding=True, return_attention_mask=True, ).to(model.device)
 
 
     # input_ids = tokenizer.encode(item.prompt, return_tensors="pt")
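
For context, a minimal sketch (not part of the commit) of why the pad_token assignment matters: GPT-2's tokenizer ships without a padding token, so calling it with padding=True raises "Asking to pad but the tokenizer does not have a padding token" until one is assigned, and reusing the EOS token is the common workaround. The prompt value and the generate() call below are illustrative assumptions; only the tokenizer/model setup and the tokenizer call mirror the diff.

from transformers import AutoTokenizer, AutoModelForCausalLM

model_name = "gpt2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer.pad_token = tokenizer.eos_token  # the fix added in this commit

model = AutoModelForCausalLM.from_pretrained(model_name)

prompt = "Hello, world"  # stand-in for item.prompt in the app (assumption)

# Without the pad_token assignment above, this call fails because
# padding=True requires the tokenizer to have a pad token defined.
inputs = tokenizer(prompt, return_tensors="pt", padding=True,
                   return_attention_mask=True).to(model.device)

# Illustrative generation step; the actual generation code is not shown in the diff.
outputs = model.generate(**inputs, max_new_tokens=20,
                         pad_token_id=tokenizer.eos_token_id)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))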