naot97 committed
Commit 33bd8cf · 1 parent: 36523f8

Update app.py

Files changed (1)
  1. app.py +26 -22
app.py CHANGED
@@ -53,27 +53,19 @@ else:
         device_map={"": device},
     )
 
-
-# def generate_prompt(instruction, input=None):
-#     if input:
-#         return f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
-# ### Instruction:
-# {instruction}
-# ### Input:
-# {input}
-# ### Response:"""
-#     else:
-#         return f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.
-# ### Instruction:
-# {instruction}
-# ### Response:"""
-
 if device != "cpu":
     model.half()
 model.eval()
 if torch.__version__ >= "2":
     model = torch.compile(model)
 
+def check_number(text):
+    count = 0
+    for word in text.split():
+        if word.isnumeric():
+            count += 1
+
+    return count >= 2
 
 def evaluate(
     instruction,
@@ -85,15 +77,27 @@ def evaluate(
     num_beams=4
     max_new_tokens=128
     prompt = instruction
+
     inputs = tokenizer(prompt, return_tensors="pt")
     input_ids = inputs["input_ids"].to(device)
-    generation_config = GenerationConfig(
-        temperature=temperature,
-        top_p=top_p,
-        top_k=top_k,
-        num_beams=num_beams,
-        **kwargs,
-    )
+
+    if check_number(prompt):
+        generation_config = GenerationConfig(
+            temperature=temperature,
+            top_p=top_p,
+            top_k=top_k,
+            num_beams=num_beams,
+            **kwargs,
+        )
+    else:
+        generation_config = GenerationConfig(
+            temperature=temperature,
+            top_p=top_p,
+            top_k=top_k,
+            do_sample=True,
+            **kwargs,
+        )
+
     # with torch.cuda.amp.autocast():
     #     output_tokens = model.generate(**inputs, generation_config=generation_config)
     #     output = tokenizer.decode(output_tokens[0], skip_special_tokens=True)
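The new check_number helper flags prompts that contain at least two purely numeric whitespace-separated tokens. A quick standalone demo of what it accepts; note that str.isnumeric() only matches digit-only tokens, so decimals and negative numbers do not count:

def check_number(text):
    # Count whitespace-separated tokens made up entirely of numeric characters.
    count = 0
    for word in text.split():
        if word.isnumeric():
            count += 1
    return count >= 2

print(check_number("what is 12 plus 30"))  # True:  "12" and "30" are numeric
print(check_number("add 3.5 and -7"))      # False: "3.5" and "-7" fail isnumeric()
print(check_number("tell me a story"))     # False: no numeric tokens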
 
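Net effect in evaluate: prompts that check_number flags as number-heavy (arithmetic-style inputs) are decoded deterministically with beam search (num_beams=4), while everything else falls back to sampling (do_sample=True). A minimal sketch of the resulting decode path, assuming a loaded model/tokenizer pair, a device, and transformers' GenerationConfig; the generate() wrapper and its sampling defaults are illustrative, not taken from app.py:

import torch
from transformers import GenerationConfig

def generate(prompt, temperature=0.1, top_p=0.75, top_k=40,
             num_beams=4, max_new_tokens=128, **kwargs):
    # Illustrative wrapper; only num_beams=4 and max_new_tokens=128
    # are defaults visible in the diff above.
    if check_number(prompt):
        # Beam search: with do_sample left False, transformers ignores
        # temperature/top_p/top_k, so this branch is deterministic.
        generation_config = GenerationConfig(
            temperature=temperature,
            top_p=top_p,
            top_k=top_k,
            num_beams=num_beams,
            **kwargs,
        )
    else:
        # Sampling: temperature/top_p/top_k now take effect.
        generation_config = GenerationConfig(
            temperature=temperature,
            top_p=top_p,
            top_k=top_k,
            do_sample=True,
            **kwargs,
        )
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    with torch.no_grad():
        output_tokens = model.generate(
            **inputs,
            generation_config=generation_config,
            max_new_tokens=max_new_tokens,
        )
    return tokenizer.decode(output_tokens[0], skip_special_tokens=True)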