Tonic committed
Commit 91f8c28
Parent: 0b077bd

Update app.py

Files changed (1):
  app.py (+5, -8)
app.py CHANGED
@@ -34,6 +34,10 @@ dotenv.load_dotenv()
 seamless_client = Client("facebook/seamless_m4t")
 HuggingFace_Token = os.getenv("HuggingFace_Token")
 hf_token = os.getenv("HuggingFace_Token")
+base_model_id = os.getenv('BASE_MODEL_ID', 'default_base_model_id')
+model_directory = os.getenv('MODEL_DIRECTORY', 'default_model_directory')
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
 
 def check_hallucination(assertion,citation):
     API_URL = "https://api-inference.huggingface.co/models/vectara/hallucination_evaluation_model"
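
Note: the added lines move the base model ID, adapter directory, and device selection to module level and read the first two from environment variables, with placeholder defaults. A minimal standalone sketch of the new configuration pattern, assuming a .env file next to app.py that reuses the IDs previously hard-coded further down (the file contents below are illustrative, not part of this commit):

# Hypothetical .env contents:
#   BASE_MODEL_ID=stabilityai/stablelm-3b-4e1t
#   MODEL_DIRECTORY=Tonic/stablemed
import os

import dotenv
import torch

dotenv.load_dotenv()
base_model_id = os.getenv('BASE_MODEL_ID', 'default_base_model_id')
model_directory = os.getenv('MODEL_DIRECTORY', 'default_model_directory')
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"base model: {base_model_id}, adapters: {model_directory}, device: {device}")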
@@ -336,13 +340,6 @@ def multimodal_prompt(user_input, system_prompt="You are an expert medical analy
 
     return response_text
 
-# Define the device
-device = "cuda" if torch.cuda.is_available() else "cpu"
-
-# Use the base model's ID
-base_model_id = "stabilityai/stablelm-3b-4e1t"
-model_directory = "Tonic/stablemed"
-
 # Instantiate the Tokenizer
 tokenizer = AutoTokenizer.from_pretrained("stabilityai/stablelm-3b-4e1t", token=hf_token, trust_remote_code=True, padding_side="left")
 # tokenizer = AutoTokenizer.from_pretrained("Tonic/stablemed", trust_remote_code=True, padding_side="left")
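
Note: the hunk above drops the hard-coded device, base_model_id, and model_directory in favor of the environment-driven values added at the top of the module; the tokenizer is still loaded straight from the base model ID. The peft_model object used in ChatBot.predict below is created outside this diff, so the following is only a hedged sketch of the usual transformers + peft loading pattern, under the assumption that model_directory points at a PEFT/LoRA adapter trained on top of base_model_id:

# Sketch only: the actual peft_model setup lives elsewhere in app.py and is not shown in this diff.
from transformers import AutoModelForCausalLM
from peft import PeftModel

base_model = AutoModelForCausalLM.from_pretrained(
    base_model_id,        # e.g. "stabilityai/stablelm-3b-4e1t"
    token=hf_token,
    trust_remote_code=True,
).to(device)
peft_model = PeftModel.from_pretrained(base_model, model_directory)  # e.g. "Tonic/stablemed"
peft_model.eval()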
@@ -360,7 +357,7 @@ class ChatBot:
         self.history = []
 
     def predict(self, user_input, system_prompt="You are an expert medical analyst:"):
-        formatted_input = f"<s>[INST]{system_prompt} {user_input}[/INST]"
+        formatted_input = f"<s>[INST] {user_input}</s>[/INST]{system_prompt}"
         user_input_ids = tokenizer.encode(formatted_input, return_tensors="pt")
        response = peft_model.generate(input_ids=user_input_ids, max_length=512, pad_token_id=tokenizer.eos_token_id)
         response_text = tokenizer.decode(response[0], skip_special_tokens=True)
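
Note: the only change in this hunk is the prompt template; the system prompt now follows the closing [/INST] tag instead of preceding the user input inside [INST]. An illustrative check of the string the updated predict builds before tokenization (the user input below is hypothetical):

# Illustrative only: makes the new template concrete.
system_prompt = "You are an expert medical analyst:"
user_input = "Summarize the contraindications for metformin."
formatted_input = f"<s>[INST] {user_input}</s>[/INST]{system_prompt}"
print(formatted_input)
# <s>[INST] Summarize the contraindications for metformin.</s>[/INST]You are an expert medical analyst: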
 