prithivMLmods committed on
Commit 223d7b5 · verified · 1 Parent(s): 0829efb

Update app.py

Files changed (1): app.py (+1, -2)
app.py CHANGED
@@ -31,8 +31,7 @@ MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
 # Load text-only model and tokenizer
-#model_id = "prithivMLmods/FastThink-0.5B-Tiny"
-model_id = "prithivMLmods/Raptor-X5-UIGEN"
+model_id = "prithivMLmods/FastThink-0.5B-Tiny"
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 model = AutoModelForCausalLM.from_pretrained(
     model_id,
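
For context, a minimal sketch of the model-loading block in app.py as it likely reads after this commit, assuming the standard Hugging Face transformers API. Only the model_id assignment and the calls shown in the diff come from the source; the dtype argument, device placement, and eval() call are assumptions added to make the snippet self-contained.

import os

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Load text-only model and tokenizer
model_id = "prithivMLmods/FastThink-0.5B-Tiny"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,  # assumed dtype
)
model.to(device)  # assumed: move weights to the selected device
model.eval()      # assumed: inference mode for generation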