Allahbux committed on
Commit
6d0a489
·
verified ·
1 Parent(s): b388a5a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -3
app.py CHANGED
@@ -1,7 +1,7 @@
1
  import streamlit as st
2
  from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
3
 
4
- # Cache model loading
5
  @st.cache_resource
6
  def load_model(model_name):
7
  try:
@@ -16,8 +16,8 @@ def load_model(model_name):
16
  # Primary model (DeepSeek-R1)
17
  MODEL_NAME = "deepseek-ai/DeepSeek-R1"
18
 
19
- # Fallback model (Mistral-7B-Instruct, optimized for CPU)
20
- FALLBACK_MODEL = "mistralai/Mistral-7B-Instruct"
21
 
22
  st.title("Text-to-Text AI with DeepSeek-R1")
23
 
@@ -41,3 +41,4 @@ if st.button("Generate Response"):
41
  st.error("No working model available.")
42
  else:
43
  st.warning("Please enter a prompt.")
 
 
1
  import streamlit as st
2
  from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
3
 
4
+ # Function to load models with caching
5
  @st.cache_resource
6
  def load_model(model_name):
7
  try:
 
16
  # Primary model (DeepSeek-R1)
17
  MODEL_NAME = "deepseek-ai/DeepSeek-R1"
18
 
19
+ # Fallback model (Mistral-7B-v0.1, smaller and CPU-friendly)
20
+ FALLBACK_MODEL = "mistralai/Mistral-7B-v0.1"
21
 
22
  st.title("Text-to-Text AI with DeepSeek-R1")
23
 
 
41
  st.error("No working model available.")
42
  else:
43
  st.warning("Please enter a prompt.")
44
+