import streamlit as st
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer


# Load a model and wrap it in a text-generation pipeline; the result is cached
# across Streamlit reruns so the model is only loaded once per session.
@st.cache_resource
def load_model(model_name):
    try:
        tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
        model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
        pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
        return pipe
    except Exception as e:
        st.error(f"Failed to load {model_name}: {e}")
        return None


# Primary model (DeepSeek-R1)
MODEL_NAME = "deepseek-ai/DeepSeek-R1"
# Smaller open-source fallback model (Falcon-7B-Instruct), used when the
# primary model cannot be loaded on the available hardware.
FALLBACK_MODEL = "tiiuae/falcon-7b-instruct"

st.title("Text-to-Text AI with DeepSeek-R1")

user_input = st.text_area("Enter your prompt:")
use_negative_prompt = st.checkbox("Use Negative Prompt?")
negative_prompt = st.text_area("Negative prompt (if any):") if use_negative_prompt else ""

if st.button("Generate Response"):
    if user_input:
        pipe = load_model(MODEL_NAME)
        if pipe is None:
            st.warning("Primary model unavailable; switching to the fallback model.")
            pipe = load_model(FALLBACK_MODEL)
        if pipe:
            # Fold the negative prompt into the prompt text, since text-generation
            # pipelines have no built-in negative-prompt parameter.
            full_prompt = f"{user_input} [Avoid: {negative_prompt}]" if use_negative_prompt else user_input
            # max_new_tokens bounds only the generated continuation, so long prompts
            # are not cut off the way a fixed max_length would cut them off.
            result = pipe(full_prompt, max_new_tokens=200, do_sample=True, temperature=0.7)
            st.write(result[0]["generated_text"])
        else:
            st.error("No working model available.")
    else:
        st.warning("Please enter a prompt.")
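# Usage note (a sketch, not part of the original script): assuming this file is
# saved as app.py, the app can be started locally with:
#
#   streamlit run app.py
#
# DeepSeek-R1 is a very large model, so loading it via AutoModelForCausalLM will
# usually fail on modest hardware; that failure path is what triggers the
# Falcon-7B-Instruct fallback above.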