import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM
from datetime import datetime

# Custom CSS for UI (the style block is currently empty — placeholder).
st.markdown(""" """, unsafe_allow_html=True)


# Cache model and tokenizer to avoid reloading on every Streamlit rerun.
@st.cache_resource
def load_model_and_tokenizer():
    """Load and cache the CodeGen-350M tokenizer and model.

    Returns:
        tuple: ``(tokenizer, model)`` on success, or ``(None, None)`` if
        either download/load step fails (the error is shown in the UI).
    """
    checkpoint = "Salesforce/codegen-350M-mono"
    try:
        st.write("Loading tokenizer...")
        tokenizer = AutoTokenizer.from_pretrained(checkpoint)
        st.write("Loading model...")
        model = AutoModelForCausalLM.from_pretrained(checkpoint)
        st.write("Model and tokenizer loaded successfully!")
        return tokenizer, model
    except Exception as e:
        # Broad catch is deliberate here: any load failure (network, disk,
        # bad checkpoint) should surface in the UI instead of crashing.
        st.error(f"Failed to load model/tokenizer: {e}")
        return None, None


# Load model and tokenizer once; halt the app if either failed to load.
tokenizer, model = load_model_and_tokenizer()
if tokenizer is None or model is None:
    st.stop()


def generate_code(description):
    """Generate Python code for a natural-language task description.

    Args:
        description: Plain-English description of the coding task.

    Returns:
        str: The generated code with the echoed prompt stripped, or an
        error string if generation fails.
    """
    prompt = f"Generate Python code for the following task: {description}\n"
    inputs = tokenizer(prompt, return_tensors="pt")
    try:
        outputs = model.generate(
            **inputs,
            # FIX: the original used max_length=500, which counts the prompt
            # tokens too — a long description silently shrank (or zeroed) the
            # generation budget. max_new_tokens bounds only the new text.
            max_new_tokens=500,
            num_return_sequences=1,
            # CodeGen has no pad token; reuse EOS to silence the warning and
            # make generation deterministic about padding.
            pad_token_id=tokenizer.eos_token_id,
        )
        code = tokenizer.decode(outputs[0], skip_special_tokens=True)
        # The model echoes the prompt; slice it off so only code remains.
        return code[len(prompt):].strip()
    except Exception as e:
        st.error(f"Error generating code: {e}")
        return "Error: Could not generate code."


# Initialize chat history
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []

# UI Layout
# NOTE(review): SOURCE is truncated mid-statement here (`st.markdown('` with
# an unterminated string literal). The rest of the UI-layout section is not
# visible in this chunk, so it is omitted rather than guessed at — restore it
# from the full file.