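The app script (app.py, by Streamlit-Space convention) wraps Salesforce's codegen-350M-mono model in a Hugging Face text-generation pipeline behind a simple Streamlit UI: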
import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import torch

# Load the model and tokenizer once per session; st.cache_resource keeps the
# pipeline in memory across Streamlit reruns instead of reloading the weights
# on every widget interaction.
@st.cache_resource
def load_model():
    MODEL_NAME = "Salesforce/codegen-350M-mono"  # a known, publicly available model
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, torch_dtype=torch.float32)
    # Wrap the model and tokenizer in a text-generation pipeline
    return pipeline("text-generation", model=model, tokenizer=tokenizer)

# Initialize the model
code_generator = load_model()

# Streamlit UI
st.title("CodeGen Code Bot 🤖")
st.subheader("Generate code snippets using Hugging Face CodeGen")

# User input
prompt = st.text_area("Enter a coding prompt (e.g., 'Write a Python function to sort a list'):")

# Generate code
if st.button("Generate Code"):
    if prompt.strip():
        st.info("Generating code... Please wait ⏳")
        try:
            # Generate code using the model
            response = code_generator(
                prompt,
                max_length=512,       # total tokens (prompt + completion); raise for longer code
                temperature=0.2,      # low temperature for more deterministic results
                do_sample=True,       # enable sampling
                num_return_sequences=1,
                pad_token_id=code_generator.tokenizer.eos_token_id,  # CodeGen has no dedicated pad token
            )
            generated_code = response[0]["generated_text"]
            # Display the generated code
            st.code(generated_code, language="python")  # change language as needed
        except Exception as e:
            st.error(f"Error: {str(e)}")
    else:
        st.warning("Please enter a prompt.")

st.caption("Powered by CodeGen | Streamlit UI")
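To try the app outside of Spaces, a minimal sketch, assuming the script above is saved as app.py and that streamlit, transformers, and torch are the only dependencies:

    pip install streamlit transformers torch
    streamlit run app.py

On the first run, from_pretrained downloads the CodeGen checkpoint (roughly 350M parameters) from the Hugging Face Hub, so expect a delay before the UI becomes responsive. The low temperature with sampling enabled is a deliberate trade-off: output stays close to the most likely completion while avoiding the repetition loops that pure greedy decoding can produce.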