|
import streamlit as st |
|
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline |
|
import black |
|
|
|
|
|
model_name = "microsoft/CodeGPT-small-py"


@st.cache_resource
def _load_generation_pipeline(name):
    """Load the tokenizer, model, and text-generation pipeline exactly once.

    Streamlit re-executes the whole script on every user interaction;
    without caching, the model weights are re-read from disk on each rerun.
    ``st.cache_resource`` keeps one shared instance for the session.
    """
    tok = AutoTokenizer.from_pretrained(name)
    mdl = AutoModelForCausalLM.from_pretrained(name)
    gen = pipeline(
        "text-generation",
        model=mdl,
        tokenizer=tok,
        # do_sample=True is required for temperature/top_p to take effect;
        # without it the pipeline decodes greedily and silently ignores both.
        do_sample=True,
        temperature=0.5,
        top_p=0.9,
        max_length=150,
    )
    return tok, mdl, gen


# Preserve the original module-level names so the rest of the file is unchanged.
tokenizer, model, generator = _load_generation_pipeline(model_name)
|
|
|
def generate_code_with_feedback(prompt):
    """Generate text for *prompt* with the shared pipeline, then return it
    run through the project's code formatter."""
    outputs = generator(prompt, num_return_sequences=1)
    raw_text = outputs[0]["generated_text"]
    return format_code(raw_text)
|
|
|
def format_code(code):
    """Format *code* with black; fall back to the raw text on parse failure.

    Model output is frequently not syntactically valid Python, and
    ``black.format_str`` raises ``black.InvalidInput`` (a ``ValueError``
    subclass) on anything it cannot parse — which previously crashed the
    app. Returning the unformatted text keeps the UI responsive instead.
    """
    try:
        return black.format_str(code, mode=black.Mode())
    except ValueError:
        # Covers black.InvalidInput; show the generation as-is.
        return code
|
|
|
|
|
st.title("Smart Code Generation and Fixing")

st.write("Enter a prompt to generate or fix code:")

option = st.radio("Select Action", ("Generate Code", "Fix Code"))

# Seed the text area with a default that matches the chosen action.
if option == "Generate Code":
    prompt = st.text_area("Prompt", "Write a Python function that reverses a string:")
else:
    prompt = st.text_area("Prompt", "Fix the following buggy Python code:\n\ndef reverse_string(s):\n return s[::-1]")

# Label the button after the selected action — previously it always read
# "Generate Code", even when the user had chosen "Fix Code".
if st.button(option):
    if prompt:
        generated_code = generate_code_with_feedback(prompt)
        st.subheader("Generated Code")
        # st.code renders monospace with Python syntax highlighting;
        # st.write treats the text as markdown and mangles indentation.
        st.code(generated_code, language="python")
    else:
        st.warning("Please enter a prompt.")
|
|
|
|
|
|