Update app.py
app.py
CHANGED
@@ -2,8 +2,8 @@ import streamlit as st
 from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 
 # Load the model and tokenizer from Hugging Face Hub
-model_name = "
-tokenizer = AutoTokenizer.from_pretrained(model_name)
+model_name = "microsoft/CodeGPT-small-py"  # Model name you provided
+tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
 model = AutoModelForCausalLM.from_pretrained(model_name)
 
 # Initialize the pipeline
@@ -15,17 +15,18 @@ def generate_text(prompt):
     return generated[0]['generated_text']
 
 # Streamlit UI
-st.title("
-st.write("Enter a prompt to generate text:")
+st.title("CodeGPT Text Generation")
+st.write("Enter a prompt to generate code or text:")
 
 # Text input for the user to enter a prompt
-prompt = st.text_area("Prompt", "
+prompt = st.text_area("Prompt", "def example_function():\n # Your code here...")
 
 # Button to trigger the model inference
-if st.button("Generate
+if st.button("Generate Code"):
     if prompt:
         generated_text = generate_text(prompt)
-        st.subheader("Generated
+        st.subheader("Generated Code")
         st.write(generated_text)
     else:
-        st.warning("Please enter a prompt to generate
+        st.warning("Please enter a prompt to generate code.")
+
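For context, here is a sketch of the complete app.py as it would read after this commit. The diff elides lines 10-14 (the pipeline setup and the body of generate_text), so the generator variable name and the generation parameters (max_length, num_return_sequences) below are assumptions based on the standard transformers text-generation pipeline, not code shown in the diff:

import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# Load the model and tokenizer from Hugging Face Hub
model_name = "microsoft/CodeGPT-small-py"
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Initialize the pipeline (assumed form; this line is elided in the diff)
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)

def generate_text(prompt):
    # Assumed generation parameters; only the return line appears in the diff
    generated = generator(prompt, max_length=128, num_return_sequences=1)
    return generated[0]['generated_text']

# Streamlit UI
st.title("CodeGPT Text Generation")
st.write("Enter a prompt to generate code or text:")

# Text input for the user to enter a prompt
prompt = st.text_area("Prompt", "def example_function():\n # Your code here...")

# Button to trigger the model inference
if st.button("Generate Code"):
    if prompt:
        generated_text = generate_text(prompt)
        st.subheader("Generated Code")
        st.write(generated_text)
    else:
        st.warning("Please enter a prompt to generate code.")

Locally, such an app would be started with "streamlit run app.py"; on a Hugging Face Space the runtime launches it automatically.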