Update app.py
app.py CHANGED
@@ -1,12 +1,12 @@
 import streamlit as st
-from transformers import …
+from transformers import T5ForConditionalGeneration, T5Tokenizer
 import torch
 import random
 
-# Load pre-trained …
-model_name = "…
-model = …
-tokenizer = …
+# Load pre-trained T5 model and tokenizer
+model_name = "t5-small"  # You can use "t5-base" or "t5-large" for better quality but slower response
+model = T5ForConditionalGeneration.from_pretrained(model_name)
+tokenizer = T5Tokenizer.from_pretrained(model_name)
 
 # Set device to GPU if available for faster inference, otherwise fallback to CPU
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@@ -20,10 +20,10 @@ if 'conversation' not in st.session_state:
 
 # Define multiple system prompts to control bot's behavior
 system_prompts = [
-    "You are a …
-    "You are a …
-    "You are …
-    "You are a …
+    "You are a helpful assistant. Respond in a polite, friendly, and informative manner.",
+    "You are a conversational chatbot. Provide friendly, engaging, and empathetic responses.",
+    "You are an informative assistant. Respond clearly and concisely to any questions asked.",
+    "You are a fun, casual chatbot. Keep the conversation light-hearted and interesting."
 ]
 
 # Select a random system prompt to start the conversation
@@ -35,28 +35,25 @@ def generate_response(input_text):
     if len(st.session_state['history']) == 0:
         system_prompt = get_system_prompt()
         st.session_state['conversation'].append(f"System: {system_prompt}")
-        …
-        st.session_state['history']…
+        system_input = f"conversation: {system_prompt} "
+        st.session_state['history'].append(system_input)
 
-    # …
-    …
+    # Prepare the user input by appending it to the history
+    user_input = f"conversation: {input_text} "
 
-    # …
-    …
-        history_tensor = torch.tensor(st.session_state['history']).unsqueeze(0).to(device)
-        bot_input_ids = torch.cat([history_tensor, new_user_input_ids], dim=-1)
-    else:
-        bot_input_ids = new_user_input_ids
+    # Concatenate history (system prompt + user input)
+    full_input = "".join(st.session_state['history']) + user_input
 
-    # …
-    …
+    # Tokenize input text and generate response from the model
+    input_ids = tokenizer.encode(full_input, return_tensors="pt").to(device)
+    outputs = model.generate(input_ids, max_length=1000, num_beams=5, top_p=0.95, temperature=0.7, pad_token_id=tokenizer.eos_token_id)
 
-    # Decode the model's output
-    …
-    bot_output = tokenizer.decode(chat_history_ids[0], skip_special_tokens=True)
+    # Decode the model's output
+    bot_output = tokenizer.decode(outputs[0], skip_special_tokens=True)
 
-    # Update…
-    st.session_state['history']…
+    # Update the history with the new user input and the model's output
+    st.session_state['history'].append(user_input)
+    st.session_state['history'].append(f"bot: {bot_output} ")
 
     # Add both user input and bot response to the conversation history for display
     st.session_state['conversation'].append(f"You: {input_text}")
@@ -65,7 +62,7 @@ def generate_response(input_text):
     return bot_output
 
 # Streamlit Interface
-st.title("Chat with …
+st.title("Chat with T5")
 
 # Display the conversation history
 if st.session_state['conversation']:
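A note on device handling: the new code moves input_ids onto device, but none of the shown hunks move the model itself. If the Space runs on a GPU, generate would then fail with a device-mismatch error. The model may well be moved elsewhere in the file; if not, a one-line adjustment along these lines (same names as the diff) keeps weights and inputs on the same device:

    import torch
    from transformers import T5ForConditionalGeneration, T5Tokenizer

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Move the model's weights to the chosen device, matching the
    # .to(device) already applied to input_ids in generate_response.
    model = T5ForConditionalGeneration.from_pretrained("t5-small").to(device)
    tokenizer = T5Tokenizer.from_pretrained("t5-small")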
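The diff calls get_system_prompt(), but its definition is not part of the shown hunks. Judging from the comment above it ("Select a random system prompt") and the random import, it is presumably something like this minimal sketch; the body here is an assumption, not code from the commit:

    import random

    def get_system_prompt():
        # Hypothetical reconstruction: system_prompts is the list
        # defined at the top of app.py; pick one entry at random.
        return random.choice(system_prompts)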
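One detail in the new generate call is worth flagging: in transformers, top_p and temperature only take effect when sampling is enabled, so combining them with num_beams=5 and no do_sample=True means they are silently ignored and the call is plain beam search. A sketch of the two coherent variants, using the same names as the diff:

    # Variant 1: deterministic beam search; top_p/temperature dropped
    # because they have no effect without sampling.
    outputs = model.generate(input_ids, max_length=1000, num_beams=5)

    # Variant 2: nucleus sampling; do_sample=True makes top_p and
    # temperature actually apply.
    outputs = model.generate(input_ids, max_length=1000, do_sample=True,
                             top_p=0.95, temperature=0.7)

Separately, T5 checkpoints already define their own pad token, so overriding pad_token_id with tokenizer.eos_token_id is unnecessary here, though harmless.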
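For readers tracing the new prompt format: generate_response flattens the stored turns into one string, so after a single exchange the model input looks roughly like the sketch below. The turn texts are invented for illustration; the formatting strings are the ones from the diff:

    # Hypothetical turns, formatted exactly as in generate_response:
    history = [
        "conversation: You are a helpful assistant. Respond in a polite, friendly, and informative manner. ",
        "conversation: Hello! ",            # earlier user turn
        "bot: Hi there! How can I help? ",  # earlier model reply
    ]
    full_input = "".join(history) + "conversation: What can you do? "
    # full_input is what gets tokenized and passed to model.generate

Two practical caveats: T5Tokenizer depends on the sentencepiece package, so the Space's requirements need it alongside streamlit, torch, and transformers; and the vanilla t5-small checkpoint is not chat-tuned, so replies to this free-form "conversation:" prefix may be weak, and a dialogue- or instruction-tuned checkpoint would likely behave better.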