# NOTE(review): removed "Spaces: / Sleeping / Sleeping" lines — HuggingFace
# Spaces page-status banner captured by scraping, not part of the source.
# -*- coding: utf-8 -*-
"""nino bot

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1UgXple_p_R-0mq9p5vhOmFPo9cgdayJy
"""
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Small conversational model used as the fallback when no rule matches.
# Downloaded from the Hugging Face hub on first run (network required).
model_name = "microsoft/DialoGPT-small"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
# Exact-match canned replies, keyed by the lower-cased, stripped user input.
# Checked before the generative model in respond().
rules = {
    "hi": "Hello! How can I help you today?",
    "hello": "Hi there! How can I assist you?",
    "hey": "Hey! What can I do for you?",
    "how are you": "I'm just a bot, but I'm doing great! π How about you?",
    "good morning": "Good morning! Hope you have a wonderful day!",
    "good afternoon": "Good afternoon! How can I help you?",
    "good evening": "Good evening! What can I do for you?",
    "bye": "Goodbye! Have a nice day! π",
    "thank you": "You're welcome! π",
    "thanks": "No problem! Happy to help!",
    "what is your name": "I'm your friendly chatbot assistant.",
    "help": "Sure! Ask me anything or type 'bye' to exit.",
    "what can you do": "I can answer simple questions and chat with you. Try saying hi!",
    "tell me a joke": "Why did the computer show up at work late? It had a hard drive!",
    "what time is it": "Sorry, I don't have a clock yet. But you can check your device's time!",
    "where are you from": "I'm from the cloud, here to assist you anytime!",
    "what is ai": "AI stands for Artificial Intelligence, which is intelligence demonstrated by machines.",
    "who created you": "I was created by a talented developer using Python and machine learning!",
    "how can i learn programming": "Start with basics like Python. There are many free tutorials online to get you started!",
    "ok": "ok",
    "who are you?": "I am nino",
    "hi nino": "hi there",
}
def respond(user_input, history):
    """Produce a bot reply for one chat turn.

    Tries the exact-match ``rules`` table first (on the lower-cased, stripped
    input); otherwise generates a reply with DialoGPT, conditioning on the
    conversation so far.

    Args:
        user_input: raw text the user typed.
        history: list of (user_msg, bot_msg) pairs, or None for a new chat.

    Returns:
        (history, history) — the updated history twice, matching the Gradio
        output wiring that feeds both the chatbot display and the state.
    """
    if history is None:
        history = []

    user_input_clean = user_input.lower().strip()
    if user_input_clean in rules:
        bot_reply = rules[user_input_clean]
    else:
        # Build the model prompt from the conversation history; each message
        # is terminated by the tokenizer's eos token, as DialoGPT expects.
        prompt = ""
        for user_msg, bot_msg in history:
            prompt += f"{user_msg} {tokenizer.eos_token}\n"
            if bot_msg is not None:
                prompt += f"{bot_msg} {tokenizer.eos_token}\n"
        prompt += f"{user_input} {tokenizer.eos_token}\n"

        inputs = tokenizer(prompt, return_tensors="pt")
        outputs = model.generate(
            **inputs,
            max_new_tokens=100,
            pad_token_id=tokenizer.eos_token_id,
            do_sample=True,
            temperature=0.7,
            top_p=0.9,
        )
        # BUG FIX: the original sliced the decoded string with
        # generated_text[len(prompt):], but skip_special_tokens=True removes
        # the eos-token text that the prompt contains, so the character
        # offset was wrong and the reply could include prompt fragments.
        # Decode only the token ids generated beyond the prompt instead.
        new_token_ids = outputs[0][inputs["input_ids"].shape[-1]:]
        bot_reply = tokenizer.decode(new_token_ids, skip_special_tokens=True).strip()

        # Fall back to a canned reply when the model produces nothing useful.
        if len(bot_reply) < 5 or bot_reply.lower() in ["", "idk", "i don't know", "huh"]:
            bot_reply = "I'm not sure how to respond to that. Can you rephrase it?"

    history.append((user_input, bot_reply))
    return history, history
def save_chat(history, path="chat_history.txt"):
    """Write the full chat transcript to *path* (UTF-8), overwriting it.

    Args:
        history: list of (user_msg, bot_msg) pairs, or None (then this is a
            no-op). ``bot_msg`` may be None, recorded as "(No response)".
        path: output file name; new keyword defaulting to the previously
            hard-coded "chat_history.txt", so existing callers are unchanged.
    """
    if history is None:
        return
    with open(path, "w", encoding="utf-8") as f:
        for user_msg, bot_msg in history:
            if bot_msg is None:
                f.write(f"You: {user_msg}\nBot: (No response)\n\n")
            else:
                f.write(f"You: {user_msg}\nBot: {bot_msg}\n\n")
def process_input(user_input, history):
    """Handle one submit: reply, persist the transcript, clear the textbox.

    Returns a 3-tuple matching the Gradio outputs wiring:
    (chatbot display history, new textbox value "", state history).
    """
    updated_history, _ = respond(user_input, history)
    save_chat(updated_history)
    return updated_history, "", updated_history
# --- Gradio UI --------------------------------------------------------------
with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox(placeholder="Type your message here...")
    state = gr.State([])  # holds the (user, bot) history across turns

    # process_input returns (history, "", history): the empty string clears
    # the textbox after each submit while chatbot and state are both updated.
    msg.submit(process_input, inputs=[msg, state], outputs=[chatbot, msg, state])

demo.launch()