Upload app.py
app.py
ADDED
@@ -0,0 +1,65 @@
+# -*- coding: utf-8 -*-
+"""simple pre-trained bot
+
+Automatically generated by Colab.
+
+Original file is located at
+    https://colab.research.google.com/drive/1UgXple_p_R-0mq9p5vhOmFPo9cgdayJy
+"""
+
+import gradio as gr
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+model_name = "microsoft/DialoGPT-small"
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+model = AutoModelForCausalLM.from_pretrained(model_name)
+
+rules = {
+    "hi": "Hello! How can I help you?",
+    "hello": "Hi there!",
+    "how are you": "I'm just a bot, but I'm doing great! 😊",
+    "good morning": "Good morning! Hope you have a great day!",
+    "bye": "Goodbye! Have a nice day!",
+    "what is your name": "I'm your friendly chatbot assistant!",
+    "who are you": "I'm a chatbot here to chat with you!",
+    "am zara": "Nice to meet you, Zara!",
+    "thank you": "You're welcome! 😊",
+}
+
+def respond(user_input, history):
+    if history is None:
+        history = []
+
+    user_input_clean = user_input.lower().strip()
+
+    if user_input_clean in rules:
+        bot_reply = rules[user_input_clean]
+    else:
+        prompt = ""
+        for user_msg, bot_msg in history:
+            prompt += f"\nUser: {user_msg}\nBot: {bot_msg}"
+        prompt += f"\nUser: {user_input}\nBot:"
+
+        inputs = tokenizer(prompt, return_tensors="pt")
+        outputs = model.generate(
+            **inputs,
+            max_new_tokens=100,
+            pad_token_id=tokenizer.eos_token_id,
+            do_sample=True,
+            temperature=0.7,
+            top_p=0.9,
+        )
+        generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
+        bot_reply = generated_text.split("Bot:")[-1].strip()
+
+    history.append([user_input, bot_reply])
+    return history, history
+
+with gr.Blocks() as demo:
+    chatbot = gr.Chatbot()
+    msg = gr.Textbox(placeholder="Type your message here...")
+    state = gr.State()
+
+    msg.submit(respond, inputs=[msg, state], outputs=[chatbot, state])
+
+demo.launch()
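
If this Space runs on the Gradio SDK, the imports above will usually only resolve when a requirements.txt is uploaded next to app.py. The exact contents are an assumption about this Space, not part of this commit; a minimal sketch would be:

gradio
transformers
torch

Separately, the DialoGPT model card prompts the model by joining turns with the tokenizer's EOS token rather than with the "User:"/"Bot:" labels that respond() builds. The sketch below shows that convention as a possible alternative, not the method in app.py; the names dialo_reply and chat_history_ids are illustrative only.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-small")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-small")

def dialo_reply(user_input, chat_history_ids=None):
    # Encode the new user turn and end it with EOS, following the model card convention.
    new_ids = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors="pt")
    # Append the new turn to the running conversation tokens, if there are any.
    input_ids = new_ids if chat_history_ids is None else torch.cat([chat_history_ids, new_ids], dim=-1)
    chat_history_ids = model.generate(
        input_ids,
        max_new_tokens=100,
        pad_token_id=tokenizer.eos_token_id,
        do_sample=True,
        temperature=0.7,
        top_p=0.9,
    )
    # The reply is whatever was generated after the prompt tokens.
    reply = tokenizer.decode(chat_history_ids[0, input_ids.shape[-1]:], skip_special_tokens=True)
    return reply, chat_history_ids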