zahraa12355 committed on
Commit
0582f15
Β·
verified Β·
1 Parent(s): a8656f2

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +122 -0
app.py ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# -*- coding: utf-8 -*-
"""nino bot

Automatically generated by Colab.

Original file is located at
https://colab.research.google.com/drive/1UgXple_p_R-0mq9p5vhOmFPo9cgdayJy
"""

import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Small conversational model; weights are fetched from the Hugging Face hub
# on first run, so module import may take a while.
model_name = "microsoft/DialoGPT-small"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
17
# Canned replies: messages that match a key here (after lower-casing and
# stripping in respond()) are answered from this table and never reach the
# language model.  Keys must therefore be lowercase.
rules = {
    "hi": "Hello! How can I help you today?",
    "hello": "Hi there! How can I assist you?",
    "hey": "Hey! What can I do for you?",
    "how are you": "I'm just a bot, but I'm doing great! 😊 How about you?",
    "good morning": "Good morning! Hope you have a wonderful day!",
    "good afternoon": "Good afternoon! How can I help you?",
    "good evening": "Good evening! What can I do for you?",
    "bye": "Goodbye! Have a nice day! πŸ‘‹",
    "thank you": "You're welcome! 😊",
    "thanks": "No problem! Happy to help!",
    "what is your name": "I'm your friendly chatbot assistant.",
    "help": "Sure! Ask me anything or type 'bye' to exit.",
    "what can you do": "I can answer simple questions and chat with you. Try saying hi!",
    "tell me a joke": "Why did the computer show up at work late? It had a hard drive!",
    "what time is it": "Sorry, I don't have a clock yet. But you can check your device's time!",
    "where are you from": "I'm from the cloud, here to assist you anytime!",
    "what is ai": "AI stands for Artificial Intelligence, which is intelligence demonstrated by machines.",
    "who created you": "I was created by a talented developer using Python and machine learning!",
    "how can i learn programming": "Start with basics like Python. There are many free tutorials online to get you started!",
    'ok':'ok',
    'who are you?':'I am nino',
    'hi nino' : 'hi there',
}
41
+
42
def respond(user_input, history):
    """Produce a bot reply for *user_input*.

    The message is first looked up in the hand-written ``rules`` table; on a
    miss, a reply is generated with DialoGPT conditioned on the prior turns.

    Args:
        user_input: Raw text the user typed.
        history: List of ``(user_msg, bot_msg)`` tuples, or ``None`` on the
            first call.

    Returns:
        ``(history, history)`` — the updated conversation twice, matching the
        Gradio (chatbot display, state) output pair expected by callers.
    """
    if history is None:
        history = []

    user_input_clean = user_input.lower().strip()

    if user_input_clean in rules:
        bot_reply = rules[user_input_clean]
    else:
        # Rebuild the dialogue as one prompt, each turn terminated by the
        # model's end-of-sequence token.
        # NOTE(review): DialoGPT was trained on turns joined directly by
        # eos_token (no spaces/newlines); the extra whitespace is kept from
        # the original code to avoid changing generation behavior here.
        prompt = ""
        for user_msg, bot_msg in history:
            if bot_msg is not None:
                prompt += f"{user_msg} {tokenizer.eos_token}\n{bot_msg} {tokenizer.eos_token}\n"
            else:
                # A turn may have no bot reply yet; include the user side only.
                prompt += f"{user_msg} {tokenizer.eos_token}\n"
        prompt += f"{user_input} {tokenizer.eos_token}\n"

        inputs = tokenizer(prompt, return_tensors="pt")
        outputs = model.generate(
            **inputs,
            max_new_tokens=100,
            pad_token_id=tokenizer.eos_token_id,
            do_sample=True,
            temperature=0.7,
            top_p=0.9,
        )

        # BUG FIX: the old code decoded the whole sequence with
        # skip_special_tokens=True and then sliced off len(prompt) characters.
        # Removing the special tokens shifts the character offsets, so that
        # slice misaligned (eating the start of the reply or retaining prompt
        # text).  Decode only the token ids generated past the prompt instead.
        prompt_token_count = inputs["input_ids"].shape[-1]
        bot_reply = tokenizer.decode(
            outputs[0][prompt_token_count:], skip_special_tokens=True
        ).strip()

        # Fall back to a canned apology for empty or throwaway generations.
        # This check stays inside the generation branch so short rule-based
        # replies (e.g. "ok") are never overridden.
        if len(bot_reply) < 5 or bot_reply.lower() in ["", "idk", "i don't know", "huh"]:
            bot_reply = "I'm not sure how to respond to that. Can you rephrase it?"

    # Record the turn; return the history twice (chat display + state).
    history.append((user_input, bot_reply))
    return history, history
86
+
87
def save_chat(history):
    """Persist the conversation to ``chat_history.txt``.

    Each ``(user_msg, bot_msg)`` pair is written as a "You:"/"Bot:" couplet
    followed by a blank line; a ``None`` bot message is recorded as
    "(No response)".  A ``None`` history is a no-op (nothing is written).
    """
    if history is None:
        return
    couplets = []
    for user_msg, bot_msg in history:
        reply = "(No response)" if bot_msg is None else bot_msg
        couplets.append(f"You: {user_msg}\nBot: {reply}\n\n")
    with open("chat_history.txt", "w", encoding="utf-8") as f:
        f.write("".join(couplets))
97
+
98
+
99
+ # New function to process input, respond, save, and clear the textbox
100
def process_input(user_input, history):
    """Gradio submit handler: generate a reply, persist the log, clear the box.

    Returns a 3-tuple matching the ``[chatbot, msg, state]`` outputs wired in
    the UI: the updated history for the chat display, an empty string to blank
    the textbox, and the same history again for the state component.
    """
    new_history, _ = respond(user_input, history)
    save_chat(new_history)
    return new_history, "", new_history
109
+
110
# --- Gradio UI wiring ------------------------------------------------------
with gr.Blocks() as demo:
    chatbot = gr.Chatbot()  # conversation display pane
    msg = gr.Textbox(placeholder="Type your message here...")  # user input box
    state = gr.State([])  # This state variable holds the chat history

    # On Enter, process_input returns (history, "", history): updating the
    # chat pane, clearing the textbox, and refreshing the stored state.
    msg.submit(process_input, inputs=[msg, state], outputs=[chatbot, msg, state])

demo.launch()
120
+
121
# Seed a sample data file for the bot.
# NOTE(review): demo.launch() above blocks, so this only executes after the
# Gradio server shuts down — presumably intentional; confirm.
sample_text = (
    "Hello! This is my nino bot text file.\n"
    "You can use this as a data source."
)
with open("my_data.txt", "w", encoding="utf-8") as f:
    f.write(sample_text)