zahraa12355 committed on
Commit
a8656f2
Β·
verified Β·
1 Parent(s): b755e33

Delete app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -143
app.py DELETED
@@ -1,143 +0,0 @@
1
# -*- coding: utf-8 -*-
"""nino bot

Automatically generated by Colab.

Original file is located at
https://colab.research.google.com/drive/1UgXple_p_R-0mq9p5vhOmFPo9cgdayJy
"""

import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

print("Starting app.py")
print("Imports successful")

# Load the conversational model exactly once at startup.
# NOTE(review): the Colab export imported gradio/transformers twice and
# loaded the tokenizer/model twice (the second copy without .eval());
# it also launched an empty `gr.Blocks()` demo before the real UI at the
# bottom of the file, which would block/serve a blank app. All of that
# duplication is removed here — the single real UI is built at file end.
model_name = "microsoft/DialoGPT-small"
print(f"Loading model {model_name}...")

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
model.eval()  # inference only — disables dropout etc.

print("Model loaded successfully")
43
-
44
# Canned replies for greetings and small talk. `respond` checks the
# lower-cased, whitespace-stripped user input against these keys before
# falling back to the language model.
rules = {
    "hi": "Hello! How can I help you today?",
    "hello": "Hi there! How can I assist you?",
    "hey": "Hey! What can I do for you?",
    "how are you": "I'm just a bot, but I'm doing great! 😊 How about you?",
    "good morning": "Good morning! Hope you have a wonderful day!",
    "good afternoon": "Good afternoon! How can I help you?",
    "good evening": "Good evening! What can I do for you?",
    "bye": "Goodbye! Have a nice day! 👋",
    "thank you": "You're welcome! 😊",
    "thanks": "No problem! Happy to help!",
    "what is your name": "I'm your friendly chatbot assistant.",
    "help": "Sure! Ask me anything or type 'bye' to exit.",
    "what can you do": "I can answer simple questions and chat with you. Try saying hi!",
    "tell me a joke": "Why did the computer show up at work late? It had a hard drive!",
    "what time is it": "Sorry, I don't have a clock yet. But you can check your device's time!",
    "where are you from": "I'm from the cloud, here to assist you anytime!",
    "what is ai": "AI stands for Artificial Intelligence, which is intelligence demonstrated by machines.",
    "who created you": "I was created by a talented developer using Python and machine learning!",
    "how can i learn programming": "Start with basics like Python. There are many free tutorials online to get you started!",
    "ok": "ok",
    "who are you?": "I am nino",
    "hi nino": "hi there",
}
68
-
69
def respond(user_input, history):
    """Produce a bot reply for *user_input*.

    The (lower-cased, stripped) input is looked up in the ``rules`` table
    first; otherwise a DialoGPT prompt is built from the running history
    and a completion is sampled from the model.

    Args:
        user_input: raw text typed by the user.
        history: list of (user_msg, bot_msg) tuples, or None for a fresh chat.

    Returns:
        (history, history) — the same updated list twice, because the
        Gradio Chatbot widget and the State component are both fed from it.
    """
    if history is None:
        history = []

    user_input_clean = user_input.lower().strip()

    if user_input_clean in rules:
        bot_reply = rules[user_input_clean]
    else:
        # Rebuild the conversation as an eos-token-separated prompt,
        # skipping any turn whose bot half is missing.
        prompt = ""
        for user_msg, bot_msg in history:
            if bot_msg is not None:
                prompt += f"{user_msg} {tokenizer.eos_token}\n{bot_msg} {tokenizer.eos_token}\n"
            else:
                prompt += f"{user_msg} {tokenizer.eos_token}\n"
        prompt += f"{user_input} {tokenizer.eos_token}\n"

        inputs = tokenizer(prompt, return_tensors="pt")
        outputs = model.generate(
            **inputs,
            max_new_tokens=100,
            pad_token_id=tokenizer.eos_token_id,
            do_sample=True,
            temperature=0.7,
            top_p=0.9,
        )

        # BUG FIX: the original sliced the decoded full text with
        # generated_text[len(prompt):], but skip_special_tokens=True strips
        # the eos markers that len(prompt) counts, so the slice cut into
        # (or past) the reply. Decoding only the newly generated token ids
        # is exact regardless of special-token handling.
        new_token_ids = outputs[0][inputs["input_ids"].shape[-1]:]
        bot_reply = tokenizer.decode(new_token_ids, skip_special_tokens=True).strip()

        # Fall back to a stock reply on empty or junk generations. This
        # check applies only to model output — rule replies such as "ok"
        # are intentionally short and must pass through untouched.
        if len(bot_reply) < 5 or bot_reply.lower() in ["", "idk", "i don't know", "huh"]:
            bot_reply = "I'm not sure how to respond to that. Can you rephrase it?"

    # Record the turn and return the list twice (Chatbot + State outputs).
    history.append((user_input, bot_reply))
    return history, history
113
-
114
def save_chat(history):
    """Persist the whole conversation to chat_history.txt.

    Overwrites any previous dump. A ``None`` history means there is
    nothing to save; a turn whose bot half is ``None`` is written with a
    "(No response)" placeholder.
    """
    if history is None:
        return
    chunks = []
    for user_msg, bot_msg in history:
        reply = "(No response)" if bot_msg is None else bot_msg
        chunks.append(f"You: {user_msg}\nBot: {reply}\n\n")
    with open("chat_history.txt", "w", encoding="utf-8") as f:
        f.write("".join(chunks))
124
-
125
-
126
def process_input(user_input, history):
    """Handle one textbox submit end-to-end.

    Generates a reply via ``respond``, persists the transcript, and
    returns ``(new_history, "", new_history)`` — chat display content,
    an empty string that clears the textbox, and the new session state.
    """
    new_history, _ = respond(user_input, history)
    save_chat(new_history)  # best-effort persistence after every turn
    return new_history, "", new_history
135
-
136
# Assemble the Gradio chat UI: a chat display, a text input, and
# per-session conversation state, wired together through process_input.
with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox(placeholder="Type your message here...")
    state = gr.State([])

    # Pressing Enter sends the message, refreshes the chat log, clears
    # the textbox, and stores the updated history back into the state.
    msg.submit(process_input, inputs=[msg, state], outputs=[chatbot, msg, state])

demo.launch()
- demo.launch()