BryanBradfo committed on
Commit 1347a43 · 1 Parent(s): 875da6c

Sound added to the chat and a new user interface

Files changed (3)
  1. app.py +636 -110
  2. submit_sound.mp3 +0 -0
  3. typing_sound.mp3 +0 -0
app.py CHANGED
@@ -1,145 +1,671 @@
 
1
  from huggingface_hub import InferenceClient
2
  from typing import List, Tuple
3
  import gradio as gr
4
 
5
- # Ensure you have the required libraries installed
6
-
7
- # Chatbot configuration
8
  class ChatConfig:
 
9
  # MODEL = "google/gemma-3-4b-it"
10
- MODEL = "mistralai/Mistral-7B-Instruct-v0.1"
11
- DEFAULT_SYSTEM_MSG = "You are a extremely smart and useful Chatbot."
12
  DEFAULT_MAX_TOKENS = 1024
13
- DEFAULT_TEMP = 0.4
14
  DEFAULT_TOP_P = 0.95
15
 
16
- import os
17
  HF_TOKEN = os.getenv("HUGGING_FACE_HUB_TOKEN")
18
 
19
- # Not recommended to hardcode your token in the script for security reasons.
20
- # HF_TOKEN = "hf_YOUR_TOKEN_HERE"
21
-
22
  client = InferenceClient(ChatConfig.MODEL, token=HF_TOKEN)
23
 
 
24
  def generate_response(
25
  message: str,
26
- history: List[Tuple[str, str]],
27
- system_message: str = ChatConfig.DEFAULT_SYSTEM_MSG,
28
- max_tokens: int = ChatConfig.DEFAULT_MAX_TOKENS,
29
- temperature: float = ChatConfig.DEFAULT_TEMP,
30
- top_p: float = ChatConfig.DEFAULT_TOP_P
31
  ) -> str:
32
-
33
- messages = [{"role": "system", "content": system_message}]
34
-
35
- # Conversation history
36
- for user_msg, bot_msg in history:
37
- if user_msg:
38
- messages.append({"role": "user", "content": user_msg})
39
- if bot_msg:
40
- messages.append({"role": "assistant", "content": bot_msg})
41
-
42
- messages.append({"role": "user", "content": message})
43
-
44
- response = ""
45
-
46
- # Stream the response
47
- # The stream=True parameter allows for real-time response generation
48
- for chunk in client.chat_completion(
49
- messages,
50
- max_tokens=max_tokens,
51
- stream=True,
52
- temperature=temperature,
53
- top_p=top_p,
54
- ):
55
- token = chunk.choices[0].delta.content or ""
56
- response += token
57
- yield response
58
-
59
-
60
- def create_interface() -> gr.ChatInterface:
61
- """Create and configure the chat interface."""
62
- # Custom CSS for a modern look
63
- custom_css = """
64
- .chatbot .message {
65
- border-radius: 12px;
66
- margin: 5px;
67
- padding: 10px;
68
  }
69
- .chatbot .user-message {
70
- background-color: #e3f2fd;
71
  }
72
- .chatbot .bot-message {
73
- background-color: #f5f5f5;
74
  }
75
- .gr-button {
76
- border-radius: 8px;
77
- padding: 8px 16px;
78
  }
79
- """
80
-
81
- # Custom chatbot
82
- chatbot = gr.Chatbot(
83
- label="Gemma Chat",
84
- avatar_images=("./user.png", "./gemma.png"),
85
- height=450,
86
- show_copy_button=True
87
- )
88
 
89
- # Chat interface
90
- interface = gr.ChatInterface(
91
- fn=generate_response,
92
- chatbot=chatbot,
93
- title="Your Gemma 3-(iend)",
94
- theme=gr.themes.Soft(),
95
- css=custom_css,
96
- additional_inputs=[
97
- gr.Textbox(
98
  value=ChatConfig.DEFAULT_SYSTEM_MSG,
99
- label="System Prompt (You can change the text below)",
100
- lines=2,
101
- placeholder="Enter system message..."
102
- ),
103
- gr.Slider(
104
- minimum=1,
105
- maximum=8192,
106
- value=ChatConfig.DEFAULT_MAX_TOKENS,
107
- step=1,
108
- label="Max Tokens",
109
- info="Controls response length"
110
- ),
111
- gr.Slider(
112
- minimum=0.1,
113
- maximum=1.0,
114
- value=ChatConfig.DEFAULT_TEMP,
115
- step=0.1,
116
- label="Temperature",
117
- info="Controls randomness"
118
- ),
119
- gr.Slider(
120
- minimum=0.1,
121
- maximum=1.0,
122
- value=ChatConfig.DEFAULT_TOP_P,
123
- step=0.05,
124
- label="Top-P",
125
- info="Controls diversity"
126
  )
127
- ],
128
- additional_inputs_accordion=gr.Accordion(label="Advanced Settings", open=False)
129
- )
130
-
131
  return interface
132
 
 
133
  def main():
134
  app = create_interface()
135
  app.launch(
136
- server_name="0.0.0.0",
137
  server_port=7860,
138
- share=False,
139
  show_api=False,
140
  show_error=True,
141
- debug=True
142
  )
143
 
144
  if __name__ == "__main__":
145
  main()
 
1
+ import os
2
  from huggingface_hub import InferenceClient
3
  from typing import List, Tuple
4
  import gradio as gr
5
 
6
+ # --- Configuration ---
 
 
7
  class ChatConfig:
8
+ # Switch back to Gemma 3 if the 503 errors are resolved and you prefer it
9
  # MODEL = "google/gemma-3-4b-it"
10
+ MODEL = "mistralai/Mistral-7B-Instruct-v0.1" # Keep Mistral for stability if needed
11
+ DEFAULT_SYSTEM_MSG = "You are a highly intelligent and helpful AI assistant."
12
  DEFAULT_MAX_TOKENS = 1024
13
+ DEFAULT_TEMP = 0.6 # Slightly increased default temp for potentially more engaging responses
14
  DEFAULT_TOP_P = 0.95
15
 
16
+ # --- Hugging Face Client Initialization ---
17
  HF_TOKEN = os.getenv("HUGGING_FACE_HUB_TOKEN")
18
+ if not HF_TOKEN:
19
+ print("Warning: HUGGING_FACE_HUB_TOKEN environment variable not set. Inference might fail.")
20
+ # Optionally, raise an error or provide a default (not recommended for production)
21
+ # raise ValueError("Missing HUGGING_FACE_HUB_TOKEN for Inference API authentication")
22
 
23
+ # Initialize client, handle potential missing token gracefully if needed elsewhere
 
 
24
  client = InferenceClient(ChatConfig.MODEL, token=HF_TOKEN)
25
 
26
+ # --- Core Chat Logic ---
27
  def generate_response(
28
  message: str,
29
+ history_with_sys_prompt: list, # History now includes the system prompt
30
+ max_tokens: int,
31
+ temperature: float,
32
+ top_p: float
 
33
  ) -> str:
34
+ """Generates a streaming response using the Hugging Face Inference API."""
35
+
36
+ # The history_with_sys_prompt already contains the system message as the first element
37
+ messages = history_with_sys_prompt + [{"role": "user", "content": message}]
38
+
39
+ # Check if client initialization failed earlier due to missing token
40
+ if not client.token:
41
+ yield "Error: Hugging Face API token is missing. Please set the HUGGING_FACE_HUB_TOKEN secret."
42
+ return # Stop generation
43
+
44
+ try:
45
+ response_stream = client.chat_completion(
46
+ messages,
47
+ max_tokens=max_tokens,
48
+ stream=True,
49
+ temperature=temperature,
50
+ top_p=top_p,
51
+ )
52
+
53
+ response = ""
54
+ for chunk in response_stream:
55
+ token = chunk.choices[0].delta.content or ""
56
+ response += token
57
+ yield response # Yield the accumulating response
58
+
59
+ except Exception as e:
60
+ # Basic error handling - yield the error message to the UI
61
+ print(f"Error during API call: {e}") # Log the full error server-side
62
+ # Check for specific common errors (like auth or model loading)
63
+ if "authorization" in str(e).lower() or "401" in str(e):
64
+ yield f"Error: Authentication failed. Check your Hugging Face token. Details: {e}"
65
+ elif "service unavailable" in str(e).lower() or "503" in str(e):
66
+ yield f"Error: The model ({ChatConfig.MODEL}) seems busy or unavailable (503). Please try again later. Details: {e}"
67
+ else:
68
+ yield f"An error occurred: {e}"
69
+
70
+
71
+ # --- Gradio Interface Creation (Using gr.Blocks) ---
72
+
73
+ # 1. JavaScript for Sound Effects
74
+ # NOTE: Make sure 'typing_sound.mp3' and 'submit_sound.mp3' are in the root directory of your Space
75
+ js_code = """
76
+ () => {
77
+ // Ensure audio context is resumed on user interaction
78
+ let audioContextResumed = false;
79
+ function resumeAudioContext() {
80
+ if (!audioContextResumed) {
81
+ // Create a dummy AudioContext (or use existing if available)
82
+ // Playing a short silent sound on user interaction is a common workaround
83
+ // for browsers blocking autoplay audio.
84
+ const AudioContext = window.AudioContext || window.webkitAudioContext;
85
+ if (AudioContext) {
86
+ const dummyContext = new AudioContext();
87
+ const buffer = dummyContext.createBuffer(1, 1, 22050);
88
+ const source = dummyContext.createBufferSource();
89
+ source.buffer = buffer;
90
+ source.connect(dummyContext.destination);
91
+ source.start(0);
92
+ if (dummyContext.state === 'suspended') {
93
+ dummyContext.resume();
94
+ }
95
+ }
96
+ audioContextResumed = true;
97
+ // Remove the listener after the first interaction
98
+ document.removeEventListener('click', resumeAudioContext);
99
+ document.removeEventListener('keydown', resumeAudioContext);
100
+ console.log("Audio context potentially resumed by user interaction.");
101
  }
102
+ }
103
+ // Add listeners for the first user interaction
104
+ document.addEventListener('click', resumeAudioContext, { once: true });
105
+ document.addEventListener('keydown', resumeAudioContext, { once: true });
106
+
107
+
108
+ // Preload sounds
109
+ const submitSound = new Audio('file/submit_sound.mp3'); // Gradio serves files from '/file/' route
110
+ const typingSound = new Audio('file/typing_sound.mp3'); // Make sure these files exist!
111
+ submitSound.preload = 'auto';
112
+ typingSound.preload = 'auto';
113
+ typingSound.loop = true; // Loop typing sound if desired
114
+
115
+ let isTyping = false; // Flag to track typing sound status
116
+
117
+ // Function to play submit sound
118
+ function playSubmitSound() {
119
+ resumeAudioContext(); // Ensure context is active
120
+ submitSound.currentTime = 0; // Rewind to start
121
+ submitSound.play().catch(e => console.error("Error playing submit sound:", e));
122
+ console.log("Submit sound played");
123
+ }
124
+
125
+ // Function to start typing sound
126
+ function startTypingSound() {
127
+ if (!isTyping) {
128
+ resumeAudioContext(); // Ensure context is active
129
+ typingSound.currentTime = 0; // Rewind
130
+ typingSound.play().catch(e => console.error("Error playing typing sound:", e));
131
+ isTyping = true;
132
+ console.log("Typing sound started");
133
  }
134
+ }
135
+
136
+ // Function to stop typing sound
137
+ function stopTypingSound() {
138
+ if (isTyping) {
139
+ typingSound.pause();
140
+ typingSound.currentTime = 0; // Rewind
141
+ isTyping = false;
142
+ console.log("Typing sound stopped");
143
  }
144
+ }
145
+
146
+ // --- Event Listeners ---
147
+
148
+ // Use MutationObserver to detect when bot starts/stops responding
149
+ const observer = new MutationObserver((mutationsList, observer) => {
150
+ for(const mutation of mutationsList) {
151
+ if (mutation.type === 'childList' || mutation.type === 'characterData') {
152
+ // Check if a bot message is being added or actively updated
153
+ // This selector might need adjustment based on Gradio's internal structure.
154
+ // Inspect the chatbot HTML to confirm the classes for bot messages.
155
+ const botMessages = document.querySelectorAll('.chatbot .message-wrap.bot'); // More specific selector
156
+ if (botMessages.length > 0) {
157
+ const lastBotMessage = botMessages[botMessages.length - 1];
158
+ // Check if the last message is potentially being streamed into
159
+ // This is heuristic: check if it was recently added or text changed
160
+ // A better check might involve looking for specific gradio streaming classes if they exist
161
+ if (lastBotMessage.classList.contains('generating')) { // Gradio might add a class like this
162
+ startTypingSound();
163
+ } else {
164
+ // Simple check: if bot messages exist and the sound isn't playing, start it.
165
+ // This might start the sound even when just loading history initially.
166
+ // Refinement needed for perfect start/stop based on actual streaming state.
167
+ // A simple approach: Start sound when bot message appears/updates, stop when input is enabled again?
168
+ startTypingSound(); // Tentative start
169
+ }
170
+ }
171
+ // We need a reliable way to know when the *streaming* specifically stops.
172
+ // Often, the input textbox becomes enabled again. Let's try monitoring that.
173
+ const textbox = document.querySelector('#chat-input textarea'); // Find input textbox (check ID/class)
174
+ if (textbox && !textbox.disabled && isTyping) {
175
+ stopTypingSound();
176
+ }
177
+
178
+ }
179
  }
180
+ });
181
+
182
+ // Target the chatbot output area for observation
183
+ // Need to find a stable parent element containing the messages.
184
+ // Use a timeout to ensure the element exists after Gradio renders.
185
+ setTimeout(() => {
186
+ const chatArea = document.querySelector('.chatbot'); // Adjust selector if needed
187
+ if (chatArea) {
188
+ observer.observe(chatArea, { childList: true, subtree: true, characterData: true });
189
+ console.log("MutationObserver attached to chatbot area.");
190
+ } else {
191
+ console.error("Could not find chatbot area to observe.");
192
+ }
193
+
194
+ // Attach listener to the SUBMIT button
195
+ // Find the submit button - its selector might change. Inspect the element!
196
+ // Often it's a button next to the input textbox.
197
+ // Let's try finding it by looking for a button within the input row.
198
+ const inputRow = document.querySelector('#chat-input'); // Assuming input textbox container has this ID
199
+ if (inputRow) {
200
+ const submitButton = inputRow.querySelector('button'); // Find the first button within
201
+ if (submitButton) {
202
+ submitButton.addEventListener('click', () => {
203
+ console.log("Submit button clicked via JS.");
204
+ playSubmitSound();
205
+ // Also stop typing sound immediately on submit
206
+ stopTypingSound();
207
+ });
208
+ console.log("Submit button listener attached.");
209
+ } else {
210
+ console.error("Could not find the submit button within #chat-input.");
211
+ }
212
+ } else {
213
+ console.error("Could not find the chat input container #chat-input.");
214
+ }
215
+
216
+
217
+ }, 2000); // Wait 2 seconds for Gradio UI to likely be ready
218
+
219
+
220
+ // Monitor the input textbox enable/disable state as another way to stop sound
221
+ setInterval(() => {
222
+ const textbox = document.querySelector('#chat-input textarea');
223
+ if (textbox && textbox.disabled && !isTyping) {
224
+ // If textbox becomes disabled (likely bot is thinking/typing), start sound
225
+ // This might conflict with MutationObserver, needs careful testing
226
+ // startTypingSound(); // Let's rely on MutationObserver primarily for starting
227
+ } else if (textbox && !textbox.disabled && isTyping) {
228
+ // If textbox becomes enabled (bot finished), stop sound
229
+ stopTypingSound();
230
+ }
231
+ }, 500); // Check every 500ms
232
+
233
+
234
+ }
235
+ """
236
+
237
+ # 2. Custom CSS for Gemini/Gemma Theme
238
+ # (Adjust colors and gradients as desired)
239
+ css = """
240
+ :root {
241
+ /* Core Colors */
242
+ --color-background-dark: #131314; /* Very dark grey/black */
243
+ --color-background-secondary: #202124; /* Slightly lighter dark grey */
244
+ --color-text-light: #E8EAED; /* Light grey/off-white text */
245
+ --color-text-medium: #BDC1C6; /* Medium grey text */
246
+ --color-accent-blue: #89b4f8; /* Google's lighter blue */
247
+ --color-accent-blue-darker: #1a73e8; /* Google's darker blue */
248
+ --color-border: #5f6368; /* Border color */
249
+
250
+ /* Gradients */
251
+ --gradient-blue-white: linear-gradient(to right, var(--color-accent-blue), #ffffff);
252
+ --gradient-blue-dark: linear-gradient(to right, var(--color-accent-blue-darker), var(--color-accent-blue));
253
+
254
+ /* Component Specifics */
255
+ --input-background: var(--color-background-secondary);
256
+ --button-background: var(--color-accent-blue-darker);
257
+ --button-text-color: var(--color-background-dark);
258
+ --slider-track-color: var(--color-background-secondary);
259
+ --slider-thumb-color: var(--color-accent-blue);
260
+
261
+ /* Font */
262
+ --font-family: 'Roboto', sans-serif; /* Example: Using Roboto */
263
+ --font-family-mono: 'Roboto Mono', monospace;
264
+ }
265
+
266
+ /* --- Global Styles --- */
267
+ body, .gradio-container {
268
+ background-color: var(--color-background-dark) !important;
269
+ color: var(--color-text-light) !important;
270
+ font-family: var(--font-family) !important;
271
+ height: 100vh;
272
+ display: flex;
273
+ flex-direction: column;
274
+ }
275
+
276
+ /* Ensure container takes full height */
277
+ .gradio-container {
278
+ flex-grow: 1;
279
+ display: flex;
280
+ flex-direction: column;
281
+ }
282
+
283
+ /* Adjust main content area if needed */
284
+ .main, .contain {
285
+ flex-grow: 1;
286
+ display: flex;
287
+ flex-direction: column;
288
+ overflow: hidden; /* Prevent double scrollbars */
289
+ }
290
+
291
+
292
+ /* --- Titles & Labels --- */
293
+ .gr-panel > h1, .gr-panel > .label, .gr-box > .label, .gr-form > .label { /* Adjust selectors as needed */
294
+ color: var(--color-text-light) !important;
295
+ font-weight: bold;
296
+ margin-bottom: 10px;
297
+ }
298
+ .gr-panel > h1 { /* Main Title */
299
+ text-align: center;
300
+ font-size: 2em;
301
+ margin-bottom: 20px;
302
+ color: var(--color-accent-blue) !important; /* Blue title */
303
+ }
304
+
305
+ /* --- Chatbot Area --- */
306
+ .chatbot {
307
+ background-color: var(--color-background-secondary);
308
+ border-radius: 12px;
309
+ border: 1px solid var(--color-border);
310
+ flex-grow: 1; /* Make chatbot take available space */
311
+ overflow-y: auto; /* Ensure scrolling within chatbot */
312
+ display: flex;
313
+ flex-direction: column-reverse; /* New messages at the bottom */
314
+ padding: 10px;
315
+ margin-bottom: 15px; /* Space before input */
316
+ }
317
+
318
+ .message-wrap { /* Container for avatar + message */
319
+ display: flex;
320
+ margin-bottom: 10px;
321
+ width: 100%;
322
+ }
323
+
324
+ .message-wrap.user {
325
+ justify-content: flex-end; /* Align user messages to the right */
326
+ }
327
+ .message-wrap.bot {
328
+ justify-content: flex-start; /* Align bot messages to the left */
329
+ }
330
+
331
+ .message {
332
+ padding: 10px 15px;
333
+ border-radius: 18px;
334
+ max-width: 75%; /* Limit message width */
335
+ word-wrap: break-word;
336
+ box-shadow: 0 1px 2px rgba(0, 0, 0, 0.1);
337
+ }
338
 
339
+ /* User Message Bubble */
340
+ .message.user {
341
+ background-color: var(--color-accent-blue-darker); /* Dark blue background */
342
+ color: var(--color-text-light); /* Light text */
343
+ border-bottom-right-radius: 4px; /* Slightly shape the bubble */
344
+ }
345
+
346
+ /* Bot Message Bubble */
347
+ .message.bot {
348
+ background-color: #3c4043; /* Darker grey for bot */
349
+ color: var(--color-text-light);
350
+ border-bottom-left-radius: 4px;
351
+ }
352
+
353
+ /* Avatar Styling (Optional) */
354
+ .avatar-container img {
355
+ width: 35px;
356
+ height: 35px;
357
+ border-radius: 50%;
358
+ margin: 0 8px;
359
+ align-self: flex-end; /* Align avatar with bottom of message */
360
+ }
361
+ .message-wrap.user .avatar-container { order: 1; margin-left: 8px; } /* Avatar after message */
362
+ .message-wrap.bot .avatar-container { order: -1; margin-right: 8px; } /* Avatar before message */
363
+
364
+
365
+ /* --- Input Area --- */
366
+ #chat-input, .gr-form { /* Target input row/form */
367
+ background-color: var(--color-background-dark);
368
+ border-top: 1px solid var(--color-border);
369
+ padding: 10px 15px;
370
+ margin-top: auto; /* Push input to bottom if container allows */
371
+ }
372
+
373
+ textarea {
374
+ background-color: var(--input-background) !important;
375
+ color: var(--color-text-light) !important;
376
+ border: 1px solid var(--color-border) !important;
377
+ border-radius: 8px !important;
378
+ padding: 10px !important;
379
+ }
380
+ textarea:focus {
381
+ border-color: var(--color-accent-blue) !important;
382
+ box-shadow: 0 0 0 2px rgba(137, 180, 248, 0.3) !important; /* Subtle focus ring */
383
+ }
384
+
385
+ /* Submit Button */
386
+ .gr-button {
387
+ background: var(--gradient-blue-dark) !important; /* Blue Gradient */
388
+ color: white !important; /* White text on button */
389
+ border: none !important;
390
+ border-radius: 8px !important;
391
+ padding: 10px 20px !important;
392
+ font-weight: bold !important;
393
+ transition: filter 0.2s ease;
394
+ }
395
+ .gr-button:hover {
396
+ filter: brightness(1.1); /* Slightly brighter on hover */
397
+ }
398
+ .gr-button:disabled {
399
+ background: var(--color-border) !important;
400
+ cursor: not-allowed;
401
+ }
402
+
403
+
404
+ /* --- Advanced Settings Accordion --- */
405
+ .gr-accordion {
406
+ background-color: transparent !important; /* Make accordion background transparent */
407
+ border: 1px solid var(--color-border) !important;
408
+ border-radius: 8px !important;
409
+ margin-top: 15px;
410
+ }
411
+ .gr-accordion > .label, details > summary { /* Accordion Header */
412
+ color: var(--color-text-medium) !important;
413
+ padding: 10px 15px !important;
414
+ background-color: var(--color-background-secondary) !important;
415
+ border-radius: 8px 8px 0 0 !important; /* Round top corners */
416
+ border-bottom: 1px solid var(--color-border);
417
+ cursor: pointer;
418
+ }
419
+ details[open] > summary {
420
+ border-radius: 8px 8px 0 0 !important;
421
+ }
422
+ .gr-accordion .gr-form { /* Content inside accordion */
423
+ background-color: var(--color-background-secondary) !important;
424
+ border: none !important;
425
+ padding: 15px !important;
426
+ border-radius: 0 0 8px 8px !important; /* Round bottom corners */
427
+ }
428
+
429
+ /* Sliders */
430
+ .gr-slider {
431
+ background: transparent !important; /* Remove default slider background */
432
+ }
433
+
434
+ input[type="range"] {
435
+ -webkit-appearance: none; /* Override default look */
436
+ appearance: none;
437
+ width: 100%;
438
+ height: 8px;
439
+ background: var(--slider-track-color); /* Track color */
440
+ border-radius: 5px;
441
+ cursor: pointer;
442
+ }
443
+
444
+ /* Thumb (the draggable circle) */
445
+ input[type="range"]::-webkit-slider-thumb {
446
+ -webkit-appearance: none;
447
+ appearance: none;
448
+ width: 18px;
449
+ height: 18px;
450
+ background: var(--slider-thumb-color); /* Thumb color */
451
+ border-radius: 50%;
452
+ cursor: pointer;
453
+ }
454
+
455
+ input[type="range"]::-moz-range-thumb {
456
+ width: 18px;
457
+ height: 18px;
458
+ background: var(--slider-thumb-color);
459
+ border-radius: 50%;
460
+ cursor: pointer;
461
+ border: none; /* Remove default border in Firefox */
462
+ }
463
+
464
+ /* Slider Value/Label */
465
+ .gr-slider .number { /* Adjust selector if needed */
466
+ color: var(--color-text-medium) !important;
467
+ }
468
+ .gr-slider .label, .gr-slider .info {
469
+ color: var(--color-text-light) !important;
470
+ }
471
+
472
+ /* Textbox within Accordion */
473
+ .gr-accordion .gr-form textarea {
474
+ background-color: var(--color-background-dark) !important; /* Even darker input bg */
475
+ border-color: #444 !important;
476
+ }
477
+
478
+ /* --- Scrollbars (Optional, for Webkit browsers) --- */
479
+ ::-webkit-scrollbar {
480
+ width: 8px;
481
+ height: 8px;
482
+ }
483
+ ::-webkit-scrollbar-track {
484
+ background: var(--color-background-secondary);
485
+ border-radius: 4px;
486
+ }
487
+ ::-webkit-scrollbar-thumb {
488
+ background: var(--color-border);
489
+ border-radius: 4px;
490
+ }
491
+ ::-webkit-scrollbar-thumb:hover {
492
+ background: var(--color-text-medium);
493
+ }
494
+
495
+ /* Make interface take full height */
496
+ div[data-testid="block-label"] { /* This targets the container Gradio adds */
497
+ height: 100%;
498
+ display: flex;
499
+ flex-direction: column;
500
+ }
501
+ """
502
+
503
+ # 3. Build the UI with gr.Blocks
504
+ def create_interface() -> gr.Blocks:
505
+ """Creates the Gradio interface using gr.Blocks."""
506
+
507
+ with gr.Blocks(theme=gr.themes.Base(), # Use Base and rely on CSS overrides
508
+ css=css,
509
+ title="Gemma/Gemini Style Chat",
510
+ js=js_code) as interface:
511
+
512
+ gr.Markdown("# ✨ Gemini/Gemma Style Chat ✨", elem_classes="title") # Main title
513
+
514
+ # Store conversation history, including the initial system prompt
515
+ # The first element will always be the system prompt dict
516
+ # We use gr.State to manage history between interactions
517
+ initial_history = [{"role": "system", "content": ChatConfig.DEFAULT_SYSTEM_MSG}]
518
+ chat_history_state = gr.State(initial_history)
519
+
520
+ # Chatbot display - Use elem_id for easier CSS targeting if needed
521
+ chatbot_display = gr.Chatbot(
522
+ value=lambda hist: hist[1:], # Display history *without* the system prompt
523
+ label="Chat",
524
+ elem_id="chatbot",
525
+ elem_classes="chatbot", # CSS class
526
+ show_label=False,
527
+ avatar_images=("./user.png", "./gemma.png"), # Make sure these exist!
528
+ height=550, # Adjust height as needed
529
+ show_copy_button=True,
530
+ bubble_full_width=False # Bubbles don't take full width
531
+ )
532
+
533
+ # Input Textbox area
534
+ with gr.Row(elem_id="chat-input-row"): # Use elem_id for JS targeting
535
+ chat_input_box = gr.Textbox(
536
+ show_label=False,
537
+ placeholder="Type your message here...",
538
+ container=False, # Remove default container styling/padding
539
+ scale=7, # Take more horizontal space
540
+ elem_id="chat-input" # Use elem_id for JS targeting
541
+ )
542
+ submit_button = gr.Button("Send", scale=1, variant="primary") # Use variant for potential base styling
543
+
544
+ # Accordion for Advanced Settings
545
+ with gr.Accordion("Advanced Settings", open=False, elem_classes="gr-accordion"):
546
+ system_prompt_input = gr.Textbox(
547
  value=ChatConfig.DEFAULT_SYSTEM_MSG,
548
+ label="System Prompt",
549
+ lines=3, # More lines for system prompt
550
+ # elem_id="system-prompt" # Optional ID
551
  )
552
+ max_tokens_slider = gr.Slider(
553
+ minimum=256, maximum=8192, value=ChatConfig.DEFAULT_MAX_TOKENS,
554
+ step=128, label="Max New Tokens", info="Max length of the bot's response."
555
+ )
556
+ temperature_slider = gr.Slider(
557
+ minimum=0.1, maximum=1.5, value=ChatConfig.DEFAULT_TEMP, # Wider temp range
558
+ step=0.1, label="Temperature", info="Controls randomness (higher = more random)."
559
+ )
560
+ top_p_slider = gr.Slider(
561
+ minimum=0.1, maximum=1.0, value=ChatConfig.DEFAULT_TOP_P,
562
+ step=0.05, label="Top-P", info="Controls diversity via nucleus sampling."
563
+ )
564
+ clear_button = gr.Button("Clear Chat History")
565
+
566
+
567
+ # --- Event Handling ---
568
+
569
+ def handle_submit(message: str, history_state: list, max_tokens: int, temp: float, top_p: float):
570
+ """Called when user sends message or presses Enter."""
571
+ # Add user message to state (history includes system prompt)
572
+ updated_history = history_state + [{"role": "user", "content": message}]
573
+ # Immediately update chatbot display to show user message
574
+ # Yield history state and empty textbox
575
+ yield updated_history, gr.update(value="")
576
+
577
+ # Stream response
578
+ # Pass the updated history directly to the generator
579
+ bot_stream = generate_response(message, history_state, max_tokens, temp, top_p)
580
+
581
+ # Accumulate the full response while streaming to the UI
582
+ full_response = ""
583
+ for partial_response in bot_stream:
584
+ full_response = partial_response
585
+ # Update history state with the partial bot response for display
586
+ display_history = updated_history + [{"role": "assistant", "content": full_response}]
587
+ yield display_history, gr.update(value="") # Keep textbox empty
588
+
589
+ # Once streaming is done, update the *actual* history state with the final bot message
590
+ final_history = updated_history + [{"role": "assistant", "content": full_response}]
591
+ # Update the state, no need to update chatbot display again if streaming worked
592
+ yield final_history, gr.update(value="")
593
+
594
+
595
+ def update_history_and_chatbot(new_history_state):
596
+ """Helper to update both state and chatbot display."""
597
+ # Chatbot display needs history *without* the system prompt
598
+ return new_history_state, new_history_state[1:]
599
+
600
+ # Connect Submit Button click
601
+ submit_button.click(
602
+ fn=handle_submit,
603
+ inputs=[chat_input_box, chat_history_state, max_tokens_slider, temperature_slider, top_p_slider],
604
+ outputs=[chat_history_state, chat_input_box] # Update history state and clear textbox
605
+ ).then(
606
+ fn=lambda hist: hist[1:], # Get history without sys prompt for display
607
+ inputs=[chat_history_state],
608
+ outputs=[chatbot_display] # Update chatbot display
609
+ )
610
+
611
+ # Connect Textbox Enter key press
612
+ chat_input_box.submit(
613
+ fn=handle_submit,
614
+ inputs=[chat_input_box, chat_history_state, max_tokens_slider, temperature_slider, top_p_slider],
615
+ outputs=[chat_history_state, chat_input_box] # Update history state and clear textbox
616
+ ).then(
617
+ fn=lambda hist: hist[1:], # Get history without sys prompt for display
618
+ inputs=[chat_history_state],
619
+ outputs=[chatbot_display] # Update chatbot display
620
+ )
621
+
622
+ def update_system_prompt(new_prompt: str, history_state: list):
623
+ """Update the system prompt in the history state."""
624
+ if history_state and history_state[0]["role"] == "system":
625
+ history_state[0]["content"] = new_prompt
626
+ else: # Should not happen with initial_history, but safety check
627
+ history_state.insert(0, {"role": "system", "content": new_prompt})
628
+ # No UI update needed here, just update the state
629
+ return history_state
630
+
631
+ # Update system prompt in state when textbox changes
632
+ system_prompt_input.change(
633
+ fn=update_system_prompt,
634
+ inputs=[system_prompt_input, chat_history_state],
635
+ outputs=[chat_history_state] # Only update the state
636
+ )
637
+
638
+ def clear_chat(current_system_prompt: str):
639
+ """Clears chat history, keeping the current system prompt."""
640
+ new_history = [{"role": "system", "content": current_system_prompt}]
641
+ return new_history, [] # Return new state and empty list for chatbot display
642
+
643
+ # Connect Clear Button
644
+ clear_button.click(
645
+ fn=clear_chat,
646
+ inputs=[system_prompt_input], # Get the current system prompt from its input box
647
+ outputs=[chat_history_state, chatbot_display] # Update state and clear chatbot
648
+ )
649
+
650
  return interface
651
 
652
+ # --- Main Execution ---
653
  def main():
654
  app = create_interface()
655
  app.launch(
656
+ server_name="0.0.0.0", # Important for Docker/Spaces
657
  server_port=7860,
 
658
  show_api=False,
659
  show_error=True,
660
+ debug=True # Enable debug for easier troubleshooting
661
  )
662
 
663
  if __name__ == "__main__":
664
+ # Ensure avatar files exist or handle gracefully
665
+ if not os.path.exists("./user.png"):
666
+ print("Warning: ./user.png not found. Using default avatar.")
667
+ # You might want to download a default one here if needed
668
+ if not os.path.exists("./gemma.png"):
669
+ print("Warning: ./gemma.png not found. Using default avatar.")
670
+
671
  main()
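For reference, the streaming call that the new generate_response depends on can be exercised outside the Gradio UI. The sketch below is a minimal standalone check and is not part of the commit: it assumes HUGGING_FACE_HUB_TOKEN is set in the environment, mirrors ChatConfig.MODEL, and uses a purely illustrative prompt.

# Minimal sketch (not in the commit): verify the streaming chat_completion
# call that app.py's generate_response relies on.
import os
from huggingface_hub import InferenceClient

client = InferenceClient(
    "mistralai/Mistral-7B-Instruct-v0.1",  # mirrors ChatConfig.MODEL
    token=os.getenv("HUGGING_FACE_HUB_TOKEN"),
)
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Say hello in one short sentence."},
]
response = ""
for chunk in client.chat_completion(messages, max_tokens=64, stream=True):
    token = chunk.choices[0].delta.content or ""  # each streamed chunk carries a text delta
    response += token
    print(token, end="", flush=True)
print()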
submit_sound.mp3 ADDED
Binary file (33 kB).
 
typing_sound.mp3 ADDED
Binary file (49.2 kB).
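The JavaScript added in app.py fetches these two MP3s by URL, so the Space has to serve them as static files. As a hedged sketch (not part of the commit): recent Gradio releases let you whitelist local files via the allowed_paths argument of launch(), and the exact static-file route the browser should request (file/... vs file=...) differs between Gradio versions, so the URLs hard-coded in the commit's JS may need adjusting.

# Hedged sketch (not in the commit): whitelist the sound files so the browser can load them.
# Assumes a Gradio version whose launch() accepts allowed_paths, and that both
# MP3s sit next to app.py.
import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown("sound-enabled chat placeholder")

demo.launch(
    server_name="0.0.0.0",
    server_port=7860,
    allowed_paths=["submit_sound.mp3", "typing_sound.mp3"],
)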