kimhyunwoo commited on
Commit
ebcf47f
·
verified ·
1 Parent(s): 35c0ea2

Update index.html

Browse files
Files changed (1) hide show
  1. index.html +180 -67
index.html CHANGED
@@ -3,9 +3,9 @@
3
  <head>
4
  <meta charset="UTF-8">
5
  <meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no">
6
- <title>AI Assistant (Gemma 3 1B - v3 Attempt)</title>
7
  <style>
8
- /* CSS styles remain the same as the previous correct version */
9
  @import url('https://fonts.googleapis.com/css2?family=Roboto:wght@300;400;500;700&display=swap');
10
  :root {
11
  --primary-color: #007bff; --secondary-color: #6c757d; --text-color: #212529;
@@ -22,19 +22,9 @@
22
  }
23
  * { box-sizing: border-box; margin: 0; padding: 0; }
24
  html { height: 100%; }
25
- body {
26
- font-family: 'Roboto', sans-serif; display: flex; flex-direction: column; align-items: center; justify-content: flex-start;
27
- min-height: 100vh; background-color: var(--bg-color); color: var(--text-color); padding: 10px; overscroll-behavior: none;
28
- }
29
- #control-panel {
30
- background: var(--header-bg); padding: 15px; border-radius: 8px; margin-bottom: 10px;
31
- box-shadow: var(--header-shadow); width: 100%; max-width: 600px; border: 1px solid var(--border-color);
32
- text-align: center;
33
- }
34
- #loadModelButton {
35
- padding: 10px 20px; font-size: 1em; background-color: var(--primary-color);
36
- color: white; border: none; border-radius: 5px; cursor: pointer; transition: background-color 0.2s; margin-bottom: 10px;
37
- }
38
  #loadModelButton:hover:not(:disabled) { background-color: var(--button-hover-bg); }
39
  #loadModelButton:disabled { background-color: var(--button-disabled-bg); cursor: not-allowed; }
40
  #model-status { font-size: 0.9em; padding: 10px; border-radius: 4px; text-align: center; min-height: 40px; line-height: 1.4; }
@@ -42,12 +32,7 @@
42
  #model-status.loading { background-color: var(--warning-bg); border: 1px solid var(--warning-border); color: var(--warning-color); }
43
  #model-status.success { background-color: var(--success-bg); border: 1px solid var(--success-border); color: var(--success-color); }
44
  #model-status.error { background-color: var(--error-bg); border: 1px solid var(--error-border); color: var(--error-color); }
45
-
46
- #chat-container {
47
- width: 100%; max-width: 600px; height: 75vh; max-height: 700px; background-color: #ffffff;
48
- border-radius: 12px; box-shadow: var(--container-shadow); display: flex; flex-direction: column;
49
- overflow: hidden; border: 1px solid var(--border-color);
50
- }
51
  h1 { text-align: center; color: var(--primary-color); padding: 15px; background-color: var(--header-bg); border-bottom: 1px solid var(--border-color); font-size: 1.2em; font-weight: 500; flex-shrink: 0; box-shadow: var(--header-shadow); position: relative; z-index: 10; }
52
  #chatbox { flex-grow: 1; overflow-y: auto; padding: 15px; display: flex; flex-direction: column; gap: 12px; scrollbar-width: thin; scrollbar-color: var(--scrollbar-thumb) var(--scrollbar-track); background-color: var(--bg-color); }
53
  #chatbox::-webkit-scrollbar { width: 6px; } #chatbox::-webkit-scrollbar-track { background: var(--scrollbar-track); border-radius: 3px; } #chatbox::-webkit-scrollbar-thumb { background-color: var(--scrollbar-thumb); border-radius: 3px; }
@@ -66,9 +51,8 @@
66
  .control-button:active:not(:disabled) { transform: scale(0.95); }
67
  .control-button:disabled { background-color: var(--button-disabled-bg); cursor: not-allowed; transform: none; box-shadow: none; }
68
  #toggleSpeakerButton.muted { background-color: #aaa; }
69
- @media (max-width: 600px) { /* Responsive styles */
70
- body { padding: 5px; justify-content: flex-start; }
71
- #control-panel { margin-bottom: 5px; padding: 12px; }
72
  #chat-container { width: 100%; height: auto; flex-grow: 1; border-radius: 12px; max-height: none; margin-bottom: 5px; }
73
  h1 { font-size: 1.1em; padding: 12px; } #chatbox { padding: 12px 8px; gap: 10px; }
74
  #messages div { max-width: 90%; font-size: 0.95em; padding: 9px 14px;}
@@ -88,7 +72,7 @@
88
  <div id="control-panel">
89
  <h2>Model Loader</h2>
90
  <button id="loadModelButton">Load Gemma 3 1B Model (Q4)</button>
91
- <div id="model-status" class="info">Click button to load Gemma 3 1B (using Transformers.js v3+). Success depends on library support for 'gemma3_text'.</div>
92
  </div>
93
 
94
  <div id="chat-container">
@@ -99,7 +83,7 @@
99
  </div>
100
  </div>
101
  <div id="input-area">
102
- <textarea id="userInput" placeholder="Please load the model first..." rows="1" disabled></textarea>
103
  <button id="speechButton" class="control-button" title="Speak message" disabled>🎤</button>
104
  <button id="toggleSpeakerButton" class="control-button" title="Toggle AI speech output" disabled>🔊</button>
105
  <button id="sendButton" class="control-button" title="Send message" disabled>➀</button>
@@ -107,7 +91,6 @@
107
  </div>
108
 
109
  <script type="module">
110
- // Importing from the latest version specified in import map
111
  import { pipeline, env } from '@xenova/transformers';
112
 
113
  const MODEL_NAME = 'onnx-community/gemma-3-1b-it-ONNX-GQA';
@@ -120,7 +103,7 @@
120
  console.log('Using Execution Providers:', env.backends.onnx.executionProviders);
121
  env.backends.onnx.prefer_alternative_execution_providers = true;
122
 
123
- // DOM Elements (no changes)
124
  const chatbox = document.getElementById('messages');
125
  const userInput = document.getElementById('userInput');
126
  const sendButton = document.getElementById('sendButton');
@@ -130,15 +113,15 @@
130
  const modelStatus = document.getElementById('model-status');
131
  const loadModelButton = document.getElementById('loadModelButton');
132
 
133
- // State Management (no changes)
134
  let generator = null;
135
  let isLoadingModel = false;
136
- let conversationHistory = [];
137
  let botState = { botName: "AI Assistant", userName: "User", botSettings: { useSpeechOutput: true } };
138
- const stateKey = 'gemma3_1b_v3_state_v1';
139
- const historyKey = 'gemma3_1b_v3_history_v1';
140
 
141
- // Web Speech API (no changes)
142
  let recognition = null;
143
  let synthesis = window.speechSynthesis;
144
  let targetVoice = null;
@@ -146,78 +129,208 @@
146
 
147
  // --- Initialization ---
148
  window.addEventListener('load', () => {
149
- loadState();
150
  chatbotNameElement.textContent = botState.botName;
151
  updateSpeakerButtonUI();
152
  initializeSpeechAPI();
153
  setupInputAutosize();
154
- updateChatUIState(false);
155
- if (conversationHistory.length > 0) displayHistory();
156
  setTimeout(loadVoices, 500);
157
  loadModelButton.addEventListener('click', handleLoadModelClick);
158
- console.log("Attempting to use Transformers.js (latest version from CDN)");
159
- displayMessage('system', `Using latest Transformers.js from CDN. Attempting to load ${MODEL_NAME}.`, false);
160
  });
161
 
162
  // --- State Persistence ---
163
- function loadState() { /* No changes */
164
  const savedState = localStorage.getItem(stateKey); if (savedState) { try { const loaded = JSON.parse(savedState); botState = { ...botState, ...loaded, botSettings: { ...botState.botSettings, ...(loaded.botSettings || {}) } }; } catch(e) {} }
165
- const savedHistory = localStorage.getItem(historyKey); if (savedHistory) { try { conversationHistory = JSON.parse(savedHistory); } catch(e) { conversationHistory = []; } }
 
166
  }
167
- function saveState() { /* No changes */
168
  localStorage.setItem(stateKey, JSON.stringify(botState));
 
169
  localStorage.setItem(historyKey, JSON.stringify(conversationHistory));
170
  }
171
- function displayHistory() { /* No changes */
172
- chatbox.innerHTML = ''; conversationHistory.forEach(msg => displayMessage(msg.sender, msg.text, false));
 
 
 
 
 
 
 
173
  }
174
 
 
175
  // --- UI Update Functions ---
176
- function displayMessage(sender, text, animate = true, isError = false) { /* No changes */
177
- const messageDiv = document.createElement('div'); let messageClass = sender === 'user' ? 'user-message' : sender === 'bot' ? 'bot-message' : 'system-message'; if (sender === 'system' && isError) messageClass = 'error-message'; messageDiv.classList.add(messageClass); if (!animate) messageDiv.style.animation = 'none'; text = text.replace(/</g, "&lt;").replace(/>/g, "&gt;"); text = text.replace(/\[(.*?)\]\((.*?)\)/g, '<a href="$2" target="_blank" rel="noopener noreferrer">$1</a>'); text = text.replace(/\*\*(.*?)\*\*/g, '<strong>$1</strong>').replace(/\*(.*?)\*/g, '<em>$1</em>'); text = text.replace(/\n/g, '<br>'); messageDiv.innerHTML = text; chatbox.appendChild(messageDiv); chatbox.scrollTo({ top: chatbox.scrollHeight, behavior: animate ? 'smooth' : 'auto' });
 
 
 
 
 
178
  }
179
- function updateModelStatus(message, type = 'info') { /* No changes */
180
  modelStatus.textContent = message; modelStatus.className = 'model-status ' + type; console.log(`Model Status (${type}): ${message}`);
181
  }
182
- function updateChatUIState(isModelLoadedSuccessfully) { /* No changes */
183
- userInput.disabled = !isModelLoadedSuccessfully || isLoadingModel; sendButton.disabled = !isModelLoadedSuccessfully || isLoadingModel || userInput.value.trim() === ''; speechButton.disabled = !isModelLoadedSuccessfully || isLoadingModel || isListening || !recognition; toggleSpeakerButton.disabled = !isModelLoadedSuccessfully || isLoadingModel || !synthesis; loadModelButton.disabled = isLoadingModel || isModelLoadedSuccessfully; if (isModelLoadedSuccessfully) { userInput.placeholder = "How can I help you today?"; } else if (isLoadingModel) { userInput.placeholder = "Model loading..."; } else { userInput.placeholder = "Please load the model first..."; }
184
  }
185
- function updateSpeakerButtonUI() { /* No changes */
186
  toggleSpeakerButton.textContent = botState.botSettings.useSpeechOutput ? '🔊' : '🔇'; toggleSpeakerButton.title = botState.botSettings.useSpeechOutput ? 'Turn off AI speech' : 'Turn on AI speech'; toggleSpeakerButton.classList.toggle('muted', !botState.botSettings.useSpeechOutput);
187
  }
188
  function showSpeechStatus(message) { console.log("Speech Status:", message); }
189
  function setupInputAutosize() { userInput.addEventListener('input', () => { userInput.style.height = 'auto'; userInput.style.height = userInput.scrollHeight + 'px'; updateChatUIState(generator !== null); }); }
190
 
191
  // --- Model & AI Logic ---
192
- async function handleLoadModelClick() { /* No changes */
193
- if (isLoadingModel || generator) return; isLoadingModel = true; generator = null; updateChatUIState(false); await initializeModel(MODEL_NAME); isLoadingModel = false; updateChatUIState(generator !== null);
 
 
 
 
 
194
  }
195
- async function initializeModel(modelId) { /* No changes */
196
- updateModelStatus(`Loading ${modelId} with { dtype: "${QUANTIZATION}" } using latest Transformers.js...`, 'loading'); displayMessage('system', `Attempting to load ${modelId} using latest library version.`, false);
 
 
197
  try {
198
- generator = await pipeline(TASK, modelId, { dtype: QUANTIZATION, progress_callback: (progress) => { const msg = `[Loading: ${progress.status}] ${progress.file ? progress.file.split('/').pop() : ''} (${Math.round(progress.progress || 0)}%)`; updateModelStatus(msg, 'loading'); } });
199
- updateModelStatus(`${modelId} loaded successfully with latest library!`, 'success'); displayMessage('system', `[SUCCESS] ${modelId} loaded. The newer library version might support it.`, false);
 
 
 
 
 
 
 
 
 
200
  } catch (error) {
201
- console.error(`Model loading failed for ${modelId} (with latest library):`, error); let errorMsg = `Failed to load ${modelId}: ${error.message}.`; if (error.message.includes("Unsupported model type") || error.message.includes("gemma3_text")) { errorMsg += " The 'gemma3_text' type is still likely unsupported even in the latest library version, or the ONNX conversion has issues."; } else if (error.message.includes("split")) { errorMsg += " A TypeError occurred, possibly related to model config parsing incompatibility."; } else { errorMsg += " Check console for details. Memory limits on HF Spaces could also be a factor."; } updateModelStatus(errorMsg, 'error'); displayMessage('system', `[ERROR] ${errorMsg}`, true, true); generator = null;
 
 
 
 
 
 
 
 
 
 
 
 
202
  }
203
  }
204
- function buildPrompt() { /* No changes */
205
- const historyLimit = 5; const recentHistory = conversationHistory.slice(-historyLimit); let prompt = "<start_of_turn>system\nYou are 'AI Assistant', a helpful AI assistant. Answer the user's questions clearly and concisely in English.\n<end_of_turn>\n"; recentHistory.forEach(msg => { const role = msg.sender === 'user' ? 'user' : 'model'; prompt += `<start_of_turn>${role}\n${msg.text}\n<end_of_turn>\n`; }); prompt += "<start_of_turn>model\n"; console.log("Generated Prompt:", prompt); return prompt;
206
- }
207
- function cleanupResponse(responseText, prompt) { /* No changes */
208
- let cleaned = responseText; if (cleaned.startsWith(prompt)) { cleaned = cleaned.substring(prompt.length); } else { cleaned = cleaned.replace(/^model\n?/, '').trim(); } cleaned = cleaned.replace(/<end_of_turn>/g, '').trim(); cleaned = cleaned.replace(/<start_of_turn>/g, '').trim(); cleaned = cleaned.replace(/^['"]/, '').replace(/['"]$/, ''); if (!cleaned || cleaned.length < 2) { const fallbacks = [ "Sorry, I didn't quite understand.", "Could you please rephrase that?", "I'm not sure how to respond." ]; return fallbacks[Math.floor(Math.random() * fallbacks.length)]; } return cleaned;
209
- }
210
- async function handleUserMessage() { /* No changes */
211
- const userText = userInput.value.trim(); if (!userText || !generator || isLoadingModel) return; userInput.value = ''; userInput.style.height = 'auto'; updateChatUIState(true); displayMessage('user', userText); conversationHistory.push({ sender: 'user', text: userText }); updateModelStatus("AI thinking...", "loading"); const prompt = buildPrompt(); try { const outputs = await generator(prompt, { max_new_tokens: 300, temperature: 0.7, repetition_penalty: 1.1, top_k: 50, top_p: 0.9, do_sample: true }); const rawResponse = Array.isArray(outputs) ? outputs[0].generated_text : outputs.generated_text; const replyText = cleanupResponse(rawResponse, prompt); displayMessage('bot', replyText); conversationHistory.push({ sender: 'bot', text: replyText }); if (botState.botSettings.useSpeechOutput && synthesis && targetVoice) { speakText(replyText); } saveState(); } catch (error) { console.error("AI response generation error:", error); displayMessage('system', `[ERROR] Failed to generate response: ${error.message}`, true, true); const errorReply = "Sorry, I encountered an error generating the response."; displayMessage('bot', errorReply); conversationHistory.push({ sender: 'bot', text: errorReply }); } finally { if(generator) updateModelStatus(`${MODEL_NAME} ready.`, "success"); updateChatUIState(generator !== null); userInput.focus(); }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
212
  }
213
 
214
  // --- Speech API Functions ---
215
  function initializeSpeechAPI() { /* No changes */
216
  const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition; if (SpeechRecognition) { recognition = new SpeechRecognition(); recognition.lang = 'en-US'; recognition.continuous = false; recognition.interimResults = false; recognition.onstart = () => { isListening = true; updateChatUIState(generator !== null); console.log('Listening...'); }; recognition.onresult = (event) => { userInput.value = event.results[0][0].transcript; userInput.dispatchEvent(new Event('input')); handleUserMessage(); }; recognition.onerror = (event) => { console.error("Speech error:", event.error); updateModelStatus(`Speech recognition error (${event.error})`, 'error'); setTimeout(() => updateModelStatus(generator ? `${MODEL_NAME} ready.` : 'Model not loaded.', generator ? 'success' : 'error'), 3000); }; recognition.onend = () => { isListening = false; updateChatUIState(generator !== null); console.log('Stopped listening.'); }; } else { console.warn("Speech Recognition not supported."); } if (!synthesis) { console.warn("Speech Synthesis not supported."); } else { toggleSpeakerButton.addEventListener('click', () => { botState.botSettings.useSpeechOutput = !botState.botSettings.useSpeechOutput; updateSpeakerButtonUI(); saveState(); if (!botState.botSettings.useSpeechOutput) synthesis.cancel(); }); } updateChatUIState(false);
217
  }
218
- function loadVoices() { /* No changes */ if (!synthesis) return; let voices = synthesis.getVoices(); if (voices.length === 0) { synthesis.onvoiceschanged = () => { voices = synthesis.getVoices(); findAndSetVoice(voices); }; } else { findAndSetVoice(voices); } }
219
- function findAndSetVoice(voices) { /* No changes */ targetVoice = voices.find(v => v.lang === 'en-US') || voices.find(v => v.lang.startsWith('en-')); if (targetVoice) { console.log("Using English voice:", targetVoice.name, targetVoice.lang); } else { console.warn("No suitable English voice found."); } }
220
- function speakText(text) { /* No changes */ if (!synthesis || !botState.botSettings.useSpeechOutput || !targetVoice) return; synthesis.cancel(); const utterance = new SpeechSynthesisUtterance(text); utterance.voice = targetVoice; utterance.lang = targetVoice.lang; utterance.rate = 1.0; utterance.pitch = 1.0; synthesis.speak(utterance); }
221
 
222
  // --- Event Listeners ---
223
  sendButton.addEventListener('click', handleUserMessage);
 
3
  <head>
4
  <meta charset="UTF-8">
5
  <meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no">
6
+ <title>AI Assistant (Gemma 3 1B - Doc Example Attempt)</title>
7
  <style>
8
+ /* CSS는 이전과 동일 */
9
  @import url('https://fonts.googleapis.com/css2?family=Roboto:wght@300;400;500;700&display=swap');
10
  :root {
11
  --primary-color: #007bff; --secondary-color: #6c757d; --text-color: #212529;
 
22
  }
23
  * { box-sizing: border-box; margin: 0; padding: 0; }
24
  html { height: 100%; }
25
+ body { font-family: 'Roboto', sans-serif; display: flex; flex-direction: column; align-items: center; justify-content: flex-start; min-height: 100vh; background-color: var(--bg-color); color: var(--text-color); padding: 10px; overscroll-behavior: none; }
26
+ #control-panel { background: var(--header-bg); padding: 15px; border-radius: 8px; margin-bottom: 10px; box-shadow: var(--header-shadow); width: 100%; max-width: 600px; border: 1px solid var(--border-color); text-align: center; }
27
+ #loadModelButton { padding: 10px 20px; font-size: 1em; background-color: var(--primary-color); color: white; border: none; border-radius: 5px; cursor: pointer; transition: background-color 0.2s; margin-bottom: 10px; }
 
 
 
 
 
 
 
 
 
 
28
  #loadModelButton:hover:not(:disabled) { background-color: var(--button-hover-bg); }
29
  #loadModelButton:disabled { background-color: var(--button-disabled-bg); cursor: not-allowed; }
30
  #model-status { font-size: 0.9em; padding: 10px; border-radius: 4px; text-align: center; min-height: 40px; line-height: 1.4; }
 
32
  #model-status.loading { background-color: var(--warning-bg); border: 1px solid var(--warning-border); color: var(--warning-color); }
33
  #model-status.success { background-color: var(--success-bg); border: 1px solid var(--success-border); color: var(--success-color); }
34
  #model-status.error { background-color: var(--error-bg); border: 1px solid var(--error-border); color: var(--error-color); }
35
+ #chat-container { width: 100%; max-width: 600px; height: 75vh; max-height: 700px; background-color: #ffffff; border-radius: 12px; box-shadow: var(--container-shadow); display: flex; flex-direction: column; overflow: hidden; border: 1px solid var(--border-color); }
 
 
 
 
 
36
  h1 { text-align: center; color: var(--primary-color); padding: 15px; background-color: var(--header-bg); border-bottom: 1px solid var(--border-color); font-size: 1.2em; font-weight: 500; flex-shrink: 0; box-shadow: var(--header-shadow); position: relative; z-index: 10; }
37
  #chatbox { flex-grow: 1; overflow-y: auto; padding: 15px; display: flex; flex-direction: column; gap: 12px; scrollbar-width: thin; scrollbar-color: var(--scrollbar-thumb) var(--scrollbar-track); background-color: var(--bg-color); }
38
  #chatbox::-webkit-scrollbar { width: 6px; } #chatbox::-webkit-scrollbar-track { background: var(--scrollbar-track); border-radius: 3px; } #chatbox::-webkit-scrollbar-thumb { background-color: var(--scrollbar-thumb); border-radius: 3px; }
 
51
  .control-button:active:not(:disabled) { transform: scale(0.95); }
52
  .control-button:disabled { background-color: var(--button-disabled-bg); cursor: not-allowed; transform: none; box-shadow: none; }
53
  #toggleSpeakerButton.muted { background-color: #aaa; }
54
+ @media (max-width: 600px) { /* Responsive styles */
55
+ body { padding: 5px; justify-content: flex-start; } #control-panel { margin-bottom: 5px; padding: 12px; }
 
56
  #chat-container { width: 100%; height: auto; flex-grow: 1; border-radius: 12px; max-height: none; margin-bottom: 5px; }
57
  h1 { font-size: 1.1em; padding: 12px; } #chatbox { padding: 12px 8px; gap: 10px; }
58
  #messages div { max-width: 90%; font-size: 0.95em; padding: 9px 14px;}
 
72
  <div id="control-panel">
73
  <h2>Model Loader</h2>
74
  <button id="loadModelButton">Load Gemma 3 1B Model (Q4)</button>
75
+ <div id="model-status" class="info">Click button to load Gemma 3 1B (using Transformers.js v3+ & Q4 dtype). **Warning:** Model loading is expected to fail due to library incompatibility.</div>
76
  </div>
77
 
78
  <div id="chat-container">
 
83
  </div>
84
  </div>
85
  <div id="input-area">
86
+ <textarea id="userInput" placeholder="Please attempt to load the model first..." rows="1" disabled></textarea>
87
  <button id="speechButton" class="control-button" title="Speak message" disabled>🎤</button>
88
  <button id="toggleSpeakerButton" class="control-button" title="Toggle AI speech output" disabled>🔊</button>
89
  <button id="sendButton" class="control-button" title="Send message" disabled>➀</button>
 
91
  </div>
92
 
93
  <script type="module">
 
94
  import { pipeline, env } from '@xenova/transformers';
95
 
96
  const MODEL_NAME = 'onnx-community/gemma-3-1b-it-ONNX-GQA';
 
103
  console.log('Using Execution Providers:', env.backends.onnx.executionProviders);
104
  env.backends.onnx.prefer_alternative_execution_providers = true;
105
 
106
+ // DOM Elements
107
  const chatbox = document.getElementById('messages');
108
  const userInput = document.getElementById('userInput');
109
  const sendButton = document.getElementById('sendButton');
 
113
  const modelStatus = document.getElementById('model-status');
114
  const loadModelButton = document.getElementById('loadModelButton');
115
 
116
+ // State
117
  let generator = null;
118
  let isLoadingModel = false;
119
+ let conversationHistory = []; // Will store messages in { role: '...', content: '...' } format
120
  let botState = { botName: "AI Assistant", userName: "User", botSettings: { useSpeechOutput: true } };
121
+ const stateKey = 'gemma3_1b_doc_state_v1'; // New key
122
+ const historyKey = 'gemma3_1b_doc_history_v1';
123
 
124
+ // Speech API
125
  let recognition = null;
126
  let synthesis = window.speechSynthesis;
127
  let targetVoice = null;
 
129
 
130
  // --- Initialization ---
131
  window.addEventListener('load', () => {
132
+ loadState(); // Load state, including potentially saved history
133
  chatbotNameElement.textContent = botState.botName;
134
  updateSpeakerButtonUI();
135
  initializeSpeechAPI();
136
  setupInputAutosize();
137
+ updateChatUIState(false); // Initial UI state: disabled
138
+ displayHistory(); // Display saved history after elements are ready
139
  setTimeout(loadVoices, 500);
140
  loadModelButton.addEventListener('click', handleLoadModelClick);
141
+ console.log("Attempting to use Transformers.js (latest version from CDN)");
142
+ displayMessage('system', `Using latest Transformers.js from CDN. Ready to attempt loading ${MODEL_NAME}.`, false);
143
  });
144
 
145
  // --- State Persistence ---
146
+ function loadState() {
147
  const savedState = localStorage.getItem(stateKey); if (savedState) { try { const loaded = JSON.parse(savedState); botState = { ...botState, ...loaded, botSettings: { ...botState.botSettings, ...(loaded.botSettings || {}) } }; } catch(e) {} }
148
+ // Load history - it should already be in the correct [{role:'...', content:'...'}, ...] format if saved previously
149
+ const savedHistory = localStorage.getItem(historyKey); if (savedHistory) { try { conversationHistory = JSON.parse(savedHistory); if (!Array.isArray(conversationHistory)) conversationHistory = []; } catch(e) { conversationHistory = []; } }
150
  }
151
+ function saveState() {
152
  localStorage.setItem(stateKey, JSON.stringify(botState));
153
+ // Ensure history is saved in the messages format
154
  localStorage.setItem(historyKey, JSON.stringify(conversationHistory));
155
  }
156
+ function displayHistory() {
157
+ chatbox.innerHTML = '';
158
+ // Display history assuming it's in messages format
159
+ conversationHistory.forEach(msg => {
160
+ // Only display user and model messages visually
161
+ if (msg.role === 'user' || msg.role === 'model') {
162
+ displayMessage(msg.role === 'user' ? 'user' : 'bot', msg.content, false);
163
+ }
164
+ });
165
  }
166
 
167
+
168
  // --- UI Update Functions ---
169
+ function displayMessage(sender, text, animate = true, isError = false) {
170
+ const messageDiv = document.createElement('div');
171
+ let messageClass = sender === 'user' ? 'user-message' : sender === 'bot' ? 'bot-message' : 'system-message';
172
+ if (sender === 'system' && isError) messageClass = 'error-message';
173
+ messageDiv.classList.add(messageClass); if (!animate) messageDiv.style.animation = 'none';
174
+ text = text.replace(/</g, "<").replace(/>/g, ">"); text = text.replace(/\[(.*?)\]\((.*?)\)/g, '<a href="$2" target="_blank" rel="noopener noreferrer">$1</a>'); text = text.replace(/\*\*(.*?)\*\*/g, '<strong>$1</strong>').replace(/\*(.*?)\*/g, '<em>$1</em>'); text = text.replace(/\n/g, '<br>');
175
+ messageDiv.innerHTML = text; chatbox.appendChild(messageDiv); chatbox.scrollTo({ top: chatbox.scrollHeight, behavior: animate ? 'smooth' : 'auto' });
176
  }
177
+ function updateModelStatus(message, type = 'info') {
178
  modelStatus.textContent = message; modelStatus.className = 'model-status ' + type; console.log(`Model Status (${type}): ${message}`);
179
  }
180
+ function updateChatUIState(isModelLoadedSuccessfully) {
181
+ userInput.disabled = !isModelLoadedSuccessfully || isLoadingModel; sendButton.disabled = !isModelLoadedSuccessfully || isLoadingModel || userInput.value.trim() === ''; speechButton.disabled = !isModelLoadedSuccessfully || isLoadingModel || isListening || !recognition; toggleSpeakerButton.disabled = !isModelLoadedSuccessfully || isLoadingModel || !synthesis; loadModelButton.disabled = isLoadingModel || isModelLoadedSuccessfully; if (isModelLoadedSuccessfully) { userInput.placeholder = "How can I help you today?"; } else if (isLoadingModel) { userInput.placeholder = "Model loading..."; } else { userInput.placeholder = "Please attempt to load the model first..."; }
182
  }
183
+ function updateSpeakerButtonUI() { /* No change */
184
  toggleSpeakerButton.textContent = botState.botSettings.useSpeechOutput ? '🔊' : '🔇'; toggleSpeakerButton.title = botState.botSettings.useSpeechOutput ? 'Turn off AI speech' : 'Turn on AI speech'; toggleSpeakerButton.classList.toggle('muted', !botState.botSettings.useSpeechOutput);
185
  }
186
  function showSpeechStatus(message) { console.log("Speech Status:", message); }
187
  function setupInputAutosize() { userInput.addEventListener('input', () => { userInput.style.height = 'auto'; userInput.style.height = userInput.scrollHeight + 'px'; updateChatUIState(generator !== null); }); }
188
 
189
  // --- Model & AI Logic ---
190
+ async function handleLoadModelClick() {
191
+ if (isLoadingModel || generator) return;
192
+ isLoadingModel = true; generator = null;
193
+ updateChatUIState(false);
194
+ await initializeModel(MODEL_NAME);
195
+ isLoadingModel = false;
196
+ updateChatUIState(generator !== null);
197
  }
198
+
199
+ async function initializeModel(modelId) {
200
+ updateModelStatus(`Loading ${modelId} with { dtype: "${QUANTIZATION}" } using latest library...`, 'loading');
201
+ displayMessage('system', `Attempting to load ${modelId} using latest library & Q4 dtype (as per docs)...`, false);
202
  try {
203
+ // Use pipeline exactly as in the documentation example
204
+ generator = await pipeline(TASK, modelId, {
205
+ dtype: QUANTIZATION,
206
+ progress_callback: (progress) => {
207
+ const msg = `[Loading: ${progress.status}] ${progress.file ? progress.file.split('/').pop() : ''} (${Math.round(progress.progress || 0)}%)`;
208
+ updateModelStatus(msg, 'loading');
209
+ }
210
+ });
211
+ updateModelStatus(`${modelId} loaded successfully! (Unexpected?)`, 'success');
212
+ displayMessage('system', `[SUCCESS] ${modelId} loaded. If this worked, the library/model compatibility might have changed.`, false);
213
+
214
  } catch (error) {
215
+ console.error(`Model loading failed for ${modelId} (with latest library, Q4):`, error);
216
+ let errorMsg = `Failed to load ${modelId}: ${error.message}.`;
217
+ // Provide specific feedback based on the likely errors
218
+ if (error.message.includes("Unsupported model type") || error.message.includes("gemma3_text")) {
219
+ errorMsg += " The 'gemma3_text' model type is likely still unsupported by this library version.";
220
+ } else if (error.message.includes("split is not a function")) {
221
+ errorMsg += " A TypeError occurred, possibly due to config parsing issues (incompatibility).";
222
+ } else {
223
+ errorMsg += " Check console for details. Memory limits or network issues could also be factors.";
224
+ }
225
+ updateModelStatus(errorMsg, 'error');
226
+ displayMessage('system', `[ERROR] ${errorMsg}`, true, true);
227
+ generator = null;
228
  }
229
  }
230
+
231
+ // Builds the messages array for the pipeline
232
+ function buildMessages(newUserMessage) {
233
+ // Start with a system prompt if desired (optional but good practice)
234
+ let messages = [{ role: "system", content: "You are a helpful assistant." }];
235
+
236
+ // Add history, ensuring it alternates user/model roles correctly
237
+ // Note: conversationHistory already stores {role, content}
238
+ messages = messages.concat(conversationHistory);
239
+
240
+ // Add the new user message
241
+ messages.push({ role: "user", content: newUserMessage });
242
+
243
+ console.log("Input Messages:", messages);
244
+ return messages;
245
+ }
246
+
247
// Cleans the response from the generator output.
// The transformers.js text-generation pipeline (chat mode) returns
// output[0].generated_text as an array of {role, content} messages whose
// final entry is the assistant reply. Falls back to a canned apology when
// that structure cannot be parsed.
function cleanupResponse(output) {
    try {
        const history = output?.[0]?.generated_text;
        if (Array.isArray(history)) {
            const reply = history.at(-1); // last message object
            if (reply?.role === 'assistant' && reply.content) {
                // Strip Gemma turn-delimiter artifacts that may leak through.
                const text = reply.content
                    .trim()
                    .replace(/<end_of_turn>/g, '')
                    .trim()
                    .replace(/<start_of_turn>/g, '') // just in case
                    .trim();
                if (text.length > 0) return text;
            }
        }
    } catch (e) {
        console.error("Error parsing generator output structure:", e, "Output:", output);
    }

    // Expected structure missing or unparsable: warn and return a fallback.
    console.warn("Could not extract response using standard messages structure. Using fallback.");
    const fallbacks = [ "Sorry, I had trouble formatting my response.", "My response might be incomplete.", "Something went wrong displaying the answer." ];
    return fallbacks[Math.floor(Math.random() * fallbacks.length)];
}
271
+
272
+
273
// --- Main Interaction Logic ---
// Reads the user's input, runs the text-generation pipeline on the full
// conversation, then displays, optionally speaks, and persists the
// assistant's reply. No-op while the model is missing or still loading.
async function handleUserMessage() {
    const userText = userInput.value.trim();
    if (!userText || !generator || isLoadingModel) return;

    userInput.value = ''; userInput.style.height = 'auto';
    updateChatUIState(true); // Disable send button during processing

    // Show the user's message immediately.
    displayMessage('user', userText);

    // Build the model input BEFORE recording the turn in history, so the new
    // user message appears exactly once (bug fix: pushing into history first
    // and then letting buildMessages append it again duplicated the turn).
    const messages = buildMessages(userText);
    conversationHistory.push({ role: 'user', content: userText });

    updateModelStatus("AI thinking...", "loading");

    try {
        // Call generator with the chat-style messages array.
        const outputs = await generator(messages, {
            max_new_tokens: 300,
            temperature: 0.7,
            top_k: 50,
            do_sample: true, // Sample for more varied responses
        });

        const replyText = cleanupResponse(outputs); // Extract the assistant text
        console.log("Cleaned AI Output:", replyText);

        // Add AI response to UI *and* history.
        displayMessage('bot', replyText);
        conversationHistory.push({ role: 'assistant', content: replyText });

        if (botState.botSettings.useSpeechOutput && synthesis && targetVoice) {
            speakText(replyText);
        }
        saveState(); // Persist updated history
    } catch (error) {
        console.error("AI response generation error:", error);
        displayMessage('system', `[ERROR] Failed to generate response: ${error.message}`, true, true);
        // NOTE(review): the failed user turn remains in history; consider
        // conversationHistory.pop() here if retries should start clean.
    } finally {
        if (generator) updateModelStatus(`${MODEL_NAME} ready.`, "success");
        updateChatUIState(generator !== null);
        userInput.focus();
    }
}
326
 
327
  // --- Speech API Functions ---
328
// Wires up the Web Speech APIs: speech recognition (voice input) and
// speech synthesis (spoken replies). Leaves the chat UI disabled until
// a model has been loaded.
function initializeSpeechAPI() {
    const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;

    if (!SpeechRecognition) {
        console.warn("Speech Recognition not supported.");
    } else {
        recognition = new SpeechRecognition();
        recognition.lang = 'en-US';
        recognition.continuous = false;
        recognition.interimResults = false;

        recognition.onstart = () => {
            isListening = true;
            updateChatUIState(generator !== null);
            console.log('Listening...');
        };
        recognition.onresult = (event) => {
            // Copy the transcript into the input box and submit it.
            userInput.value = event.results[0][0].transcript;
            userInput.dispatchEvent(new Event('input'));
            handleUserMessage();
        };
        recognition.onerror = (event) => {
            console.error("Speech error:", event.error);
            updateModelStatus(`Speech recognition error (${event.error})`, 'error');
            // Restore the normal status line after a short delay.
            setTimeout(() => updateModelStatus(generator ? `${MODEL_NAME} ready.` : 'Model not loaded.', generator ? 'success' : 'error'), 3000);
        };
        recognition.onend = () => {
            isListening = false;
            updateChatUIState(generator !== null);
            console.log('Stopped listening.');
        };
    }

    if (!synthesis) {
        console.warn("Speech Synthesis not supported.");
    } else {
        toggleSpeakerButton.addEventListener('click', () => {
            botState.botSettings.useSpeechOutput = !botState.botSettings.useSpeechOutput;
            updateSpeakerButtonUI();
            saveState();
            if (!botState.botSettings.useSpeechOutput) synthesis.cancel();
        });
    }

    updateChatUIState(false);
}
331
// Fetches the available speech-synthesis voices and selects an English one.
// Voice lists often populate asynchronously, so when the first query comes
// back empty we wait for the `voiceschanged` event instead.
function loadVoices() {
    if (!synthesis) return;
    const initialVoices = synthesis.getVoices();
    if (initialVoices.length > 0) {
        findAndSetVoice(initialVoices);
        return;
    }
    synthesis.onvoiceschanged = () => {
        findAndSetVoice(synthesis.getVoices());
    };
}
332
// Selects an English voice for speech output and stores it in targetVoice
// (undefined when no English voice exists). Prefers en-US, otherwise any
// English variant. Language tags are normalized first because some
// platforms (e.g. Android Chrome) report them with underscores ("en_US")
// instead of BCP-47 hyphens, which the previous exact/startsWith('en-')
// checks silently missed.
function findAndSetVoice(voices) {
    const normalize = (lang) => (lang || '').replace('_', '-').toLowerCase();
    targetVoice = voices.find(v => normalize(v.lang) === 'en-us')
        || voices.find(v => normalize(v.lang).startsWith('en'));
    if (targetVoice) {
        console.log("Using English voice:", targetVoice.name, targetVoice.lang);
    } else {
        console.warn("No suitable English voice found.");
    }
}
333
// Speaks `text` aloud via the Web Speech API using the selected voice.
// No-op when synthesis is unavailable, speech output is disabled, or no
// voice has been chosen; any in-progress utterance is cancelled first.
function speakText(text) {
    const speechEnabled = botState.botSettings.useSpeechOutput;
    if (!synthesis || !speechEnabled || !targetVoice) return;

    synthesis.cancel(); // stop anything already being spoken

    const utt = new SpeechSynthesisUtterance(text);
    utt.voice = targetVoice;
    utt.lang = targetVoice.lang;
    utt.rate = 1.0;
    utt.pitch = 1.0;
    synthesis.speak(utt);
}
334
 
335
  // --- Event Listeners ---
336
  sendButton.addEventListener('click', handleUserMessage);