Update index.html
index.html CHANGED (+107 -107)

--- index.html (old)
@@ -3,11 +3,11 @@
  3   <head>
  4   <meta charset="UTF-8">
  5   <meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no">
  6 - <title>AI Assistant (Gemma 3 1B -
  7   <style>
  8 - /* CSS
  9   @import url('https://fonts.googleapis.com/css2?family=Roboto:wght@300;400;500;700&display=swap');
 10 - :root {
 11   --primary-color: #007bff; --secondary-color: #6c757d; --text-color: #212529;
 12   --bg-color: #f8f9fa; --user-msg-bg: #e7f5ff; --user-msg-text: #004085;
 13   --bot-msg-bg: #ffffff; --bot-msg-border: #dee2e6; --system-msg-color: #6c757d;
@@ -60,6 +60,11 @@
 60   .control-button { width: 40px; height: 40px; font-size: 1.2em; }
 61   }
 62   </style>
 63   <script type="importmap">
 64   {
 65   "imports": {
@@ -91,19 +96,24 @@
 91   </div>
 92
 93   <script type="module">
 94   import { pipeline, env } from '@xenova/transformers';
 95
 96 -
 97   const TASK = 'text-generation';
 98 - const QUANTIZATION = 'q4';
 99
100 -
101 -
102 - env.
103   console.log('Using Execution Providers:', env.backends.onnx.executionProviders);
104 - env.backends.onnx.prefer_alternative_execution_providers = true;
105
106 - // DOM Elements
107   const chatbox = document.getElementById('messages');
108   const userInput = document.getElementById('userInput');
109   const sendButton = document.getElementById('sendButton');
@@ -113,15 +123,15 @@
113   const modelStatus = document.getElementById('model-status');
114   const loadModelButton = document.getElementById('loadModelButton');
115
116 - // State
117 - let generator = null;
118 - let isLoadingModel = false;
119 - let conversationHistory = []; //
120   let botState = { botName: "AI Assistant", userName: "User", botSettings: { useSpeechOutput: true } };
121 - const stateKey = '
122 - const historyKey = '
123
124 - // Speech API
125   let recognition = null;
126   let synthesis = window.speechSynthesis;
127   let targetVoice = null;
@@ -129,50 +139,35 @@
129
130   // --- Initialization ---
131   window.addEventListener('load', () => {
132 - loadState();
133   chatbotNameElement.textContent = botState.botName;
134   updateSpeakerButtonUI();
135   initializeSpeechAPI();
136   setupInputAutosize();
137 - updateChatUIState(false); //
138 - displayHistory();
139   setTimeout(loadVoices, 500);
140 - loadModelButton.addEventListener('click', handleLoadModelClick);
141 - console.log("Attempting to use Transformers.js
142 - displayMessage('system', `Using
143   });
144
145   // --- State Persistence ---
146 -
147   const savedState = localStorage.getItem(stateKey); if (savedState) { try { const loaded = JSON.parse(savedState); botState = { ...botState, ...loaded, botSettings: { ...botState.botSettings, ...(loaded.botSettings || {}) } }; } catch(e) {} }
148 - // Load history - it should already be in the correct [{role:'...', content:'...'}, ...] format if saved previously
149   const savedHistory = localStorage.getItem(historyKey); if (savedHistory) { try { conversationHistory = JSON.parse(savedHistory); if (!Array.isArray(conversationHistory)) conversationHistory = []; } catch(e) { conversationHistory = []; } }
150   }
151   function saveState() {
152   localStorage.setItem(stateKey, JSON.stringify(botState));
153 - // Ensure history is saved in the messages format
154   localStorage.setItem(historyKey, JSON.stringify(conversationHistory));
155   }
156   function displayHistory() {
157 - chatbox.innerHTML = '';
158 - // Display history assuming it's in messages format
159 - conversationHistory.forEach(msg => {
160 - // Only display user and model messages visually
161 - if (msg.role === 'user' || msg.role === 'model') {
162 - displayMessage(msg.role === 'user' ? 'user' : 'bot', msg.content, false);
163 - }
164 - });
165   }
166
167 -
168   // --- UI Update Functions ---
169   function displayMessage(sender, text, animate = true, isError = false) {
170 - const messageDiv = document.createElement('div');
171 - let messageClass = sender === 'user' ? 'user-message' : sender === 'bot' ? 'bot-message' : 'system-message';
172 - if (sender === 'system' && isError) messageClass = 'error-message';
173 - messageDiv.classList.add(messageClass); if (!animate) messageDiv.style.animation = 'none';
174 - text = text.replace(/</g, "&lt;").replace(/>/g, "&gt;"); text = text.replace(/\[(.*?)\]\((.*?)\)/g, '<a href="$2" target="_blank" rel="noopener noreferrer">$1</a>'); text = text.replace(/\*\*(.*?)\*\*/g, '<strong>$1</strong>').replace(/\*(.*?)\*/g, '<em>$1</em>'); text = text.replace(/\n/g, '<br>');
175 - messageDiv.innerHTML = text; chatbox.appendChild(messageDiv); chatbox.scrollTo({ top: chatbox.scrollHeight, behavior: animate ? 'smooth' : 'auto' });
176   }
177   function updateModelStatus(message, type = 'info') {
178   modelStatus.textContent = message; modelStatus.className = 'model-status ' + type; console.log(`Model Status (${type}): ${message}`);
@@ -180,7 +175,7 @@
180   function updateChatUIState(isModelLoadedSuccessfully) {
181   userInput.disabled = !isModelLoadedSuccessfully || isLoadingModel; sendButton.disabled = !isModelLoadedSuccessfully || isLoadingModel || userInput.value.trim() === ''; speechButton.disabled = !isModelLoadedSuccessfully || isLoadingModel || isListening || !recognition; toggleSpeakerButton.disabled = !isModelLoadedSuccessfully || isLoadingModel || !synthesis; loadModelButton.disabled = isLoadingModel || isModelLoadedSuccessfully; if (isModelLoadedSuccessfully) { userInput.placeholder = "How can I help you today?"; } else if (isLoadingModel) { userInput.placeholder = "Model loading..."; } else { userInput.placeholder = "Please attempt to load the model first..."; }
182   }
183 - function updateSpeakerButtonUI() {
184   toggleSpeakerButton.textContent = botState.botSettings.useSpeechOutput ? '🔊' : '🔇'; toggleSpeakerButton.title = botState.botSettings.useSpeechOutput ? 'Turn off AI speech' : 'Turn on AI speech'; toggleSpeakerButton.classList.toggle('muted', !botState.botSettings.useSpeechOutput);
185   }
186   function showSpeechStatus(message) { console.log("Speech Status:", message); }
@@ -189,148 +184,153 @@
189   // --- Model & AI Logic ---
190   async function handleLoadModelClick() {
191   if (isLoadingModel || generator) return;
192 - isLoadingModel = true; generator = null;
193   updateChatUIState(false);
194 - await initializeModel(MODEL_NAME);
195   isLoadingModel = false;
196 - updateChatUIState(generator !== null);
197   }
198
199   async function initializeModel(modelId) {
200 - updateModelStatus(`Loading ${modelId} with { dtype: "${QUANTIZATION}" }
201 - displayMessage('system', `Attempting to load ${modelId} using
202   try {
203 - //
204   generator = await pipeline(TASK, modelId, {
205 - dtype: QUANTIZATION,
206   progress_callback: (progress) => {
207   const msg = `[Loading: ${progress.status}] ${progress.file ? progress.file.split('/').pop() : ''} (${Math.round(progress.progress || 0)}%)`;
208   updateModelStatus(msg, 'loading');
209   }
210   });
211 -
212 -
213
214   } catch (error) {
215 -
216   let errorMsg = `Failed to load ${modelId}: ${error.message}.`;
217 - // Provide specific feedback based on the likely errors
218   if (error.message.includes("Unsupported model type") || error.message.includes("gemma3_text")) {
219 - errorMsg += " The 'gemma3_text' model type is
220   } else if (error.message.includes("split is not a function")) {
221 - errorMsg += "
222   } else {
223 - errorMsg += "
224   }
225   updateModelStatus(errorMsg, 'error');
226   displayMessage('system', `[ERROR] ${errorMsg}`, true, true);
227 - generator = null;
228   }
229   }
230
231 - //
232   function buildMessages(newUserMessage) {
233 - // Start with
234   let messages = [{ role: "system", content: "You are a helpful assistant." }];
235 -
236 - // Add history, ensuring it alternates user/model roles correctly
237 - // Note: conversationHistory already stores {role, content}
238   messages = messages.concat(conversationHistory);
239 -
240 - // Add the new user message
241   messages.push({ role: "user", content: newUserMessage });
242 -
243   console.log("Input Messages:", messages);
244   return messages;
245   }
246
247 - //
248   function cleanupResponse(output) {
249 -
250 -
251 -
252 -
253 -
254 -
255 -
256 -
257 -
258 -
259 -
260 -
261 -
262 -
263 -
264 -
265
266 - // Fallback if the expected structure isn't found or parsing fails
267 - console.warn("Could not extract response using standard messages structure. Using fallback.");
268 - const fallbacks = [ "Sorry, I had trouble formatting my response.", "My response might be incomplete.", "Something went wrong displaying the answer." ];
269 - return fallbacks[Math.floor(Math.random() * fallbacks.length)];
270 - }
271
272
273   // --- Main Interaction Logic ---
274   async function handleUserMessage() {
275   const userText = userInput.value.trim();
276   if (!userText || !generator || isLoadingModel) return;
277
278   userInput.value = ''; userInput.style.height = 'auto';
279 - updateChatUIState(true); // Disable
280
281 - // Add user message to UI
282   displayMessage('user', userText);
283   conversationHistory.push({ role: 'user', content: userText });
284
285   updateModelStatus("AI thinking...", "loading");
286
287 - //
288 - const messages = buildMessages(userText); // No need to pass userText separately
289
290   try {
291 - // Call generator with
292   const outputs = await generator(messages, {
293   max_new_tokens: 300,
294   temperature: 0.7,
295 - // repetition_penalty: 1.1, // May not be supported by all models/tasks in message format
296   top_k: 50,
297 - //
298 -
299 - // streamer: streamer // Streaming could be added later if needed
300   });
301
302 - const replyText = cleanupResponse(outputs); // Process the output
303
304   console.log("Cleaned AI Output:", replyText);
305
306 - // Add AI response to UI
307   displayMessage('bot', replyText);
308 -
309
310   if (botState.botSettings.useSpeechOutput && synthesis && targetVoice) {
311   speakText(replyText);
312   }
313 - saveState();
314
315   } catch (error) {
316   console.error("AI response generation error:", error);
317   displayMessage('system', `[ERROR] Failed to generate response: ${error.message}`, true, true);
318 -
319 - // conversationHistory.pop();
320   } finally {
321   if(generator) updateModelStatus(`${MODEL_NAME} ready.`, "success");
322 - updateChatUIState(generator !== null);
323   userInput.focus();
324   }
325   }
326
327   // --- Speech API Functions ---
328 - function initializeSpeechAPI() { /* No changes */
329   const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition; if (SpeechRecognition) { recognition = new SpeechRecognition(); recognition.lang = 'en-US'; recognition.continuous = false; recognition.interimResults = false; recognition.onstart = () => { isListening = true; updateChatUIState(generator !== null); console.log('Listening...'); }; recognition.onresult = (event) => { userInput.value = event.results[0][0].transcript; userInput.dispatchEvent(new Event('input')); handleUserMessage(); }; recognition.onerror = (event) => { console.error("Speech error:", event.error); updateModelStatus(`Speech recognition error (${event.error})`, 'error'); setTimeout(() => updateModelStatus(generator ? `${MODEL_NAME} ready.` : 'Model not loaded.', generator ? 'success' : 'error'), 3000); }; recognition.onend = () => { isListening = false; updateChatUIState(generator !== null); console.log('Stopped listening.'); }; } else { console.warn("Speech Recognition not supported."); } if (!synthesis) { console.warn("Speech Synthesis not supported."); } else { toggleSpeakerButton.addEventListener('click', () => { botState.botSettings.useSpeechOutput = !botState.botSettings.useSpeechOutput; updateSpeakerButtonUI(); saveState(); if (!botState.botSettings.useSpeechOutput) synthesis.cancel(); }); } updateChatUIState(false);
330   }
331 -
332 -
333 -
334
335   // --- Event Listeners ---
336   sendButton.addEventListener('click', handleUserMessage);

+++ index.html (new)
  3   <head>
  4   <meta charset="UTF-8">
  5   <meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no">
  6 + <title>AI Assistant (Gemma 3 1B - Strict Load Attempt)</title>
  7   <style>
  8 + /* CSS styles remain the same as the previous valid version */
  9   @import url('https://fonts.googleapis.com/css2?family=Roboto:wght@300;400;500;700&display=swap');
 10 + :root { /* Using the neutral blue theme */
 11   --primary-color: #007bff; --secondary-color: #6c757d; --text-color: #212529;
 12   --bg-color: #f8f9fa; --user-msg-bg: #e7f5ff; --user-msg-text: #004085;
 13   --bot-msg-bg: #ffffff; --bot-msg-border: #dee2e6; --system-msg-color: #6c757d;
 60   .control-button { width: 40px; height: 40px; font-size: 1.2em; }
 61   }
 62   </style>
 63 + <!--
 64 +   Import Map: Specifies how to resolve the '@xenova/transformers' module specifier.
 65 +   We load the latest stable version directly from the jsDelivr CDN.
 66 +   This is a standard way to load ES modules in modern browsers without a build step.
 67 + -->
 68   <script type="importmap">
 69   {
 70   "imports": {
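For readers who have not used import maps before, the block above resolves the bare specifier to a CDN URL. A minimal, purely illustrative example is shown below; the actual entries and pinned version used by this commit are cut off in this capture, so treat the URL as an assumption:

    <script type="importmap">
    {
      "imports": {
        "@xenova/transformers": "https://cdn.jsdelivr.net/npm/@xenova/transformers@2.17.2"
      }
    }
    </script>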
 96   </div>
 97
 98   <script type="module">
 99 + // Import necessary functions from the loaded library.
100 + // This relies on the import map defined above.
101   import { pipeline, env } from '@xenova/transformers';
102
103 + // --- Configuration ---
104 + const MODEL_NAME = 'onnx-community/gemma-3-1b-it-ONNX-GQA'; // The specific model requested
105   const TASK = 'text-generation';
106 + const QUANTIZATION = 'q4'; // As specified in the model card example
107
108 + // --- Environment Setup ---
109 + // Basic setup for Transformers.js environment
110 + env.allowLocalModels = false; // Only load from Hub/CDN
111 + env.useBrowserCache = true; // Cache models in the browser
112 + env.backends.onnx.executionProviders = ['webgpu', 'wasm']; // Prioritize WebGPU
113   console.log('Using Execution Providers:', env.backends.onnx.executionProviders);
114 + env.backends.onnx.prefer_alternative_execution_providers = true; // Try WebGPU first if available
115
116 + // --- DOM Elements ---
117   const chatbox = document.getElementById('messages');
118   const userInput = document.getElementById('userInput');
119   const sendButton = document.getElementById('sendButton');
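Taken together, MODEL_NAME, TASK, QUANTIZATION and the env flags above feed the load-and-generate flow implemented further down in the file. The condensed sketch below restates that flow using only calls that appear in this diff; the dtype option and the messages-style generator call come from the commit itself and are not verified against any particular library release:

    import { pipeline, env } from '@xenova/transformers';

    env.allowLocalModels = false;   // resolve models from the Hub/CDN only
    env.useBrowserCache = true;     // cache downloaded weights in the browser

    // Create the text-generation pipeline with the requested quantization.
    const generator = await pipeline('text-generation', 'onnx-community/gemma-3-1b-it-ONNX-GQA', {
      dtype: 'q4',
      progress_callback: (p) => console.log(p.status, p.file ?? '', p.progress ?? 0),
    });

    // Chat-style call: the pipeline receives an array of { role, content } messages.
    const messages = [
      { role: 'system', content: 'You are a helpful assistant.' },
      { role: 'user', content: 'Hello!' },
    ];
    const outputs = await generator(messages, { max_new_tokens: 300, temperature: 0.7, top_k: 50 });
    console.log(outputs[0].generated_text.at(-1)?.content);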
123   const modelStatus = document.getElementById('model-status');
124   const loadModelButton = document.getElementById('loadModelButton');
125
126 + // --- State Management ---
127 + let generator = null; // Holds the loaded pipeline if successful
128 + let isLoadingModel = false; // Flag to prevent concurrent loading attempts
129 + let conversationHistory = []; // Stores chat history in messages format
130   let botState = { botName: "AI Assistant", userName: "User", botSettings: { useSpeechOutput: true } };
131 + const stateKey = 'gemma3_1b_strict_state_v1'; // Unique key for this version
132 + const historyKey = 'gemma3_1b_strict_history_v1';
133
134 + // --- Web Speech API ---
135   let recognition = null;
136   let synthesis = window.speechSynthesis;
137   let targetVoice = null;
139
140   // --- Initialization ---
141   window.addEventListener('load', () => {
142 + loadState();
143   chatbotNameElement.textContent = botState.botName;
144   updateSpeakerButtonUI();
145   initializeSpeechAPI();
146   setupInputAutosize();
147 + updateChatUIState(false); // Initially, UI is disabled
148 + displayHistory();
149   setTimeout(loadVoices, 500);
150 + loadModelButton.addEventListener('click', handleLoadModelClick); // Attach button listener
151 + console.log("Attempting to use Transformers.js library loaded via import map.");
152 + displayMessage('system', `Using Transformers.js (latest). Ready to attempt loading ${MODEL_NAME}.`, false);
153   });
154
155   // --- State Persistence ---
156 + function loadState() {
157   const savedState = localStorage.getItem(stateKey); if (savedState) { try { const loaded = JSON.parse(savedState); botState = { ...botState, ...loaded, botSettings: { ...botState.botSettings, ...(loaded.botSettings || {}) } }; } catch(e) {} }
158   const savedHistory = localStorage.getItem(historyKey); if (savedHistory) { try { conversationHistory = JSON.parse(savedHistory); if (!Array.isArray(conversationHistory)) conversationHistory = []; } catch(e) { conversationHistory = []; } }
159   }
160   function saveState() {
161   localStorage.setItem(stateKey, JSON.stringify(botState));
162   localStorage.setItem(historyKey, JSON.stringify(conversationHistory));
163   }
164   function displayHistory() {
165 + chatbox.innerHTML = ''; conversationHistory.forEach(msg => { if (msg.role === 'user' || msg.role === 'assistant') { displayMessage(msg.role === 'user' ? 'user' : 'bot', msg.content, false); } });
166   }
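For reference, the history that saveState() persists to localStorage is plain JSON in the same { role, content } shape the pipeline consumes; an illustrative (made-up) snapshot:

    [
      { "role": "user", "content": "What is WebGPU?" },
      { "role": "assistant", "content": "WebGPU is a browser API for GPU compute and graphics." }
    ]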
167
168   // --- UI Update Functions ---
169   function displayMessage(sender, text, animate = true, isError = false) {
170 + const messageDiv = document.createElement('div'); let messageClass = sender === 'user' ? 'user-message' : sender === 'bot' ? 'bot-message' : 'system-message'; if (sender === 'system' && isError) messageClass = 'error-message'; messageDiv.classList.add(messageClass); if (!animate) messageDiv.style.animation = 'none'; text = text.replace(/</g, "&lt;").replace(/>/g, "&gt;"); text = text.replace(/\[(.*?)\]\((.*?)\)/g, '<a href="$2" target="_blank" rel="noopener noreferrer">$1</a>'); text = text.replace(/\*\*(.*?)\*\*/g, '<strong>$1</strong>').replace(/\*(.*?)\*/g, '<em>$1</em>'); text = text.replace(/\n/g, '<br>'); messageDiv.innerHTML = text; chatbox.appendChild(messageDiv); chatbox.scrollTo({ top: chatbox.scrollHeight, behavior: animate ? 'smooth' : 'auto' });
171   }
172   function updateModelStatus(message, type = 'info') {
173   modelStatus.textContent = message; modelStatus.className = 'model-status ' + type; console.log(`Model Status (${type}): ${message}`);
175   function updateChatUIState(isModelLoadedSuccessfully) {
176   userInput.disabled = !isModelLoadedSuccessfully || isLoadingModel; sendButton.disabled = !isModelLoadedSuccessfully || isLoadingModel || userInput.value.trim() === ''; speechButton.disabled = !isModelLoadedSuccessfully || isLoadingModel || isListening || !recognition; toggleSpeakerButton.disabled = !isModelLoadedSuccessfully || isLoadingModel || !synthesis; loadModelButton.disabled = isLoadingModel || isModelLoadedSuccessfully; if (isModelLoadedSuccessfully) { userInput.placeholder = "How can I help you today?"; } else if (isLoadingModel) { userInput.placeholder = "Model loading..."; } else { userInput.placeholder = "Please attempt to load the model first..."; }
177   }
178 + function updateSpeakerButtonUI() {
179   toggleSpeakerButton.textContent = botState.botSettings.useSpeechOutput ? '🔊' : '🔇'; toggleSpeakerButton.title = botState.botSettings.useSpeechOutput ? 'Turn off AI speech' : 'Turn on AI speech'; toggleSpeakerButton.classList.toggle('muted', !botState.botSettings.useSpeechOutput);
180   }
181   function showSpeechStatus(message) { console.log("Speech Status:", message); }
184   // --- Model & AI Logic ---
185   async function handleLoadModelClick() {
186   if (isLoadingModel || generator) return;
187 + isLoadingModel = true; generator = null; // Reset state
188   updateChatUIState(false);
189 + await initializeModel(MODEL_NAME); // Attempt to load
190   isLoadingModel = false;
191 + updateChatUIState(generator !== null); // Update UI based on outcome
192   }
193
194 + // Initialize model exactly as per documentation example for this model
195   async function initializeModel(modelId) {
196 + updateModelStatus(`Loading ${modelId} with { dtype: "${QUANTIZATION}" }... (Strict doc example)`, 'loading');
197 + displayMessage('system', `Attempting to load ${modelId} using documented method (dtype: ${QUANTIZATION})...`, false);
198 +
199   try {
200 + // Directly use the pipeline function as shown in the model card
201   generator = await pipeline(TASK, modelId, {
202 + dtype: QUANTIZATION, // Explicitly use q4
203   progress_callback: (progress) => {
204   const msg = `[Loading: ${progress.status}] ${progress.file ? progress.file.split('/').pop() : ''} (${Math.round(progress.progress || 0)}%)`;
205   updateModelStatus(msg, 'loading');
206   }
207   });
208 +
209 + // If successful (still unlikely given previous errors)
210 + updateModelStatus(`${modelId} loaded successfully!`, 'success');
211 + displayMessage('system', `[SUCCESS] ${modelId} loaded. The environment might be different or the library was updated.`, false);
212
213   } catch (error) {
214 + // Catch and report the inevitable error
215 + console.error(`Model loading failed for ${modelId} (Strict Attempt):`, error);
216   let errorMsg = `Failed to load ${modelId}: ${error.message}.`;
217   if (error.message.includes("Unsupported model type") || error.message.includes("gemma3_text")) {
218 + errorMsg += " Confirmed: The 'gemma3_text' model type is unsupported by this library version.";
219   } else if (error.message.includes("split is not a function")) {
220 + errorMsg += " Confirmed: TypeError during config parsing, likely due to unsupported 'gemma3_text' type.";
221   } else {
222 + errorMsg += " Unknown error. Check console and consider Space resource limits.";
223   }
224   updateModelStatus(errorMsg, 'error');
225   displayMessage('system', `[ERROR] ${errorMsg}`, true, true);
226 + generator = null; // Ensure it's null on failure
227   }
228   }
229
230 + // Build messages array as per documentation example
231   function buildMessages(newUserMessage) {
232 + // Start with system prompt (can be customized)
233   let messages = [{ role: "system", content: "You are a helpful assistant." }];
234 + // Append history
235   messages = messages.concat(conversationHistory);
236 + // Append new user message
237   messages.push({ role: "user", content: newUserMessage });
238   console.log("Input Messages:", messages);
239   return messages;
240   }
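For example, with an empty history and the user input "Hi", buildMessages() returns (and logs) the following array:

    [
      { role: "system", content: "You are a helpful assistant." },
      { role: "user", content: "Hi" }
    ]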
241
242 + // Cleanup response based on messages output format
243   function cleanupResponse(output) {
244 + // Expecting output like: [{ generated_text: [..., {role: 'assistant', content: '...'}] }]
245 + try {
246 + if (output && output.length > 0 && output[0].generated_text && Array.isArray(output[0].generated_text)) {
247 + const lastMessage = output[0].generated_text.at(-1);
248 + if (lastMessage && lastMessage.role === 'assistant' && typeof lastMessage.content === 'string') {
249 + let cleaned = lastMessage.content.trim();
250 + cleaned = cleaned.replace(/<end_of_turn>/g, '').trim();
251 + if (cleaned.length > 0) return cleaned;
252 + }
253 + }
254 + // If structure is different (e.g., older library version returned flat text)
255 + if (output && output.length > 0 && typeof output[0].generated_text === 'string') {
256 + // Fallback for potentially different output structure
257 + let cleaned = output[0].generated_text;
258 + // Need to remove the prompt part if it's included
259 + // This part is tricky without knowing the exact prompt format used internally by the pipeline for messages
260 + // Let's just remove common artifacts for now
261 + cleaned = cleaned.split("<start_of_turn>model").pop().trim(); // Attempt to get text after last model turn
262 + cleaned = cleaned.replace(/<end_of_turn>/g, '').trim();
263 + if (cleaned.length > 0) return cleaned;
264 + }
265
266
267 + } catch (e) { console.error("Error parsing generator output structure:", e, "Output:", output); }
268 + console.warn("Could not reliably extract assistant response from output:", output);
269 + const fallbacks = [ "Sorry, response format was unexpected.", "My response might be garbled.", "Error processing the AI answer." ];
270 + return fallbacks[Math.floor(Math.random() * fallbacks.length)];
271 + }
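A small usage sketch of cleanupResponse() against the output shape the comment above expects; the data is mocked, not an actual model response:

    const mockOutput = [{
      generated_text: [
        { role: "system", content: "You are a helpful assistant." },
        { role: "user", content: "Hi" },
        { role: "assistant", content: "Hello! How can I help?<end_of_turn>" }
      ]
    }];
    console.log(cleanupResponse(mockOutput)); // "Hello! How can I help?"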
272
273   // --- Main Interaction Logic ---
274   async function handleUserMessage() {
275   const userText = userInput.value.trim();
276 + // Proceed only if generator is loaded and not currently loading
277   if (!userText || !generator || isLoadingModel) return;
278
279   userInput.value = ''; userInput.style.height = 'auto';
280 + updateChatUIState(true); // Disable input during generation
281
282 + // Add user message to UI and history
283   displayMessage('user', userText);
284   conversationHistory.push({ role: 'user', content: userText });
285
286   updateModelStatus("AI thinking...", "loading");
287
288 + const messages = buildMessages(userText); // Use the messages array format
289
290   try {
291 + // Call generator with messages array
292   const outputs = await generator(messages, {
293   max_new_tokens: 300,
294 + // Generation parameters from docs example:
295 + do_sample: true, // Typically true for more natural chat
296   temperature: 0.7,
297   top_k: 50,
298 + // repetition_penalty: 1.1, // Check if supported with messages format
299 + // top_p: 0.9, // Check if supported
300   });
301
302 + const replyText = cleanupResponse(outputs); // Process the potentially complex output
303
304   console.log("Cleaned AI Output:", replyText);
305
306 + // Add AI response to UI and history
307   displayMessage('bot', replyText);
308 + // Ensure the role matches what the model/library uses ('assistant' is common)
309 + conversationHistory.push({ role: 'assistant', content: replyText });
310
311   if (botState.botSettings.useSpeechOutput && synthesis && targetVoice) {
312   speakText(replyText);
313   }
314 + saveState();
315
316   } catch (error) {
317   console.error("AI response generation error:", error);
318   displayMessage('system', `[ERROR] Failed to generate response: ${error.message}`, true, true);
319 + // Don't add a bot message on error, the system message covers it
320   } finally {
321   if(generator) updateModelStatus(`${MODEL_NAME} ready.`, "success");
322 + updateChatUIState(generator !== null); // Re-enable UI
323   userInput.focus();
324   }
325   }
326
327   // --- Speech API Functions ---
328 + function initializeSpeechAPI() { /* No changes needed */
329   const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition; if (SpeechRecognition) { recognition = new SpeechRecognition(); recognition.lang = 'en-US'; recognition.continuous = false; recognition.interimResults = false; recognition.onstart = () => { isListening = true; updateChatUIState(generator !== null); console.log('Listening...'); }; recognition.onresult = (event) => { userInput.value = event.results[0][0].transcript; userInput.dispatchEvent(new Event('input')); handleUserMessage(); }; recognition.onerror = (event) => { console.error("Speech error:", event.error); updateModelStatus(`Speech recognition error (${event.error})`, 'error'); setTimeout(() => updateModelStatus(generator ? `${MODEL_NAME} ready.` : 'Model not loaded.', generator ? 'success' : 'error'), 3000); }; recognition.onend = () => { isListening = false; updateChatUIState(generator !== null); console.log('Stopped listening.'); }; } else { console.warn("Speech Recognition not supported."); } if (!synthesis) { console.warn("Speech Synthesis not supported."); } else { toggleSpeakerButton.addEventListener('click', () => { botState.botSettings.useSpeechOutput = !botState.botSettings.useSpeechOutput; updateSpeakerButtonUI(); saveState(); if (!botState.botSettings.useSpeechOutput) synthesis.cancel(); }); } updateChatUIState(false);
330   }
331 + function loadVoices() { /* No changes needed */ if (!synthesis) return; let voices = synthesis.getVoices(); if (voices.length === 0) { synthesis.onvoiceschanged = () => { voices = synthesis.getVoices(); findAndSetVoice(voices); }; } else { findAndSetVoice(voices); } }
332 + function findAndSetVoice(voices) { /* No changes needed */ targetVoice = voices.find(v => v.lang === 'en-US') || voices.find(v => v.lang.startsWith('en-')); if (targetVoice) { console.log("Using English voice:", targetVoice.name, targetVoice.lang); } else { console.warn("No suitable English voice found."); } }
333 + function speakText(text) { /* No changes needed */ if (!synthesis || !botState.botSettings.useSpeechOutput || !targetVoice) return; synthesis.cancel(); const utterance = new SpeechSynthesisUtterance(text); utterance.voice = targetVoice; utterance.lang = targetVoice.lang; utterance.rate = 1.0; utterance.pitch = 1.0; synthesis.speak(utterance); }
334
335   // --- Event Listeners ---
336   sendButton.addEventListener('click', handleUserMessage);