<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Ask Questions to the Model</title>
    <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs"></script>
</head>
<body>
    <h1>Ask Questions to the Model</h1>
    <a href="entrenament-pdf.html" style="margin:5px;padding: 5px; border:1px solid green">Entrenament PDF</a>
    <a href="preguntar-pdf.html" style="margin:5px;padding: 5px; border:1px solid green">Preguntar PDF</a>
    <br><br>
    <input type="text" id="question" placeholder="Type your question here">
    <button id="askQuestion">Ask</button>
    <pre id="response"></pre>
    <script>
// Load the question-answering model previously saved to IndexedDB by the
// training page. On failure, reports to the user via the #response element
// and rethrows so the caller's error path also runs.
async function loadModel() {
    const MODEL_URL = 'indexeddb://pdf-trained-model';
    console.log('Checking available models in IndexedDB...');
    try {
        const model = await tf.loadLayersModel(MODEL_URL);
        console.log('Model loaded successfully from IndexedDB.');
        return model;
    } catch (loadError) {
        console.error('Error loading model:', loadError);
        document.getElementById('response').textContent = 'Model not found. Train it first!';
        throw loadError;
    }
}
// Convert a free-text question into an array of vocabulary indices using the
// given word -> id map. Unknown words map to 0 (out-of-vocabulary).
//
// Fixes vs. original:
//  - trim + filter empty tokens, so a blank/whitespace-only question yields []
//    (previously "".split(/\s+/) produced [""] -> [0], which defeated the
//    caller's "could not be tokenized" check);
//  - Object.hasOwn lookup, so inherited keys like "constructor" no longer
//    resolve to prototype functions instead of 0.
function tokenizeQuestion(question, tokenizer) {
    const tokens = question.trim().split(/\s+/).filter((token) => token.length > 0);
    console.log('Tokens from question:', tokens);
    return tokens.map((token) => (Object.hasOwn(tokenizer, token) ? tokenizer[token] : 0));
}
// Handle the "Ask" button: tokenize the question, pad/truncate it to the
// model's fixed input length, run prediction, and show the raw output.
document.getElementById('askQuestion').addEventListener('click', async () => {
    const question = document.getElementById('question').value;
    const responseElement = document.getElementById('response');
    if (!question) {
        responseElement.textContent = 'Please enter a question.';
        return;
    }
    responseElement.textContent = 'Loading model...';
    // Sequence length the model expects. NOTE(review): assumed to match the
    // training page's padding length — confirm against entrenament-pdf.html.
    const MAX_SEQUENCE_LENGTH = 10;
    let paddedInput = null;
    let prediction = null;
    try {
        const model = await loadModel();
        // Tokenizer setup (replace with actual tokenizer logic from training)
        const tokenizer = { "example": 1, "question": 2 }; // Placeholder for actual tokenizer
        // Truncate to the model's input length; the original never truncated,
        // so questions longer than 10 tokens produced a wrong-shaped input.
        const input = tokenizeQuestion(question, tokenizer).slice(0, MAX_SEQUENCE_LENGTH);
        if (input.length === 0) {
            responseElement.textContent = 'Error: Question could not be tokenized.';
            return;
        }
        // tf.tidy disposes the intermediate tensor2d; only the padded result
        // survives. Previously both tensors leaked memory on every click.
        paddedInput = tf.tidy(() => tf.pad(
            tf.tensor2d([input], [1, input.length]),
            [[0, 0], [0, MAX_SEQUENCE_LENGTH - input.length]],
            'constant'
        ));
        console.log('Padded input for prediction:', paddedInput.arraySync());
        try {
            prediction = model.predict(paddedInput);
            const predictionArray = await prediction.array();
            console.log('Prediction result:', predictionArray);
            responseElement.textContent = `Model response: ${JSON.stringify(predictionArray)}`;
        } catch (err) {
            console.error('Prediction error:', err);
            responseElement.textContent = 'Error during prediction.';
        }
    } catch (err) {
        responseElement.textContent = 'Error: Could not load model or process question.';
        console.error('Error in processing question:', err);
    } finally {
        // Release tensor memory — tfjs tensors are not garbage-collected.
        paddedInput?.dispose();
        prediction?.dispose();
    }
});
    </script>
</body>
</html>