Spaces:
Running
Running
File size: 2,986 Bytes
cca4a24 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>SmolLM Test</title>
<script type="module">
// Load Transformers.js from the CDN and expose it to the classic script below
// via window globals (module scope is not shared with non-module scripts).
import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/[email protected]';
window.transformers = { pipeline, env };
window.transformersLoaded = true; // polled/read by testSmolLM() before use
console.log('✅ Transformers.js loaded'); // was a mangled, line-broken literal (syntax error)
</script>
</head>
<body>
<h1>SmolLM Model Test</h1>
<div id="status">Loading...</div>
<button onclick="testSmolLM()">Test SmolLM Models</button>
<div id="result"></div>
<script>
/**
 * Tries each candidate model in order until one loads and generates text.
 * Writes progress into #status / #result and stops at the first working model.
 * Relies on the module script having set window.transformers beforehand.
 * @returns {Promise<void>}
 */
async function testSmolLM() {
  const statusDiv = document.getElementById('status');
  const resultDiv = document.getElementById('result');
  statusDiv.textContent = 'Testing SmolLM models...';
  resultDiv.innerHTML = '';

  // Guard: if the module script hasn't finished loading Transformers.js yet,
  // the destructure below would throw and every model would "fail" with a
  // misleading TypeError. Fail fast with a clear message instead.
  if (!window.transformersLoaded || !window.transformers) {
    statusDiv.textContent = '❌ Transformers.js not loaded yet — retry in a moment';
    return;
  }
  const { pipeline } = window.transformers; // loop-invariant; hoisted out of the loop

  // Candidate models, roughly in order of preference.
  const modelsToTest = [
    'onnx-community/Phi-3.5-mini-instruct-onnx-web',
    'Xenova/SmolLM-135M',
    'Xenova/SmolLM-360M',
    'HuggingFaceTB/SmolLM2-135M-Instruct',
    'HuggingFaceTB/SmolLM-135M'
  ];

  // Track success explicitly; the original scraped resultDiv.innerHTML for an
  // emoji (and the check's string literal was broken across lines — a syntax
  // error that prevented the script from parsing at all).
  let workingModel = null;

  for (const modelName of modelsToTest) {
    try {
      resultDiv.innerHTML += `<p>🔍 Testing ${modelName}...</p>`;
      console.log(`Testing ${modelName}`);
      const generator = await pipeline('text-generation', modelName);
      const result = await generator('Hello, my name is', {
        max_new_tokens: 20,
        temperature: 0.7,
        do_sample: true,
        return_full_text: false
      });
      resultDiv.innerHTML += `<p>✅ ${modelName} works!</p>`;
      resultDiv.innerHTML += `<p>Generated: "${result[0].generated_text}"</p><hr>`;
      statusDiv.textContent = `✅ Found working model: ${modelName}`;
      workingModel = modelName;
      break; // first working model wins
    } catch (error) {
      // Expected path for models the runtime can't load; log and try the next.
      console.error(`${modelName} failed:`, error);
      resultDiv.innerHTML += `<p>❌ ${modelName} failed: ${error.message}</p>`;
    }
  }

  if (workingModel === null) {
    statusDiv.textContent = '❌ No SmolLM models work with current setup';
    resultDiv.innerHTML += '<p><strong>Recommendation:</strong> Use DistilGPT-2 or GPT-2 as fallback</p>';
  }
}
// Kick off the model test automatically, 2s after the DOM is ready —
// enough of a grace period for the CDN module script to finish loading.
document.addEventListener('DOMContentLoaded', () => {
  setTimeout(() => testSmolLM(), 2000);
});
</script>
</body>
</html>
|