<!-- trial / index.html
     Maikuuuu's picture — Update index.html — 9d34e4e verified
     (file-listing header from the hosting page; kept as a comment so the
     document starts with a valid doctype) -->
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<title>Guitar Chord Detection</title>
<!-- TensorFlow.js runtime. Loaded without `defer` deliberately: the inline
     script at the end of <body> reads `tf` at parse time. -->
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/[email protected]/dist/tf.min.js"></script>
<style>
/* Full-height purple gradient backdrop with white text. */
body {
font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
margin: 20px;
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
color: white;
min-height: 100vh;
}
/* Frosted-glass card wrapping the whole demo UI. */
.container {
max-width: 800px;
margin: 0 auto;
background: rgba(255, 255, 255, 0.1);
padding: 30px;
border-radius: 15px;
backdrop-filter: blur(10px);
box-shadow: 0 8px 32px 0 rgba(31, 38, 135, 0.37);
}
h2 {
text-align: center;
margin-bottom: 30px;
font-size: 2.5em;
text-shadow: 2px 2px 4px rgba(0,0,0,0.5);
}
.upload-section {
margin-bottom: 30px;
text-align: center;
}
/* Translucent file input; the picker button itself is styled below. */
#imageUpload {
padding: 10px;
border-radius: 8px;
border: none;
background: rgba(255, 255, 255, 0.2);
color: white;
font-size: 16px;
margin-bottom: 20px;
}
#imageUpload::file-selector-button {
background: linear-gradient(45deg, #ff6b6b, #ee5a24);
color: white;
border: none;
padding: 10px 20px;
border-radius: 5px;
cursor: pointer;
margin-right: 10px;
}
/* Positioning context so the overlay canvas can sit exactly on top of the
   display canvas. */
#canvasWrapper {
position: relative;
display: inline-block;
margin: 20px auto;
border-radius: 10px;
overflow: hidden;
box-shadow: 0 4px 15px rgba(0,0,0,0.3);
}
#displayCanvas {
border-radius: 10px;
max-width: 100%;
height: auto;
}
/* Detection boxes are drawn here; pointer-events: none keeps the underlying
   canvas interactive. */
#overlayCanvas {
position: absolute;
left: 0;
top: 0;
pointer-events: none;
border-radius: 10px;
}
/* Status / results panel; pre-wrap preserves the \n-formatted result text
   produced by the script. */
#result {
white-space: pre-wrap;
margin-top: 20px;
background: rgba(0, 0, 0, 0.3);
padding: 20px;
border-radius: 10px;
font-family: 'Courier New', monospace;
min-height: 50px;
border: 2px solid rgba(255, 255, 255, 0.2);
}
/* Status color coding used by the script when it swaps #result content. */
.status-loading { color: #feca57; }
.status-success { color: #48cab3; }
.status-error { color: #ff6b6b; }
.status-info { color: #74b9ff; }
.demo-section {
margin-top: 30px;
padding: 20px;
background: rgba(255, 255, 255, 0.1);
border-radius: 10px;
}
.demo-button {
background: linear-gradient(45deg, #48cab3, #18dcff);
color: white;
border: none;
padding: 12px 24px;
border-radius: 8px;
cursor: pointer;
font-size: 16px;
margin: 5px;
transition: transform 0.2s;
}
.demo-button:hover {
transform: translateY(-2px);
}
</style>
</head>
<body>
<div class="container">
<h2>🎸 Guitar Chord Detection Demo</h2>
<div class="upload-section">
<!-- FIX: the file input had no label; aria-label gives it an accessible name -->
<input type="file" id="imageUpload" accept="image/*" aria-label="Upload guitar chord image" />
<div id="canvasWrapper">
<canvas id="displayCanvas" width="640" height="640"></canvas>
<canvas id="overlayCanvas" width="640" height="640"></canvas>
</div>
</div>
<!-- FIX: role=status + aria-live so screen readers announce the async
     loading/detection updates the script writes into this element -->
<div id="result" class="status-info" role="status" aria-live="polite">🎡 Ready to detect guitar chords! Upload an image to get started...</div>
<div class="demo-section">
<h3>Instructions:</h3>
<p>Upload an image containing guitar chord hand positions to detect and identify the chords automatically.</p>
<!-- FIX: explicit type="button" (default button type is "submit");
     handlers are global functions defined in the page script -->
<button class="demo-button" type="button" onclick="showModelInfo()">πŸ“Š Model Info</button>
<button class="demo-button" type="button" onclick="clearResults()">🧹 Clear</button>
</div>
</div>
<script>
// TF.js graph model; null until loadModel() succeeds (loaded lazily).
let model = null;
// Most recently uploaded <img>; assigned in handleImageUpload, not read
// elsewhere in this file.
let currentImage = null;
// Class index -> display label. Order must match the model's training labels;
// NOTE(review): casing is inconsistent ("A minor chord", "CMAJOR CHORD") —
// presumably these mirror the dataset's class names verbatim; confirm before
// normalizing. Emoji/symbols throughout this file appear mojibake-encoded
// (e.g. "🎸") — TODO confirm the file's on-disk encoding.
const chordLabels = [
"A MAJOR CHORD", // 0
"A minor chord", // 1
"B MINOR CHORD", // 2
"CMAJOR CHORD", // 3
"D MAJOR CHORD", // 4
"D MINOR CHORD", // 5
"E MAJOR CHORD", // 6
"E MINOR CHORD", // 7
"F MAJOR CHORD", // 8
"F SHARP MINOR CHORD", // 9
"G MAJOR CHORD" // 10
];
// Load the TF.js graph model from ./model.json (weight shards must sit in the
// same directory) into the module-level `model`. Reports progress/errors in
// the #result element. Returns true on success, false on failure so callers
// can gate inference on the result.
async function loadModel() {
const resultEl = document.getElementById('result');
try {
resultEl.innerHTML = '<span class="status-loading">πŸ”„ Loading TensorFlow.js model...</span>';
// Relative URL: model files are served alongside this HTML file.
console.log('Attempting to load model from: ./model.json');
model = await tf.loadGraphModel('./model.json');
resultEl.innerHTML = '<span class="status-success">βœ… Model loaded successfully! Ready for chord detection.</span>';
console.log('Model loaded successfully:', model);
console.log('Model input shape:', model.inputs[0].shape);
console.log('Model output shape:', model.outputs[0].shape);
return true;
} catch (error) {
// FIX: the original message claimed "Files detected in your folder" with
// check marks even though nothing was checked; list them as *expected*
// files instead so the message is truthful.
resultEl.innerHTML = `
<span class="status-error">❌ Model loading failed</span>
<br><br>
<strong>Troubleshooting steps:</strong>
<br>β€’ Ensure you're running this from a web server (not file://)
<br>β€’ All files (model.json, group1-shard1of3.bin, etc.) must be in the same directory
<br>β€’ Check browser console for detailed error messages
<br>β€’ Try a simple HTTP server: <code>python -m http.server 8000</code>
<br><br>
<strong>Error details:</strong> ${error.message}
<br><br>
<strong>Expected files (same directory as this page):</strong>
<br>β€’ model.json
<br>β€’ group1-shard1of3.bin
<br>β€’ group1-shard2of3.bin
<br>β€’ group1-shard3of3.bin
`;
console.error('Model load error:', error);
console.error('Make sure you are serving the files via HTTP, not opening the HTML file directly');
return false;
}
}
// Turn an image element into the model's expected input: a float32 tensor of
// shape [1, 640, 640, 3] with pixel values scaled to [0, 1].
function preprocessImage(image) {
// tf.tidy disposes every intermediate tensor created inside the callback.
return tf.tidy(() => {
const pixels = tf.browser.fromPixels(image); // HWC uint8 from the image
const resized = pixels.resizeBilinear([640, 640]); // match model input size
const scaled = resized.toFloat().div(255.0); // normalize to [0, 1]
return scaled.expandDims(0); // add batch dim -> NHWC
});
}
// Decode one raw detection row [x, y, w, h, objectness, ...classProbs]: pick
// the best class, combine objectness with its probability, and append to
// `detections` when the combined confidence clears `threshold`.
// NOTE(review): coordinates are scaled by 640, i.e. assumed normalized to
// [0,1] — TODO confirm the exported model emits normalized, not pixel, coords.
function _pushDetection(detections, row, threshold) {
const [x, y, w, h, objectness] = row;
// FIX: clamp to the known label count in BOTH layouts (the original 3D
// branch used all remaining features while the 2D branch clamped).
const classProbs = Array.prototype.slice.call(row, 5, 5 + chordLabels.length);
const maxProb = Math.max(...classProbs);
const classIndex = classProbs.indexOf(maxProb);
const finalConf = objectness * maxProb;
if (finalConf <= threshold) return;
if (classIndex < 0 || classIndex >= chordLabels.length) {
console.warn(`Invalid class index ${classIndex}, max allowed: ${chordLabels.length - 1}`);
return;
}
detections.push({
chord: chordLabels[classIndex],
confidence: finalConf,
classIndex: classIndex, // Keep for debugging
bbox: {
x_center: x * 640,
y_center: y * 640,
width: w * 640,
height: h * 640
}
});
}
// Parse YOLO-style model output into chord detections. Supports both
// [1, numDetections, 5 + numClasses] and [numDetections, 5 + numClasses]
// layouts (the two cases the original handled), which share identical per-row
// decoding — FIX: the original duplicated ~25 lines across the two branches.
function parseDetections(outputTensor, threshold = 0.3) {
const rawData = outputTensor.dataSync();
console.log('Raw model output shape:', outputTensor.shape);
console.log('Raw data length:', rawData.length);
const detections = [];
const shape = outputTensor.shape;
let numDetections = 0;
let features = 0;
if (shape.length === 3) {
// [batch, numDetections, features]; flattened indexing below assumes batch 1.
[, numDetections, features] = shape;
} else if (shape.length === 2) {
[numDetections, features] = shape;
}
for (let i = 0; i < numDetections; i++) {
const start = i * features;
// slice works for both TypedArray (dataSync) and plain Array inputs.
const row = Array.prototype.slice.call(rawData, start, start + features);
_pushDetection(detections, row, threshold);
}
console.log(`Found ${detections.length} detections above threshold ${threshold}`);
return detections;
}
// Render detection bounding boxes plus "<chord> (xx.x%)" labels onto `canvas`,
// cycling through a fixed color palette per detection.
function drawBoxes(canvas, detections) {
const ctx = canvas.getContext('2d');
ctx.clearRect(0, 0, canvas.width, canvas.height);
ctx.lineWidth = 3;
ctx.font = 'bold 16px Arial';
ctx.textBaseline = 'top';
const palette = ['#ff6b6b', '#48cab3', '#feca57', '#74b9ff', '#a29bfe', '#6c5ce7'];
detections.forEach((detection, i) => {
const box = detection.bbox;
// bbox is center-based; convert to top-left for canvas drawing.
const left = box.x_center - box.width / 2;
const top = box.y_center - box.height / 2;
const color = palette[i % palette.length];
ctx.strokeStyle = color;
ctx.strokeRect(left, top, box.width, box.height);
// Filled tab above the box carrying the label text.
const label = `${detection.chord} (${(detection.confidence * 100).toFixed(1)}%)`;
const labelWidth = ctx.measureText(label).width;
const labelHeight = 22;
ctx.fillStyle = color;
ctx.fillRect(left, top - labelHeight, labelWidth + 10, labelHeight);
ctx.fillStyle = 'white';
ctx.fillText(label, left + 5, top - labelHeight + 3);
});
}
// Handle a file-input change: decode the chosen image, draw it on the display
// canvas, lazily load the model on first use, run inference, and render the
// detections (boxes on the overlay canvas, text in #result).
async function handleImageUpload(event) {
const file = event.target.files[0];
if (!file) return;
const image = new Image();
const displayCanvas = document.getElementById('displayCanvas');
const overlayCanvas = document.getElementById('overlayCanvas');
const displayCtx = displayCanvas.getContext('2d');
const resultEl = document.getElementById('result');
const objectUrl = URL.createObjectURL(file);
image.onerror = () => {
// FIX: the original had no error path — an unreadable file silently did
// nothing and leaked the blob URL.
URL.revokeObjectURL(objectUrl);
resultEl.innerHTML = '<span class="status-error">❌ Could not read the selected file as an image.</span>';
};
image.onload = async () => {
// FIX: revoke the blob URL once the image has decoded; the original
// created a fresh URL per upload and never released any of them.
URL.revokeObjectURL(objectUrl);
// Store current image
currentImage = image;
// Draw image on display canvas (scaled to the fixed 640x640 canvas)
displayCtx.drawImage(image, 0, 0, 640, 640);
// Update overlay canvas size to match
overlayCanvas.width = displayCanvas.width;
overlayCanvas.height = displayCanvas.height;
resultEl.innerHTML = '<span class="status-loading">πŸ”„ Processing image...</span>';
// Load model if not already loaded
if (!model) {
const loadSuccess = await loadModel();
if (!loadSuccess) return;
}
try {
// Preprocess image for model input
console.log('Preprocessing image...');
const inputTensor = preprocessImage(image);
console.log('Input tensor shape:', inputTensor.shape);
// Run model inference
console.log('Running model inference...');
const startTime = performance.now();
const outputTensor = await model.executeAsync(inputTensor);
const inferenceTime = performance.now() - startTime;
console.log(`Inference completed in ${inferenceTime.toFixed(2)}ms`);
// Handle single or multiple outputs
const detectionTensor = Array.isArray(outputTensor) ? outputTensor[0] : outputTensor;
console.log('Detection tensor shape:', detectionTensor.shape);
// Parse model predictions (lower threshold than the parser default)
const detections = parseDetections(detectionTensor, 0.25);
// Clean up tensors to prevent memory leaks.
// FIX: the original if/else disposed `outputTensor` identically in both
// branches; tf.dispose accepts a tensor or an array of tensors directly.
tf.dispose([inputTensor]);
tf.dispose(outputTensor);
// Display results
if (detections.length > 0) {
// Sort detections by confidence, best first
detections.sort((a, b) => b.confidence - a.confidence);
const resultText = detections.map((det, i) =>
`🎡 ${i + 1}. ${det.chord} - ${(det.confidence * 100).toFixed(1)}% confidence`
).join('\n');
resultEl.innerHTML = `
<span class="status-success">βœ… Detected ${detections.length} chord(s) in ${inferenceTime.toFixed(0)}ms:</span>
\n\n${resultText}
`;
// Draw bounding boxes on overlay
drawBoxes(overlayCanvas, detections);
console.log('Detections:', detections);
} else {
resultEl.innerHTML = `
<span class="status-info">πŸ” No chords detected above 25% confidence threshold.</span>
<br><br>Tips:
<br>β€’ Ensure the image shows clear guitar chord hand positions
<br>β€’ Try images with better lighting and focus
<br>β€’ Make sure fingers and fret positions are visible
`;
}
} catch (error) {
resultEl.innerHTML = `<span class="status-error">❌ Detection failed: ${error.message}</span>`;
console.error('Detection error:', error);
}
};
image.src = objectUrl;
}
// Show static model metadata (input size, supported chord classes, threshold)
// in the #result panel; #result uses white-space: pre-wrap, so the embedded
// newlines render as line breaks.
function showModelInfo() {
const info = [
'',
'<span class="status-info">πŸ“Š Guitar Chord Detection Model</span>',
'<strong>Input Requirements:</strong> 640x640 RGB images',
`<strong>Supported Chords:</strong> ${chordLabels.length} chord types`,
chordLabels.map((chord, i) => ` ${i + 1}. ${chord}`).join('\n'),
'<strong>Detection Threshold:</strong> 25% confidence minimum',
'<strong>Model Format:</strong> TensorFlow.js Graph Model',
'<strong>Architecture:</strong> YOLO-based object detection',
''
].join('\n');
document.getElementById('result').innerHTML = info;
}
// Reset the UI to its idle state: restore the ready message and wipe both
// the overlay (boxes) and display (image) canvases.
function clearResults() {
const overlay = document.getElementById('overlayCanvas');
const display = document.getElementById('displayCanvas');
document.getElementById('result').innerHTML = '<span class="status-info">🎡 Ready to detect guitar chords! Upload an image to get started...</span>';
for (const canvas of [overlay, display]) {
canvas.getContext('2d').clearRect(0, 0, canvas.width, canvas.height);
}
}
// --- Page wiring ---
// Re-run detection whenever a new file is chosen.
const uploadInput = document.getElementById('imageUpload');
uploadInput.addEventListener('change', handleImageUpload);
// Log the TF.js runtime version up front to aid debugging.
console.log('TensorFlow.js version:', tf.version.tfjs);
// Warm-load the model as soon as the page finishes loading so the first
// upload doesn't pay the download cost.
window.addEventListener('load', async () => {
console.log('Page loaded, attempting to load model...');
await loadModel();
});
</script>
</body>
</html>