Commit: 713d275
Parent(s): 562e2ca
Commit message: pushing to hf spaces

Files changed:
- Dockerfile +37 -0
- LICENSE +21 -0
- README.md +17 -12
- __init__.py +0 -0
- app.py +67 -0
- config.yaml +18 -0
- model_comparison.txt +44 -0
- models/onnx/yolov8m.onnx +3 -0
- models/onnx/yolov8n.onnx +3 -0
- models/onnx/yolov8s.onnx +3 -0
- models/pt/yolov8m.pt +3 -0
- models/pt/yolov8n.pt +3 -0
- models/pt/yolov8s.pt +3 -0
- render.yaml +7 -0
- requirements.txt +9 -0
- spaces.yaml +1 -0
- src/__init__.py +0 -0
- src/__pycache__/__init__.cpython-310.pyc +0 -0
- src/__pycache__/__init__.cpython-313.pyc +0 -0
- src/__pycache__/config.cpython-310.pyc +0 -0
- src/__pycache__/detection.cpython-310.pyc +0 -0
- src/__pycache__/detection.cpython-313.pyc +0 -0
- src/config.py +23 -0
- src/detection.py +88 -0
- src/main.py +40 -0
- src/utils.py +93 -0
- weights/yolo11n.pt +3 -0
- weights/yolov8n.pt +3 -0
Dockerfile
ADDED
@@ -0,0 +1,37 @@
# Use slim Python base
FROM python:3.10-slim

# Set environment variables
ENV PYTHONDONTWRITEBYTECODE=1
ENV PYTHONUNBUFFERED=1

# Install system dependencies
RUN apt-get update && apt-get install -y \
    git ffmpeg libsm6 libxext6 libgl1 && \
    apt-get clean && rm -rf /var/lib/apt/lists/*

# Set working directory
WORKDIR /app

# Copy only the dependency manifest first
COPY requirements.txt .

# Install Python dependencies first (leverages the Docker layer cache)
RUN pip install --upgrade pip && pip install -r requirements.txt

# Now copy the rest of the code (after installing deps, for better caching)
COPY . .

# Explicitly copy the pretrained model weights
COPY models/ models/

# Streamlit environment config
ENV STREAMLIT_SERVER_HEADLESS=true
ENV STREAMLIT_SERVER_PORT=7860
ENV STREAMLIT_SERVER_ADDRESS=0.0.0.0

# Expose the port used by Streamlit
EXPOSE 7860

# Start the app
CMD ["streamlit", "run", "app.py"]
LICENSE
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2025 Akshath

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
README.md
CHANGED
@@ -1,12 +1,17 @@
# doors-and-windows

An application that serves inference to detect doors and windows in architectural floor plans.

Step 0: learn to read architectural plans well enough to distinguish doors from windows. (DONE)

1. Annotate data and save the labels. (DONE)
2. Train the model, explain the difference between yolov8n and yolov8s, and show which one is better. (DONE)
3. API development: will be a little hard. (DONE)
4. Full cleanup (DONE) + Streamlit setup (DONE)
5. Deployment onto HF Spaces + Docker.

Extras:

1. Linting + formatting.
2. Report model stats, such as inference time and precision.
3. Add visualization by integrating wandb / tensorboard (user's choice).
4. Smooth web app and an API contract.
__init__.py
ADDED
File without changes
app.py
ADDED
@@ -0,0 +1,67 @@
# app.py

import os
import warnings

import streamlit as st
from PIL import Image, ImageDraw

from src.config import AVAILABLE_MODELS, DEFAULT_MODEL
from src.detection import run_detection

os.environ["TORCH_HOME"] = "/tmp/torch_home"  # Optional: avoids torch cache path issues
warnings.filterwarnings("ignore", category=UserWarning)

st.set_page_config(page_title="Blueprint Object Detection", layout="centered")

st.title("Door & Window Detection from Blueprints")
st.markdown("Upload a floorplan and select a model to detect doors and windows.")

# Sidebar - Model selection
model_choice = st.sidebar.selectbox("Choose YOLOv8 Model", AVAILABLE_MODELS, index=AVAILABLE_MODELS.index(DEFAULT_MODEL))

# Upload image
uploaded_file = st.file_uploader("Upload a blueprint image", type=["jpg", "jpeg", "png"])

if uploaded_file:
    # Show the uploaded image
    image = Image.open(uploaded_file).convert("RGB")
    st.image(image, caption="Uploaded Image", use_container_width=True)

    if st.button("🔍 Run Detection"):
        # Inference via run_detection
        image_bytes = uploaded_file.getvalue()
        results = run_detection(image_bytes, uploaded_file.name, model_choice)
        detections = results["detections"]

        # Draw bounding boxes
        draw = ImageDraw.Draw(image)
        for det in detections:
            label = det["label"]
            conf = det["confidence"]
            bbox = det["bbox"]  # bbox is in format [x1, y1, x2, y2]
            draw.rectangle([(bbox[0], bbox[1]), (bbox[2], bbox[3])], outline="red", width=2)
            draw.text((bbox[0], bbox[1] - 10), f"{label} ({conf:.2f})", fill="red")

        st.image(image, caption="Detections", use_container_width=True)

        # Show detection results
        st.markdown("### Detection Results")
        if detections:
            st.success(f"Found {len(detections)} detections!")
            st.dataframe(detections)
        else:
            st.warning("No detections found. This could be due to:")
            st.write("- Low confidence threshold")
            st.write("- Model not trained on this type of image")
            st.write("- Image quality or resolution issues")
            st.write("- Objects too small or unclear in the image")

        # Debug information
        st.markdown("### Debug Information")
        st.write(f"Image size: {image.size}")
        st.write(f"Model: {model_choice}")
        st.write("Try adjusting the confidence threshold in your config file.")
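The debug tip above points at editing config.yaml, but run_detection already accepts an optional conf_threshold argument, so the threshold could instead be exposed in the UI. A minimal sketch, assuming a sidebar slider (not part of the committed app):

# Hypothetical variant: expose the confidence threshold as a sidebar slider
# instead of requiring a config.yaml edit; only the run_detection call changes.
import streamlit as st
from src.detection import run_detection

conf_value = st.sidebar.slider("Confidence threshold", 0.0, 1.0, 0.1, 0.05)
uploaded = st.file_uploader("Upload a blueprint image", type=["jpg", "jpeg", "png"])
if uploaded and st.button("Run Detection"):
    results = run_detection(uploaded.getvalue(), uploaded.name, "yolov8n", conf_threshold=conf_value)
    st.dataframe(results["detections"])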
config.yaml
ADDED
@@ -0,0 +1,18 @@
model_paths:
  yolov8n: models/pt/yolov8n.pt
  yolov8s: models/pt/yolov8s.pt
  yolov8m: models/pt/yolov8m.pt

gflops_map:
  YOLOv8n: 8.1
  YOLOv8s: 28.4
  YOLOv8m: 78.7

dataset:
  input_dir: ./images/
  annos_dir: ./annotations/
  output_dir: ./dataset/
  data_yaml: ./dataset/data.yaml

params:
  confidence_threshold: 0.1
model_comparison.txt
ADDED
@@ -0,0 +1,44 @@
========================================
YOLOv8n Validation Results
========================================
Parameters: 3006038
GFLOPs: 8.1
Speed (inference): 32.65 ms/image

Class | Precision | Recall
0     | 0.945     | 0.206
1     | 1.000     | 0.000

Overall:
mAP@50: 0.231
mAP@50-95: 0.119

========================================
YOLOv8s Validation Results
========================================
Parameters: 11126358
GFLOPs: 28.4
Speed (inference): 74.78 ms/image

Class | Precision | Recall
0     | 0.682     | 0.559
1     | 0.290     | 0.267

Overall:
mAP@50: 0.411
mAP@50-95: 0.239

========================================
YOLOv8m Validation Results
========================================
Parameters: 25840918
GFLOPs: 78.7
Speed (inference): 158.06 ms/image

Class | Precision | Recall
0     | 0.807     | 0.559
1     | 0.165     | 0.433

Overall:
mAP@50: 0.406
mAP@50-95: 0.235
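Read together, the three runs trade accuracy against speed: YOLOv8s roughly doubles YOLOv8n's mAP@50 (0.411 vs 0.231) at about 2.3x the latency, while YOLOv8m is roughly twice as slow again with no mAP@50 gain over YOLOv8s. A throwaway Python snippet to tabulate that tradeoff (the numbers are hard-coded from this file, not recomputed):

# Tabulate the accuracy/latency tradeoff from the validation results above.
results = {
    "yolov8n": {"map50": 0.231, "ms": 32.65},
    "yolov8s": {"map50": 0.411, "ms": 74.78},
    "yolov8m": {"map50": 0.406, "ms": 158.06},
}
for name, r in results.items():
    print(f"{name}: mAP@50={r['map50']:.3f}, {r['ms']:.2f} ms/image, "
          f"mAP per 100 ms = {100 * r['map50'] / r['ms']:.3f}")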
models/onnx/yolov8m.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0a09df51d6f12870cc6ea53771962caaeca97f9a181c3457042b3fc177425a0b
size 103608920
models/onnx/yolov8n.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9c2bf9d9cd5ecf5a5f476466f121181b09a15ca51ba68d373c29d8ac96e558df
size 12251856
models/onnx/yolov8s.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0babb5180269963e20cbae3667836a6e47cb070a4a9a76e2167c694c8c78e269
size 44733295
models/pt/yolov8m.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8df7f367af9dfcbf6f973c754e913d43cdeaca8003ae80beefc2d90adafc5e3d
size 52027531
models/pt/yolov8n.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f91d3fdd587210139267e1e2622294f6922d69a376453051bb1e17d3cf5524d2
size 6246115
models/pt/yolov8s.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:42353438b604798c11d162c4cef1c007635dfa9039b7c7fa01690174e72f55ee
size 22515491
render.yaml
ADDED
@@ -0,0 +1,7 @@
services:
  - type: web
    name: doors-and-windows
    env: python
    plan: free
    buildCommand: pip install -r requirements.txt
    startCommand: streamlit run app.py
requirements.txt
ADDED
@@ -0,0 +1,9 @@
streamlit
ultralytics
opencv-python
opencv-python-headless
Pillow
numpy
onnx
fastapi
python-dotenv
spaces.yaml
ADDED
@@ -0,0 +1 @@
sdk: docker
src/__init__.py
ADDED
File without changes
src/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (146 Bytes).
src/__pycache__/__init__.cpython-313.pyc
ADDED
Binary file (150 Bytes).
src/__pycache__/config.cpython-310.pyc
ADDED
Binary file (806 Bytes).
src/__pycache__/detection.cpython-310.pyc
ADDED
Binary file (2.68 kB).
src/__pycache__/detection.cpython-313.pyc
ADDED
Binary file (3.8 kB).
src/config.py
ADDED
@@ -0,0 +1,23 @@
import os
import yaml

from pathlib import Path
from dotenv import load_dotenv

load_dotenv()

# Load YAML config
CONFIG_PATH = Path(__file__).resolve().parents[1] / "config.yaml"
with open(CONFIG_PATH, "r") as f:
    config = yaml.safe_load(f)

# YOLO model paths
MODEL_PATHS = config.get("model_paths", {})
DEFAULT_MODEL = next(iter(MODEL_PATHS), "yolov8n")  # Use the first model if none explicitly set
AVAILABLE_MODELS = list(MODEL_PATHS.keys())

CONFIDENCE_THRESHOLD = config.get("params", {}).get("confidence_threshold")
DATASET_CONFIG = config.get("dataset", {})  # config.yaml nests these paths under "dataset"
ROBOFLOW_API = os.getenv("ROBOFLOW_API")
WORKSPACE_NAME = os.getenv("WORKSPACE_NAME")
PROJECT_NAME = os.getenv("PROJECT_NAME")
DEBUG = True
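config.py also reads three Roboflow-related values from the environment via python-dotenv; none are committed. A hypothetical .env for local runs (the keys are exactly the names os.getenv is called with; the values are placeholders):

# .env (placeholders, not real credentials)
ROBOFLOW_API=<your-roboflow-api-key>
WORKSPACE_NAME=<your-roboflow-workspace>
PROJECT_NAME=<your-roboflow-project>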
src/detection.py
ADDED
@@ -0,0 +1,88 @@
# src/detection.py

from io import BytesIO
from typing import Any, Dict, Optional

import cv2
import numpy as np
from PIL import Image
from ultralytics import YOLO

from .config import MODEL_PATHS, CONFIDENCE_THRESHOLD

# Global model cache
MODEL_CACHE = {}

def load_model(model_name: str) -> YOLO:
    """
    Load and cache a YOLO model.

    Args:
        model_name (str): Key name of the model.

    Returns:
        YOLO: Loaded YOLO model instance.
    """
    if model_name not in MODEL_CACHE:
        model_path = MODEL_PATHS.get(model_name)
        if model_path is None:
            raise ValueError(f"Model path for {model_name} not found.")
        MODEL_CACHE[model_name] = YOLO(model_path)

    return MODEL_CACHE[model_name]

def read_image_bytes(image_bytes: bytes) -> np.ndarray:
    """
    Convert uploaded image bytes to a NumPy array.

    Args:
        image_bytes (bytes): Image file bytes.

    Returns:
        np.ndarray: OpenCV-compatible image array (BGR).
    """
    image = Image.open(BytesIO(image_bytes)).convert("RGB")
    return cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)

def run_detection(image_bytes: bytes, filename: str, model_name: str, conf_threshold: Optional[float] = None) -> Dict[str, Any]:
    """
    Perform detection using a YOLO model.

    Args:
        image_bytes (bytes): Image file bytes.
        filename (str): Name of the uploaded file.
        model_name (str): Name of the model to use.
        conf_threshold (float, optional): Confidence threshold. Uses the config default if None.

    Returns:
        Dict[str, Any]: Detection results with filename and detections list.
    """
    model = load_model(model_name)
    image = read_image_bytes(image_bytes)

    # Use the provided threshold or fall back to the config value
    threshold = conf_threshold if conf_threshold is not None else CONFIDENCE_THRESHOLD

    results = model.predict(image, conf=threshold, verbose=True)

    detections = []
    for result in results:
        if result.boxes is not None and len(result.boxes) > 0:
            for i in range(len(result.boxes)):
                cls_id = int(result.boxes.cls[i].item())
                conf = float(result.boxes.conf[i].item())
                bbox = result.boxes.xyxy[i].tolist()

                detections.append({
                    "label": model.names[cls_id],
                    "confidence": round(conf, 3),
                    "bbox": [round(coord, 2) for coord in bbox]
                })

    return {
        "filename": filename,
        "detections": detections,
        "total_detections": len(detections),
        "confidence_threshold": threshold,
        "image_shape": image.shape
    }
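run_detection can also be exercised directly, outside both the Streamlit app and the API. A minimal sketch, assuming a local test image at a placeholder path:

# Quick local check of run_detection; the image path is a placeholder.
from src.detection import run_detection

with open("sample_floorplan.png", "rb") as f:
    result = run_detection(f.read(), filename="sample_floorplan.png", model_name="yolov8n")

print(f"{result['total_detections']} detections at threshold {result['confidence_threshold']}")
for det in result["detections"]:
    print(det["label"], det["confidence"], det["bbox"])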
src/main.py
ADDED
@@ -0,0 +1,40 @@
# src/main.py

from fastapi import FastAPI, File, UploadFile, HTTPException, Query
from fastapi.responses import JSONResponse

from .config import AVAILABLE_MODELS, DEFAULT_MODEL
from .detection import run_detection

app = FastAPI(title="Blueprint Object Detector", version="1.0")

@app.post("/detect")
async def detect(
    image: UploadFile = File(...),
    model_name: str = Query(DEFAULT_MODEL, description="Choose model variant")
):
    """
    Endpoint to detect doors/windows in an uploaded blueprint image.

    Args:
        image (UploadFile): Uploaded blueprint image.
        model_name (str): YOLO model name (e.g., 'yolov8n', 'yolov8s').

    Returns:
        JSONResponse: Formatted detection output.
    """
    if model_name not in AVAILABLE_MODELS:
        raise HTTPException(
            status_code=400,
            detail=f"Invalid model name. Available: {AVAILABLE_MODELS}"
        )

    if not image.content_type.startswith("image/"):
        raise HTTPException(status_code=400, detail="Invalid file type. Upload an image.")

    image_bytes = await image.read()

    try:
        result = run_detection(image_bytes, filename=image.filename, model_name=model_name)
        return JSONResponse(content=result)
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Detection failed: {str(e)}")
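requirements.txt lists fastapi but no ASGI server, so serving this module assumes uvicorn (and requests, for the client) is installed separately. A sketch of one way to run and call the endpoint; the port, script names, and image path are arbitrary:

# serve.py (hypothetical): run the API locally with uvicorn.
import uvicorn

uvicorn.run("src.main:app", host="0.0.0.0", port=8000)

# client.py (hypothetical): call /detect with a local image.
import requests

with open("sample_floorplan.png", "rb") as f:
    resp = requests.post(
        "http://localhost:8000/detect",
        params={"model_name": "yolov8s"},
        files={"image": ("sample_floorplan.png", f, "image/png")},
    )
print(resp.json())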
src/utils.py
ADDED
@@ -0,0 +1,93 @@
import os
import random
import shutil
from pathlib import Path
from typing import Dict

from ultralytics import YOLO

from .config import DATASET_CONFIG, MODEL_PATHS, WORKSPACE_NAME, PROJECT_NAME, ROBOFLOW_API

random.seed(42)


def create_dataset(val_split: float = 0.2):
    input_dir = Path(DATASET_CONFIG.get("input_dir", "images/"))
    annos_dir = Path(DATASET_CONFIG.get("annos_dir", "annotations/"))
    output_dir = Path(DATASET_CONFIG.get("output_dir", "dataset/"))

    for split in ["train", "val"]:
        (output_dir / split / "images").mkdir(parents=True, exist_ok=True)
        (output_dir / split / "labels").mkdir(parents=True, exist_ok=True)

    image_files = list(input_dir.glob("*.jpg")) + list(input_dir.glob("*.png"))
    random.shuffle(image_files)

    split_index = int(len(image_files) * (1 - val_split))
    train_images = image_files[:split_index]
    val_images = image_files[split_index:]

    def process(images, split):
        for img_path in images:
            label_path = annos_dir / (img_path.stem + ".txt")
            if not label_path.exists():
                print(f"[WARNING] Missing label for: {img_path.name}")
                continue
            shutil.copy2(img_path, output_dir / split / "images" / img_path.name)
            shutil.copy2(label_path, output_dir / split / "labels" / label_path.name)

    process(train_images, "train")
    process(val_images, "val")

    print(f"[DONE] Dataset created at: {output_dir}")
    print(f" - Training images: {len(train_images)}")
    print(f" - Validation images: {len(val_images)}")


def log_model_comparisons(gflops_map: Dict[str, float], log_file: str = "model_comparison_log.txt"):
    data_yaml = DATASET_CONFIG.get("data_yaml", "./dataset/data.yaml")

    with open(log_file, "w") as f:
        for name, path in MODEL_PATHS.items():
            print(f"Validating model: {name}, path: {path}")
            model = YOLO(path)
            metrics = model.val(data=data_yaml)

            f.write(f"\n{'='*40}\n{name} Validation Results\n{'='*40}\n")
            f.write(f"Parameters: {sum(p.numel() for p in model.model.parameters())}\n")
            f.write(f"GFLOPs: {gflops_map.get(name, 'N/A')}\n")
            f.write(f"Speed (inference): {metrics.speed['inference']:.2f} ms/image\n\n")

            f.write("Class | Precision | Recall\n")
            for i, cls in enumerate(model.model.names):
                prec = metrics.box.p[i].item()
                rec = metrics.box.r[i].item()
                f.write(f"{cls:<10} | {prec:.3f} | {rec:.3f}\n")

            f.write("\nOverall:\n")
            f.write(f"mAP@50: {metrics.box.map50:.3f}\n")
            f.write(f"mAP@50-95: {metrics.box.map:.3f}\n")


def export_models_to_weights_dir(output_dir: str = "weights"):
    os.makedirs(output_dir, exist_ok=True)

    for model_name, model_path in MODEL_PATHS.items():
        print(f"\nExporting {model_name} from {model_path}...")

        model = YOLO(model_path)

        # Copy the .pt file
        pt_dest = os.path.join(output_dir, f"{model_name.lower()}.pt")
        shutil.copy(model_path, pt_dest)
        print(f"Saved: {pt_dest}")

        # Export to ONNX format
        onnx_path = model.export(format="onnx")
        exported_onnx_path = os.path.join(output_dir, f"{model_name.lower()}.onnx")
        shutil.move(onnx_path, exported_onnx_path)
        print(f"Saved: {exported_onnx_path}")


def init_roboflow():
    from roboflow import Roboflow

    rf = Roboflow(api_key=ROBOFLOW_API)
    project = rf.workspace(WORKSPACE_NAME).project(PROJECT_NAME)
    return project.version(1).download("yolov5")
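Nothing in this commit wires these utilities together. A hypothetical driver script might look like the sketch below; note that config.yaml capitalizes the gflops_map keys (YOLOv8n) while the model_paths keys are lowercase (yolov8n), so the per-model lookup in log_model_comparisons would miss without normalization:

# Hypothetical driver for the utilities above; no such script ships in this commit.
import yaml

from src.utils import create_dataset, log_model_comparisons, export_models_to_weights_dir

with open("config.yaml") as f:
    cfg = yaml.safe_load(f)

# Lowercase the gflops_map keys so they match the model_paths keys.
gflops = {k.lower(): v for k, v in cfg["gflops_map"].items()}

create_dataset(val_split=0.2)            # builds dataset/train and dataset/val
log_model_comparisons(gflops)            # validates each model, writes model_comparison_log.txt
export_models_to_weights_dir("weights")  # copies .pt files and exports ONNX alongside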
weights/yolo11n.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0ebbc80d4a7680d14987a577cd21342b65ecfd94632bd9a8da63ae6417644ee1
size 5613764
weights/yolov8n.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f59b3d833e2ff32e194b5bb8e08d211dc7c5bdf144b90d2c8412c47ccfc83b36
size 6549796