Update app.py
app.py
CHANGED
@@ -1,25 +1,46 @@
 import gradio as gr
 import PIL.Image as Image
 from ultralytics import YOLO
+import torch
+import os

-model
+# Load model once at startup
+print("Loading YOLOv8m Defence model...")
+model = YOLO("yolov8m_defence.pt")

-
+# Set device and optimize for CPU inference
+if torch.cuda.is_available():
+    device = 'cuda'
+    print("Using GPU acceleration")
+else:
+    device = 'cpu'
+    print("Using CPU inference")
+
+model.to(device)
+
+def predict_image(img, conf_threshold, iou_threshold):
     """Predicts objects in an image using YOLOv8m Defence model with adjustable confidence and IOU thresholds."""
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    try:
+        results = model.predict(
+            source=img,
+            conf=conf_threshold,
+            iou=iou_threshold,
+            show_labels=True,
+            show_conf=True,
+            imgsz=640,
+            verbose=False,
+            device=device
+        )
+
+        for r in results:
+            im_array = r.plot()
+            im = Image.fromarray(im_array[..., ::-1])
+
+        return im
+
+    except Exception as e:
+        print(f"Error during prediction: {e}")
+        return img

 # Custom CSS for font styling
 css = """
@@ -39,13 +60,13 @@ body, .gradio-container {
 }
 """

+# Create interface
 iface = gr.Interface(
     fn=predict_image,
     inputs=[
         gr.Image(type="pil", label="Upload Image"),
         gr.Slider(minimum=0, maximum=1, value=0.25, label="Confidence threshold"),
         gr.Slider(minimum=0, maximum=1, value=0.45, label="IoU threshold"),
-        gr.Radio(choices=["yolov8m_defence.pt"], label="Model Name", value="yolov8m_defence.pt"),
     ],
     outputs=gr.Image(type="pil", label="Detection Results"),
     title="YOLOv8m Defence Object Detection",
@@ -56,21 +77,25 @@ iface = gr.Interface(
     Vehicles (car, truck, tank, bus, van), Ships (cargo, yacht, cruise, warship, sailboat),
     and specialized items (drone, missile).

+    **Note:** Running on the Free Tier - inference may take longer than expected.
+
     Developed for DSTA Brainhack 2025 - TIL-AI Category (Semi-Finalist)
     """,
     examples=[
-        ["examples/test1.jpg", 0.25, 0.45
-        ["examples/test2.jpg", 0.25, 0.45
-        ["examples/test3.jpg", 0.25, 0.45
-        ["examples/test4.jpg", 0.25, 0.45
-        ["examples/test5.jpg", 0.25, 0.45
-        ["examples/test6.jpg", 0.25, 0.45
-        ["examples/test7.jpg", 0.25, 0.45
-        ["examples/test8.jpg", 0.25, 0.45
-        ["examples/test9.jpg", 0.25, 0.45
-        ["examples/test10.jpg", 0.25, 0.45
+        ["examples/test1.jpg", 0.25, 0.45],
+        ["examples/test2.jpg", 0.25, 0.45],
+        ["examples/test3.jpg", 0.25, 0.45],
+        ["examples/test4.jpg", 0.25, 0.45],
+        ["examples/test5.jpg", 0.25, 0.45],
+        ["examples/test6.jpg", 0.25, 0.45],
+        ["examples/test7.jpg", 0.25, 0.45],
+        ["examples/test8.jpg", 0.25, 0.45],
+        ["examples/test9.jpg", 0.25, 0.45],
+        ["examples/test10.jpg", 0.25, 0.45],
     ],
-    css=css
+    css=css,
+    cache_examples=True
 )

-
+if __name__ == "__main__":
+    iface.launch(share=True)
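
For reference, a minimal local sanity check of the prediction path the new app.py uses, handy for verifying the weights and an example image before pushing to the Space. This is a sketch under the assumption that yolov8m_defence.pt and examples/test1.jpg sit in the working directory, as the paths in the diff suggest; Ultralytics' Results.plot() returns a BGR array, which is why the channel order is reversed before building the PIL image.

import PIL.Image as Image
from ultralytics import YOLO

# Load the same weights the Space uses (assumed to be in the working directory).
model = YOLO("yolov8m_defence.pt")
img = Image.open("examples/test1.jpg")

# Mirror the defaults exposed by the Gradio sliders.
results = model.predict(source=img, conf=0.25, iou=0.45, imgsz=640, verbose=False)

for r in results:
    im_array = r.plot()                        # annotated image as a BGR numpy array
    im = Image.fromarray(im_array[..., ::-1])  # reverse channel order: BGR -> RGB for PIL
    im.save("test1_annotated.jpg")
    print(f"Detected {len(r.boxes)} objects")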
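
Once the Space is up, the endpoint defined by gr.Interface can also be called programmatically. Below is a sketch using gradio_client; the Space id is a placeholder (it is not shown on this page), and the three positional arguments mirror the image, confidence, and IoU inputs declared above.

from gradio_client import Client, handle_file

client = Client("<username>/<space-name>")  # placeholder Space id - replace with the real one
result = client.predict(
    handle_file("examples/test1.jpg"),  # image input
    0.25,                               # confidence threshold
    0.45,                               # IoU threshold
    api_name="/predict",                # default endpoint name for a gr.Interface
)
print(result)  # path to the annotated image returned by the Space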
|