Update app.py
app.py CHANGED

@@ -1,305 +1,122 @@
-import os
 import gradio as gr
-import requests
-from PIL import Image, ImageDraw, ImageFont
-import numpy as np
 import cv2
+import numpy as np
+from PIL import Image, ImageDraw, ImageFont
+from ultralytics import YOLO
 import time
 
-# …
-[old lines 10–16 not legible in the rendered diff]
-# Model configuration
-MODEL_NAMES = {
-    "nano": "yolov8n.pt",
-    "small": "yolov8s.pt",
-    "medium": "yolov8m.pt",
-    "large": "yolov8l.pt",
-    "extra large": "yolov8x.pt"
-}
-
-class YOLOModel:
-    def __init__(self):
-        self.model = None
-        self.model_size = None
-        self.last_used = None
-
-    def load_model(self, model_size="medium"):
-        global model_loaded
-        try:
-            from ultralytics import YOLO
-            model_path = MODEL_NAMES.get(model_size, "yolov8m.pt")
-            self.model = YOLO(model_path)
-            self.model_size = model_size
-            self.last_used = time.time()
-            model_loaded = True
-            return f"YOLOv8 {model_size} model loaded successfully", True
-        except Exception as e:
-            return f"Error loading YOLO model: {str(e)}", False
-
-    def get_model(self):
-        if self.model is None:
-            return None
-        self.last_used = time.time()
-        return self.model
-
-yolo_model = YOLOModel()
-
-def get_system_health():
-    """Get system health metrics"""
-    uptime = time.time() - startup_time
-    return {
-        "uptime": f"{uptime:.1f} seconds",
-        "images_processed": processed_images,
-        "model_status": "Loaded" if yolo_model.model else "Not loaded",
-        "model_size": yolo_model.model_size if yolo_model.model else "None",
-        "last_used": f"{time.time() - yolo_model.last_used:.1f} sec ago" if yolo_model.last_used else "Never"
+# Initialize YOLO model
+def load_model(model_size="medium"):
+    model_map = {
+        "nano": "yolov8n.pt",
+        "small": "yolov8s.pt",
+        "medium": "yolov8m.pt",
+        "large": "yolov8l.pt",
+        "xlarge": "yolov8x.pt"
     }
-
-def groq_chat_api(user_message, history=None):
-    """Function to call Groq API for chat completions"""
-    if not user_message or user_message.strip() == "":
-        return "Please enter a question to get a response."
-
     try:
-        [old lines 70–71 not legible in the rendered diff]
-        # Include conversation history if provided
-        for user_msg, bot_response in history:
-            messages.insert(0, {"role": "assistant", "content": bot_response})
-            messages.insert(0, {"role": "user", "content": user_msg})
-
-        payload = {
-            "messages": messages,
-            "model": "llama3-70b-8192",
-            "temperature": 0.7,
-            "max_tokens": 2048,
-            "top_p": 0.9,
-        }
-
-        headers = {
-            "Authorization": f"Bearer {API_KEY}",
-            "Content-Type": "application/json"
-        }
-
-        response = requests.post(API_URL, json=payload, headers=headers, timeout=30)
-
-        if response.status_code == 200:
-            return response.json()["choices"][0]["message"]["content"]
-        else:
-            return f"Error: Failed to get response (HTTP {response.status_code}). Please try again."
-
-    except requests.Timeout:
-        return "Error: Request timed out. The server took too long to respond."
+        model = YOLO(model_map[model_size])
+        return model, f"✅ {model_size.capitalize()} model loaded successfully"
     except Exception as e:
-        return f"Error…
+        return None, f"❌ Error loading model: {str(e)}"
 
-[old lines 102–108 not legible in the rendered diff]
-    # Create a blank space at the bottom for the text
-    h, w = image.shape[:2]
-    text_area = np.zeros((150, w, 3), dtype=np.uint8)
-    text_area.fill(245)  # Light gray background
-    combined = np.vstack([image, text_area])
-
-    # Convert back to PIL Image to use PIL's text drawing
-    combined_pil = Image.fromarray(cv2.cvtColor(combined, cv2.COLOR_BGR2RGB))
-    draw = ImageDraw.Draw(combined_pil)
-
-    try:
-        # Try to use a nicer font if available
-        font = ImageFont.truetype("arial.ttf", 14)
-    except:
-        font = ImageFont.load_default()
-
-    # Prepare detection text
-    text_lines = ["Detection Results:"]
-    for i, det in enumerate(detections[:5]):  # Show max 5 detections
-        text_lines.append(f"{i+1}. {det['class']} (Confidence: {det['confidence']:.1%})")
-
-    if not detections:
-        text_lines.append("No objects detected")
-
-    # Draw text
-    y_offset = h + 10
-    for line in text_lines:
-        draw.text((10, y_offset), line, font=font, fill=(0, 0, 0))
-        y_offset += 20
-
-    return combined_pil
-
-[old lines 141–145 not legible in the rendered diff]
-    """Process image with YOLOv8 model and return annotated image with detection results"""
-    global processed_images
-
-[old lines 149–178 not legible in the rendered diff]
-                "class": class_name,
-                "confidence": confidence,
-                "bbox": box.xyxy[0].tolist()
-            })
-
-        # Sort detections by confidence (highest first)
-        detections.sort(key=lambda x: x["confidence"], reverse=True)
-
-        # Create detection text
-        if not detections:
-            detection_text = "No objects detected with current confidence threshold"
-        else:
-            detection_text = "Detected objects:\n"
-            for i, det in enumerate(detections[:10]):  # Show max 10 detections
-                detection_text += f"{i+1}. {det['class']} (Confidence: {det['confidence']:.1%})\n"
-
-        # Draw information on the image itself
-        final_image = draw_detection_info(annotated_img, detections)
-
-        return final_image, detection_text
-    except Exception as e:
-        return image, f"Error processing image: {str(e)}"
-
-# Theme colors
-theme = gr.themes.Default(
-    primary_hue="blue",
-    secondary_hue="gray",
-    font=[gr.themes.GoogleFont('Montserrat'), 'Arial', 'sans-serif']
-)
-
-# …
-with gr.Blocks(title="Enhanced …
-    gr.Markdown("""
-    [old line 212 not legible in the rendered diff]
-    ### Powered by YOLOv8 Object Detection and Groq's Llama 3 LLM
-    """)
-
-    with gr.…
-        with gr.…
-            [old lines 218–221 not legible in the rendered diff]
-            load_model_dropdown = gr.Dropdown(
-                list(MODEL_NAMES.keys()),
+# Enhanced detection function
+def detect_objects(image, model_size="medium", conf_threshold=0.35, iou_threshold=0.45):
+    # Load model (load fresh each time to change model size dynamically)
+    model, status = load_model(model_size)
+    if model is None:
+        return image, status
+
+    # Convert image to numpy array if needed
+    if isinstance(image, np.ndarray):
+        img = image
+    else:
+        img = np.array(image)
+
+    # Run inference with improved parameters
+    results = model.predict(
+        source=img,
+        conf=conf_threshold,
+        iou=iou_threshold,
+        imgsz=640,  # Increased from default 320 for better detection
+        augment=True,  # Enable test-time augmentation
+        visualize=False  # Disable visualization for speed
+    )
+
+    # Extract detailed detection information
+    detections = []
+    for r in results:
+        boxes = r.boxes.cpu().numpy()
+        for i, box in enumerate(boxes):
+            detections.append({
+                "class": model.names[int(box.cls[0])],
+                "confidence": float(box.conf[0]),
+                "bbox": box.xyxy[0].tolist(),
+                "id": i+1
+            })
+
+    # Sort detections by confidence (highest first)
+    detections.sort(key=lambda x: x["confidence"], reverse=True)
+
+    # Create annotated image with enhanced visualization
+    annotated_img = results[0].plot(
+        line_width=2,
+        font_size=14,
+        pil=True
+    )
+
+    # Generate detailed detection text
+    detection_text = "Detection Results:\n"
+    if not detections:
+        detection_text = "No objects detected"
+    else:
+        for det in detections[:20]:  # Show top 20 detections
+            detection_text += (
+                f"{det['id']}. {det['class'].upper()} "
+                f"(Confidence: {det['confidence']:.1%})\n"
+                f"   Bounding Box: {[int(x) for x in det['bbox']]}\n"
+            )
+
+    return annotated_img, detection_text
+
+# Gradio interface
+with gr.Blocks(title="Enhanced Object Detector") as demo:
+    gr.Markdown("# 🔍 Enhanced Object Detection with YOLOv8")
+    gr.Markdown("### Get comprehensive detections with adjustable parameters")
+
+    with gr.Row():
+        with gr.Column():
+            image_input = gr.Image(label="Input Image", type="pil")
+            with gr.Accordion("Detection Settings", open=False):
+                model_size = gr.Dropdown(
+                    ["nano", "small", "medium", "large", "xlarge"],
                     value="medium",
-                    label="…
+                    label="Model Size"
                 )
-            [old lines 227–244 not legible in the rendered diff]
-        outputs=[image_output, text_output]
-    )
-
-    with gr.Tab("💬 AI Chat Assistant"):
-        gr.Markdown("### Chat with Llama 3 (70B) via Groq")
-        chatbot = gr.Chatbot(height=400, label="Conversation History")
-        with gr.Row():
-            chat_input = gr.Textbox(label="Your Message", placeholder="Ask me anything...", lines=2)
-            chat_button = gr.Button("✉️ Send")
-
-        clear_button = gr.Button("🧹 Clear Chat")
-
-        chat_button.click(
-            fn=groq_chat_api,
-            inputs=[chat_input, chatbot],
-            outputs=[chat_input]
-        ).then(
-            lambda user_msg, chatbot: chatbot + [[user_msg, None]],
-            inputs=[chat_input, chatbot],
-            outputs=[chatbot],
-            queue=False
-        ).then(
-            lambda bot_msg, chatbot: (
-                None,  # Clear input
-                chatbot[:-1] + [(chatbot[-1][0], bot_msg)]  # Update last message
-            ),
-            inputs=[chat_input, chatbot],
-            outputs=[chat_input, chatbot]
-        )
-
-        clear_button.click(
-            lambda: None,
-            inputs=None,
-            outputs=[chatbot],
-            queue=False
-        )
-
-[old lines 282–286 not legible in the rendered diff]
-        health_button.click(
-            fn=get_system_health,
-            inputs=None,
-            outputs=health_output
-        )
-
-        gr.Examples(
-            examples=[
-                ["What is YOLOv8 and how does it work?"],
-                ["How can AI be used for industrial equipment monitoring?"],
-                ["Explain the latest advancements in computer vision"]
-            ],
-            inputs=[chat_input],
-            label="Example Questions"
-        )
-
-# Launch the app
+                conf_slider = gr.Slider(
+                    minimum=0.1, maximum=0.9, step=0.05,
+                    value=0.35, label="Confidence Threshold"
+                )
+                iou_slider = gr.Slider(
+                    minimum=0.1, maximum=0.9, step=0.05,
+                    value=0.45, label="IOU Threshold"
+                )
+            detect_btn = gr.Button("🔍 Detect Objects", variant="primary")
+
+        with gr.Column():
+            image_output = gr.Image(label="Detected Objects", interactive=False)
+            text_output = gr.Textbox(
+                label="Detection Details",
+                lines=15,
+                max_lines=20,
+                interactive=False
+            )
+
+    detect_btn.click(
+        fn=detect_objects,
+        inputs=[image_input, model_size, conf_slider, iou_slider],
+        outputs=[image_output, text_output]
+    )
+
 if __name__ == "__main__":
-    demo.launch(…
+    demo.launch()
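
The rewritten detect_objects reloads the YOLOv8 weights on every click; the in-code comment says this is to allow switching model size dynamically. A minimal sketch of one way to keep that flexibility without the per-request reload, caching one model instance per size. get_cached_model is a hypothetical helper, not part of this commit:

from functools import lru_cache

from ultralytics import YOLO

# Hypothetical helper (not in the commit): cache one YOLO instance per size,
# so switching sizes still works but repeated requests skip the reload.
@lru_cache(maxsize=None)
def get_cached_model(model_size: str = "medium") -> YOLO:
    model_map = {
        "nano": "yolov8n.pt",
        "small": "yolov8s.pt",
        "medium": "yolov8m.pt",
        "large": "yolov8l.pt",
        "xlarge": "yolov8x.pt",
    }
    return YOLO(model_map[model_size])

Inside detect_objects, the load_model(model_size) call would then become get_cached_model(model_size), with the try/except moved around the cached call.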
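
For a quick check outside the Gradio UI, the new detect_objects can also be called directly. A sketch assuming a local sample.jpg; since the commit passes pil=True to results[0].plot(), the returned annotated image is a PIL Image and supports .save():

from PIL import Image

# Hypothetical smoke test for the new detect_objects; assumes sample.jpg exists.
img = Image.open("sample.jpg")
annotated, report = detect_objects(img, model_size="nano", conf_threshold=0.25)
annotated.save("annotated.jpg")
print(report)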
