import gradio as gr
import cv2
import numpy as np
import os
from PIL import Image
from ultralyticsplus import YOLO
from transformers import pipeline
# First stage: fine-tuned YOLOv8 weapon detector.
model = YOLO('best (1).pt')
# Detector class indices, in training order.
name = ['grenade', 'knife', 'pistol', 'rifle']

# Second stage: weapon-model classifier. Loaded once at module level so that
# response() can reach it (it was previously created inside response2(),
# which left it undefined when response() ran).
model2 = pipeline('image-classification', 'Kaludi/csgo-weapon-classification')

image_directory = "/home/user/app/image"
video_directory = "/home/user/app/video"
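# Pipeline overview: the YOLO detector localises weapons in the image; for
# pistol and rifle hits, model2 is run on the whole image to name the specific
# weapon model, while grenades and knives keep the detector's own label.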
def response(image):
    """Detect weapons and return (weapon names, per-detection report)."""
    results = model(image)
    text = ""
    name_weap = ""
    for r in results:
        conf = np.array(r.boxes.conf)
        cls = np.array(r.boxes.cls).astype(int)
        xywh = np.array(r.boxes.xywh).astype(int)
        for con, cl, xy in zip(conf, cls, xywh):
            conf_pct = round(float(con) * 100, 1)
            text += f"Detected {name[cl]} with confidence {conf_pct}% at ({xy[0]},{xy[1]})\n"
            if cl in (0, 1):
                # grenade / knife: the detector label is specific enough
                name_weap += name[cl] + '\n'
            else:
                # pistol / rifle: ask the classifier for the specific weapon model
                out = model2(image)
                name_weap += out[0]["label"] + '\n'
    return name_weap, text
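# Example of calling response() directly (the path below is illustrative):
#   labels, report = response(Image.open('image/th (5).jpg'))
#   print(report)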
def response2(image, image_size=640, conf_threshold=0.3, iou_threshold=0.6):
    results = model.predict(image, conf=conf_threshold, iou=iou_threshold, imgsz=image_size)
    for r in results:
        # r.plot() returns a BGR numpy array with boxes drawn; flip to RGB for PIL
        im_array = r.plot()
        im = Image.fromarray(im_array[..., ::-1])
    weapon_name, text_detection = response(image)
    return im, text_detection, weapon_name
inputs = [
    gr.Image(type="pil", label="Input Image"),
    gr.Slider(minimum=320, maximum=1280, value=640, step=32, label="Image Size"),
    gr.Slider(minimum=0.0, maximum=1.0, value=0.3, step=0.05, label="Confidence Threshold"),
    gr.Slider(minimum=0.0, maximum=1.0, value=0.6, step=0.05, label="IOU Threshold"),
]

outputs = [
    gr.Image(type="pil", label="Output Image"),
    gr.Textbox(label="Result"),
    gr.Textbox(label="Weapon Name"),
]
examples = [
    [os.path.join(image_directory, "th (5).jpg"), 640, 0.3, 0.6],
    [os.path.join(image_directory, "th (8).jpg"), 640, 0.3, 0.6],
    [os.path.join(image_directory, "th (11).jpg"), 640, 0.3, 0.6],
    [os.path.join(image_directory, "th (3).jpg"), 640, 0.3, 0.6],
]
title = 'Weapon Detection with Fine-tuned YOLOv8'
description = ('Image Size: defines the image size used for inference.\n'
               'Confidence Threshold: sets the minimum confidence for a detection to be kept.\n'
               'IOU Threshold: Intersection over Union (IoU) threshold for Non-Maximum '
               'Suppression (NMS); useful for reducing duplicate detections.')
def pil_to_cv2(pil_image):
    # Convert a PIL RGB image to an OpenCV BGR array.
    return cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)
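# Note: pil_to_cv2 is not called anywhere in this app; it is kept as a helper
# for callers that need an OpenCV BGR frame, e.g. frame = pil_to_cv2(pil_img).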
def process_video(video_path):
    cap = cv2.VideoCapture(video_path)
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        # OpenCV reads BGR; flip to RGB before handing the frame to PIL/YOLO
        pil_img = Image.fromarray(frame[..., ::-1])
        result = model.predict(source=pil_img)
        for r in result:
            im_array = r.plot()
            processed_frame = Image.fromarray(im_array[..., ::-1])
            yield processed_frame
    cap.release()
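# Gradio treats generator functions as streaming outputs: each yielded frame
# replaces the previous one in the output image component, so the video plays
# back with detections drawn frame by frame.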
video_iface = gr.Interface(
    fn=process_video,
    inputs=[gr.Video(label="Upload Video", interactive=True)],
    outputs=gr.Image(type="pil", label="Result"),
    title=title,
    description="Upload a video for frame-by-frame inference.",
    examples=[
        [os.path.join(video_directory, "ExampleRifle.mp4")],
        [os.path.join(video_directory, "Knife.mp4")],
    ],
)
image_iface = gr.Interface(fn=response2, inputs=inputs, outputs=outputs,
                           examples=examples, title=title, description=description)

demo = gr.TabbedInterface([image_iface, video_iface], ["Image Inference", "Video Inference"])

if __name__ == '__main__':
    demo.launch()