cvdetectors committed on
Commit
61dafbf
·
verified ·
1 Parent(s): a4a99d6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +41 -85
app.py CHANGED
@@ -1,98 +1,54 @@
1
- import os
2
- import tempfile
3
- import cv2
4
-
5
- from PIL import Image, ImageDraw
6
  import gradio as gr
 
7
  from ultralytics import YOLO
8
  from supervision import Detections
 
9
 
 
 
10
 
11
def load_model():
    """Load the YOLOv8 face-detection weights and return the model instance."""
    # NOTE(review): assumes "yolov8n-face.pt" is available locally or
    # resolvable by ultralytics — confirm the weight file ships with the app.
    return YOLO("yolov8n-face.pt")


# Module-level detector shared by all handlers below.
model = load_model()
18
-
19
 
20
def detect_faces(image: Image.Image):
    """Run the face detector on *image*; return (annotated copy, summary text)."""
    detections = Detections.from_ultralytics(model(image)[0])
    # Each row of xyxy is one face's [x1, y1, x2, y2] corner coordinates.
    boxes = detections.xyxy

    canvas = image.copy()
    pen = ImageDraw.Draw(canvas)
    for x1, y1, x2, y2 in boxes:
        pen.rectangle([x1, y1, x2, y2], outline="red", width=2)

    return canvas, f"Number of faces detected: {len(boxes)}"
31
-
32
-
33
def detect_faces_video(video_path: str):
    """
    Read a video file, annotate detected faces on every frame, and write an
    annotated copy.

    Args:
        video_path: Path to an input video readable by OpenCV.

    Returns:
        Tuple of (path to the annotated .mp4 file, human-readable summary).
    """
    cap = cv2.VideoCapture(video_path)
    # Some containers report 0 fps; fall back so the writer stays valid.
    fps = cap.get(cv2.CAP_PROP_FPS) or 30.0
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    # tempfile.mktemp() is deprecated and race-prone (another process can
    # claim the name before we open it); mkstemp creates the file securely.
    fd, out_file = tempfile.mkstemp(suffix=".mp4")
    os.close(fd)  # cv2.VideoWriter opens the path itself
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    writer = cv2.VideoWriter(out_file, fourcc, fps, (width, height))

    frame_count = 0
    total_faces = 0

    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                break

            # Model expects RGB; OpenCV decodes frames as BGR.
            pil_img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            output = model(pil_img)
            results = Detections.from_ultralytics(output[0])
            boxes = results.xyxy

            # Draw red boxes directly on the original BGR frame.
            for x1, y1, x2, y2 in boxes:
                cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 0, 255), 2)

            writer.write(frame)
            frame_count += 1
            total_faces += len(boxes)
    finally:
        # Release handles even if inference raises mid-video.
        cap.release()
        writer.release()

    avg_per_frame = total_faces / frame_count if frame_count else 0
    summary = (
        f"Processed {frame_count} frames. "
        f"Total faces detected: {total_faces}. "
        f"Average per frame: {avg_per_frame:.2f}"
    )
    return out_file, summary
81
-
82
-
83
# Gradio UI: video in, annotated video + text summary out.
video_interface = gr.Interface(
    fn=detect_faces_video,
    inputs=gr.Video(label="Upload Video"),
    outputs=[gr.Video(label="Annotated Video"), gr.Text(label="Summary")],
    title="YOLOv8 Video Face Detector",
    description="Detect and annotate faces in videos using a YOLOv8 model."
)


def main():
    """Entry point: start the Gradio server."""
    video_interface.launch()


if __name__ == "__main__":
    main()
 
 
 
 
 
 
1
  import gradio as gr
2
+ from huggingface_hub import hf_hub_download
3
  from ultralytics import YOLO
4
  from supervision import Detections
5
+ from PIL import Image, ImageDraw
6
 
7
# Fetch the face-detection weights once at startup from the Hugging Face Hub
# (cached locally by huggingface_hub on subsequent runs).
model_path = hf_hub_download(repo_id="arnabdhar/YOLOv8-Face-Detection", filename="model.pt")

# Module-level detector shared by every request handled below.
model = YOLO(model_path)
 
 
 
 
 
 
12
 
13
def detect_faces(image: Image.Image):
    """
    Detect faces in *image* with the module-level YOLOv8 model.

    Returns a red-box-annotated copy of the image together with a text
    summary of how many faces were found.
    """
    # Inference + conversion to the supervision Detections format.
    detections = Detections.from_ultralytics(model(image)[0])

    # Each row of xyxy is one face's [x1, y1, x2, y2] corner coordinates.
    boxes = detections.xyxy

    # Never draw on the caller's image — annotate a copy.
    canvas = image.copy()
    pen = ImageDraw.Draw(canvas)
    for x1, y1, x2, y2 in boxes:
        pen.rectangle([x1, y1, x2, y2], outline="red", width=2)

    return canvas, f"Number of faces detected: {len(boxes)}"
40
+
41
# Gradio UI: image in, annotated image + face count out.
demo = gr.Interface(
    fn=detect_faces,
    inputs=gr.Image(type="pil", label="Upload Image"),
    outputs=[
        gr.Image(type="pil", label="Annotated Image"),
        gr.Text(label="Face Count")
    ],
    title="YOLOv8 Face Detector",
    description="Upload an image to detect faces using a YOLOv8 face detection model. The detected faces will be highlighted with red bounding boxes."
)

if __name__ == "__main__":
    demo.launch()