SerdarHelli and kadirnar committed on
Commit 87cc80e
1 Parent(s): e271ca3

Added yolov8 model (#2)


- Added yolov8 model (9c5ce294b0d813a7094b44b2d4d0635726727d53)
- Update app.py (3f98b8c0224f0fbed5840b9827314bf5dbefae08)
- Update requirements.txt (d139267820aeca22b668bf32e8f7eb556a1c9910)


Co-authored-by: Kadir Nar <[email protected]>
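For orientation, the YOLOv8 path this commit wires into app.py goes through ultralyticsplus rather than SAHI. A minimal standalone sketch of that path (assuming ultralyticsplus is installed and the SerdarHelli/deprem_satellite_labeled_yolov8 checkpoint is reachable on the Hub; render_result is expected to return a PIL image, as in the ultralyticsplus examples):

from PIL import Image
from ultralyticsplus import YOLO, render_result

# Hub checkpoint used by the new YOLOv8 branch; downloaded on first use.
model = YOLO('SerdarHelli/deprem_satellite_labeled_yolov8')

image = Image.open('data/26.jpg')            # one of the example images added in this commit
result = model.predict(image, imgsz=640)[0]  # single-image inference at 640 px
render = render_result(model=model, image=image, result=result)
render.save('prediction.png')                # rendered detections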

Files changed (7)
  1. README.md +1 -1
  2. app.py +74 -83
  3. data/26.jpg +3 -0
  4. data/27.jpg +3 -0
  5. data/28.jpg +3 -0
  6. data/31.jpg +3 -0
  7. requirements.txt +2 -3
README.md CHANGED
@@ -6,7 +6,7 @@ colorTo: yellow
 sdk: gradio
 app_file: app.py
 pinned: false
-duplicated_from: fcakyon/sahi-yolov5
+duplicated_from: deprem-ml/deprem_satellite_test
 license: openrail
 ---
 
app.py CHANGED
@@ -1,34 +1,21 @@
-import gradio as gr
-import sahi.utils
-from sahi import AutoDetectionModel
-import sahi.predict
-import sahi.slicing
+from sahi import utils, predict, AutoDetectionModel
 from PIL import Image
+import gradio as gr
 import numpy
-from huggingface_hub import hf_hub_download
 import torch
+import os
 
+os.system('pip install git+https://github.com/fcakyon/ultralyticsplus.git')
 
-IMAGE_SIZE = 640
-
-model_id = 'deprem-ml/Binafarktespit-yolo5x-v1-xview'
-
-
+model_id_list = ['deprem-ml/Binafarktespit-yolo5x-v1-xview', 'SerdarHelli/deprem_satellite_labeled_yolov8']
 current_device = "cuda" if torch.cuda.is_available() else "cpu"
-model_types = ["YOLOv5", "YOLOv5 + SAHI"]
-# Model
-model = AutoDetectionModel.from_pretrained(
-    model_type="yolov5",
-    model_path=model_id,
-    device=current_device,
-    confidence_threshold=0.5,
-    image_size=IMAGE_SIZE,
-)
-
+model_types = ["YOLOv5", "YOLOv5 + SAHI", "YOLOv8"]
 
-def sahi_yolo_inference(
-    model_type,
+def sahi_yolov5_inference(
     image,
+    model_id,
+    model_type,
+    image_size,
     slice_height=512,
     slice_width=512,
     overlap_height_ratio=0.1,
@@ -39,26 +26,43 @@ def sahi_yolo_inference(
     postprocess_class_agnostic=False,
 ):
 
-    # image_width, image_height = image.size
-    # sliced_bboxes = sahi.slicing.get_slice_bboxes(
-    #     image_height,
-    #     image_width,
-    #     slice_height,
-    #     slice_width,
-    #     False,
-    #     overlap_height_ratio,
-    #     overlap_width_ratio,
-    # )
-    # if len(sliced_bboxes) > 60:
-    #     raise ValueError(
-    #         f"{len(sliced_bboxes)} slices are too much for huggingface spaces, try smaller slice size."
-    #     )
-
     rect_th = None or max(round(sum(image.size) / 2 * 0.0001), 1)
     text_th = None or max(rect_th - 2, 1)
+
+    if model_type == "YOLOv5":
+        # standard inference
+        model = AutoDetectionModel.from_pretrained(
+            model_type="yolov5",
+            model_path=model_id,
+            device=current_device,
+            confidence_threshold=0.5,
+            image_size=image_size,
+        )
+
+        prediction_result_1 = predict.get_prediction(
+            image=image, detection_model=model
+        )
 
-    if "SAHI" in model_type:
-        prediction_result_2 = sahi.predict.get_sliced_prediction(
+        visual_result_1 = utils.cv.visualize_object_predictions(
+            image=numpy.array(image),
+            object_prediction_list=prediction_result_1.object_prediction_list,
+            rect_th=rect_th,
+            text_th=text_th,
+        )
+
+        output = Image.fromarray(visual_result_1["image"])
+        return output
+
+    elif model_type == "YOLOv5 + SAHI":
+        model = AutoDetectionModel.from_pretrained(
+            model_type="yolov5",
+            model_path=model_id,
+            device=current_device,
+            confidence_threshold=0.5,
+            image_size=image_size,
+        )
+
+        prediction_result_2 = predict.get_sliced_prediction(
             image=image,
             detection_model=model,
            slice_height=int(slice_height),
@@ -70,54 +74,38 @@
             postprocess_match_threshold=postprocess_match_threshold,
             postprocess_class_agnostic=postprocess_class_agnostic,
         )
-        visual_result_2 = sahi.utils.cv.visualize_object_predictions(
+
+        visual_result_2 = utils.cv.visualize_object_predictions(
             image=numpy.array(image),
             object_prediction_list=prediction_result_2.object_prediction_list,
             rect_th=rect_th,
             text_th=text_th,
         )
+
         output = Image.fromarray(visual_result_2["image"])
         return output
 
-    else:
-        # standard inference
-        prediction_result_1 = sahi.predict.get_prediction(
-            image=image, detection_model=model
-        )
-        print(image)
-        visual_result_1 = sahi.utils.cv.visualize_object_predictions(
-            image=numpy.array(image),
-            object_prediction_list=prediction_result_1.object_prediction_list,
-            rect_th=rect_th,
-            text_th=text_th,
-        )
-        output = Image.fromarray(visual_result_1["image"])
-        return output
-
-    # sliced inference
+    elif model_type == "YOLOv8":
+        from ultralyticsplus import YOLO, render_result
 
+        model = YOLO('SerdarHelli/deprem_satellite_labeled_yolov8')
+        result = model.predict(image, imgsz=image_size)[0]
+        render = render_result(model=model, image=image, result=result, rect_th=rect_th, text_th=text_th)
+        return render
 
 inputs = [
-    gr.Dropdown(
-        choices=model_types,
-        label="Choose Model Type",
-        type="value",
-        value=model_types[1],
-    ),
     gr.Image(type="pil", label="Original Image"),
-    gr.Number(default=512, label="slice_height"),
-    gr.Number(default=512, label="slice_width"),
-    gr.Number(default=0.1, label="overlap_height_ratio"),
-    gr.Number(default=0.1, label="overlap_width_ratio"),
-    gr.Dropdown(
-        ["NMS", "GREEDYNMM"],
-        type="value",
-        value="NMS",
-        label="postprocess_type",
-    ),
-    gr.Dropdown(["IOU", "IOS"], type="value", value="IOU", label="postprocess_type"),
-    gr.Number(value=0.5, label="postprocess_match_threshold"),
-    gr.Checkbox(value=True, label="postprocess_class_agnostic"),
+    gr.Dropdown(choices=model_id_list, label="Choose Model", value=model_id_list[0]),
+    gr.Dropdown(choices=model_types, label="Choose Model Type", value=model_types[1]),
+    gr.Slider(minimum=128, maximum=2048, value=640, step=32, label="Image Size"),
+    gr.Slider(minimum=128, maximum=2048, value=512, step=32, label="Slice Height"),
+    gr.Slider(minimum=128, maximum=2048, value=512, step=32, label="Slice Width"),
+    gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.1, label="Overlap Height Ratio"),
+    gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.1, label="Overlap Width Ratio"),
+    gr.Dropdown(["NMS", "GREEDYNMM"], type="value", value="NMS", label="Postprocess Type"),
+    gr.Dropdown(["IOU", "IOS"], type="value", value="IOU", label="Postprocess Type"),
+    gr.Slider(minimum=0.0, maximum=1.0, value=0.5, step=0.1, label="Postprocess Match Threshold"),
+    gr.Checkbox(value=True, label="Postprocess Class Agnostic"),
 ]
 
 outputs = [gr.outputs.Image(type="pil", label="Output")]
@@ -126,13 +114,14 @@ title = "Building Detection from Satellite Images with SAHI + YOLOv5"
 description = "SAHI + YOLOv5 demo for building detection from satellite images. Upload an image or click an example image to use."
 article = "<p style='text-align: center'>SAHI is a lightweight vision library for performing large scale object detection/ instance segmentation.. <a href='https://github.com/obss/sahi'>SAHI Github</a> | <a href='https://medium.com/codable/sahi-a-vision-library-for-performing-sliced-inference-on-large-images-small-objects-c8b086af3b80'>SAHI Blog</a> | <a href='https://github.com/fcakyon/yolov5-pip'>YOLOv5 Github</a> </p>"
 examples = [
-    [model_types[1], "26.jpg", 512, 512, 0.1, 0.1, "NMS", "IOU", 0.25, False],
-    [model_types[1], "27.jpg", 512, 512, 0.1, 0.1, "NMS", "IOU", 0.25, False],
-    [model_types[1], "28.jpg", 512, 512, 0.1, 0.1, "NMS", "IOU", 0.25, False],
-    [model_types[1], "31.jpg", 512, 512, 0.1, 0.1, "NMS", "IOU", 0.25, False],
+    ["data/26.jpg", 'deprem-ml/Binafarktespit-yolo5x-v1-xview', "YOLOv5 + SAHI", 640, 512, 512, 0.1, 0.1, "NMS", "IOU", 0.25, False],
+    ["data/27.jpg", 'deprem-ml/Binafarktespit-yolo5x-v1-xview', "YOLOv5 + SAHI", 640, 512, 512, 0.1, 0.1, "NMS", "IOU", 0.25, False],
+    ["data/28.jpg", 'deprem-ml/Binafarktespit-yolo5x-v1-xview', "YOLOv5 + SAHI", 640, 512, 512, 0.1, 0.1, "NMS", "IOU", 0.25, False],
+    ["data/31.jpg", 'deprem-ml/SerdarHelli-yolov8-v1-xview', "YOLOv8", 640, 512, 512, 0.1, 0.1, "NMS", "IOU", 0.25, False],
 ]
-gr.Interface(
-    sahi_yolo_inference,
+
+demo = gr.Interface(
+    sahi_yolov5_inference,
     inputs,
     outputs,
     title=title,
@@ -141,4 +130,6 @@ gr.Interface(
     examples=examples,
     theme="huggingface",
     cache_examples=True,
-).launch(debug=True, enable_queue=True)
+)
+
+demo.launch(debug=True, enable_queue=True)
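The slice-count guard that had been commented out (and is now dropped) hints at what the "YOLOv5 + SAHI" branch does before detection: SAHI cuts the image into overlapping tiles and runs the detector on each tile. A small sketch of how many tiles the app's default settings produce, using SAHI's public slicing helper (the 3000 x 3000 input size is purely illustrative):

from sahi.slicing import get_slice_bboxes

# Tile a hypothetical 3000 x 3000 satellite image with the Space's default settings.
slice_bboxes = get_slice_bboxes(
    image_height=3000,
    image_width=3000,
    slice_height=512,
    slice_width=512,
    overlap_height_ratio=0.1,
    overlap_width_ratio=0.1,
)
print(len(slice_bboxes))  # 49 tiles (7 x 7) for this example input

Larger slices or smaller overlap ratios reduce the tile count, which is what the removed guard was checking before running sliced inference on the Space.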
data/26.jpg ADDED

Git LFS Details

  • SHA256: d4c76c6cbf981506552cf5d636acec3ac8b24a73b98bba763595821890047f2f
  • Pointer size: 132 Bytes
  • Size of remote file: 5.92 MB
data/27.jpg ADDED

Git LFS Details

  • SHA256: 39d0e6cc52722f11d5150684413954689918b3b593e3fd86704fa43e54d4d446
  • Pointer size: 132 Bytes
  • Size of remote file: 4.63 MB
data/28.jpg ADDED

Git LFS Details

  • SHA256: 193aa658ffdb0ee417d47bd7e25d078e9596f6d564def6fa97ad0ca12932eaec
  • Pointer size: 132 Bytes
  • Size of remote file: 4.19 MB
data/31.jpg ADDED

Git LFS Details

  • SHA256: cfd13bde54acf2974d717853ca63f1009897ac9491166ede0ac6b21e170ae26d
  • Pointer size: 132 Bytes
  • Size of remote file: 5.53 MB
requirements.txt CHANGED
@@ -1,5 +1,4 @@
-torch==1.10.2+cpu
-torchvision==0.11.3+cpu
--f https://download.pytorch.org/whl/torch_stable.html
+torch==1.10.2
+torchvision==0.11.3
 yolov5==7.0.8
 sahi==0.11.11
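The pins move from the CPU-specific PyTorch wheels (with the extra wheel index) to plain PyPI releases. A small sketch for checking that a local environment matches the new pins before running app.py (package names taken from requirements.txt; ultralyticsplus is not pinned here because app.py installs it at startup via the os.system call shown in the diff above):

from importlib.metadata import version

# Expected: torch 1.10.2, torchvision 0.11.3, yolov5 7.0.8, sahi 0.11.11
for pkg in ("torch", "torchvision", "yolov5", "sahi"):
    print(pkg, version(pkg))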