gradio 5.x compatibility
- clear cache
- fix circular import error
- use original aspect ratio
- gradio 5.x compatibility
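Most of the gradio 5.x work is in how gr.ImageMask exchanges data: in gradio 5 it behaves like an ImageEditor, so its value is a dict with "background", "layers" and "composite" entries (numpy arrays when type="numpy") instead of a single image. The diff below adds get_first_frame and extract_frame_and_mask to build and unpack that dict; the snippet here is a condensed sketch of the same round trip (the function names in the sketch are illustrative, not taken from app.py):

import numpy as np

def frame_to_editor_value(frame_rgb: np.ndarray) -> dict:
    # Wrap an RGB frame (H, W, 3, uint8) in the background/layers/composite dict
    # that the gradio 5.x ImageMask component expects as its value.
    h, w, _ = frame_rgb.shape
    alpha = np.full((h, w, 1), 255, dtype=np.uint8)
    background = np.concatenate([frame_rgb, alpha], axis=-1)  # opaque RGBA background
    empty_layer = np.zeros((h, w, 4), dtype=np.uint8)         # nothing drawn yet
    return {"background": background, "layers": [empty_layer], "composite": background}

def editor_value_to_frame_and_mask(editor_value: dict):
    # Undo the wrapping: the frame is the RGB part of the background, and the
    # user-drawn mask is wherever the last layer's alpha channel is non-zero.
    background = editor_value["background"]
    frame_rgb = background[..., :3] if background is not None else None
    layers = editor_value.get("layers", [])
    if not layers:
        return frame_rgb, None
    alpha = layers[-1][..., 3]
    mask = (alpha > 0).astype(np.uint8) * 255        # binary mask, 0 or 255
    return frame_rgb, np.stack([mask] * 3, axis=-1)  # 3-channel so gr.Image accepts it

app.py does the same thing inside get_first_frame and extract_frame_and_mask, wired to in_video.change and approve_mask.click respectively.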
app.py
CHANGED
@@ -1,7 +1,9 @@
 from PIL import Image
 import gradio as gr
+import gc
 from FGT_codes.tool.video_inpainting import video_inpainting
-from SiamMask.
+from SiamMask.utils.config_helper import load_config
+from SiamMask.utils.load_helper import load_pretrain
 from SiamMask.experiments.siammask_sharp.custom import Custom
 from types import SimpleNamespace
 import torch
@@ -139,6 +141,7 @@ def getBoundaries(mask):
 
 
 def track_and_mask(vid, masked_frame, original_list, mask_list, in_fps, dt_string):
+    from SiamMask.tools.test import siamese_init, siamese_track
     x, y, w, h = getBoundaries(masked_frame)
     f = 0
 
@@ -203,29 +206,49 @@ def track_and_mask(vid, masked_frame, original_list, mask_list, in_fps, dt_strin
     return original_list, mask_list, in_fps, outname
 
 
+
 def inpaint_video(original_frame_list, mask_list, in_fps, dt_string):
     outname = (dt_string+"_result.mp4")
     args.out_fps = in_fps
     args.outfilename = outname
-
+
+    first_frame = original_frame_list[0]
+    args.orig_h, args.orig_w = first_frame.shape[:2]
+    print(f"[INFO] Original video resolution: {args.orig_w}x{args.orig_h}")
+
+    video_inpainting(args, original_frame_list,original_frame_list, mask_list)
     original_frame_list = []
     mask_list = []
-    return outname,original_frame_list, mask_list
 
+    gc.collect()
+    torch.cuda.empty_cache()
 
-def get_first_frame(video):
-    if(video == None):
-        return gr.ImageMask()
-    video_capture = cv2.VideoCapture()
-    if video_capture.open(video):
-        width, height = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)), int(
-            video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
+    return outname, original_frame_list, mask_list
 
-    if video_capture.isOpened():
-        ret, frame = video_capture.read()
-        RGB_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
 
-
+def get_first_frame(video):
+    if video is None:
+        # empty editor state
+        return {"background": None, "layers": [], "composite": None}
+
+    cap = cv2.VideoCapture(video)
+    ret, frame = cap.read()
+    cap.release()
+    if not ret:
+        return {"background": None, "layers": [], "composite": None}
+
+    # RGB and RGBA
+    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+    h, w, _ = rgb.shape
+    # as RGBA background
+    bg_rgba = np.concatenate([rgb, np.full((h, w, 1), 255, dtype=np.uint8)], axis=-1)
+    # start with an empty mask (RGBA all zeros)
+    empty_layer = np.zeros((h, w, 4), dtype=np.uint8)
+    return {
+        "background": bg_rgba,
+        "layers": [empty_layer],
+        "composite": bg_rgba
+    }
 
 
 def drawRectangle(frame, mask):
@@ -247,6 +270,24 @@ def getStartEndPoints(mask):
 
     return x1, y1, x2, y2
 
+def extract_frame_and_mask(editor_state):
+    bg_rgba = editor_state["background"]
+    layers = editor_state.get("layers", [])
+
+    # background (RGB)
+    frame_rgb = bg_rgba[...,:3] if bg_rgba is not None else None
+
+    # mask: if layers exist, take the alpha of the last one
+    if layers:
+        alpha = layers[-1][... , 3]  # 0-255
+        # to 3 channels (so gr.Image accepts it)
+        mask = (alpha > 0).astype(np.uint8) * 255
+        mask_rgb = np.stack([mask]*3, axis=-1)
+    else:
+        mask_rgb = None
+
+    return frame_rgb, mask_rgb
+
 def reset_components():
     return gr.update(value=None),gr.update(value=None, interactive=False),gr.update(value=None, interactive=False), [],[],24,datetime.now().strftime("%d_%m_%Y_%H_%M_%S")
 
@@ -271,27 +312,29 @@ with gr.Blocks() as demo:
     with gr.Row():
         with gr.Column(scale=2):
             with gr.Row():
-                in_video = gr.PlayableVideo(label="Input Video"
+                in_video = gr.PlayableVideo(label="Input Video")
            with gr.Row():
-                first_frame = gr.ImageMask(label="Select Object")
+                first_frame = gr.ImageMask(label="Select Object", type="numpy")
            with gr.Row():
-                approve_mask = gr.Button(value="Run",variant="primary")
+                approve_mask = gr.Button(value="Run", variant="primary")
        with gr.Column(scale=1):
             with gr.Row():
                 original_image = gr.Image(interactive=False)
             with gr.Row():
                 masked_image = gr.Image(interactive=False)
         with gr.Column(scale=2):
-            out_video = gr.Video(label="Segmented Video"
-            out_video_inpaint = gr.Video(label="Inpainted Video"
+            out_video = gr.Video(label="Segmented Video")
+            out_video_inpaint = gr.Video(label="Inpainted Video")
             # track_mask = gr.Button(value="Track and Mask")
             # inpaint = gr.Button(value="Inpaint")
 
-    in_video.change(fn=get_first_frame, inputs=[
-        in_video], outputs=[first_frame])
+    in_video.change(fn=get_first_frame, inputs=[in_video], outputs=[first_frame])
     in_video.clear(fn=reset_components, outputs=[first_frame, original_image, masked_image, original_frame_list, mask_list, in_fps, dt_string])
-    approve_mask.click(
-
+    approve_mask.click(
+        fn=extract_frame_and_mask,
+        inputs=[first_frame],
+        outputs=[original_image, masked_image]
+    )
     masked_image.change(fn=track_and_mask,inputs=[
         in_video, masked_image, original_frame_list, mask_list, in_fps, dt_string], outputs=[original_frame_list, mask_list, in_fps, out_video])
     out_video.change(fn=inpaint_video, inputs=[original_frame_list, mask_list, in_fps, dt_string], outputs=[out_video_inpaint, original_frame_list, mask_list])
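The other bullets are visible in the hunks above: the circular import is avoided by deferring from SiamMask.tools.test import siamese_init, siamese_track until track_and_mask() runs, the original aspect ratio is kept by reading args.orig_h / args.orig_w from the first frame before calling video_inpainting(), and the cache is cleared once inpainting finishes. A minimal sketch of that cleanup step, assuming torch is installed (the helper name is illustrative, not part of app.py):

import gc
import torch

def release_after_inpainting(frame_list, mask_list):
    # Mirror the cleanup added at the end of inpaint_video(): drop the big
    # per-frame buffers, collect them, then release cached GPU memory.
    frame_list.clear()
    mask_list.clear()
    gc.collect()                   # reclaim the Python-side frame/mask arrays first
    if torch.cuda.is_available():  # guard so the sketch also runs on CPU-only Spaces
        torch.cuda.empty_cache()   # return cached CUDA blocks held by the allocator

In the app this happens right after video_inpainting() returns, so repeated runs in the same Space are less likely to exhaust GPU memory.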