lixin4ever CircleRadon committed on
Commit
08aaa9c
·
verified ·
1 Parent(s): 5eef352

Update app.py (#4)

Browse files

- Update app.py (ef28fa7ef601ab860d6c4a2f0e5f86b00e2cd1d7)


Co-authored-by: YuqianYuan <[email protected]>

Files changed (1) hide show
  1. app.py +6 -2
app.py CHANGED
@@ -13,6 +13,7 @@ sys.path.append('./')
13
  from videollama3 import disable_torch_init, model_init, mm_infer, get_model_output
14
  from videollama3.mm_utils import load_images
15
  from videollama3.mm_utils import load_video
 
16
 
17
 
18
  color_rgb = (1.0, 1.0, 1.0)
@@ -54,6 +55,7 @@ def add_contour(img, mask, color=(1., 1., 1.)):
54
 
55
  return img
56
 
 
57
  def generate_masks(image):
58
  global mask_list
59
  global mask_raw_list
@@ -82,6 +84,7 @@ def generate_masks(image):
82
  return mask_list, image
83
 
84
 
 
85
  def generate_masks_video(image):
86
  global mask_list_video
87
  global mask_raw_list_video
@@ -110,7 +113,7 @@ def generate_masks_video(image):
110
  return mask_list_video, image
111
 
112
 
113
-
114
  def describe(image, mode, query, masks):
115
  # Create an image object from the uploaded image
116
  # print(image.keys())
@@ -211,6 +214,7 @@ def load_first_frame(video_path):
211
  image = Image.fromarray(frame)
212
  return image
213
 
 
214
  def describe_video(video_path, mode, query, annotated_frame, masks):
215
  global mask_list_video
216
  # Create a temporary directory to save extracted video frames
@@ -319,7 +323,7 @@ def describe_video(video_path, mode, query, annotated_frame, masks):
319
  yield gr.update(), text, gr.update()
320
 
321
 
322
-
323
  def apply_sam(image, input_points):
324
  inputs = sam_processor(image, input_points=input_points, return_tensors="pt").to(device)
325
 
 
13
  from videollama3 import disable_torch_init, model_init, mm_infer, get_model_output
14
  from videollama3.mm_utils import load_images
15
  from videollama3.mm_utils import load_video
16
+ import spaces
17
 
18
 
19
  color_rgb = (1.0, 1.0, 1.0)
 
55
 
56
  return img
57
 
58
+ @spaces.GPU(duration=120)
59
  def generate_masks(image):
60
  global mask_list
61
  global mask_raw_list
 
84
  return mask_list, image
85
 
86
 
87
+ @spaces.GPU(duration=120)
88
  def generate_masks_video(image):
89
  global mask_list_video
90
  global mask_raw_list_video
 
113
  return mask_list_video, image
114
 
115
 
116
+ @spaces.GPU(duration=120)
117
  def describe(image, mode, query, masks):
118
  # Create an image object from the uploaded image
119
  # print(image.keys())
 
214
  image = Image.fromarray(frame)
215
  return image
216
 
217
+ @spaces.GPU(duration=120)
218
  def describe_video(video_path, mode, query, annotated_frame, masks):
219
  global mask_list_video
220
  # Create a temporary directory to save extracted video frames
 
323
  yield gr.update(), text, gr.update()
324
 
325
 
326
+ @spaces.GPU(duration=120)
327
  def apply_sam(image, input_points):
328
  inputs = sam_processor(image, input_points=input_points, return_tensors="pt").to(device)
329