sanghan committed on
Commit
371ea96
·
1 Parent(s): d3f29af

only use one model per run

Browse files
Files changed (1) hide show
  1. app.py +3 -6
app.py CHANGED
@@ -90,12 +90,6 @@ def inference(video):
90
  temp_directories.append(temp_dir)
91
  output_composition = temp_dir + "/matted_video.mp4"
92
 
93
- model = torch.hub.load("PeterL1n/RobustVideoMatting", "mobilenetv3")
94
- if torch.cuda.is_available():
95
- free_memory = get_free_memory_gb()
96
- print(f"Available video memory: {free_memory} GB")
97
- model = model.cuda()
98
-
99
  convert_video(
100
  model, # The loaded model, can be on any device (cpu or cuda).
101
  input_source=video, # A video file or an image sequence directory.
@@ -118,11 +112,14 @@ if __name__ == "__main__":
118
  temp_directories = []
119
  atexit.register(cleanup_temp_directories)
120
 
 
 
121
  if torch.cuda.is_available():
122
  free_memory = get_free_memory_gb()
123
  concurrency_count = int(free_memory // 7)
124
  print(f"Using GPU with concurrency: {concurrency_count}")
125
  print(f"Available video memory: {free_memory} GB")
 
126
  else:
127
  print("Using CPU")
128
  concurrency_count = 1
 
90
  temp_directories.append(temp_dir)
91
  output_composition = temp_dir + "/matted_video.mp4"
92
 
 
 
 
 
 
 
93
  convert_video(
94
  model, # The loaded model, can be on any device (cpu or cuda).
95
  input_source=video, # A video file or an image sequence directory.
 
112
  temp_directories = []
113
  atexit.register(cleanup_temp_directories)
114
 
115
+ model = torch.hub.load("PeterL1n/RobustVideoMatting", "mobilenetv3")
116
+
117
  if torch.cuda.is_available():
118
  free_memory = get_free_memory_gb()
119
  concurrency_count = int(free_memory // 7)
120
  print(f"Using GPU with concurrency: {concurrency_count}")
121
  print(f"Available video memory: {free_memory} GB")
122
+ model = model.cuda()
123
  else:
124
  print("Using CPU")
125
  concurrency_count = 1