Phil Sobrepena committed on
Commit
bac321f
·
1 Parent(s): 39a23a5

default inputs

Browse files
Files changed (1) hide show
  1. app.py +6 -7
app.py CHANGED
@@ -60,13 +60,12 @@ net, feature_utils, seq_cfg = get_model()
60
 
61
  @spaces.GPU(duration=120)
62
  @torch.inference_mode()
63
- def video_to_audio(video: gr.Video, prompt: str, negative_prompt: str, num_steps: 25,
64
- cfg_strength: 4.5, duration: 8.0):
65
-
 
 
66
  rng = torch.Generator(device=device)
67
- # if seed >= 0:
68
- # rng.manual_seed(seed)
69
- # else:
70
  rng.seed()
71
 
72
  fm = FlowMatching(min_sigma=0, inference_mode='euler', num_steps=num_steps)
@@ -120,7 +119,7 @@ video_to_audio_tab = gr.Interface(
120
  ],
121
  outputs='playable_video',
122
  cache_examples=False,
123
- title='Sonisphere — Sonic Branding with Multi-modal Audio Synthesis',
124
  examples=[
125
  ])
126
 
 
60
 
61
  @spaces.GPU(duration=120)
62
  @torch.inference_mode()
63
+ def video_to_audio(video: gr.Video, prompt: str, negative_prompt: str):
64
+
65
+ num_steps = 25
66
+ cfg_strength = 4.5
67
+ duration = 8.0
68
  rng = torch.Generator(device=device)
 
 
 
69
  rng.seed()
70
 
71
  fm = FlowMatching(min_sigma=0, inference_mode='euler', num_steps=num_steps)
 
119
  ],
120
  outputs='playable_video',
121
  cache_examples=False,
122
+ title='Sonisphere — Sonic Branding through Multi-modal Audio Synthesis',
123
  examples=[
124
  ])
125