Spaces:
Running
Running
jhj0517
committed on
Commit
·
c5d0765
1
Parent(s):
f636e83
Add progress
Browse files
modules/uvr/music_separator.py
CHANGED
|
@@ -5,6 +5,7 @@ import soundfile as sf
|
|
| 5 |
import os
|
| 6 |
import torch
|
| 7 |
import gc
|
|
|
|
| 8 |
|
| 9 |
from uvr.models import MDX, Demucs, VrNetwork, MDXC
|
| 10 |
|
|
@@ -57,7 +58,8 @@ class MusicSeparator:
|
|
| 57 |
audio_file_path: str,
|
| 58 |
model_name: str,
|
| 59 |
device: Optional[str] = None,
|
| 60 |
-
segment_size: int = 256
|
|
|
|
| 61 |
if device is None:
|
| 62 |
device = self.device
|
| 63 |
|
|
@@ -78,6 +80,7 @@ class MusicSeparator:
|
|
| 78 |
self.current_model_size != model_name or
|
| 79 |
self.model_config != model_config or
|
| 80 |
self.audio_info.sample_rate != sample_rate):
|
|
|
|
| 81 |
self.update_model(
|
| 82 |
model_name=model_name,
|
| 83 |
device=device,
|
|
@@ -85,6 +88,7 @@ class MusicSeparator:
|
|
| 85 |
)
|
| 86 |
self.model.sample_rate = sample_rate
|
| 87 |
|
|
|
|
| 88 |
result = self.model(audio_file_path)
|
| 89 |
instrumental, vocals = result["instrumental"].T, result["vocals"].T
|
| 90 |
|
|
|
|
| 5 |
import os
|
| 6 |
import torch
|
| 7 |
import gc
|
| 8 |
+
import gradio as gr
|
| 9 |
|
| 10 |
from uvr.models import MDX, Demucs, VrNetwork, MDXC
|
| 11 |
|
|
|
|
| 58 |
audio_file_path: str,
|
| 59 |
model_name: str,
|
| 60 |
device: Optional[str] = None,
|
| 61 |
+
segment_size: int = 256,
|
| 62 |
+
progress: gr.Progress = gr.Progress()):
|
| 63 |
if device is None:
|
| 64 |
device = self.device
|
| 65 |
|
|
|
|
| 80 |
self.current_model_size != model_name or
|
| 81 |
self.model_config != model_config or
|
| 82 |
self.audio_info.sample_rate != sample_rate):
|
| 83 |
+
progress(0, desc="Initializing UVR Model..")
|
| 84 |
self.update_model(
|
| 85 |
model_name=model_name,
|
| 86 |
device=device,
|
|
|
|
| 88 |
)
|
| 89 |
self.model.sample_rate = sample_rate
|
| 90 |
|
| 91 |
+
progress(0, desc="Separating background music from the audio..")
|
| 92 |
result = self.model(audio_file_path)
|
| 93 |
instrumental, vocals = result["instrumental"].T, result["vocals"].T
|
| 94 |
|
modules/whisper/whisper_base.py
CHANGED
|
@@ -115,6 +115,7 @@ class WhisperBase(ABC):
|
|
| 115 |
model_name=params.uvr_model_size,
|
| 116 |
device=params.uvr_device,
|
| 117 |
segment_size=params.uvr_segment_size,
|
|
|
|
| 118 |
)
|
| 119 |
self.music_separator.offload()
|
| 120 |
|
|
|
|
| 115 |
model_name=params.uvr_model_size,
|
| 116 |
device=params.uvr_device,
|
| 117 |
segment_size=params.uvr_segment_size,
|
| 118 |
+
progress=progress
|
| 119 |
)
|
| 120 |
self.music_separator.offload()
|
| 121 |
|