Upload 3 files
- app.py +23 -0
- packages.txt +1 -0
- requirements.txt +2 -0
app.py
ADDED
@@ -0,0 +1,23 @@
+import torch
+import torchaudio
+from pydub import AudioSegment
+import gradio as gr
+_original_load = torch.load
+def cpu_load(*args, **kwargs):
+    kwargs["map_location"] = torch.device('cpu')
+    return _original_load(*args, **kwargs)
+torch.load = cpu_load
+
+hubert = torch.hub.load("bshall/hubert:main", "hubert_soft", trust_repo=True).cpu()
+acoustic = torch.hub.load("bshall/acoustic-model:main", "hubert_soft", trust_repo=True).cpu()
+hifigan = torch.hub.load("bshall/hifigan:main", "hifigan_hubert_soft", trust_repo=True).cpu()
+
+def soft_vc(audio_path):
+    AudioSegment.from_file(audio_path).set_frame_rate(16000).set_channels(1).export(audio_path, format="wav")
+    source = torchaudio.load(audio_path)[0].unsqueeze(0).cpu()
+    with torch.inference_mode():
+        target = hifigan(acoustic.generate(hubert.units(source)).transpose(1, 2))
+    torchaudio.save("output.wav", target.squeeze(0).cpu(), 16000)
+    return "output.wav"
+
+gr.Interface(soft_vc, gr.Audio(label="Input Audio", type="filepath"), gr.Audio(label="Output Audio", type="filepath")).launch()
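app.py chains the three bshall Soft-VC checkpoints on the CPU: HuBERT-Soft turns the uploaded speech into soft speech units, the acoustic model maps those units to a target-speaker mel spectrogram, and HiFi-GAN vocodes the mel back to a waveform; torch.load is monkey-patched up front so checkpoints saved on GPU are remapped to the CPU. Below is a minimal sketch of driving the same pipeline without the Gradio interface, assuming the hubert/acoustic/hifigan objects from app.py are already loaded in the session and "sample.wav" is a placeholder name for any local speech recording:

# Sketch only: run the Soft-VC conversion from app.py directly, without the Gradio UI.
import torch
import torchaudio

wav, sr = torchaudio.load("sample.wav")                # placeholder path, shape (channels, samples)
wav = torchaudio.functional.resample(wav, sr, 16000)   # the checkpoints expect 16 kHz audio
wav = wav.mean(dim=0, keepdim=True).unsqueeze(0)       # mono and batched: shape (1, 1, samples)

with torch.inference_mode():
    units = hubert.units(wav)                          # soft speech units from the source speaker
    mel = acoustic.generate(units)                     # target-speaker mel spectrogram
    audio = hifigan(mel.transpose(1, 2))               # vocode the mel back to a waveform

torchaudio.save("converted.wav", audio.squeeze(0), 16000)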
packages.txt
ADDED
@@ -0,0 +1 @@
+ffmpeg
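packages.txt is the Spaces hook for apt-level dependencies; ffmpeg is listed because pydub shells out to it when decoding compressed uploads (mp3, m4a and similar). When running the demo outside Spaces, a quick check like this sketch (standard library only) gives a clearer error than a failed pydub decode:

# Sketch: confirm the ffmpeg binary pydub relies on is actually on PATH.
import shutil

if shutil.which("ffmpeg") is None:
    raise RuntimeError("ffmpeg not found; install it via the system package manager, "
                       "otherwise pydub cannot decode non-WAV uploads")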
requirements.txt
ADDED
@@ -0,0 +1,2 @@
+pydub
+torchaudio
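requirements.txt stays small, presumably because the Gradio SDK image already ships gradio and torchaudio pulls in a matching torch as a dependency. For a local, CPU-only reproduction, a sketch like this confirms that everything app.py imports actually resolves (none of these versions are pinned by the Space):

# Sketch: check that the imports app.py needs resolve in the current environment.
import gradio
import pydub   # imported only to verify it is installed
import torch
import torchaudio

print("torch", torch.__version__, "| CUDA available:", torch.cuda.is_available())
print("torchaudio", torchaudio.__version__, "| gradio", gradio.__version__)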