Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- .gitattributes +6 -0
- Data/Tsukuyomi/wavs/VOICEACTRESS100_008.wav +3 -0
- Data/Tsukuyomi/wavs/VOICEACTRESS100_009.wav +3 -0
- Data/Tsukuyomi/wavs/VOICEACTRESS100_010.wav +3 -0
- Data/Tsukuyomi/wavs/VOICEACTRESS100_011.wav +3 -0
- Data/Tsukuyomi/wavs/VOICEACTRESS100_012.wav +3 -0
- Data/Tsukuyomi/wavs/VOICEACTRESS100_015.wav +3 -0
- Models/Tsukuyomi_FineTuned/config_ft.yml +116 -0
- Models/Tsukuyomi_FineTuned/train.log +0 -0
- Modules/KotoDama_sampler.py +269 -0
- Modules/__init__.py +1 -0
- Modules/__pycache__/KotoDama_sampler.cpython-311.pyc +0 -0
- Modules/__pycache__/KotoDama_sampler.cpython-39.pyc +0 -0
- Modules/__pycache__/__init__.cpython-311.pyc +0 -0
- Modules/__pycache__/__init__.cpython-39.pyc +0 -0
- Modules/__pycache__/discriminators.cpython-311.pyc +0 -0
- Modules/__pycache__/discriminators.cpython-39.pyc +0 -0
- Modules/__pycache__/hifigan.cpython-311.pyc +0 -0
- Modules/__pycache__/istftnet.cpython-311.pyc +0 -0
- Modules/__pycache__/istftnet.cpython-39.pyc +0 -0
- Modules/__pycache__/slmadv.cpython-311.pyc +0 -0
- Modules/__pycache__/slmadv.cpython-39.pyc +0 -0
- Modules/__pycache__/utils.cpython-311.pyc +0 -0
- Modules/__pycache__/utils.cpython-39.pyc +0 -0
- Modules/diffusion/__pycache__/__init__.cpython-311.pyc +0 -0
- Modules/diffusion/__pycache__/__init__.cpython-39.pyc +0 -0
- Modules/diffusion/__pycache__/diffusion.cpython-311.pyc +0 -0
- Modules/diffusion/__pycache__/diffusion.cpython-39.pyc +0 -0
- Modules/diffusion/__pycache__/modules.cpython-311.pyc +0 -0
- Modules/diffusion/__pycache__/modules.cpython-39.pyc +0 -0
- Modules/diffusion/__pycache__/sampler.cpython-311.pyc +0 -0
- Modules/diffusion/__pycache__/sampler.cpython-39.pyc +0 -0
- Modules/diffusion/__pycache__/utils.cpython-311.pyc +0 -0
- Modules/diffusion/__pycache__/utils.cpython-39.pyc +0 -0
- Modules/diffusion/audio_diffusion_pytorch/__pycache__/__init__.cpython-311.pyc +0 -0
- Modules/diffusion/audio_diffusion_pytorch/__pycache__/__init__.cpython-39.pyc +0 -0
- Modules/diffusion/audio_diffusion_pytorch/__pycache__/components.cpython-311.pyc +0 -0
- Modules/diffusion/audio_diffusion_pytorch/__pycache__/components.cpython-39.pyc +0 -0
- Modules/diffusion/audio_diffusion_pytorch/__pycache__/diffusion.cpython-311.pyc +0 -0
- Modules/diffusion/audio_diffusion_pytorch/__pycache__/diffusion.cpython-39.pyc +0 -0
- Modules/diffusion/audio_diffusion_pytorch/__pycache__/models.cpython-311.pyc +0 -0
- Modules/diffusion/audio_diffusion_pytorch/__pycache__/models.cpython-39.pyc +0 -0
- Modules/diffusion/audio_diffusion_pytorch/__pycache__/utils.cpython-311.pyc +0 -0
- Modules/diffusion/audio_diffusion_pytorch/__pycache__/utils.cpython-39.pyc +0 -0
- Modules/diffusion/reconstruction_head/audio-diffusion-pytorch/.github/workflows/python-publish.yml +39 -0
- Modules/diffusion/reconstruction_head/audio-diffusion-pytorch/.gitignore +2 -0
- Modules/diffusion/reconstruction_head/audio-diffusion-pytorch/.pre-commit-config.yaml +41 -0
- Modules/diffusion/reconstruction_head/audio-diffusion-pytorch/LICENSE +21 -0
- Modules/diffusion/reconstruction_head/audio-diffusion-pytorch/LOGO.png +0 -0
- Modules/diffusion/reconstruction_head/audio-diffusion-pytorch/README.md +251 -0
.gitattributes
CHANGED
@@ -134,3 +134,9 @@ Data/Tsukuyomi/wavs/VOICEACTRESS100_006.wav filter=lfs diff=lfs merge=lfs -text
Data/Tsukuyomi/wavs/VOICEACTRESS100_005.wav filter=lfs diff=lfs merge=lfs -text
Data/Tsukuyomi/wavs/VOICEACTRESS100_007.wav filter=lfs diff=lfs merge=lfs -text
Data/Tsukuyomi/wavs/VOICEACTRESS100_003.wav filter=lfs diff=lfs merge=lfs -text
+Data/Tsukuyomi/wavs/VOICEACTRESS100_009.wav filter=lfs diff=lfs merge=lfs -text
+Data/Tsukuyomi/wavs/VOICEACTRESS100_011.wav filter=lfs diff=lfs merge=lfs -text
+Data/Tsukuyomi/wavs/VOICEACTRESS100_008.wav filter=lfs diff=lfs merge=lfs -text
+Data/Tsukuyomi/wavs/VOICEACTRESS100_012.wav filter=lfs diff=lfs merge=lfs -text
+Data/Tsukuyomi/wavs/VOICEACTRESS100_015.wav filter=lfs diff=lfs merge=lfs -text
+Data/Tsukuyomi/wavs/VOICEACTRESS100_010.wav filter=lfs diff=lfs merge=lfs -text
Data/Tsukuyomi/wavs/VOICEACTRESS100_008.wav
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:897ec3b3656b97df031d7f638a74cc1e2fd172291dd0b6b721023cf4b6340b29
size 2793632
Data/Tsukuyomi/wavs/VOICEACTRESS100_009.wav
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ea6d3671bd12e929c5fda310e9743e6eb3bc337c7f38b75a556184d678b6bf9d
size 2202512
Data/Tsukuyomi/wavs/VOICEACTRESS100_010.wav
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8f15ab1a7766631867d53e775ced0681725e95d61be410df45a949354682c58a
size 1632880
Data/Tsukuyomi/wavs/VOICEACTRESS100_011.wav
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c89253932ca51cb487af8d8101b66d3089499e16c2bbdc9c1164c402200e4fe5
size 2354984
Data/Tsukuyomi/wavs/VOICEACTRESS100_012.wav
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fc314a221e398fade4ea5494194753657bad97d04357eab2350a1fae836cd535
size 1988680
Data/Tsukuyomi/wavs/VOICEACTRESS100_015.wav
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:670862e969693ba0f52ddd0bc72d9abafdcc5ef8d91d6cff27e83d9e316df50d
size 1622784
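Each of the new wav entries above is stored as a Git LFS pointer: a three-line text stub carrying the spec version, the SHA-256 of the real file, and its size in bytes. Below is a minimal, hypothetical sketch (not part of this commit) of how a locally downloaded wav could be checked against such a pointer; the file paths are illustrative.

```py
# Illustrative check, not part of this commit: verify a locally materialised wav
# against the oid/size recorded in its Git LFS pointer file (paths are hypothetical).
import hashlib
from pathlib import Path

def matches_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    # Pointer files are three "key value" lines: version, oid, size.
    fields = dict(
        line.split(" ", 1)
        for line in Path(pointer_path).read_text().splitlines()
        if line.strip()
    )
    expected_oid = fields["oid"].strip().split(":", 1)[1]  # drop the "sha256:" prefix
    expected_size = int(fields["size"])
    data = Path(blob_path).read_bytes()
    return len(data) == expected_size and hashlib.sha256(data).hexdigest() == expected_oid

# e.g. matches_lfs_pointer("VOICEACTRESS100_008.pointer", "VOICEACTRESS100_008.wav")
```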
Models/Tsukuyomi_FineTuned/config_ft.yml
ADDED
@@ -0,0 +1,116 @@
log_dir: "Models/Tsukuyomi_FineTuned"
save_freq: 1
log_interval: 10
device: "cuda"
epochs: 50 # number of finetuning epochs (for about 1 hour of data)
batch_size: 3
max_len: 2500 # maximum number of frames
pretrained_model: "Models/Tsukasa/Top_ckpt_24khz.pth"
second_stage_load_pretrained: true # set to true if the pre-trained model is for the 2nd stage
load_only_params: true # set to true if you do not want to load epoch numbers and optimizer parameters

F0_path: "Utils/JDC/bst.t7"
ASR_config: "Utils/ASR/config.yml"
ASR_path: "Utils/ASR/bst_00080.pth"

PLBERT_dir: 'Utils/PLBERT/'

data_params:
  train_data: "Data/Tsukuyomi/transcripts.txt" # path to the training data
  val_data: "Data/Tsukuyomi/validation.txt" # path to the validation data
  root_path: "Data/Tsukuyomi/wavs/" # path to the audio files
  OOD_data: null # leave empty if there is no OOD data
  min_length: 50

preprocess_params:
  sr: 24000
  spect_params:
    n_fft: 2048
    win_length: 1200
    hop_length: 300

model_params:
  multispeaker: false # false for a single speaker

  dim_in: 64
  hidden_dim: 512
  max_conv_dim: 512
  n_layer: 3
  n_mels: 80

  n_token: 178 # number of phoneme tokens
  max_dur: 50 # maximum duration of a single phoneme
  style_dim: 128 # style vector size

  dropout: 0.2

  decoder:
    type: 'istftnet' # either hifigan or istftnet
    resblock_kernel_sizes: [3, 7, 11]
    upsample_rates: [10, 6]
    upsample_initial_channel: 512
    resblock_dilation_sizes: [[1, 3, 5], [1, 3, 5], [1, 3, 5]]
    upsample_kernel_sizes: [20, 12]
    gen_istft_n_fft: 20
    gen_istft_hop_size: 5

  # speech language model config
  slm:
    model: 'Respair/Whisper_Large_v2_Encoder_Block' # the model itself is hardcoded; change it in losses.py
    sr: 16000 # sampling rate of SLM
    hidden: 1280 # hidden size of SLM
    nlayers: 33 # number of layers of SLM
    initial_channel: 64 # initial channels of SLM discriminator head

  # style diffusion model config
  diffusion:
    embedding_mask_proba: 0.1
    # transformer config
    transformer:
      num_layers: 3
      num_heads: 8
      head_features: 64
      multiplier: 2

    # diffusion distribution config
    dist:
      sigma_data: 0.2 # placeholder, used when estimate_sigma_data is set to false
      estimate_sigma_data: true # estimate sigma_data from the current batch if set to true
      mean: -3.0
      std: 1.0

loss_params:
  lambda_mel: 10. # mel reconstruction loss
  lambda_gen: 1. # generator loss
  lambda_slm: 1. # slm feature matching loss

  lambda_mono: 1. # monotonic alignment loss (1st stage, TMA)
  lambda_s2s: 1. # sequence-to-sequence loss (1st stage, TMA)
  TMA_epoch: 9 # TMA starting epoch (1st stage)

  lambda_F0: 1. # F0 reconstruction loss (2nd stage)
  lambda_norm: 1. # norm reconstruction loss (2nd stage)
  lambda_dur: 1. # duration loss (2nd stage)
  lambda_ce: 20. # duration predictor probability output CE loss (2nd stage)
  lambda_sty: 1. # style reconstruction loss (2nd stage)
  lambda_diff: 1. # score matching loss (2nd stage)

  diff_epoch: 0 # style diffusion starting epoch (2nd stage)
  joint_epoch: 30 # joint training starting epoch (2nd stage)

optimizer_params:
  lr: 0.0001 # general learning rate
  bert_lr: 0.00001 # learning rate for PLBERT
  ft_lr: 0.00001 # learning rate for acoustic modules

slmadv_params:
  min_len: 400 # minimum length of samples
  max_len: 500 # maximum length of samples
  batch_percentage: 0.5 # to prevent out-of-memory, only use half of the original batch size
  iter: 20 # update the discriminator every this many generator updates
  thresh: 5 # gradient norm above which the gradient is scaled
  scale: 0.01 # gradient scaling factor for predictors from SLM discriminators
  sig: 1.5 # sigma for differentiable duration modeling
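A quick, hypothetical sanity check (not part of this commit) that the finetuning config above parses as expected; it assumes PyYAML is installed and that the file is read relative to the repository root.

```py
# Illustrative only: load the finetuning config added above and inspect a few fields.
import yaml

with open("Models/Tsukuyomi_FineTuned/config_ft.yml", "r", encoding="utf-8") as f:
    cfg = yaml.safe_load(f)

print(cfg["epochs"], cfg["batch_size"], cfg["max_len"])   # 50 3 2500
print(cfg["data_params"]["train_data"])                   # Data/Tsukuyomi/transcripts.txt
print(cfg["model_params"]["decoder"]["type"])             # istftnet
```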
Models/Tsukuyomi_FineTuned/train.log
ADDED
File without changes
Modules/KotoDama_sampler.py
ADDED
@@ -0,0 +1,269 @@
from transformers import AutoModelForSequenceClassification, PreTrainedModel, AutoConfig, AutoModel, AutoTokenizer
import torch
import torch.nn as nn
from text_utils import TextCleaner

textclenaer = TextCleaner()


def length_to_mask(lengths):
    mask = torch.arange(lengths.max()).unsqueeze(0).expand(lengths.shape[0], -1).type_as(lengths)
    mask = torch.gt(mask + 1, lengths.unsqueeze(1))
    return mask


device = 'cuda' if torch.cuda.is_available() else 'cpu'


# tokenizer_koto_prompt = AutoTokenizer.from_pretrained("google/mt5-small", trust_remote_code=True)
tokenizer_koto_prompt = AutoTokenizer.from_pretrained("ku-nlp/deberta-v3-base-japanese", trust_remote_code=True)
tokenizer_koto_text = AutoTokenizer.from_pretrained("line-corporation/line-distilbert-base-japanese", trust_remote_code=True)


class KotoDama_Prompt(PreTrainedModel):

    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoModel.from_config(config)

        self.output = nn.Sequential(nn.Linear(config.hidden_size, 512),
                                    nn.LeakyReLU(0.2),
                                    nn.Linear(512, config.num_labels))

    def forward(
        self,
        input_ids,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        labels=None,
    ):
        outputs = self.backbone(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
        )

        sequence_output = outputs.last_hidden_state[:, 0, :]
        outputs = self.output(sequence_output)

        # if labels are given, we are training
        loss = None
        if labels is not None:
            loss_fn = nn.MSELoss()
            # labels = labels.unsqueeze(1)
            loss = loss_fn(outputs, labels)

        return {
            "loss": loss,
            "logits": outputs
        }


class KotoDama_Text(PreTrainedModel):

    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoModel.from_config(config)

        self.output = nn.Sequential(nn.Linear(config.hidden_size, 512),
                                    nn.LeakyReLU(0.2),
                                    nn.Linear(512, config.num_labels))

    def forward(
        self,
        input_ids,
        attention_mask=None,
        # token_type_ids=None,
        # position_ids=None,
        labels=None,
    ):
        outputs = self.backbone(
            input_ids,
            attention_mask=attention_mask,
            # token_type_ids=token_type_ids,
            # position_ids=position_ids,
        )

        sequence_output = outputs.last_hidden_state[:, 0, :]
        outputs = self.output(sequence_output)

        # if labels are given, we are training
        loss = None
        if labels is not None:
            loss_fn = nn.MSELoss()
            # labels = labels.unsqueeze(1)
            loss = loss_fn(outputs, labels)

        return {
            "loss": loss,
            "logits": outputs
        }


def inference(model, diffusion_sampler, text=None, ref_s=None, alpha=0.3, beta=0.7, diffusion_steps=5, embedding_scale=1, rate_of_speech=1.):

    tokens = textclenaer(text)
    tokens.insert(0, 0)
    tokens = torch.LongTensor(tokens).to(device).unsqueeze(0)

    with torch.no_grad():
        input_lengths = torch.LongTensor([tokens.shape[-1]]).to(device)

        text_mask = length_to_mask(input_lengths).to(device)

        t_en = model.text_encoder(tokens, input_lengths, text_mask)
        bert_dur = model.bert(tokens, attention_mask=(~text_mask).int())
        d_en = model.bert_encoder(bert_dur).transpose(-1, -2)

        s_pred = diffusion_sampler(noise=torch.randn((1, 256)).unsqueeze(1).to(device),
                                   embedding=bert_dur,
                                   embedding_scale=embedding_scale,
                                   features=ref_s,  # reference from the same speaker as the embedding
                                   num_steps=diffusion_steps).squeeze(1)

        s = s_pred[:, 128:]
        ref = s_pred[:, :128]

        ref = alpha * ref + (1 - alpha) * ref_s[:, :128]
        s = beta * s + (1 - beta) * ref_s[:, 128:]

        d = model.predictor.text_encoder(d_en, s, input_lengths, text_mask)

        x = model.predictor.lstm(d)
        x_mod = model.predictor.prepare_projection(x)
        duration = model.predictor.duration_proj(x_mod)

        duration = torch.sigmoid(duration).sum(axis=-1) / rate_of_speech

        pred_dur = torch.round(duration.squeeze()).clamp(min=1)

        pred_aln_trg = torch.zeros(input_lengths, int(pred_dur.sum().data))

        c_frame = 0
        for i in range(pred_aln_trg.size(0)):
            pred_aln_trg[i, c_frame:c_frame + int(pred_dur[i].data)] = 1
            c_frame += int(pred_dur[i].data)

        # encode prosody
        en = (d.transpose(-1, -2) @ pred_aln_trg.unsqueeze(0).to(device))

        F0_pred, N_pred = model.predictor.F0Ntrain(en, s)

        asr = (t_en @ pred_aln_trg.unsqueeze(0).to(device))

        out = model.decoder(asr,
                            F0_pred, N_pred, ref.squeeze().unsqueeze(0))

    return out.squeeze().cpu().numpy()[..., :-50]


def Longform(model, diffusion_sampler, text, s_prev, ref_s, alpha=0.3, beta=0.7, t=0.7, diffusion_steps=5, embedding_scale=1, rate_of_speech=1.0):

    tokens = textclenaer(text)
    tokens.insert(0, 0)
    tokens = torch.LongTensor(tokens).to(device).unsqueeze(0)

    with torch.no_grad():
        input_lengths = torch.LongTensor([tokens.shape[-1]]).to(device)
        text_mask = length_to_mask(input_lengths).to(device)

        t_en = model.text_encoder(tokens, input_lengths, text_mask)
        bert_dur = model.bert(tokens, attention_mask=(~text_mask).int())
        d_en = model.bert_encoder(bert_dur).transpose(-1, -2)

        s_pred = diffusion_sampler(noise=torch.randn((1, 256)).unsqueeze(1).to(device),
                                   embedding=bert_dur,
                                   embedding_scale=embedding_scale,
                                   features=ref_s,
                                   num_steps=diffusion_steps).squeeze(1)

        if s_prev is not None:
            # convex combination of previous and current style
            s_pred = t * s_prev + (1 - t) * s_pred

        s = s_pred[:, 128:]
        ref = s_pred[:, :128]

        ref = alpha * ref + (1 - alpha) * ref_s[:, :128]
        s = beta * s + (1 - beta) * ref_s[:, 128:]

        s_pred = torch.cat([ref, s], dim=-1)

        d = model.predictor.text_encoder(d_en, s, input_lengths, text_mask)

        x = model.predictor.lstm(d)
        x_mod = model.predictor.prepare_projection(x)  # 640 -> 512
        duration = model.predictor.duration_proj(x_mod)

        duration = torch.sigmoid(duration).sum(axis=-1) / rate_of_speech
        pred_dur = torch.round(duration.squeeze()).clamp(min=1)

        pred_aln_trg = torch.zeros(input_lengths, int(pred_dur.sum().data))
        c_frame = 0
        for i in range(pred_aln_trg.size(0)):
            pred_aln_trg[i, c_frame:c_frame + int(pred_dur[i].data)] = 1
            c_frame += int(pred_dur[i].data)

        # encode prosody
        en = (d.transpose(-1, -2) @ pred_aln_trg.unsqueeze(0).to(device))

        F0_pred, N_pred = model.predictor.F0Ntrain(en, s)

        asr = (t_en @ pred_aln_trg.unsqueeze(0).to(device))

        out = model.decoder(asr,
                            F0_pred, N_pred, ref.squeeze().unsqueeze(0))

    return out.squeeze().cpu().numpy()[..., :-100], s_pred


def merge_short_elements(lst):
    i = 0
    while i < len(lst):
        if i > 0 and len(lst[i]) < 10:
            lst[i - 1] += ' ' + lst[i]
            lst.pop(i)
        else:
            i += 1
    return lst


def merge_three(text_list, maxim=2):
    merged_list = []
    for i in range(0, len(text_list), maxim):
        merged_text = ' '.join(text_list[i:i + maxim])
        merged_list.append(merged_text)
    return merged_list


def merging_sentences(lst):
    return merge_three(merge_short_elements(lst))
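For context, a minimal, hypothetical usage sketch (not part of this commit) of the text-chunking helpers defined at the end of KotoDama_sampler.py. The sample text and the naive split on 「。」 are illustrative only; importing the module assumes the repository root is on the Python path and also instantiates the two tokenizers, so the corresponding Hugging Face models must be downloadable.

```py
# Hypothetical usage sketch: prepare long-form text for the Longform() loop.
# Importing Modules.KotoDama_sampler pulls in torch/transformers and loads the tokenizers.
from Modules.KotoDama_sampler import merging_sentences

long_text = "今日はいい天気ですね。散歩に行きましょう。はい。楽しみです。"

# Naive sentence split on the Japanese full stop; kept here only for illustration.
sentences = [s + "。" for s in long_text.split("。") if s]

# Fragments shorter than 10 characters are merged into the previous sentence,
# then sentences are grouped two at a time, ready to be fed chunk by chunk
# to Longform() while carrying over the previous style vector s_prev.
chunks = merging_sentences(sentences)
print(chunks)
```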
Modules/__init__.py
ADDED
@@ -0,0 +1 @@
Modules/__pycache__/KotoDama_sampler.cpython-311.pyc
ADDED
Binary file (14.3 kB).
Modules/__pycache__/KotoDama_sampler.cpython-39.pyc
ADDED
Binary file (5.99 kB).
Modules/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (178 Bytes).
Modules/__pycache__/__init__.cpython-39.pyc
ADDED
Binary file (155 Bytes).
Modules/__pycache__/discriminators.cpython-311.pyc
ADDED
Binary file (12.2 kB).
Modules/__pycache__/discriminators.cpython-39.pyc
ADDED
Binary file (6.17 kB).
Modules/__pycache__/hifigan.cpython-311.pyc
ADDED
Binary file (30 kB).
Modules/__pycache__/istftnet.cpython-311.pyc
ADDED
Binary file (34.4 kB).
Modules/__pycache__/istftnet.cpython-39.pyc
ADDED
Binary file (16.7 kB).
Modules/__pycache__/slmadv.cpython-311.pyc
ADDED
Binary file (13.7 kB).
Modules/__pycache__/slmadv.cpython-39.pyc
ADDED
Binary file (4.58 kB).
Modules/__pycache__/utils.cpython-311.pyc
ADDED
Binary file (1.18 kB).
Modules/__pycache__/utils.cpython-39.pyc
ADDED
Binary file (751 Bytes).
Modules/diffusion/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (188 Bytes).
Modules/diffusion/__pycache__/__init__.cpython-39.pyc
ADDED
Binary file (165 Bytes).
Modules/diffusion/__pycache__/diffusion.cpython-311.pyc
ADDED
Binary file (5.55 kB).
Modules/diffusion/__pycache__/diffusion.cpython-39.pyc
ADDED
Binary file (3.63 kB).
Modules/diffusion/__pycache__/modules.cpython-311.pyc
ADDED
Binary file (32.8 kB).
Modules/diffusion/__pycache__/modules.cpython-39.pyc
ADDED
Binary file (16.2 kB).
Modules/diffusion/__pycache__/sampler.cpython-311.pyc
ADDED
Binary file (37.8 kB).
Modules/diffusion/__pycache__/sampler.cpython-39.pyc
ADDED
Binary file (21.7 kB).
Modules/diffusion/__pycache__/utils.cpython-311.pyc
ADDED
Binary file (5.87 kB).
Modules/diffusion/__pycache__/utils.cpython-39.pyc
ADDED
Binary file (3.53 kB).
Modules/diffusion/audio_diffusion_pytorch/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (921 Bytes).
Modules/diffusion/audio_diffusion_pytorch/__pycache__/__init__.cpython-39.pyc
ADDED
Binary file (676 Bytes).
Modules/diffusion/audio_diffusion_pytorch/__pycache__/components.cpython-311.pyc
ADDED
Binary file (9.72 kB).
Modules/diffusion/audio_diffusion_pytorch/__pycache__/components.cpython-39.pyc
ADDED
Binary file (5.66 kB).
Modules/diffusion/audio_diffusion_pytorch/__pycache__/diffusion.cpython-311.pyc
ADDED
Binary file (22.5 kB).
Modules/diffusion/audio_diffusion_pytorch/__pycache__/diffusion.cpython-39.pyc
ADDED
Binary file (12.5 kB).
Modules/diffusion/audio_diffusion_pytorch/__pycache__/models.cpython-311.pyc
ADDED
Binary file (13.7 kB).
Modules/diffusion/audio_diffusion_pytorch/__pycache__/models.cpython-39.pyc
ADDED
Binary file (8.01 kB).
Modules/diffusion/audio_diffusion_pytorch/__pycache__/utils.cpython-311.pyc
ADDED
Binary file (8.28 kB).
Modules/diffusion/audio_diffusion_pytorch/__pycache__/utils.cpython-39.pyc
ADDED
Binary file (4.86 kB).
Modules/diffusion/reconstruction_head/audio-diffusion-pytorch/.github/workflows/python-publish.yml
ADDED
@@ -0,0 +1,39 @@
# This workflow will upload a Python Package using Twine when a release is created
# For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries

# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.

name: Upload Python Package

on:
  release:
    types: [published]

permissions:
  contents: read

jobs:
  deploy:

    runs-on: ubuntu-latest

    steps:
    - uses: actions/checkout@v3
    - name: Set up Python
      uses: actions/setup-python@v3
      with:
        python-version: '3.x'
    - name: Install dependencies
      run: |
        python -m pip install --upgrade pip
        pip install build
    - name: Build package
      run: python -m build
    - name: Publish package
      uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29
      with:
        user: __token__
        password: ${{ secrets.PYPI_API_TOKEN }}
Modules/diffusion/reconstruction_head/audio-diffusion-pytorch/.gitignore
ADDED
@@ -0,0 +1,2 @@
__pycache__
.mypy_cache
Modules/diffusion/reconstruction_head/audio-diffusion-pytorch/.pre-commit-config.yaml
ADDED
@@ -0,0 +1,41 @@
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
  rev: v2.3.0
  hooks:
  - id: end-of-file-fixer
  - id: trailing-whitespace

# Formats code correctly
- repo: https://github.com/psf/black
  rev: 21.12b0
  hooks:
  - id: black
    args: [
      '--experimental-string-processing'
    ]

# Sorts imports
- repo: https://github.com/pycqa/isort
  rev: 5.10.1
  hooks:
  - id: isort
    name: isort (python)
    args: ["--profile", "black"]

# Checks unused imports, like lengths, etc
- repo: https://gitlab.com/pycqa/flake8
  rev: 4.0.0
  hooks:
  - id: flake8
    args: [
      '--per-file-ignores=__init__.py:F401',
      '--max-line-length=88',
      '--ignore=E203,W503'
    ]

# Checks types
- repo: https://github.com/pre-commit/mirrors-mypy
  rev: 'v0.971'
  hooks:
  - id: mypy
    additional_dependencies: [data-science-types>=0.2, torch>=1.6]
Modules/diffusion/reconstruction_head/audio-diffusion-pytorch/LICENSE
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2022 archinet.ai

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Modules/diffusion/reconstruction_head/audio-diffusion-pytorch/LOGO.png
ADDED
Binary file (image).
Modules/diffusion/reconstruction_head/audio-diffusion-pytorch/README.md
ADDED
@@ -0,0 +1,251 @@
<img src="./LOGO.png"></img>

A fully featured audio diffusion library, for PyTorch. Includes models for unconditional audio generation, text-conditional audio generation, diffusion autoencoding, upsampling, and vocoding. The provided models are waveform-based, however, the U-Net (built using [`a-unet`](https://github.com/archinetai/a-unet)), `DiffusionModel`, diffusion method, and diffusion samplers are both generic to any dimension and highly customizable to work on other formats. **Notes: (1) no pre-trained models are provided here, (2) the configs shown are indicative and untested, see [Moûsai](https://arxiv.org/abs/2301.11757) for the configs used in the paper.**


## Install

```bash
pip install audio-diffusion-pytorch
```

[](https://pypi.org/project/audio-diffusion-pytorch/)
[](https://pepy.tech/project/audio-diffusion-pytorch)


## Usage

### Unconditional Generator

```py
from audio_diffusion_pytorch import DiffusionModel, UNetV0, VDiffusion, VSampler

model = DiffusionModel(
    net_t=UNetV0, # The model type used for diffusion (U-Net V0 in this case)
    in_channels=2, # U-Net: number of input/output (audio) channels
    channels=[8, 32, 64, 128, 256, 512, 512, 1024, 1024], # U-Net: channels at each layer
    factors=[1, 4, 4, 4, 2, 2, 2, 2, 2], # U-Net: downsampling and upsampling factors at each layer
    items=[1, 2, 2, 2, 2, 2, 2, 4, 4], # U-Net: number of repeating items at each layer
    attentions=[0, 0, 0, 0, 0, 1, 1, 1, 1], # U-Net: attention enabled/disabled at each layer
    attention_heads=8, # U-Net: number of attention heads per attention item
    attention_features=64, # U-Net: number of attention features per attention item
    diffusion_t=VDiffusion, # The diffusion method used
    sampler_t=VSampler, # The diffusion sampler used
)

# Train model with audio waveforms
audio = torch.randn(1, 2, 2**18) # [batch_size, in_channels, length]
loss = model(audio)
loss.backward()

# Turn noise into new audio sample with diffusion
noise = torch.randn(1, 2, 2**18) # [batch_size, in_channels, length]
sample = model.sample(noise, num_steps=10) # Suggested num_steps 10-100
```

### Text-Conditional Generator
A text-to-audio diffusion model that conditions the generation with `t5-base` text embeddings, requires `pip install transformers`.
```py
from audio_diffusion_pytorch import DiffusionModel, UNetV0, VDiffusion, VSampler

model = DiffusionModel(
    # ... same as unconditional model
    use_text_conditioning=True, # U-Net: enables text conditioning (default T5-base)
    use_embedding_cfg=True, # U-Net: enables classifier free guidance
    embedding_max_length=64, # U-Net: text embedding maximum length (default for T5-base)
    embedding_features=768, # U-Net: text embedding features (default for T5-base)
    cross_attentions=[0, 0, 0, 1, 1, 1, 1, 1, 1], # U-Net: cross-attention enabled/disabled at each layer
)

# Train model with audio waveforms
audio_wave = torch.randn(1, 2, 2**18) # [batch, in_channels, length]
loss = model(
    audio_wave,
    text=['The audio description'], # Text conditioning, one element per batch
    embedding_mask_proba=0.1 # Probability of masking text with learned embedding (Classifier-Free Guidance Mask)
)
loss.backward()

# Turn noise into new audio sample with diffusion
noise = torch.randn(1, 2, 2**18)
sample = model.sample(
    noise,
    text=['The audio description'],
    embedding_scale=5.0, # Higher for more text importance, suggested range: 1-15 (Classifier-Free Guidance Scale)
    num_steps=2 # Higher for better quality, suggested num_steps: 10-100
)
```

### Diffusion Upsampler
Upsample audio from a lower sample rate to higher sample rate using diffusion, e.g. 3kHz to 48kHz.
```py
from audio_diffusion_pytorch import DiffusionUpsampler, UNetV0, VDiffusion, VSampler

upsampler = DiffusionUpsampler(
    net_t=UNetV0, # The model type used for diffusion
    upsample_factor=16, # The upsample factor (e.g. 16 can be used for 3kHz to 48kHz)
    in_channels=2, # U-Net: number of input/output (audio) channels
    channels=[8, 32, 64, 128, 256, 512, 512, 1024, 1024], # U-Net: channels at each layer
    factors=[1, 4, 4, 4, 2, 2, 2, 2, 2], # U-Net: downsampling and upsampling factors at each layer
    items=[1, 2, 2, 2, 2, 2, 2, 4, 4], # U-Net: number of repeating items at each layer
    diffusion_t=VDiffusion, # The diffusion method used
    sampler_t=VSampler, # The diffusion sampler used
)

# Train model with high sample rate audio waveforms
audio = torch.randn(1, 2, 2**18) # [batch, in_channels, length]
loss = upsampler(audio)
loss.backward()

# Turn low sample rate audio into high sample rate
downsampled_audio = torch.randn(1, 2, 2**14) # [batch, in_channels, length]
sample = upsampler.sample(downsampled_audio, num_steps=10) # Output has shape: [1, 2, 2**18]
```

### Diffusion Vocoder
Convert a mel-spectrogram to waveform using diffusion.
```py
from audio_diffusion_pytorch import DiffusionVocoder, UNetV0, VDiffusion, VSampler

vocoder = DiffusionVocoder(
    mel_n_fft=1024, # Mel-spectrogram n_fft
    mel_channels=80, # Mel-spectrogram channels
    mel_sample_rate=48000, # Mel-spectrogram sample rate
    mel_normalize_log=True, # Mel-spectrogram log normalization (alternative is mel_normalize=True for [-1,1] power normalization)
    net_t=UNetV0, # The model type used for diffusion vocoding
    channels=[8, 32, 64, 128, 256, 512, 512, 1024, 1024], # U-Net: channels at each layer
    factors=[1, 4, 4, 4, 2, 2, 2, 2, 2], # U-Net: downsampling and upsampling factors at each layer
    items=[1, 2, 2, 2, 2, 2, 2, 4, 4], # U-Net: number of repeating items at each layer
    diffusion_t=VDiffusion, # The diffusion method used
    sampler_t=VSampler, # The diffusion sampler used
)

# Train model on waveforms (automatically converted to mel internally)
audio = torch.randn(1, 2, 2**18) # [batch, in_channels, length]
loss = vocoder(audio)
loss.backward()

# Turn mel spectrogram into waveform
mel_spectrogram = torch.randn(1, 2, 80, 1024) # [batch, in_channels, mel_channels, mel_length]
sample = vocoder.sample(mel_spectrogram, num_steps=10) # Output has shape: [1, 2, 2**18]
```

### Diffusion Autoencoder
Autoencode audio into a compressed latent using diffusion. Any encoder can be provided as long as it subclasses the `EncoderBase` class or contains an `out_channels` and `downsample_factor` field.
```py
from audio_diffusion_pytorch import DiffusionAE, UNetV0, VDiffusion, VSampler
from audio_encoders_pytorch import MelE1d, TanhBottleneck

autoencoder = DiffusionAE(
    encoder=MelE1d( # The encoder used, in this case a mel-spectrogram encoder
        in_channels=2,
        channels=512,
        multipliers=[1, 1],
        factors=[2],
        num_blocks=[12],
        out_channels=32,
        mel_channels=80,
        mel_sample_rate=48000,
        mel_normalize_log=True,
        bottleneck=TanhBottleneck(),
    ),
    inject_depth=6,
    net_t=UNetV0, # The model type used for diffusion upsampling
    in_channels=2, # U-Net: number of input/output (audio) channels
    channels=[8, 32, 64, 128, 256, 512, 512, 1024, 1024], # U-Net: channels at each layer
    factors=[1, 4, 4, 4, 2, 2, 2, 2, 2], # U-Net: downsampling and upsampling factors at each layer
    items=[1, 2, 2, 2, 2, 2, 2, 4, 4], # U-Net: number of repeating items at each layer
    diffusion_t=VDiffusion, # The diffusion method used
    sampler_t=VSampler, # The diffusion sampler used
)

# Train autoencoder with audio samples
audio = torch.randn(1, 2, 2**18) # [batch, in_channels, length]
loss = autoencoder(audio)
loss.backward()

# Encode/decode audio
audio = torch.randn(1, 2, 2**18) # [batch, in_channels, length]
latent = autoencoder.encode(audio) # Encode
sample = autoencoder.decode(latent, num_steps=10) # Decode by sampling diffusion model conditioning on latent
```

## Other

### Inpainting
```py
from audio_diffusion_pytorch import UNetV0, VInpainter

# The diffusion UNetV0 (this is an example, the net must be trained to work)
net = UNetV0(
    dim=1,
    in_channels=2, # U-Net: number of input/output (audio) channels
    channels=[8, 32, 64, 128, 256, 512, 512, 1024, 1024], # U-Net: channels at each layer
    factors=[1, 4, 4, 4, 2, 2, 2, 2, 2], # U-Net: downsampling and upsampling factors at each layer
    items=[1, 2, 2, 2, 2, 2, 2, 4, 4], # U-Net: number of repeating items at each layer
    attentions=[0, 0, 0, 0, 0, 1, 1, 1, 1], # U-Net: attention enabled/disabled at each layer
    attention_heads=8, # U-Net: number of attention heads per attention block
    attention_features=64, # U-Net: number of attention features per attention block,
)

# Instantiate inpainter with trained net
inpainter = VInpainter(net=net)

# Inpaint source
y = inpainter(
    source=torch.randn(1, 2, 2**18), # Start source
    mask=torch.randint(0, 2, (1, 2, 2 ** 18), dtype=torch.bool), # Set to `True` the parts you want to keep
    num_steps=10, # Number of inpainting steps
    num_resamples=2, # Number of resampling steps
    show_progress=True,
) # [1, 2, 2 ** 18]
```

## Appreciation

* [StabilityAI](https://stability.ai/) for the compute, [Zach Evans](https://github.com/zqevans) and everyone else from [HarmonAI](https://www.harmonai.org/) for the interesting research discussions.
* [ETH Zurich](https://inf.ethz.ch/) for the resources, [Zhijing Jin](https://zhijing-jin.com/), [Bernhard Schoelkopf](https://is.mpg.de/~bs), and [Mrinmaya Sachan](http://www.mrinmaya.io/) for supervising this Thesis.
* [Phil Wang](https://github.com/lucidrains) for the beautiful open source contributions on [diffusion](https://github.com/lucidrains/denoising-diffusion-pytorch) and [Imagen](https://github.com/lucidrains/imagen-pytorch).
* [Katherine Crowson](https://github.com/crowsonkb) for the experiments with [k-diffusion](https://github.com/crowsonkb/k-diffusion) and the insane collection of samplers.

## Citations

DDPM Diffusion
```bibtex
@misc{2006.11239,
Author = {Jonathan Ho and Ajay Jain and Pieter Abbeel},
Title = {Denoising Diffusion Probabilistic Models},
Year = {2020},
Eprint = {arXiv:2006.11239},
}
```

DDIM (V-Sampler)
```bibtex
@misc{2010.02502,
Author = {Jiaming Song and Chenlin Meng and Stefano Ermon},
Title = {Denoising Diffusion Implicit Models},
Year = {2020},
Eprint = {arXiv:2010.02502},
}
```

V-Diffusion
```bibtex
@misc{2202.00512,
Author = {Tim Salimans and Jonathan Ho},
Title = {Progressive Distillation for Fast Sampling of Diffusion Models},
Year = {2022},
Eprint = {arXiv:2202.00512},
}
```

Imagen (T5 Text Conditioning)
```bibtex
@misc{2205.11487,
Author = {Chitwan Saharia and William Chan and Saurabh Saxena and Lala Li and Jay Whang and Emily Denton and Seyed Kamyar Seyed Ghasemipour and Burcu Karagol Ayan and S. Sara Mahdavi and Rapha Gontijo Lopes and Tim Salimans and Jonathan Ho and David J Fleet and Mohammad Norouzi},
Title = {Photorealistic Text-to-Image Diffusion Models with Deep Language Understanding},
Year = {2022},
Eprint = {arXiv:2205.11487},
}
```