# DiffuSynthV0.2/metrics/pipelines.py
import librosa
import numpy as np
import torch
from tqdm import tqdm

from tools import VAE_out_put_to_spc, rms_normalize, nnData2Audio
from model.DiffSynthSampler import DiffSynthSampler


def sample_pipeline(device, uNet, VAE, MMM, CLAP_tokenizer,
                    positive_prompts, negative_prompts, batchsize, sample_steps, CFG, seed=None, duration=3.0,
                    freq_resolution=512, time_resolution=256, channels=4, VAE_scale=4, timesteps=1000,
                    noise_strategy="repeat", sampler="ddim", return_latent=True):
    # Latent spatial size is the spectrogram resolution downscaled by the VAE.
    height = int(freq_resolution / VAE_scale)
    width = int(time_resolution / VAE_scale)
    VAE_encoder, VAE_quantizer, VAE_decoder = VAE._encoder, VAE._vq_vae, VAE._decoder  # encoder unused here

    # Encode the positive/negative prompts into CLAP text embeddings.
    text2sound_embedding = \
        MMM.get_text_features(**CLAP_tokenizer([positive_prompts], padding=True, return_tensors="pt"))[0].to(device)
    negative_condition = \
        MMM.get_text_features(**CLAP_tokenizer([negative_prompts], padding=True, return_tensors="pt"))[0].to(device)

    mySampler = DiffSynthSampler(timesteps, height=height, channels=channels, noise_strategy=noise_strategy, mute=True)
    mySampler.activate_classifier_free_guidance(CFG, negative_condition)
    # Evenly subsample the full diffusion schedule down to `sample_steps` steps.
    mySampler.respace(list(np.linspace(0, timesteps - 1, sample_steps, dtype=np.int32)))

    condition = text2sound_embedding.repeat(batchsize, 1)

    latent_representations, initial_noise = \
        mySampler.sample(model=uNet, shape=(batchsize, channels, height, width), seed=seed,
                         return_tensor=True, condition=condition, sampler=sampler)

    # Keep only the final denoising step, then snap it to the VQ codebook.
    latent_representations = latent_representations[-1]
    quantized_latent_representations, _, (_, _, _) = VAE_quantizer(latent_representations)
    if return_latent:
        return quantized_latent_representations.detach()

    reconstruction_batch = VAE_decoder(quantized_latent_representations).to("cpu").detach().numpy()
    # Scale the time axis to the requested duration before rendering audio.
    time_resolution = int(time_resolution * ((duration + 1) / 4))
    rec_signals = nnData2Audio(reconstruction_batch, resolution=(freq_resolution, time_resolution))
    rec_signals = [rms_normalize(rec_signal) for rec_signal in rec_signals]
    return quantized_latent_representations.detach(), reconstruction_batch, rec_signals
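

# Illustrative usage sketch: how sample_pipeline is typically called. It
# mirrors the call in generate_audios_with_diffuSynth_sample below; `uNet`,
# `VAE`, `MMM`, and `CLAP_tokenizer` are assumed to be loaded elsewhere, and
# the prompt string and hyperparameters are placeholder values.
def _example_sample_usage(device, uNet, VAE, MMM, CLAP_tokenizer):
    latents, spectrograms, signals = sample_pipeline(
        device, uNet, VAE, MMM, CLAP_tokenizer,
        positive_prompts="acoustic guitar pluck", negative_prompts="",
        batchsize=4, sample_steps=20, CFG=6, seed=42, return_latent=False)
    # `signals` is a list of RMS-normalized 1-D waveforms, one per batch item.
    return latents, spectrograms, signals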


def sample_pipeline_GAN(device, gan_generator, VAE, MMM, CLAP_tokenizer,
                        positive_prompts, negative_prompts, batchsize, sample_steps, CFG, seed=None, duration=3.0,
                        freq_resolution=512, time_resolution=256, channels=4, VAE_scale=4, timesteps=1000,
                        noise_strategy="repeat", sampler="ddim", return_latent=True):
    # Note: negative_prompts, sample_steps, CFG, seed, timesteps, noise_strategy,
    # and sampler are accepted for interface parity with sample_pipeline but are
    # unused in this single-pass GAN variant.
    height = int(freq_resolution / VAE_scale)
    width = int(time_resolution / VAE_scale)
    VAE_encoder, VAE_quantizer, VAE_decoder = VAE._encoder, VAE._vq_vae, VAE._decoder

    text2sound_embedding = \
        MMM.get_text_features(**CLAP_tokenizer([positive_prompts], padding=True, return_tensors="pt"))[0].to(device)
    condition = text2sound_embedding.repeat(batchsize, 1)

    # Single forward pass: the generator maps noise + text condition straight
    # to a latent, with no iterative denoising.
    noise = torch.randn(batchsize, channels, height, width).to(device)
    latent_representations = gan_generator(noise, condition)
    quantized_latent_representations, _, (_, _, _) = VAE_quantizer(latent_representations)
    if return_latent:
        return quantized_latent_representations.detach()

    reconstruction_batch = VAE_decoder(quantized_latent_representations).to("cpu").detach().numpy()
    time_resolution = int(time_resolution * ((duration + 1) / 4))
    rec_signals = nnData2Audio(reconstruction_batch, resolution=(freq_resolution, time_resolution))
    rec_signals = [rms_normalize(rec_signal) for rec_signal in rec_signals]
    return quantized_latent_representations.detach(), reconstruction_batch, rec_signals
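

# Sketch of the return_latent=True path: the pipelines hand back only the
# quantized latent, which a caller can decode later (e.g. to cache latents
# while computing metrics). The `VAE._decoder` access follows this module's
# own convention; everything else here is an illustrative assumption.
def _example_decode_latent(VAE, quantized_latent_representations):
    with torch.no_grad():
        # Decode the quantized latent back to a spectrogram-shaped array.
        reconstruction = VAE._decoder(quantized_latent_representations)
    return reconstruction.cpu().numpy()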


def inpaint_pipeline(device, uNet, VAE, MMM, CLAP_tokenizer, use_dynamic_mask, noising_strength, guidance,
                     positive_prompts, negative_prompts, batchsize, sample_steps, CFG, seed=None, duration=3.0,
                     mask_flexivity=0.99, freq_resolution=512, time_resolution=256, channels=4, VAE_scale=4,
                     timesteps=1000, noise_strategy="repeat", sampler="ddim", return_latent=True):
    height = int(freq_resolution / VAE_scale)
    # The latent width covers the requested duration plus a one-second tail.
    width = int(time_resolution * ((duration + 1) / 4) / VAE_scale)
    VAE_encoder, VAE_quantizer, VAE_decoder = VAE._encoder, VAE._vq_vae, VAE._decoder

    text2sound_embedding = \
        MMM.get_text_features(**CLAP_tokenizer([positive_prompts], padding=True, return_tensors="pt"))[0].to(device)
    negative_condition = \
        MMM.get_text_features(**CLAP_tokenizer([negative_prompts], padding=True, return_tensors="pt"))[0].to(device)

    mySampler = DiffSynthSampler(timesteps, height=height, channels=channels, noise_strategy=noise_strategy, mute=True)
    mySampler.activate_classifier_free_guidance(CFG, negative_condition)
    mySampler.respace(list(np.linspace(0, timesteps - 1, sample_steps, dtype=np.int32)))

    condition = text2sound_embedding.repeat(batchsize, 1)
    guidance = guidance.repeat(batchsize, 1, 1, 1).to(device)

    # mask = 1 means "freeze": keep the trailing one-second tail of the guide
    # latent fixed and let the sampler inpaint everything before it.
    latent_mask = torch.zeros((batchsize, 1, height, width), dtype=torch.float32).to(device)
    latent_mask[:, :, :, -int(time_resolution * (1 / 4) / VAE_scale):] = 1.0

    latent_representations, initial_noise = \
        mySampler.inpaint_sample(model=uNet, shape=(batchsize, channels, height, width),
                                 noising_strength=noising_strength,
                                 guide_img=guidance, mask=latent_mask, return_tensor=True,
                                 condition=condition, sampler=sampler,
                                 use_dynamic_mask=use_dynamic_mask,
                                 end_noise_level_ratio=0.0,
                                 mask_flexivity=mask_flexivity)

    latent_representations = latent_representations[-1]
    quantized_latent_representations, _, (_, _, _) = VAE_quantizer(latent_representations)
    if return_latent:
        return quantized_latent_representations.detach()

    reconstruction_batch = VAE_decoder(quantized_latent_representations).to("cpu").detach().numpy()
    time_resolution = int(time_resolution * ((duration + 1) / 4))
    rec_signals = nnData2Audio(reconstruction_batch, resolution=(freq_resolution, time_resolution))
    rec_signals = [rms_normalize(rec_signal) for rec_signal in rec_signals]
    return quantized_latent_representations.detach(), reconstruction_batch, rec_signals
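

# Illustrative sketch: one way to obtain a guide latent for inpaint_pipeline
# is to generate it with sample_pipeline in return_latent=True mode. With the
# default duration of 3.0 the two pipelines' latent widths line up (both
# 256 / 4 = 64 latent frames). Prompts and hyperparameters are placeholders.
def _example_inpaint_usage(device, uNet, VAE, MMM, CLAP_tokenizer):
    guide = sample_pipeline(device, uNet, VAE, MMM, CLAP_tokenizer,
                            positive_prompts="string pluck", negative_prompts="",
                            batchsize=1, sample_steps=20, CFG=6, return_latent=True)
    # inpaint_pipeline repeats `guide` along the batch dimension internally.
    _, _, signals = inpaint_pipeline(device, uNet, VAE, MMM, CLAP_tokenizer,
                                     use_dynamic_mask=True, noising_strength=0.7,
                                     guidance=guide, positive_prompts="string pluck",
                                     negative_prompts="", batchsize=4, sample_steps=20,
                                     CFG=6, duration=3.0, return_latent=False)
    return signals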


def generate_audios_with_diffuSynth_sample(device, uNet, VAE, MMM, CLAP_tokenizer, num_batches,
                                           positive_prompts, negative_prompts="", CFG=6, sample_steps=10):
    diffuSynth_signals = []
    for _ in tqdm(range(num_batches)):
        # Each call renders a batch of 16 signals; collect them across batches.
        _, _, signals = sample_pipeline(device, uNet, VAE, MMM, CLAP_tokenizer,
                                        positive_prompts=positive_prompts, negative_prompts=negative_prompts,
                                        batchsize=16, sample_steps=sample_steps, CFG=CFG, seed=None,
                                        return_latent=False)
        diffuSynth_signals.extend(signals)
    return np.array(diffuSynth_signals)


def generate_audios_with_diffuSynth_inpaint(device, uNet, VAE, MMM, CLAP_tokenizer, num_batches, guidance,
                                            duration, use_dynamic_mask, noising_strength,
                                            positive_prompts, negative_prompts="", CFG=6, sample_steps=10):
    diffuSynth_signals = []
    for _ in tqdm(range(num_batches)):
        _, _, signals = inpaint_pipeline(device, uNet, VAE, MMM, CLAP_tokenizer,
                                         use_dynamic_mask=use_dynamic_mask, noising_strength=noising_strength,
                                         guidance=guidance, positive_prompts=positive_prompts,
                                         negative_prompts=negative_prompts, batchsize=16,
                                         sample_steps=sample_steps, CFG=CFG, seed=None, duration=duration,
                                         mask_flexivity=0.999, return_latent=False)
        diffuSynth_signals.extend(signals)
    return np.array(diffuSynth_signals)
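

# Usage sketch for the batch generators: render signals for a prompt and
# write them to disk. The 16000 Hz sample rate is an assumption for
# illustration only; substitute the rate nnData2Audio actually renders at.
def _example_export_audio(device, uNet, VAE, MMM, CLAP_tokenizer, out_dir="samples"):
    import os
    import soundfile as sf  # third-party dependency, assumed available
    signals = generate_audios_with_diffuSynth_sample(
        device, uNet, VAE, MMM, CLAP_tokenizer, num_batches=2,
        positive_prompts="warm analog synth pad", CFG=6, sample_steps=10)
    os.makedirs(out_dir, exist_ok=True)
    for i, signal in enumerate(signals):
        # One mono WAV file per generated signal.
        sf.write(os.path.join(out_dir, f"sample_{i:03d}.wav"), signal, 16000)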