hydra:
  run:
    dir: ckpts/${model.name}_${model.mel_spec.mel_spec_type}_${model.tokenizer}_${datasets.name}/${now:%Y-%m-%d}/${now:%H-%M-%S}
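  # Note (illustrative, not part of the original file): with the values below, Hydra resolves this
  # run dir to ckpts/vi_fine_tuned_t5_tts_vocos_pinyin_vin100h-preprocessed-v2/<YYYY-MM-DD>/<HH-MM-SS>,
  # where the last two components come from the ${now:...} resolvers at launch time.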

datasets:
  name: vin100h-preprocessed-v2 # dataset name
  batch_size_per_gpu: 3200 # 1 GPU, 1 * 3200 = 3200 frames per batch (see note below)
  batch_size_type: frame # frame | sample
  max_samples: 64 # max sequences per batch when batch_size_type is frame; 32 for small models, 64 for base models
  num_workers: 4
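  # Rough arithmetic (added for clarity, assuming the frame budget counts mel frames): with
  # mel_spec.hop_length = 256 and target_sample_rate = 24000 below, 3200 frames * 256 / 24000 Hz
  # is roughly 34 seconds of audio per GPU per batch.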

optim:
  epochs: 80
  learning_rate: 1e-5
  num_warmup_updates: 2761 # warmup updates
  grad_accumulation_steps: 2 # note: updates = steps / grad_accumulation_steps
  max_grad_norm: 1.0 # gradient clipping
  bnb_optimizer: False # whether to use the bitsandbytes 8-bit AdamW optimizer
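  # Worked example (added for clarity): with grad_accumulation_steps = 2, every 2 forward/backward
  # steps produce 1 optimizer update, so num_warmup_updates = 2761 corresponds to roughly
  # 2 * 2761 = 5522 data steps of warmup.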

model:
  name: vi_fine_tuned_t5_tts # model name
  tokenizer: pinyin # tokenizer type
  tokenizer_path: null # if tokenizer is 'custom', set the path to the vocab file to use (should be vocab.txt)
  backbone: DiT
  arch:
    dim: 1024
    depth: 22
    heads: 16
    ff_mult: 2
    text_dim: 512
    text_mask_padding: False
    conv_layers: 4
    pe_attn_head: 1
    checkpoint_activations: False # recompute activations and save memory for extra compute
  mel_spec:
    target_sample_rate: 24000
    n_mel_channels: 100
    hop_length: 256
    win_length: 1024
    n_fft: 1024
    mel_spec_type: vocos # vocos | bigvgan
  vocoder:
    is_local: False # use local offline ckpt or not
    local_path: null # local vocoder path
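  # Note on mel_spec (added for clarity): 24000 Hz / 256-sample hop gives 93.75 mel frames per
  # second, and win_length = 1024 samples is about 42.7 ms per analysis window; these settings
  # generally need to stay consistent with the vocoder checkpoint (vocos here), so change them together if at all.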

ckpts:
  logger: null # wandb | tensorboard | null
  log_samples: True # infer a random sample at each checkpoint save; WIP, normal to fail with extra long samples
  save_per_updates: 4000 # save a checkpoint every N updates
  keep_last_n_checkpoints: 1 # -1 to keep all, 0 to not save intermediate, > 0 to keep last N checkpoints
  last_per_updates: 4000 # save the last checkpoint every N updates
  save_dir: ckpts/${model.name}_${model.mel_spec.mel_spec_type}_${model.tokenizer}_${datasets.name}
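  # Checkpoint cadence (added for clarity): updates here are optimizer updates, so with
  # grad_accumulation_steps = 2 a checkpoint every 4000 updates lands roughly every
  # 8000 forward/backward steps; keep_last_n_checkpoints = 1 keeps only the most recent
  # intermediate checkpoint, typically alongside the rolling last checkpoint.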