hparams cleanup

hyperparams.yaml CHANGED (+2 -110)
@@ -1,105 +1,24 @@
-
-# /home/mila/s/subakany/speechbrain_new/recipes/LibriMix/separation/hparams/sepformer-libri2mix.yaml
-# yamllint disable
+
 # ################################
-# Model: SepFormer for source separation
-# https://arxiv.org/abs/2010.13154
+# Model: Pretrained SepFormer for source separation
 # Dataset : Libri2mix
 # ################################
-#
-# Basic parameters
-# Seed needs to be set at top of yaml, before objects with parameters are made
-#
-seed: 1234
-__set_seed: !apply:torch.manual_seed [1234]
-
-# Data params
-
-# e.g. '/yourpath/Libri2Mix/train-clean-360/'
-# the data folder is needed even if dynamic mixing is applied
-data_folder: /miniscratch/subakany/LibriMixData_new/Libri2Mix/
-
-# this is the base folder for dynamic mixing
-base_folder_dm: /miniscratch/subakany/LibriMixData_new/LibriSpeech/train-clean-360_processed/
-
-experiment_name: sepformer-libri2mix
-output_folder: results/sepformer-libri2mix/1234
-train_log: results/sepformer-libri2mix/1234/train_log.txt
-save_folder: results/sepformer-libri2mix/1234/save
-train_data: results/sepformer-libri2mix/1234/save/libri2mix_train-360.csv
-valid_data: results/sepformer-libri2mix/1234/save/libri2mix_dev.csv
-test_data: results/sepformer-libri2mix/1234/save/libri2mix_test.csv
-skip_prep: false
-
-ckpt_interval_minutes: 60
 
 # Experiment params
-auto_mix_prec: true # Set it to True for mixed precision
-test_only: true
 num_spks: 2
-progressbar: true
-save_audio: false # Save estimated sources on disk
 sample_rate: 8000
 
-# Training parameters
-N_epochs: 200
-batch_size: 1
-lr: 0.00015
-clip_grad_norm: 5
-loss_upper_lim: 999999 # this is the upper limit for an acceptable loss
-# if True, the training sequences are cut to a specified length
-limit_training_signal_len: false
-# this is the length of sequences if we choose to limit
-# the signal length of training sequences
-training_signal_len: 32000000
-
-# Set it to True to dynamically create mixtures at training time
-dynamic_mixing: true
-use_wham_noise: false
-
-# Parameters for data augmentation
-use_wavedrop: false
-use_speedperturb: true
-use_speedperturb_sameforeachsource: false
-use_rand_shift: false
-min_shift: -8000
-max_shift: 8000
-
-speedperturb: !new:speechbrain.lobes.augment.TimeDomainSpecAugment
-    perturb_prob: 1.0
-    drop_freq_prob: 0.0
-    drop_chunk_prob: 0.0
-    sample_rate: 8000
-    speeds: [95, 100, 105]
-
-wavedrop: !new:speechbrain.lobes.augment.TimeDomainSpecAugment
-    perturb_prob: 0.0
-    drop_freq_prob: 1.0
-    drop_chunk_prob: 1.0
-    sample_rate: 8000
-
-# loss thresholding -- this thresholds the training loss
-threshold_byloss: true
-threshold: -30
-
 # Encoder parameters
 N_encoder_out: 256
 out_channels: 256
 kernel_size: 16
 kernel_stride: 8
 
-# Dataloader options
-dataloader_opts:
-    batch_size: 1
-    num_workers: 0
-
-
 # Specifying the network
 Encoder: &id003 !new:speechbrain.lobes.models.dual_path.Encoder
     kernel_size: 16
     out_channels: 256
 
-
 SBtfintra: &id001 !new:speechbrain.lobes.models.dual_path.SBTransformerBlock
     num_layers: 8
     d_model: 256
@@ -119,7 +38,6 @@ SBtfinter: &id002 !new:speechbrain.lobes.models.dual_path.SBTransformerBlock
     norm_before: true
 
 MaskNet: &id005 !new:speechbrain.lobes.models.dual_path.Dual_Path_Model
-
     num_spks: 2
     in_channels: 256
     out_channels: 256
@@ -138,36 +56,10 @@ Decoder: &id004 !new:speechbrain.lobes.models.dual_path.Decoder
     stride: 8
     bias: false
 
-optimizer: !name:torch.optim.Adam
-    lr: 0.00015
-    weight_decay: 0
-
-loss: !name:speechbrain.nnet.losses.get_si_snr_with_pitwrapper
-
-lr_scheduler: !new:speechbrain.nnet.schedulers.ReduceLROnPlateau
-    factor: 0.5
-    patience: 2
-    dont_halve_until_epoch: 5
-
-epoch_counter: &id006 !new:speechbrain.utils.epoch_loop.EpochCounter
-    # lr_scheduler: !ref <lr_scheduler>
-
-    limit: 200
-
 modules:
     encoder: *id003
     decoder: *id004
     masknet: *id005
-checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer
-    checkpoints_dir: results/sepformer-libri2mix/1234/save
-    recoverables:
-        encoder: *id003
-        decoder: *id004
-        masknet: *id005
-        counter: *id006
-train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger
-    save_file: results/sepformer-libri2mix/1234/train_log.txt
-
 
 pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer
     loadables:
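After the cleanup, the file declares only what is needed to rebuild the model at inference time: the network definitions, the `modules` mapping, and the `pretrainer` that collects the trained weights listed under `loadables`. The surviving HyperPyYAML tags do real work at load time; below is a minimal sketch of how such a file is consumed, assuming the `hyperpyyaml` package that SpeechBrain builds on (the file path and variable names are illustrative, not part of this commit):

    from hyperpyyaml import load_hyperpyyaml

    # !new: tags instantiate the referenced classes while the YAML is parsed;
    # the &id00N anchors and *id00N aliases make `modules` point at the very
    # same Encoder/Decoder/MaskNet objects defined above it.
    with open("hyperparams.yaml") as fin:
        hparams = load_hyperpyyaml(fin)

    masknet = hparams["modules"]["masknet"]  # same object as hparams["MaskNet"]
    pretrainer = hparams["pretrainer"]       # declares which checkpoints to fetch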