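# Hydra configuration for flow-matching training on the DNA-LLM viral-genome
# datasets: data, logging, optimizer, model, and SLURM launcher settings.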
defaults:
  - _self_
  - override hydra/launcher: submitit_slurm
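# _self_ composes this file's own values; the override swaps Hydra's launcher for
# the submitit SLURM plugin, so --multirun sweeps are submitted as SLURM jobs.
# A typical launch (the entry-point name run_train.py is an assumption):
#   python run_train.py --multirun compute.ngpus=4 optim.lr=1e-4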
compute:
  ngpus: 1
  nodes: 1
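# compute.ngpus and compute.nodes are reused by the SLURM launcher block at the
# bottom of this file through ${compute.ngpus} / ${compute.nodes} interpolation.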
logging:
  log_freq: 100
  log_lr_every: ${logging.log_freq}
  log_file_name: stdout.log
  enable_wandb: True
  entity: flows
  project: flow_matching
  group: null
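# enable_wandb presumably toggles Weights & Biases logging to the entity/project
# above; log_lr_every reuses logging.log_freq via interpolation, so both default
# to every 100 steps.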
data:
  train: DNA-LLM/experiment_one_viral_genomes_train_set_v2
  valid: DNA-LLM/experiment_one_viral_genomes_val_set_v2
  cache_dir: /huggingface/
  num_workers: 8
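# train and valid look like Hugging Face Hub dataset identifiers; cache_dir is
# presumably handed to the datasets library as its cache directory.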
training:
  batch_size: 64
  snapshot: 2000  # 2000
  eval_freq: 20000  # 20000
  perplexity_freq: 200000  # 2000
  seed: 42
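# snapshot, eval_freq, and perplexity_freq are presumably intervals measured in
# training steps; the trailing comments appear to record alternative values.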
eval:
  batch_size: 64
  sample_batch_size: 16
  perplexity: True
  perplexity_batch_size: 16
optim:
  weight_decay: 0.03
  optimizer: AdamW
  lr: 3e-4
  beta1: 0.9
  beta2: 0.95
  eps: 1e-8
  warmup: 2500
  grad_clip: 1.
  eta_min_ratio: 0.1
  fused: false
  n_iters: 1000000
  log_lr_every: ${logging.log_lr_every}
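# AdamW with a 2500-step warmup; eta_min_ratio suggests the learning rate decays
# to 10% of its peak (the schedule itself is defined in code, not here), and
# fused presumably selects PyTorch's fused AdamW kernel. Example override
# (illustrative values, assumed entry point):
#   python run_train.py optim.fused=true optim.lr=1e-4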
flow:
  source_distribution: uniform  # [uniform, mask]
  loss_function: cross_entropy  # [cross_entropy, generalized_kl]
  exponent: 1.
  scheduler_type: polynomial
  sampling_steps: 2048
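# Commented-out alternative using the other options listed above:
# flow:
#   source_distribution: mask
#   loss_function: generalized_kl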
model:
  hidden_size: 768
  cond_dim: 128
  length: 2048
  n_blocks: 12
  n_heads: 12
  dropout: 0.1
  compile: true
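# hidden_size 768 over n_heads 12 gives 64-dimensional attention heads; length is
# presumably the sequence length, and compile presumably enables torch.compile.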
hydra_dir: /user/hassanahmed.hassan/u12592/.project/dir.lustre-grete/learning-nucleoTIDEs/flow_matching-main/model_runs
hydra:
  run:
    dir: ${hydra_dir}/${now:%Y.%m.%d}/${now:%H%M%S}
  sweep:
    dir: ${hydra_dir}/${now:%Y.%m.%d}/${now:%H%M%S}
    subdir: ${hydra.job.num}
  launcher:
    max_num_timeout: 100000
    timeout_min: 4320
    partition: learn
    qos: # TODO: change it to your own qos
    gpus_per_node: ${compute.ngpus}
    mem_gb: 1760
    cpus_per_task: 32
    nodes: ${compute.nodes}
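# Launcher settings map onto hydra-submitit-launcher / SLURM parameters:
# timeout_min 4320 is a 3-day wall clock, and qos is left blank (see the TODO)
# and must be set for the target cluster before submitting. Example sweep,
# submitted through SLURM with the assumed entry point from above:
#   python run_train.py --multirun optim.lr=1e-4,3e-4 compute.ngpus=4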