ovi054 committed on
Commit aadfa78 · verified · 1 Parent(s): be5a559

Upload epoch20 diffusion model

Files changed (3):
  1. adapter_config.json +38 -0
  2. adapter_model.safetensors +3 -0
  3. wan.toml +135 -0
adapter_config.json ADDED
@@ -0,0 +1,38 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": null,
+   "bias": "none",
+   "corda_config": null,
+   "eva_config": null,
+   "exclude_modules": null,
+   "fan_in_fan_out": false,
+   "inference_mode": false,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 32,
+   "lora_bias": false,
+   "lora_dropout": 0.0,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 32,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "ffn.0",
+     "o",
+     "k",
+     "v",
+     "ffn.2",
+     "q"
+   ],
+   "task_type": null,
+   "trainable_token_indices": null,
+   "use_dora": false,
+   "use_rslora": false
+ }
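The config above describes a rank-32 LoRA (lora_alpha 32, no dropout) targeting the q/k/v/o attention projections and the ffn.0/ffn.2 layers. A minimal sketch for inspecting it in Python, assuming only the stdlib json module and that the file sits in the current directory:

import json

# Read the adapter config uploaded in this commit and print the key LoRA hyperparameters.
with open("adapter_config.json", "r", encoding="utf-8") as f:
    cfg = json.load(f)

print("peft_type:", cfg["peft_type"])            # "LORA"
print("rank r:", cfg["r"])                       # 32
print("lora_alpha:", cfg["lora_alpha"])          # 32
print("target_modules:", cfg["target_modules"])  # q, k, v, o, ffn.0, ffn.2

# If the PEFT library is installed, PeftConfig.from_pretrained(".") reads this same file
# into a config object (assumption: a recent peft release).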
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:33bd47ebca6e2f5ffad203880bc4deb9a7cc4ad567d14cc549469f2260f95670
+ size 306807976
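The block above is only the Git LFS pointer; the actual ~307 MB of LoRA weights live in LFS storage. A minimal sketch for listing the tensors once the real file has been pulled, assuming the safetensors and torch packages are installed:

from safetensors import safe_open

# List the LoRA tensor names, shapes, and dtypes without materializing everything at once.
# Assumes the real adapter_model.safetensors has been fetched (e.g. via `git lfs pull`).
with safe_open("adapter_model.safetensors", framework="pt", device="cpu") as f:
    for name in f.keys():
        t = f.get_tensor(name)
        print(name, tuple(t.shape), t.dtype)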
wan.toml ADDED
@@ -0,0 +1,135 @@
+ # Output path for training runs. Each training run makes a new directory in here.
+ output_dir = '/teamspace/studios/this_studio/diffusion-pipe/data/output2'
+
+ # Dataset config file.
+ dataset = 'examples/dataset.toml'
+ # You can have separate eval datasets. Give them a name for Tensorboard metrics.
+ # eval_datasets = [
+ #     {name = 'something', config = 'path/to/eval_dataset.toml'},
+ # ]
+
+ # training settings
+
+ # I usually set this to a really high value because I don't know how long I want to train.
+ epochs = 100
+ # Batch size of a single forward/backward pass for one GPU.
+ micro_batch_size_per_gpu = 1
+ # For mixed video / image training, you can have a different batch size for images.
+ #image_micro_batch_size_per_gpu = 4
+ # Pipeline parallelism degree. A single instance of the model is divided across this many GPUs.
+ pipeline_stages = 1
+ # Number of micro-batches sent through the pipeline for each training step.
+ # If pipeline_stages > 1, a higher GAS means better GPU utilization due to smaller pipeline bubbles (where GPUs aren't overlapping computation).
+ gradient_accumulation_steps = 1
+ # Grad norm clipping.
+ gradient_clipping = 1.0
+ # Learning rate warmup.
+ warmup_steps = 100
+ # Force the learning rate to be this value, regardless of what the optimizer or anything else says.
+ # Can be used to change the learning rate even when resuming from a checkpoint.
+ #force_constant_lr = 1e-5
+
+ # Block swapping is supported for Wan, HunyuanVideo, Flux, and Chroma. This value controls the number
+ # of blocks kept offloaded to RAM. Increasing it lowers VRAM use, but has a performance penalty. The
+ # exact performance penalty depends on the model and the type of training you are doing (e.g. images vs video).
+ # Block swapping only works for LoRA training, and requires pipeline_stages=1.
+ #blocks_to_swap = 20
+
+ # eval settings
+
+ eval_every_n_epochs = 1
+ eval_before_first_step = true
+ # Might want to set these lower for eval so that fewer images get dropped (eval dataset size is usually much smaller than the training set).
+ # Each size bucket of images/videos is rounded down to the nearest multiple of the global batch size, so a higher global batch size means
+ # more dropped images. Usually doesn't matter for training, but the eval set is much smaller so it can matter.
+ eval_micro_batch_size_per_gpu = 1
+ # Batch size for images when doing mixed image / video training. Will be micro_batch_size_per_gpu if not set.
+ #image_eval_micro_batch_size_per_gpu = 4
+ eval_gradient_accumulation_steps = 1
+ # If using block swap, you can disable it for eval. Eval uses less memory, so depending on the block swapping amount you can maybe get away with
+ # doing this, and then eval is much faster.
+ #disable_block_swap_for_eval = true
+
+ # misc settings
+
+ # Probably want to set this a bit higher if you have a smaller dataset so you don't end up with a million saved models.
+ save_every_n_epochs = 10
+ # Can checkpoint the training state every n epochs or minutes. Set only one of these. You can resume from checkpoints using the --resume_from_checkpoint flag.
+ #checkpoint_every_n_epochs = 1
+ checkpoint_every_n_minutes = 120
+ # Always set to true unless you have a huge amount of VRAM.
+ # This can also be 'unsloth' to reduce VRAM even more, with a slight performance hit.
+ activation_checkpointing = true
+
+ # Controls how Deepspeed decides how to divide layers across GPUs. Probably don't change this.
+ partition_method = 'parameters'
+ # Alternatively you can use 'manual' in combination with partition_split, which specifies the split points for dividing
+ # layers between GPUs. For example, with two GPUs, partition_split=[10] puts layers 0-9 on GPU 0, and the rest on GPU 1.
+ # With three GPUs, partition_split=[10, 20] puts layers 0-9 on GPU 0, layers 10-19 on GPU 1, and the rest on GPU 2.
+ # Length of partition_split must be pipeline_stages-1.
+ #partition_split = [N]
+
+ # dtype for saving the LoRA or model, if different from training dtype
+ save_dtype = 'bfloat16'
+ # Batch size for caching latents and text embeddings. Increasing this can lead to higher GPU utilization during the caching phase but uses more memory.
+ caching_batch_size = 1
+ # How often deepspeed logs to console.
+ steps_per_print = 1
+ # How to extract video clips for training from a single input video file.
+ # The video file is first assigned to one of the configured frame buckets, but then we must extract one or more clips of exactly the right
+ # number of frames for that bucket.
+ # single_beginning: one clip starting at the beginning of the video
+ # single_middle: one clip from the middle of the video (cutting off the start and end equally)
+ # multiple_overlapping: extract the minimum number of clips to cover the full range of the video. They might overlap some.
+ # default is single_beginning
+ video_clip_mode = 'single_beginning'
+
+ # This is how you configure the model (Wan, in this config). Other models will be different. See docs/supported_models.md for
+ # details on the configuration and options for each model.
+ [model]
+ type = 'wan'
+ ckpt_path = '/teamspace/studios/this_studio/diffusion-pipe/models/Wan2.1-T2V-14B'
+ dtype = 'bfloat16'
+ # You can use fp8 for the transformer when training LoRA.
+ #transformer_dtype = 'float8'
+ timestep_sample_method = 'logit_normal'
+
+ # For models that support full fine tuning, simply delete or comment out the [adapter] table to FFT.
+ [adapter]
+ type = 'lora'
+ rank = 32
+ # Dtype for the LoRA weights you are training.
+ dtype = 'bfloat16'
+ # You can initialize the lora weights from a previously trained lora.
+ #init_from_existing = '/data/diffusion_pipe_training_runs/something/epoch50'
+
+ [optimizer]
+ # AdamW from the optimi library is a good default since it automatically uses Kahan summation when training bfloat16 weights.
+ # Look at train.py for other options. You could also easily edit the file and add your own.
+ type = 'adamw_optimi'
+ lr = 2e-5
+ betas = [0.9, 0.99]
+ weight_decay = 0.01
+ eps = 1e-8
+
+ # Can use this optimizer for a bit less memory usage.
+ # [optimizer]
+ # type = 'AdamW8bitKahan'
+ # lr = 2e-5
+ # betas = [0.9, 0.99]
+ # weight_decay = 0.01
+ # stabilize = false
+
+ # Any optimizer not explicitly supported will be dynamically loaded from the pytorch-optimizer library.
+ # [optimizer]
+ # type = 'Prodigy'
+ # lr = 1
+ # betas = [0.9, 0.99]
+ # weight_decay = 0.01
+
+ [monitoring]
+ # Set to true and fill in these fields to enable wandb
+ enable_wandb = false
+ wandb_api_key = ''
+ wandb_tracker_name = ''
+ wandb_run_name = ''
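A minimal sketch for sanity-checking this config before launching a run, assuming Python 3.11+ (tomllib is stdlib there) and that wan.toml sits in the current directory; the actual launch command is left to diffusion-pipe's own docs:

import tomllib

# Parse wan.toml and echo the settings that most affect run length and VRAM use.
with open("wan.toml", "rb") as f:
    cfg = tomllib.load(f)

print("output_dir:", cfg["output_dir"])
print("epochs:", cfg["epochs"])
print("micro batch / grad accum:", cfg["micro_batch_size_per_gpu"], cfg["gradient_accumulation_steps"])
print("model:", cfg["model"]["type"], cfg["model"]["dtype"])
print("adapter:", cfg["adapter"]["type"], "rank", cfg["adapter"]["rank"])
print("optimizer:", cfg["optimizer"]["type"], "lr", cfg["optimizer"]["lr"])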