csyxwei committed on
Commit
a8c1b6c
1 Parent(s): a578cda

upload data

Files changed (47)
  1. .gitignore +20 -0
  2. README.md +69 -2
  3. data_scripts/face_alignment.py +79 -0
  4. data_scripts/face_parsing.py +333 -0
  5. data_scripts/process_images.py +72 -0
  6. data_scripts/resnet.py +109 -0
  7. data_scripts/weights/79999_iter.pth +3 -0
  8. delta_edit/README.md +110 -0
  9. delta_edit/__init__.py +0 -0
  10. delta_edit/clip/__init__.py +1 -0
  11. delta_edit/clip/bpe_simple_vocab_16e6.txt.gz +3 -0
  12. delta_edit/clip/clip.py +221 -0
  13. delta_edit/clip/model.py +432 -0
  14. delta_edit/clip/simple_tokenizer.py +132 -0
  15. delta_edit/datasets/__init__.py +0 -0
  16. delta_edit/datasets/test_dataset.py +40 -0
  17. delta_edit/datasets/train_dataset.py +62 -0
  18. delta_edit/delta_mapper.py +73 -0
  19. delta_edit/editing_attributes.txt +29 -0
  20. delta_edit/generate_codes.py +138 -0
  21. delta_edit/models/__init__.py +0 -0
  22. delta_edit/models/encoders/__init__.py +0 -0
  23. delta_edit/models/encoders/helpers.py +140 -0
  24. delta_edit/models/encoders/model_irse.py +84 -0
  25. delta_edit/models/encoders/psp_encoders.py +235 -0
  26. delta_edit/models/stylegan2/__init__.py +0 -0
  27. delta_edit/models/stylegan2/model.py +673 -0
  28. delta_edit/models/stylegan2/npy_ffhq/fs3.npy +3 -0
  29. delta_edit/models/stylegan2/op/__init__.py +2 -0
  30. delta_edit/models/stylegan2/op/fused_act.py +38 -0
  31. delta_edit/models/stylegan2/op/upfirdn2d.py +52 -0
  32. delta_edit/options/__init__.py +0 -0
  33. delta_edit/options/test_options.py +28 -0
  34. delta_edit/options/train_options.py +27 -0
  35. delta_edit/scripts/__init__.py +0 -0
  36. delta_edit/scripts/inference.py +124 -0
  37. delta_edit/scripts/inference_laion.py +230 -0
  38. delta_edit/scripts/inference_real.py +210 -0
  39. delta_edit/scripts/train.py +63 -0
  40. delta_edit/utils/__init__.py +0 -0
  41. delta_edit/utils/map_tool.py +120 -0
  42. delta_edit/utils/stylespace_util.py +160 -0
  43. delta_edit/weights/e4e_ffhq_encode.pt +3 -0
  44. delta_edit/weights/net_face.pth +3 -0
  45. delta_edit/weights/stylegan2-ffhq-config-f.pt +3 -0
  46. filtered_laion_faces.npy +3 -0
  47. filtered_laion_faces.parquet +3 -0
.gitignore ADDED
@@ -0,0 +1,20 @@
+ _debug*
+ .env
+ __pycache__
+ _sc.py
+ *.ckpt
+ *.bin
+
+ .idea
+ .idea/workspace.xml
+ .DS_Store
+ */__pycache__git
+ .pyc
+ .iml
+ __pycache__/
+ */__pycache__/
+ */*/__pycache__/
+ */*/*/__pycache__/
+ */*/*/*/__pycache__/
+ */*/*/*/*/__pycache__/
+ */*/*/*/*/*/__pycache__/
README.md CHANGED
@@ -1,3 +1,70 @@
  ---
- license: cc-by-4.0
- ---
+ # Filtered Laion Face
+
  ---
+
+ This repository provides the pipeline to construct the face-augmented dataset used in [MasterWeaver](https://arxiv.org/abs/2405.05806). The dataset contains ~160k text-image pairs from the [LAION-Face dataset](https://github.com/FacePerceiver/LAION-Face/). We have generated the corresponding captions using BLIP2 and created several attribute-augmented faces.
+
+ ## Steps to Construct the Dataset
+
+ ### 1. Clone the Repository
+
+ ```bash
+ git clone https://huggingface.co/datasets/csyxwei/Filtered-Laion-Face
+ cd Filtered-Laion-Face
+ ```
+
+ ### 2. Download Images
+
+ We provide links to the filtered LAION face images in `filtered_laion_faces.parquet`. You can download the original images with the [img2dataset tool](https://github.com/rom1504/img2dataset/blob/main/dataset_examples/laion-face.md):
+
+ ```bash
+ pip install img2dataset
+
+ img2dataset --url_list ./filtered_laion_faces.parquet --input_format "parquet" \
+   --url_col "URL" --caption_col "TEXT" --output_format files \
+   --output_folder ./filtered_laion_faces/images --processes_count 16 --thread_count 128 --resize_mode no \
+   --save_additional_columns '["NSFW","similarity","LICENSE","SAMPLE_ID"]'
+ ```
+
+ The downloaded images will be saved in the `./filtered_laion_faces/images` directory.
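If you want to sanity-check the metadata, you can inspect the parquet file directly. A minimal sketch using pandas; the `URL` and `TEXT` columns are the ones passed to img2dataset above, and the remaining columns are those listed under `--save_additional_columns`:

```python
import pandas as pd

# Peek at the filtered LAION-Face metadata that img2dataset consumes.
df = pd.read_parquet("./filtered_laion_faces.parquet")
print(len(df), "entries")
print(df.columns.tolist())          # should include URL, TEXT, NSFW, similarity, LICENSE, SAMPLE_ID
print(df[["URL", "TEXT"]].head())   # image links and the original LAION captions
```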
+
+ ### 3. Process Laion Face Images
+
+ Next, use dlib and a face parsing model to crop and align the downloaded images:
+
+ ```bash
+ cd data_scripts
+
+ CUDA_VISIBLE_DEVICES=4 python process_images.py
+ ```
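`process_images.py` looks up each downloaded image in `filtered_laion_faces.npy`, which stores the square-crop offsets, the facial landmarks (68 points, as used by the FFHQ alignment code), and the BLIP2 caption. A minimal sketch for inspecting one entry, with the keys taken from `data_scripts/process_images.py`:

```python
import numpy as np

# Per-image parameters used by data_scripts/process_images.py.
face_params = np.load("filtered_laion_faces.npy", allow_pickle=True)
entry = face_params[0]  # entries are indexed by the numeric image name
print(entry["blip2_caption"])                 # caption written to captions/<name>.txt
print(entry["delta_h"], entry["delta_w"])     # offsets used to crop the image square
print(np.asarray(entry["landmarks"]).shape)   # landmarks passed to image_align, shape (68, 2)
```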
+
+ ### 4. Augment the Face Images
+
+ After processing, construct the augmented faces using [DeltaEdit](https://github.com/Yueming6568/DeltaEdit). Refer to its [official repository](https://github.com/Yueming6568/DeltaEdit) for configuration details.
+
+ Then, run the following command:
+
+ ```bash
+ cd ../delta_edit
+
+ CUDA_VISIBLE_DEVICES=7 python scripts/inference_laion.py \
+   --image_dir "../filtered_laion_faces/images_cropped_face" \
+   --save_dir "../filtered_laion_faces/images_cropped_face_aug/" \
+   --target ""
+ ```
+
+ The final directory structure will be as follows:
+
+ ```
+ filtered_laion_faces
+ └─ images
+ └─ images_cropped
+ └─ images_cropped_face
+ └─ images_cropped_face_mask
+ └─ images_cropped_face_aug
+ └─ captions
+ ```
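Each processed sample is keyed by its numeric image id, so the pieces can be paired straightforwardly. A minimal sketch, assuming the layout written by `data_scripts/process_images.py` (the layout inside `images_cropped_face_aug/` depends on `inference_laion.py`):

```python
import glob
import os

root = "./filtered_laion_faces"

# Pair each caption with its cropped image, aligned face crop and parsing mask,
# following the paths written by data_scripts/process_images.py.
for caption_path in sorted(glob.glob(os.path.join(root, "captions", "*.txt"))):
    name = os.path.splitext(os.path.basename(caption_path))[0]
    sample = {
        "caption": open(caption_path).read(),
        "image": os.path.join(root, "images_cropped", f"{name}.jpg"),
        "face": os.path.join(root, "images_cropped_face", name, f"{name}.jpg"),
        "mask": os.path.join(root, "images_cropped_face_mask", name, f"{name}.png"),
    }
```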
+
+ ## Acknowledgements
+
+ This dataset is built upon the [LAION-Face dataset](https://github.com/FacePerceiver/LAION-Face/) with tools from [FFHQ](https://github.com/NVlabs/ffhq-dataset), [face-parsing.PyTorch](https://github.com/zllrunning/face-parsing.PyTorch), and [DeltaEdit](https://github.com/Yueming6568/DeltaEdit/). We thank the authors for sharing the datasets and code.
data_scripts/face_alignment.py ADDED
@@ -0,0 +1,79 @@
1
+ import numpy as np
2
+ import scipy.ndimage
3
+ import PIL.Image
4
+
5
+ def image_align(image, face_landmarks, output_size=1024, transform_size=4096, enable_padding=True):
6
+ # Align function from FFHQ dataset pre-processing step
7
+ # https://github.com/NVlabs/ffhq-dataset/blob/master/download_ffhq.py
8
+
9
+ lm = np.array(face_landmarks)
10
+ lm_chin = lm[0 : 17] # left-right
11
+ lm_eyebrow_left = lm[17 : 22] # left-right
12
+ lm_eyebrow_right = lm[22 : 27] # left-right
13
+ lm_nose = lm[27 : 31] # top-down
14
+ lm_nostrils = lm[31 : 36] # top-down
15
+ lm_eye_left = lm[36 : 42] # left-clockwise
16
+ lm_eye_right = lm[42 : 48] # left-clockwise
17
+ lm_mouth_outer = lm[48 : 60] # left-clockwise
18
+ lm_mouth_inner = lm[60 : 68] # left-clockwise
19
+
20
+ # Calculate auxiliary vectors.
21
+ eye_left = np.mean(lm_eye_left, axis=0)
22
+ eye_right = np.mean(lm_eye_right, axis=0)
23
+ eye_avg = (eye_left + eye_right) * 0.5
24
+ eye_to_eye = eye_right - eye_left
25
+ mouth_left = lm_mouth_outer[0]
26
+ mouth_right = lm_mouth_outer[6]
27
+ mouth_avg = (mouth_left + mouth_right) * 0.5
28
+ eye_to_mouth = mouth_avg - eye_avg
29
+
30
+ # Choose oriented crop rectangle.
31
+ x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]
32
+ x /= np.hypot(*x)
33
+ x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
34
+ y = np.flipud(x) * [-1, 1]
35
+ c = eye_avg + eye_to_mouth * 0.1
36
+ quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
37
+ qsize = np.hypot(*x) * 2
38
+
39
+ img = PIL.Image.fromarray(image)
40
+
41
+ # Shrink.
42
+ shrink = int(np.floor(qsize / output_size * 0.5))
43
+ if shrink > 1:
44
+ rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink)))
45
+ img = img.resize(rsize, PIL.Image.ANTIALIAS)
46
+ quad /= shrink
47
+ qsize /= shrink
48
+
49
+ # Crop.
50
+ border = max(int(np.rint(qsize * 0.1)), 3)
51
+ crop = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))
52
+ crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]), min(crop[3] + border, img.size[1]))
53
+ if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
54
+ img = img.crop(crop)
55
+ quad -= crop[0:2]
56
+
57
+ # Pad.
58
+ pad = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))
59
+ pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0), max(pad[3] - img.size[1] + border, 0))
60
+ if enable_padding and max(pad) > border - 4:
61
+ pad = np.maximum(pad, int(np.rint(qsize * 0.3)))
62
+ img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
63
+ h, w, _ = img.shape
64
+ y, x, _ = np.ogrid[:h, :w, :1]
65
+ mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w-1-x) / pad[2]), 1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h-1-y) / pad[3]))
66
+ blur = qsize * 0.02
67
+ img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
68
+ img += (np.median(img, axis=(0,1)) - img) * np.clip(mask, 0.0, 1.0)
69
+ img = np.uint8(np.clip(np.rint(img), 0, 255))
70
+ img = PIL.Image.fromarray(img, 'RGB')
71
+ quad += pad[:2]
72
+
73
+ # Transform.
74
+ img = img.transform((transform_size, transform_size), PIL.Image.QUAD, (quad + 0.5).flatten(), PIL.Image.BILINEAR)
75
+ if output_size < transform_size:
76
+ img = img.resize((output_size, output_size), PIL.Image.ANTIALIAS)
77
+
78
+ img_np = np.array(img)
79
+ return img_np
data_scripts/face_parsing.py ADDED
@@ -0,0 +1,333 @@
1
+ #!/usr/bin/python
2
+ # -*- encoding: utf-8 -*-
3
+
4
+
5
+ import torch
6
+ import torch.nn as nn
7
+ import torch.nn.functional as F
8
+
9
+ import os
10
+ import os.path as osp
11
+ import numpy as np
12
+ from PIL import Image
13
+ import torchvision.transforms as transforms
14
+ import cv2
15
+
16
+ from resnet import Resnet18
17
+ # from modules.bn import InPlaceABNSync as BatchNorm2d
18
+
19
+
20
+ class ConvBNReLU(nn.Module):
21
+ def __init__(self, in_chan, out_chan, ks=3, stride=1, padding=1, *args, **kwargs):
22
+ super(ConvBNReLU, self).__init__()
23
+ self.conv = nn.Conv2d(in_chan,
24
+ out_chan,
25
+ kernel_size = ks,
26
+ stride = stride,
27
+ padding = padding,
28
+ bias = False)
29
+ self.bn = nn.BatchNorm2d(out_chan)
30
+ self.init_weight()
31
+
32
+ def forward(self, x):
33
+ x = self.conv(x)
34
+ x = F.relu(self.bn(x))
35
+ return x
36
+
37
+ def init_weight(self):
38
+ for ly in self.children():
39
+ if isinstance(ly, nn.Conv2d):
40
+ nn.init.kaiming_normal_(ly.weight, a=1)
41
+ if not ly.bias is None: nn.init.constant_(ly.bias, 0)
42
+
43
+ class BiSeNetOutput(nn.Module):
44
+ def __init__(self, in_chan, mid_chan, n_classes, *args, **kwargs):
45
+ super(BiSeNetOutput, self).__init__()
46
+ self.conv = ConvBNReLU(in_chan, mid_chan, ks=3, stride=1, padding=1)
47
+ self.conv_out = nn.Conv2d(mid_chan, n_classes, kernel_size=1, bias=False)
48
+ self.init_weight()
49
+
50
+ def forward(self, x):
51
+ x = self.conv(x)
52
+ x = self.conv_out(x)
53
+ return x
54
+
55
+ def init_weight(self):
56
+ for ly in self.children():
57
+ if isinstance(ly, nn.Conv2d):
58
+ nn.init.kaiming_normal_(ly.weight, a=1)
59
+ if not ly.bias is None: nn.init.constant_(ly.bias, 0)
60
+
61
+ def get_params(self):
62
+ wd_params, nowd_params = [], []
63
+ for name, module in self.named_modules():
64
+ if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d):
65
+ wd_params.append(module.weight)
66
+ if not module.bias is None:
67
+ nowd_params.append(module.bias)
68
+ elif isinstance(module, nn.BatchNorm2d):
69
+ nowd_params += list(module.parameters())
70
+ return wd_params, nowd_params
71
+
72
+
73
+ class AttentionRefinementModule(nn.Module):
74
+ def __init__(self, in_chan, out_chan, *args, **kwargs):
75
+ super(AttentionRefinementModule, self).__init__()
76
+ self.conv = ConvBNReLU(in_chan, out_chan, ks=3, stride=1, padding=1)
77
+ self.conv_atten = nn.Conv2d(out_chan, out_chan, kernel_size= 1, bias=False)
78
+ self.bn_atten = nn.BatchNorm2d(out_chan)
79
+ self.sigmoid_atten = nn.Sigmoid()
80
+ self.init_weight()
81
+
82
+ def forward(self, x):
83
+ feat = self.conv(x)
84
+ atten = F.avg_pool2d(feat, feat.size()[2:])
85
+ atten = self.conv_atten(atten)
86
+ atten = self.bn_atten(atten)
87
+ atten = self.sigmoid_atten(atten)
88
+ out = torch.mul(feat, atten)
89
+ return out
90
+
91
+ def init_weight(self):
92
+ for ly in self.children():
93
+ if isinstance(ly, nn.Conv2d):
94
+ nn.init.kaiming_normal_(ly.weight, a=1)
95
+ if not ly.bias is None: nn.init.constant_(ly.bias, 0)
96
+
97
+
98
+ class ContextPath(nn.Module):
99
+ def __init__(self, *args, **kwargs):
100
+ super(ContextPath, self).__init__()
101
+ self.resnet = Resnet18()
102
+ self.arm16 = AttentionRefinementModule(256, 128)
103
+ self.arm32 = AttentionRefinementModule(512, 128)
104
+ self.conv_head32 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1)
105
+ self.conv_head16 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1)
106
+ self.conv_avg = ConvBNReLU(512, 128, ks=1, stride=1, padding=0)
107
+
108
+ self.init_weight()
109
+
110
+ def forward(self, x):
111
+ H0, W0 = x.size()[2:]
112
+ feat8, feat16, feat32 = self.resnet(x)
113
+ H8, W8 = feat8.size()[2:]
114
+ H16, W16 = feat16.size()[2:]
115
+ H32, W32 = feat32.size()[2:]
116
+
117
+ avg = F.avg_pool2d(feat32, feat32.size()[2:])
118
+ avg = self.conv_avg(avg)
119
+ avg_up = F.interpolate(avg, (H32, W32), mode='nearest')
120
+
121
+ feat32_arm = self.arm32(feat32)
122
+ feat32_sum = feat32_arm + avg_up
123
+ feat32_up = F.interpolate(feat32_sum, (H16, W16), mode='nearest')
124
+ feat32_up = self.conv_head32(feat32_up)
125
+
126
+ feat16_arm = self.arm16(feat16)
127
+ feat16_sum = feat16_arm + feat32_up
128
+ feat16_up = F.interpolate(feat16_sum, (H8, W8), mode='nearest')
129
+ feat16_up = self.conv_head16(feat16_up)
130
+
131
+ return feat8, feat16_up, feat32_up # x8, x8, x16
132
+
133
+ def init_weight(self):
134
+ for ly in self.children():
135
+ if isinstance(ly, nn.Conv2d):
136
+ nn.init.kaiming_normal_(ly.weight, a=1)
137
+ if not ly.bias is None: nn.init.constant_(ly.bias, 0)
138
+
139
+ def get_params(self):
140
+ wd_params, nowd_params = [], []
141
+ for name, module in self.named_modules():
142
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
143
+ wd_params.append(module.weight)
144
+ if not module.bias is None:
145
+ nowd_params.append(module.bias)
146
+ elif isinstance(module, nn.BatchNorm2d):
147
+ nowd_params += list(module.parameters())
148
+ return wd_params, nowd_params
149
+
150
+
151
+ ### This is not used, since I replace this with the resnet feature with the same size
152
+ class SpatialPath(nn.Module):
153
+ def __init__(self, *args, **kwargs):
154
+ super(SpatialPath, self).__init__()
155
+ self.conv1 = ConvBNReLU(3, 64, ks=7, stride=2, padding=3)
156
+ self.conv2 = ConvBNReLU(64, 64, ks=3, stride=2, padding=1)
157
+ self.conv3 = ConvBNReLU(64, 64, ks=3, stride=2, padding=1)
158
+ self.conv_out = ConvBNReLU(64, 128, ks=1, stride=1, padding=0)
159
+ self.init_weight()
160
+
161
+ def forward(self, x):
162
+ feat = self.conv1(x)
163
+ feat = self.conv2(feat)
164
+ feat = self.conv3(feat)
165
+ feat = self.conv_out(feat)
166
+ return feat
167
+
168
+ def init_weight(self):
169
+ for ly in self.children():
170
+ if isinstance(ly, nn.Conv2d):
171
+ nn.init.kaiming_normal_(ly.weight, a=1)
172
+ if not ly.bias is None: nn.init.constant_(ly.bias, 0)
173
+
174
+ def get_params(self):
175
+ wd_params, nowd_params = [], []
176
+ for name, module in self.named_modules():
177
+ if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d):
178
+ wd_params.append(module.weight)
179
+ if not module.bias is None:
180
+ nowd_params.append(module.bias)
181
+ elif isinstance(module, nn.BatchNorm2d):
182
+ nowd_params += list(module.parameters())
183
+ return wd_params, nowd_params
184
+
185
+
186
+ class FeatureFusionModule(nn.Module):
187
+ def __init__(self, in_chan, out_chan, *args, **kwargs):
188
+ super(FeatureFusionModule, self).__init__()
189
+ self.convblk = ConvBNReLU(in_chan, out_chan, ks=1, stride=1, padding=0)
190
+ self.conv1 = nn.Conv2d(out_chan,
191
+ out_chan//4,
192
+ kernel_size = 1,
193
+ stride = 1,
194
+ padding = 0,
195
+ bias = False)
196
+ self.conv2 = nn.Conv2d(out_chan//4,
197
+ out_chan,
198
+ kernel_size = 1,
199
+ stride = 1,
200
+ padding = 0,
201
+ bias = False)
202
+ self.relu = nn.ReLU(inplace=True)
203
+ self.sigmoid = nn.Sigmoid()
204
+ self.init_weight()
205
+
206
+ def forward(self, fsp, fcp):
207
+ fcat = torch.cat([fsp, fcp], dim=1)
208
+ feat = self.convblk(fcat)
209
+ atten = F.avg_pool2d(feat, feat.size()[2:])
210
+ atten = self.conv1(atten)
211
+ atten = self.relu(atten)
212
+ atten = self.conv2(atten)
213
+ atten = self.sigmoid(atten)
214
+ feat_atten = torch.mul(feat, atten)
215
+ feat_out = feat_atten + feat
216
+ return feat_out
217
+
218
+ def init_weight(self):
219
+ for ly in self.children():
220
+ if isinstance(ly, nn.Conv2d):
221
+ nn.init.kaiming_normal_(ly.weight, a=1)
222
+ if not ly.bias is None: nn.init.constant_(ly.bias, 0)
223
+
224
+ def get_params(self):
225
+ wd_params, nowd_params = [], []
226
+ for name, module in self.named_modules():
227
+ if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d):
228
+ wd_params.append(module.weight)
229
+ if not module.bias is None:
230
+ nowd_params.append(module.bias)
231
+ elif isinstance(module, nn.BatchNorm2d):
232
+ nowd_params += list(module.parameters())
233
+ return wd_params, nowd_params
234
+
235
+
236
+ class BiSeNet(nn.Module):
237
+ def __init__(self, n_classes, *args, **kwargs):
238
+ super(BiSeNet, self).__init__()
239
+ self.cp = ContextPath()
240
+ ## here self.sp is deleted
241
+ self.ffm = FeatureFusionModule(256, 256)
242
+ self.conv_out = BiSeNetOutput(256, 256, n_classes)
243
+ self.conv_out16 = BiSeNetOutput(128, 64, n_classes)
244
+ self.conv_out32 = BiSeNetOutput(128, 64, n_classes)
245
+ self.init_weight()
246
+
247
+ def forward(self, x):
248
+ H, W = x.size()[2:]
249
+ feat_res8, feat_cp8, feat_cp16 = self.cp(x) # here return res3b1 feature
250
+ feat_sp = feat_res8 # use res3b1 feature to replace spatial path feature
251
+ feat_fuse = self.ffm(feat_sp, feat_cp8)
252
+
253
+ feat_out = self.conv_out(feat_fuse)
254
+ feat_out16 = self.conv_out16(feat_cp8)
255
+ feat_out32 = self.conv_out32(feat_cp16)
256
+
257
+ feat_out = F.interpolate(feat_out, (H, W), mode='bilinear', align_corners=True)
258
+ feat_out16 = F.interpolate(feat_out16, (H, W), mode='bilinear', align_corners=True)
259
+ feat_out32 = F.interpolate(feat_out32, (H, W), mode='bilinear', align_corners=True)
260
+ return feat_out, feat_out16, feat_out32
261
+
262
+ def init_weight(self):
263
+ for ly in self.children():
264
+ if isinstance(ly, nn.Conv2d):
265
+ nn.init.kaiming_normal_(ly.weight, a=1)
266
+ if not ly.bias is None: nn.init.constant_(ly.bias, 0)
267
+
268
+ def get_params(self):
269
+ wd_params, nowd_params, lr_mul_wd_params, lr_mul_nowd_params = [], [], [], []
270
+ for name, child in self.named_children():
271
+ child_wd_params, child_nowd_params = child.get_params()
272
+ if isinstance(child, FeatureFusionModule) or isinstance(child, BiSeNetOutput):
273
+ lr_mul_wd_params += child_wd_params
274
+ lr_mul_nowd_params += child_nowd_params
275
+ else:
276
+ wd_params += child_wd_params
277
+ nowd_params += child_nowd_params
278
+ return wd_params, nowd_params, lr_mul_wd_params, lr_mul_nowd_params
279
+
280
+
281
+ def evaluate(image, net, mode='mask'):
282
+
283
+ to_tensor = transforms.Compose([
284
+ transforms.ToTensor(),
285
+ transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
286
+ ])
287
+ with torch.no_grad():
288
+ image = image.resize((512, 512), Image.BILINEAR)
289
+ img = to_tensor(image)
290
+ img = torch.unsqueeze(img, 0)
291
+ img = img.cuda()
292
+ out = net(img)[0]
293
+ parsing = out.squeeze(0).cpu().numpy().argmax(0)
294
+ if mode == 'face':
295
+ parsing = np.where(parsing == 14, 0, parsing)
296
+ parsing = np.where(parsing == 15, 0, parsing)
297
+ parsing = np.where(parsing == 16, 0, parsing)
298
+ # parsing = np.where(parsing == 17, 0, parsing)
299
+ parsing = np.where(parsing == 18, 0, parsing)
300
+ mask = np.where(parsing > 0, 1, 0)
301
+ mask = mask[:, :, None] * 1.0
302
+ image_masked = np.array(image) * mask
303
+ # image_masked = np.array(image)
304
+ return Image.fromarray(image_masked.astype('uint8'))
305
+ else:
306
+ return parsing
307
+
308
+
309
+ # def evaluate(image, net):
310
+ #
311
+ # to_tensor = transforms.Compose([
312
+ # transforms.ToTensor(),
313
+ # transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
314
+ # ])
315
+ # with torch.no_grad():
316
+ # image = image.resize((512, 512), Image.BILINEAR)
317
+ # img = to_tensor(image)
318
+ # img = torch.unsqueeze(img, 0)
319
+ # img = img.cuda()
320
+ # out = net(img)[0]
321
+ # parsing = out.squeeze(0).cpu().numpy().argmax(0)
322
+ #
323
+ # return parsing
324
+
325
+ if __name__ == "__main__":
326
+ n_classes = 19
327
+ net = BiSeNet(n_classes=n_classes)
328
+ net.cuda()
329
+ net.load_state_dict(torch.load('./weights/79999_iter.pth'))
330
+ net.eval()
331
+ image = Image.open('/home/weiyuxiang/02_man.jpg')
332
+ face_parsing = evaluate(image, net)
333
+ cv2.imwrite('/home/weiyuxiang/02_man_mask.png', face_parsing)
data_scripts/process_images.py ADDED
@@ -0,0 +1,72 @@
1
+ import cv2
2
+ import numpy as np
3
+ import glob
4
+ import os
5
+ from tqdm import tqdm
6
+ from face_alignment import image_align
7
+ from face_parsing import BiSeNet, evaluate
8
+ import torch
9
+ from PIL import Image
10
+
11
+ #### image source and save path
12
+ face_params = np.load('../filtered_laion_faces.npy', allow_pickle=True)
13
+ laion_images_download_dir = '../filtered_laion_faces/images'
14
+ cropped_image_save_dir = '../filtered_laion_faces/images_cropped'
15
+ face_image_save_dir = '../filtered_laion_faces/images_cropped_face'
16
+ face_mask_save_dir = '../filtered_laion_faces/images_cropped_face_mask'
17
+ caption_save_dir = '../filtered_laion_faces/captions'
18
+
19
+ os.makedirs(cropped_image_save_dir, exist_ok=True)
20
+ os.makedirs(caption_save_dir, exist_ok=True)
21
+
22
+ ### face parsing net
23
+ n_classes = 19
24
+ net = BiSeNet(n_classes=n_classes)
25
+ net.cuda()
26
+ net.load_state_dict(torch.load('./weights/79999_iter.pth'))
27
+ net.eval()
28
+ ###
29
+
30
+ image_files = glob.glob(f'{laion_images_download_dir}/*.jpg')
31
+ image_files += glob.glob(f'{laion_images_download_dir}/*/*.jpg')
32
+
33
+ image_files.sort()
34
+
35
+ for image_path in tqdm(image_files):
36
+ image_name = os.path.basename(image_path)[:-4]
37
+ image_param = face_params[int(image_name)]
38
+ # {'delta_h': delta_h, 'delta_w': delta_w, 'landmarks': original_lm, 'blip2_caption': caption}
39
+ face_lm = image_param['landmarks']
40
+ blip2_caption = image_param['blip2_caption']
41
+
42
+ ## crop image as square
43
+ delta_h, delta_w = image_param['delta_h'], image_param['delta_w']
44
+
45
+ ## align face image
46
+ try:
47
+ image_np = cv2.imread(image_path)
48
+ h, w, _ = image_np.shape
49
+ if delta_h > 0:
50
+ face_lm[:, 1] = face_lm[:, 1] - delta_h
51
+ image_np = image_np[delta_h:delta_h + w, :, :]
52
+ if delta_w > 0:
53
+ face_lm[:, 0] = face_lm[:, 0] - delta_w
54
+ image_np = image_np[:, delta_w:delta_w + h, :]
55
+ aligned_image = image_align(image_np, face_lm, transform_size=512)
56
+ face_parsing = evaluate(Image.fromarray(aligned_image[:, :, ::-1]), net)
57
+ except Exception as e:
58
+ print(image_path, e)
59
+ continue
60
+
61
+ cv2.imwrite(os.path.join(cropped_image_save_dir, f'{image_name}.jpg'), image_np)
62
+ tmp_face_image_save_dir = os.path.join(face_image_save_dir, image_name)
63
+ tmp_face_mask_save_dir = os.path.join(face_mask_save_dir, image_name)
64
+ os.makedirs(tmp_face_image_save_dir, exist_ok=True)
65
+ os.makedirs(tmp_face_mask_save_dir, exist_ok=True)
66
+ cv2.imwrite(os.path.join(tmp_face_image_save_dir, f'{image_name}.jpg'), aligned_image)
67
+ cv2.imwrite(os.path.join(tmp_face_mask_save_dir, f'{image_name}.png'), face_parsing)
68
+
69
+ with open(os.path.join(caption_save_dir, f'{image_name}.txt'), 'w') as f:
70
+ f.write(blip2_caption)
71
+
72
+ np.save(os.path.join(tmp_face_image_save_dir, f'{image_name}.npy'), face_lm)
data_scripts/resnet.py ADDED
@@ -0,0 +1,109 @@
1
+ #!/usr/bin/python
2
+ # -*- encoding: utf-8 -*-
3
+
4
+ import torch
5
+ import torch.nn as nn
6
+ import torch.nn.functional as F
7
+ import torch.utils.model_zoo as modelzoo
8
+
9
+ # from modules.bn import InPlaceABNSync as BatchNorm2d
10
+
11
+ resnet18_url = 'https://download.pytorch.org/models/resnet18-5c106cde.pth'
12
+
13
+
14
+ def conv3x3(in_planes, out_planes, stride=1):
15
+ """3x3 convolution with padding"""
16
+ return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
17
+ padding=1, bias=False)
18
+
19
+
20
+ class BasicBlock(nn.Module):
21
+ def __init__(self, in_chan, out_chan, stride=1):
22
+ super(BasicBlock, self).__init__()
23
+ self.conv1 = conv3x3(in_chan, out_chan, stride)
24
+ self.bn1 = nn.BatchNorm2d(out_chan)
25
+ self.conv2 = conv3x3(out_chan, out_chan)
26
+ self.bn2 = nn.BatchNorm2d(out_chan)
27
+ self.relu = nn.ReLU(inplace=True)
28
+ self.downsample = None
29
+ if in_chan != out_chan or stride != 1:
30
+ self.downsample = nn.Sequential(
31
+ nn.Conv2d(in_chan, out_chan,
32
+ kernel_size=1, stride=stride, bias=False),
33
+ nn.BatchNorm2d(out_chan),
34
+ )
35
+
36
+ def forward(self, x):
37
+ residual = self.conv1(x)
38
+ residual = F.relu(self.bn1(residual))
39
+ residual = self.conv2(residual)
40
+ residual = self.bn2(residual)
41
+
42
+ shortcut = x
43
+ if self.downsample is not None:
44
+ shortcut = self.downsample(x)
45
+
46
+ out = shortcut + residual
47
+ out = self.relu(out)
48
+ return out
49
+
50
+
51
+ def create_layer_basic(in_chan, out_chan, bnum, stride=1):
52
+ layers = [BasicBlock(in_chan, out_chan, stride=stride)]
53
+ for i in range(bnum-1):
54
+ layers.append(BasicBlock(out_chan, out_chan, stride=1))
55
+ return nn.Sequential(*layers)
56
+
57
+
58
+ class Resnet18(nn.Module):
59
+ def __init__(self):
60
+ super(Resnet18, self).__init__()
61
+ self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
62
+ bias=False)
63
+ self.bn1 = nn.BatchNorm2d(64)
64
+ self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
65
+ self.layer1 = create_layer_basic(64, 64, bnum=2, stride=1)
66
+ self.layer2 = create_layer_basic(64, 128, bnum=2, stride=2)
67
+ self.layer3 = create_layer_basic(128, 256, bnum=2, stride=2)
68
+ self.layer4 = create_layer_basic(256, 512, bnum=2, stride=2)
69
+ self.init_weight()
70
+
71
+ def forward(self, x):
72
+ x = self.conv1(x)
73
+ x = F.relu(self.bn1(x))
74
+ x = self.maxpool(x)
75
+
76
+ x = self.layer1(x)
77
+ feat8 = self.layer2(x) # 1/8
78
+ feat16 = self.layer3(feat8) # 1/16
79
+ feat32 = self.layer4(feat16) # 1/32
80
+ return feat8, feat16, feat32
81
+
82
+ def init_weight(self):
83
+ state_dict = modelzoo.load_url(resnet18_url)
84
+ self_state_dict = self.state_dict()
85
+ for k, v in state_dict.items():
86
+ if 'fc' in k: continue
87
+ self_state_dict.update({k: v})
88
+ self.load_state_dict(self_state_dict)
89
+
90
+ def get_params(self):
91
+ wd_params, nowd_params = [], []
92
+ for name, module in self.named_modules():
93
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
94
+ wd_params.append(module.weight)
95
+ if not module.bias is None:
96
+ nowd_params.append(module.bias)
97
+ elif isinstance(module, nn.BatchNorm2d):
98
+ nowd_params += list(module.parameters())
99
+ return wd_params, nowd_params
100
+
101
+
102
+ if __name__ == "__main__":
103
+ net = Resnet18()
104
+ x = torch.randn(16, 3, 224, 224)
105
+ out = net(x)
106
+ print(out[0].size())
107
+ print(out[1].size())
108
+ print(out[2].size())
109
+ net.get_params()
data_scripts/weights/79999_iter.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:468e13ca13a9b43cc0881a9f99083a430e9c0a38abd935431d1c28ee94b26567
+ size 53289463
delta_edit/README.md ADDED
@@ -0,0 +1,110 @@
+ # DeltaEdit: Exploring Text-free Training for Text-driven Image Manipulation
+
+ ## Overview
+
+ This repository contains the **official** PyTorch implementation of the paper:
+
+ *DeltaEdit: Exploring Text-free Training for Text-driven Image Manipulation*, CVPR 2023
+
+ ## News
+
+ - [2023-03-11] Upload the training and inference code for the facial domain (◍•ڡ•◍).
+
+ *To be continued...*
+
+ We will release the training and inference code for LSUN cat, church, and horse later : )
+
+ ## Dependencies
+
+ - Install CLIP:
+
+ ```shell
+ conda install --yes -c pytorch pytorch=1.7.1 torchvision cudatoolkit=<CUDA_VERSION>
+ pip install ftfy regex tqdm gdown
+ pip install git+https://github.com/openai/CLIP.git
+ ```
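To check the CLIP install, a minimal sketch using the standard `clip.load`/`clip.tokenize` API; the prompt is one of the editing targets used in the inference commands below, and the ViT-B/32 backbone here is just an example choice:

```python
import torch
import clip

# Load CLIP and encode an example editing prompt.
device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load("ViT-B/32", device=device)
tokens = clip.tokenize(["face with smile"]).to(device)
with torch.no_grad():
    text_features = model.encode_text(tokens)
print(text_features.shape)  # torch.Size([1, 512]) for ViT-B/32
```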
+
+ - Download pre-trained models:
+
+ - The code relies on the [Rosinality](https://github.com/rosinality/stylegan2-pytorch/) PyTorch implementation of StyleGAN2.
+ - Download the pre-trained StyleGAN2 generator model for the facial domain from [here](https://drive.google.com/file/d/1EM87UquaoQmk17Q8d5kYIAHqu0dkYqdT/view?usp=sharing), and then place it into the folder `./models/pretrained_models`.
+ - Download the pre-trained StyleGAN2 generator models for the LSUN cat, church, and horse domains from [here](https://drive.google.com/drive/folders/1YRhXGM-2xk7A4TExM_jXaNg1f2AiCRlw?usp=share_link), and then place them into the folder `./models/pretrained_models/stylegan2-{cat/church/horse}`.
+
+ ## Training
+
+ ### Data preparation
+
+ - DeltaEdit is trained on latent vectors.
+
+ - For the facial domain, 58,000 real images from the [FFHQ](https://github.com/NVlabs/ffhq-dataset) dataset are randomly selected, and 200,000 fake images are sampled from the Z space of StyleGAN for training. Note that all real images are inverted by the [e4e](https://github.com/omertov/encoder4editing) encoder.
+
+ - Download the provided FFHQ latent vectors from [here](https://drive.google.com/drive/folders/13NLq4giSgdcMVkYQIiPj4Xhxz4-wlXSD?usp=sharing) and then place all numpy files into the folder `./latent_code/ffhq`.
+
+ - Generate the 200,000 sampled latent vectors by running the following commands for each specific domain:
+
+ ```bash
+ CUDA_VISIBLE_DEVICES=0 python generate_codes.py --classname ffhq --samples 200000
+ CUDA_VISIBLE_DEVICES=0 python generate_codes.py --classname cat --samples 200000
+ CUDA_VISIBLE_DEVICES=0 python generate_codes.py --classname church --samples 200000
+ CUDA_VISIBLE_DEVICES=0 python generate_codes.py --classname horse --samples 200000
+ ```
+
+ ### Usage
+
+ - The main training script is placed in `./scripts/train.py`.
+ - Training arguments can be found at `./options/train_options.py`.
+
+ For training, please run the following command:
+
+ ```bash
+ CUDA_VISIBLE_DEVICES=0 python scripts/train.py
+ ```
+
+ ## Inference
+
+ - The main inference script is placed in `./scripts/inference.py`.
+ - Inference arguments can be found at `./options/test_options.py`.
+ - Download the pretrained DeltaMapper model for editing human faces from [here](https://drive.google.com/file/d/1Mb2WiELoVDPDIi24tIfoWsjn1l2xTjtZ/view?usp=sharing), and then place it into the folder `./checkpoints`.
+ - Some inference data are provided in `./examples`.
+
+ To produce editing results, please run the following command:
+
+ ```bash
+ CUDA_VISIBLE_DEVICES=1 python scripts/inference.py --target "chubby face","face with eyeglasses","face with smile","face with pale skin","face with tanned skin","face with big eyes","face with black clothes","face with blue suit","happy face","face with bangs","face with red hair","face with black hair","face with blond hair","face with curly hair","face with receding hairline","face with bowlcut hairstyle"
+ ```
+
+ The produced results are shown below.
+
+ You can also pass your desired target attributes to the `--target` flag.
+
+ ## Inference for real images
+
+ - The main inference script is placed in `./scripts/inference_real.py`.
+ - Inference arguments can be found at `./options/test_options.py`.
+ - Download the pretrained DeltaMapper model for editing human faces from [here](https://drive.google.com/file/d/1Mb2WiELoVDPDIi24tIfoWsjn1l2xTjtZ/view?usp=sharing), and then place it into the folder `./checkpoints`.
+ - Download the pretrained e4e encoder `e4e_ffhq_encode.pt` from [e4e](https://github.com/omertov/encoder4editing).
+ - One test image is provided in `./test_imgs`.
+
+ To produce editing results, please run the following command:
+
+ ```bash
+ CUDA_VISIBLE_DEVICES=1 python scripts/inference_real.py --target "chubby face","face with eyeglasses","face with smile","face with pale skin","face with tanned skin","face with big eyes","face with black clothes","face with blue suit","happy face","face with bangs","face with red hair","face with black hair","face with blond hair","face with curly hair","face with receding hairline","face with bowlcut hairstyle"
+ ```
+
+ ## Results
+
+ ![results](./results.jpg)
+
+ ## Acknowledgements
+
+ This code is developed based on the code of [orpatashnik/StyleCLIP](https://github.com/orpatashnik/StyleCLIP) by Or Patashnik et al.
+
+ ## Citation
+
+ If you use this code for your research, please cite our paper:
+
+ ```
+ @InProceedings{lyu2023deltaedit,
+ author = {Lyu, Yueming and Lin, Tianwei and Li, Fu and He, Dongliang and Dong, Jing and Tan, Tieniu},
+ title = {DeltaEdit: Exploring Text-free Training for Text-Driven Image Manipulation},
+ booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
+ year = {2023},
+ }
+ ```
delta_edit/__init__.py ADDED
File without changes
delta_edit/clip/__init__.py ADDED
@@ -0,0 +1 @@
+ from .clip import *
delta_edit/clip/bpe_simple_vocab_16e6.txt.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:924691ac288e54409236115652ad4aa250f48203de50a9e4722a6ecd48d6804a
+ size 1356917
delta_edit/clip/clip.py ADDED
@@ -0,0 +1,221 @@
1
+ import hashlib
2
+ import os
3
+ import urllib
4
+ import warnings
5
+ from typing import Union, List
6
+
7
+ import torch
8
+ from PIL import Image
9
+ from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
10
+ from tqdm import tqdm
11
+
12
+ from .model import build_model
13
+ from .simple_tokenizer import SimpleTokenizer as _Tokenizer
14
+
15
+ try:
16
+ from torchvision.transforms import InterpolationMode
17
+ BICUBIC = InterpolationMode.BICUBIC
18
+ except ImportError:
19
+ BICUBIC = Image.BICUBIC
20
+
21
+
22
+ if torch.__version__.split(".") < ["1", "7", "1"]:
23
+ warnings.warn("PyTorch version 1.7.1 or higher is recommended")
24
+
25
+
26
+ __all__ = ["available_models", "load", "tokenize"]
27
+ _tokenizer = _Tokenizer()
28
+
29
+ _MODELS = {
30
+ "RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
31
+ "RN101": "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt",
32
+ "RN50x4": "https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt",
33
+ "RN50x16": "https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt",
34
+ "ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
35
+ "ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt",
36
+ }
37
+
38
+
39
+ def _download(url: str, root: str = os.path.expanduser("~/.cache/clip")):
40
+ os.makedirs(root, exist_ok=True)
41
+ filename = os.path.basename(url)
42
+
43
+ expected_sha256 = url.split("/")[-2]
44
+ download_target = os.path.join(root, filename)
45
+
46
+ if os.path.exists(download_target) and not os.path.isfile(download_target):
47
+ raise RuntimeError(f"{download_target} exists and is not a regular file")
48
+
49
+ if os.path.isfile(download_target):
50
+ if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256:
51
+ return download_target
52
+ else:
53
+ warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
54
+
55
+ with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
56
+ with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True) as loop:
57
+ while True:
58
+ buffer = source.read(8192)
59
+ if not buffer:
60
+ break
61
+
62
+ output.write(buffer)
63
+ loop.update(len(buffer))
64
+
65
+ if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256:
66
+ raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match")
67
+
68
+ return download_target
69
+
70
+
71
+ def _transform(n_px):
72
+ return Compose([
73
+ Resize(n_px, interpolation=BICUBIC),
74
+ CenterCrop(n_px),
75
+ lambda image: image.convert("RGB"),
76
+ ToTensor(),
77
+ Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
78
+ ])
79
+
80
+
81
+ def available_models() -> List[str]:
82
+ """Returns the names of available CLIP models"""
83
+ return list(_MODELS.keys())
84
+
85
+
86
+ def load(name: str, device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", jit=False):
87
+ """Load a CLIP model
88
+
89
+ Parameters
90
+ ----------
91
+ name : str
92
+ A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
93
+
94
+ device : Union[str, torch.device]
95
+ The device to put the loaded model
96
+
97
+ jit : bool
98
+ Whether to load the optimized JIT model or more hackable non-JIT model (default).
99
+
100
+ Returns
101
+ -------
102
+ model : torch.nn.Module
103
+ The CLIP model
104
+
105
+ preprocess : Callable[[PIL.Image], torch.Tensor]
106
+ A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
107
+ """
108
+ if name in _MODELS:
109
+ model_path = _download(_MODELS[name])
110
+ elif os.path.isfile(name):
111
+ model_path = name
112
+ else:
113
+ raise RuntimeError(f"Model {name} not found; available models = {available_models()}")
114
+
115
+ try:
116
+ # loading JIT archive
117
+ model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
118
+ state_dict = None
119
+ except RuntimeError:
120
+ # loading saved state dict
121
+ if jit:
122
+ warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
123
+ jit = False
124
+ state_dict = torch.load(model_path, map_location="cpu")
125
+
126
+ if not jit:
127
+ model = build_model(state_dict or model.state_dict()).to(device)
128
+ if str(device) == "cpu":
129
+ model.float()
130
+ return model, _transform(model.visual.input_resolution)
131
+
132
+ # patch the device names
133
+ device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
134
+ device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]
135
+
136
+ def patch_device(module):
137
+ try:
138
+ graphs = [module.graph] if hasattr(module, "graph") else []
139
+ except RuntimeError:
140
+ graphs = []
141
+
142
+ if hasattr(module, "forward1"):
143
+ graphs.append(module.forward1.graph)
144
+
145
+ for graph in graphs:
146
+ for node in graph.findAllNodes("prim::Constant"):
147
+ if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
148
+ node.copyAttributes(device_node)
149
+
150
+ model.apply(patch_device)
151
+ patch_device(model.encode_image)
152
+ patch_device(model.encode_text)
153
+
154
+ # patch dtype to float32 on CPU
155
+ if str(device) == "cpu":
156
+ float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
157
+ float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
158
+ float_node = float_input.node()
159
+
160
+ def patch_float(module):
161
+ try:
162
+ graphs = [module.graph] if hasattr(module, "graph") else []
163
+ except RuntimeError:
164
+ graphs = []
165
+
166
+ if hasattr(module, "forward1"):
167
+ graphs.append(module.forward1.graph)
168
+
169
+ for graph in graphs:
170
+ for node in graph.findAllNodes("aten::to"):
171
+ inputs = list(node.inputs())
172
+ for i in [1, 2]: # dtype can be the second or third argument to aten::to()
173
+ if inputs[i].node()["value"] == 5:
174
+ inputs[i].node().copyAttributes(float_node)
175
+
176
+ model.apply(patch_float)
177
+ patch_float(model.encode_image)
178
+ patch_float(model.encode_text)
179
+
180
+ model.float()
181
+
182
+ return model, _transform(model.input_resolution.item())
183
+
184
+
185
+ def tokenize(texts: Union[str, List[str]], context_length: int = 77, truncate: bool = False) -> torch.LongTensor:
186
+ """
187
+ Returns the tokenized representation of given input string(s)
188
+
189
+ Parameters
190
+ ----------
191
+ texts : Union[str, List[str]]
192
+ An input string or a list of input strings to tokenize
193
+
194
+ context_length : int
195
+ The context length to use; all CLIP models use 77 as the context length
196
+
197
+ truncate: bool
198
+ Whether to truncate the text in case its encoding is longer than the context length
199
+
200
+ Returns
201
+ -------
202
+ A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
203
+ """
204
+ if isinstance(texts, str):
205
+ texts = [texts]
206
+
207
+ sot_token = _tokenizer.encoder["<|startoftext|>"]
208
+ eot_token = _tokenizer.encoder["<|endoftext|>"]
209
+ all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
210
+ result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
211
+
212
+ for i, tokens in enumerate(all_tokens):
213
+ if len(tokens) > context_length:
214
+ if truncate:
215
+ tokens = tokens[:context_length]
216
+ tokens[-1] = eot_token
217
+ else:
218
+ raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
219
+ result[i, :len(tokens)] = torch.tensor(tokens)
220
+
221
+ return result
delta_edit/clip/model.py ADDED
@@ -0,0 +1,432 @@
1
+ from collections import OrderedDict
2
+ from typing import Tuple, Union
3
+
4
+ import numpy as np
5
+ import torch
6
+ import torch.nn.functional as F
7
+ from torch import nn
8
+
9
+
10
+ class Bottleneck(nn.Module):
11
+ expansion = 4
12
+
13
+ def __init__(self, inplanes, planes, stride=1):
14
+ super().__init__()
15
+
16
+ # all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
17
+ self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
18
+ self.bn1 = nn.BatchNorm2d(planes)
19
+
20
+ self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
21
+ self.bn2 = nn.BatchNorm2d(planes)
22
+
23
+ self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
24
+
25
+ self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
26
+ self.bn3 = nn.BatchNorm2d(planes * self.expansion)
27
+
28
+ self.relu = nn.ReLU(inplace=True)
29
+ self.downsample = None
30
+ self.stride = stride
31
+
32
+ if stride > 1 or inplanes != planes * Bottleneck.expansion:
33
+ # downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
34
+ self.downsample = nn.Sequential(OrderedDict([
35
+ ("-1", nn.AvgPool2d(stride)),
36
+ ("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
37
+ ("1", nn.BatchNorm2d(planes * self.expansion))
38
+ ]))
39
+
40
+ def forward(self, x: torch.Tensor):
41
+ identity = x
42
+
43
+ out = self.relu(self.bn1(self.conv1(x)))
44
+ out = self.relu(self.bn2(self.conv2(out)))
45
+ out = self.avgpool(out)
46
+ out = self.bn3(self.conv3(out))
47
+
48
+ if self.downsample is not None:
49
+ identity = self.downsample(x)
50
+
51
+ out += identity
52
+ out = self.relu(out)
53
+ return out
54
+
55
+
56
+ class AttentionPool2d(nn.Module):
57
+ def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
58
+ super().__init__()
59
+ self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
60
+ self.k_proj = nn.Linear(embed_dim, embed_dim)
61
+ self.q_proj = nn.Linear(embed_dim, embed_dim)
62
+ self.v_proj = nn.Linear(embed_dim, embed_dim)
63
+ self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
64
+ self.num_heads = num_heads
65
+
66
+ def forward(self, x):
67
+ x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1) # NCHW -> (HW)NC
68
+ x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC
69
+ x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC
70
+ x, _ = F.multi_head_attention_forward(
71
+ query=x, key=x, value=x,
72
+ embed_dim_to_check=x.shape[-1],
73
+ num_heads=self.num_heads,
74
+ q_proj_weight=self.q_proj.weight,
75
+ k_proj_weight=self.k_proj.weight,
76
+ v_proj_weight=self.v_proj.weight,
77
+ in_proj_weight=None,
78
+ in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
79
+ bias_k=None,
80
+ bias_v=None,
81
+ add_zero_attn=False,
82
+ dropout_p=0,
83
+ out_proj_weight=self.c_proj.weight,
84
+ out_proj_bias=self.c_proj.bias,
85
+ use_separate_proj_weight=True,
86
+ training=self.training,
87
+ need_weights=False
88
+ )
89
+
90
+ return x[0]
91
+
92
+
93
+ class ModifiedResNet(nn.Module):
94
+ """
95
+ A ResNet class that is similar to torchvision's but contains the following changes:
96
+ - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
97
+ - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
98
+ - The final pooling layer is a QKV attention instead of an average pool
99
+ """
100
+
101
+ def __init__(self, layers, output_dim, heads, input_resolution=224, width=64):
102
+ super().__init__()
103
+ self.output_dim = output_dim
104
+ self.input_resolution = input_resolution
105
+
106
+ # the 3-layer stem
107
+ self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
108
+ self.bn1 = nn.BatchNorm2d(width // 2)
109
+ self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
110
+ self.bn2 = nn.BatchNorm2d(width // 2)
111
+ self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
112
+ self.bn3 = nn.BatchNorm2d(width)
113
+ self.avgpool = nn.AvgPool2d(2)
114
+ self.relu = nn.ReLU(inplace=True)
115
+
116
+ # residual layers
117
+ self._inplanes = width # this is a *mutable* variable used during construction
118
+ self.layer1 = self._make_layer(width, layers[0])
119
+ self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
120
+ self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
121
+ self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
122
+
123
+ embed_dim = width * 32 # the ResNet feature dimension
124
+ self.attnpool = AttentionPool2d(input_resolution // 32, embed_dim, heads, output_dim)
125
+
126
+ def _make_layer(self, planes, blocks, stride=1):
127
+ layers = [Bottleneck(self._inplanes, planes, stride)]
128
+
129
+ self._inplanes = planes * Bottleneck.expansion
130
+ for _ in range(1, blocks):
131
+ layers.append(Bottleneck(self._inplanes, planes))
132
+
133
+ return nn.Sequential(*layers)
134
+
135
+ def forward(self, x):
136
+ def stem(x):
137
+ for conv, bn in [(self.conv1, self.bn1), (self.conv2, self.bn2), (self.conv3, self.bn3)]:
138
+ x = self.relu(bn(conv(x)))
139
+ x = self.avgpool(x)
140
+ return x
141
+
142
+ x = x.type(self.conv1.weight.dtype)
143
+ x = stem(x)
144
+ x = self.layer1(x)
145
+ x = self.layer2(x)
146
+ x = self.layer3(x)
147
+ x = self.layer4(x)
148
+ x = self.attnpool(x)
149
+
150
+ return x
151
+
152
+
153
+ class LayerNorm(nn.LayerNorm):
154
+ """Subclass torch's LayerNorm to handle fp16."""
155
+
156
+ def forward(self, x: torch.Tensor):
157
+ orig_type = x.dtype
158
+ ret = super().forward(x.type(torch.float32))
159
+ return ret.type(orig_type)
160
+
161
+
162
+ class QuickGELU(nn.Module):
163
+ def forward(self, x: torch.Tensor):
164
+ return x * torch.sigmoid(1.702 * x)
165
+
166
+
167
+ class ResidualAttentionBlock(nn.Module):
168
+ def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
169
+ super().__init__()
170
+
171
+ self.attn = nn.MultiheadAttention(d_model, n_head)
172
+ self.ln_1 = LayerNorm(d_model)
173
+ self.mlp = nn.Sequential(OrderedDict([
174
+ ("c_fc", nn.Linear(d_model, d_model * 4)),
175
+ ("gelu", QuickGELU()),
176
+ ("c_proj", nn.Linear(d_model * 4, d_model))
177
+ ]))
178
+ self.ln_2 = LayerNorm(d_model)
179
+ self.attn_mask = attn_mask
180
+
181
+ def attention(self, x: torch.Tensor):
182
+ self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
183
+ return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
184
+
185
+ def forward(self, x: torch.Tensor):
186
+ x = x + self.attention(self.ln_1(x))
187
+ x = x + self.mlp(self.ln_2(x))
188
+ return x
189
+
190
+
191
+ class Transformer(nn.Module):
192
+ def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None):
193
+ super().__init__()
194
+ self.width = width
195
+ self.layers = layers
196
+ self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)])
197
+
198
+ def forward(self, x: torch.Tensor):
199
+ return self.resblocks(x)
200
+
201
+
202
+ class VisionTransformer(nn.Module):
203
+ def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int):
204
+ super().__init__()
205
+ self.input_resolution = input_resolution
206
+ self.output_dim = output_dim
207
+ self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
208
+
209
+ scale = width ** -0.5
210
+ self.class_embedding = nn.Parameter(scale * torch.randn(width))
211
+ self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
212
+ self.ln_pre = LayerNorm(width)
213
+
214
+ self.transformer = Transformer(width, layers, heads)
215
+
216
+ self.ln_post = LayerNorm(width)
217
+ self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
218
+
219
+ def forward(self, x: torch.Tensor):
220
+ x = self.conv1(x) # shape = [*, width, grid, grid]
221
+ x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
222
+ x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
223
+ x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]
224
+ x = x + self.positional_embedding.to(x.dtype)
225
+ x = self.ln_pre(x)
226
+
227
+ x = x.permute(1, 0, 2) # NLD -> LND
228
+ x = self.transformer(x)
229
+ x = x.permute(1, 0, 2) # LND -> NLD
230
+
231
+ x = self.ln_post(x[:, 0, :])
232
+
233
+ if self.proj is not None:
234
+ x = x @ self.proj
235
+
236
+ return x
237
+
238
+
239
+ class CLIP(nn.Module):
240
+ def __init__(self,
241
+ embed_dim: int,
242
+ # vision
243
+ image_resolution: int,
244
+ vision_layers: Union[Tuple[int, int, int, int], int],
245
+ vision_width: int,
246
+ vision_patch_size: int,
247
+ # text
248
+ context_length: int,
249
+ vocab_size: int,
250
+ transformer_width: int,
251
+ transformer_heads: int,
252
+ transformer_layers: int
253
+ ):
254
+ super().__init__()
255
+
256
+ self.context_length = context_length
257
+
258
+ if isinstance(vision_layers, (tuple, list)):
259
+ vision_heads = vision_width * 32 // 64
260
+ self.visual = ModifiedResNet(
261
+ layers=vision_layers,
262
+ output_dim=embed_dim,
263
+ heads=vision_heads,
264
+ input_resolution=image_resolution,
265
+ width=vision_width
266
+ )
267
+ else:
268
+ vision_heads = vision_width // 64
269
+ self.visual = VisionTransformer(
270
+ input_resolution=image_resolution,
271
+ patch_size=vision_patch_size,
272
+ width=vision_width,
273
+ layers=vision_layers,
274
+ heads=vision_heads,
275
+ output_dim=embed_dim
276
+ )
277
+
278
+ self.transformer = Transformer(
279
+ width=transformer_width,
280
+ layers=transformer_layers,
281
+ heads=transformer_heads,
282
+ attn_mask=self.build_attention_mask()
283
+ )
284
+
285
+ self.vocab_size = vocab_size
286
+ self.token_embedding = nn.Embedding(vocab_size, transformer_width)
287
+ self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
288
+ self.ln_final = LayerNorm(transformer_width)
289
+
290
+ self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
291
+ self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
292
+
293
+ self.initialize_parameters()
294
+
295
+ def initialize_parameters(self):
296
+ nn.init.normal_(self.token_embedding.weight, std=0.02)
297
+ nn.init.normal_(self.positional_embedding, std=0.01)
298
+
299
+ if isinstance(self.visual, ModifiedResNet):
300
+ if self.visual.attnpool is not None:
301
+ std = self.visual.attnpool.c_proj.in_features ** -0.5
302
+ nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std)
303
+ nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std)
304
+ nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std)
305
+ nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std)
306
+
307
+ for resnet_block in [self.visual.layer1, self.visual.layer2, self.visual.layer3, self.visual.layer4]:
308
+ for name, param in resnet_block.named_parameters():
309
+ if name.endswith("bn3.weight"):
310
+ nn.init.zeros_(param)
311
+
312
+ proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
313
+ attn_std = self.transformer.width ** -0.5
314
+ fc_std = (2 * self.transformer.width) ** -0.5
315
+ for block in self.transformer.resblocks:
316
+ nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
317
+ nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
318
+ nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
319
+ nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
320
+
321
+ if self.text_projection is not None:
322
+ nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)
323
+
324
+ def build_attention_mask(self):
325
+ # lazily create causal attention mask, with full attention between the vision tokens
326
+ # pytorch uses additive attention mask; fill with -inf
327
+ mask = torch.empty(self.context_length, self.context_length)
328
+ mask.fill_(float("-inf"))
329
+ mask.triu_(1) # zero out the lower diagonal
330
+ return mask
331
+
332
+ @property
333
+ def dtype(self):
334
+ return self.visual.conv1.weight.dtype
335
+
336
+ def encode_image(self, image):
337
+ return self.visual(image.type(self.dtype))
338
+
339
+ def encode_text(self, text):
340
+ x = self.token_embedding(text).type(self.dtype) # [batch_size, n_ctx, d_model]
341
+
342
+ x = x + self.positional_embedding.type(self.dtype)
343
+ x = x.permute(1, 0, 2) # NLD -> LND
344
+ x = self.transformer(x)
345
+ x = x.permute(1, 0, 2) # LND -> NLD
346
+ x = self.ln_final(x).type(self.dtype)
347
+
348
+ # x.shape = [batch_size, n_ctx, transformer.width]
349
+ # take features from the eot embedding (eot_token is the highest number in each sequence)
350
+ x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
351
+
352
+ return x
353
+
354
+ def forward(self, image, text):
355
+ image_features = self.encode_image(image)
356
+ text_features = self.encode_text(text)
357
+
358
+ # normalized features
359
+ image_features = image_features / image_features.norm(dim=-1, keepdim=True)
360
+ text_features = text_features / text_features.norm(dim=-1, keepdim=True)
361
+
362
+ # cosine similarity as logits
363
+ logit_scale = self.logit_scale.exp()
364
+ logits_per_image = logit_scale * image_features @ text_features.t()
365
+ logits_per_text = logit_scale * text_features @ image_features.t()
366
+
367
+ # shape = [global_batch_size, global_batch_size]
368
+ return logits_per_image, logits_per_text
369
+
370
+
371
+ def convert_weights(model: nn.Module):
372
+ """Convert applicable model parameters to fp16"""
373
+
374
+ def _convert_weights_to_fp16(l):
375
+ if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
376
+ l.weight.data = l.weight.data.half()
377
+ if l.bias is not None:
378
+ l.bias.data = l.bias.data.half()
379
+
380
+ if isinstance(l, nn.MultiheadAttention):
381
+ for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
382
+ tensor = getattr(l, attr)
383
+ if tensor is not None:
384
+ tensor.data = tensor.data.half()
385
+
386
+ for name in ["text_projection", "proj"]:
387
+ if hasattr(l, name):
388
+ attr = getattr(l, name)
389
+ if attr is not None:
390
+ attr.data = attr.data.half()
391
+
392
+ model.apply(_convert_weights_to_fp16)
393
+
394
+
395
+ def build_model(state_dict: dict):
396
+ vit = "visual.proj" in state_dict
397
+
398
+ if vit:
399
+ vision_width = state_dict["visual.conv1.weight"].shape[0]
400
+ vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
401
+ vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
402
+ grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
403
+ image_resolution = vision_patch_size * grid_size
404
+ else:
405
+ counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]]
406
+ vision_layers = tuple(counts)
407
+ vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
408
+ output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
409
+ vision_patch_size = None
410
+ assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
411
+ image_resolution = output_width * 32
412
+
413
+ embed_dim = state_dict["text_projection"].shape[1]
414
+ context_length = state_dict["positional_embedding"].shape[0]
415
+ vocab_size = state_dict["token_embedding.weight"].shape[0]
416
+ transformer_width = state_dict["ln_final.weight"].shape[0]
417
+ transformer_heads = transformer_width // 64
418
+ transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks")))
419
+
420
+ model = CLIP(
421
+ embed_dim,
422
+ image_resolution, vision_layers, vision_width, vision_patch_size,
423
+ context_length, vocab_size, transformer_width, transformer_heads, transformer_layers
424
+ )
425
+
426
+ for key in ["input_resolution", "context_length", "vocab_size"]:
427
+ if key in state_dict:
428
+ del state_dict[key]
429
+
430
+ convert_weights(model)
431
+ model.load_state_dict(state_dict)
432
+ return model.eval()
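
A quick orientation note (not part of the committed file): the `CLIP` class above is consumed through `clip.load` in `delta_edit/clip/clip.py`, which is how `generate_codes.py` and the inference scripts obtain image and text features. A minimal sketch, assuming it is run from inside `delta_edit/` and that `face.png` (a placeholder path) exists:

```python
# Minimal sketch of using the vendored clip package (ViT-B/32, as in generate_codes.py).
import torch
import clip
from PIL import Image

device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load("ViT-B/32", device=device)

image = preprocess(Image.open("face.png")).unsqueeze(0).to(device)   # placeholder image path
text = clip.tokenize(["face with smile"]).to(device)

with torch.no_grad():
    image_feat = model.encode_image(image)   # [1, 512]
    text_feat = model.encode_text(text)      # [1, 512]
    # cosine similarity after L2 normalization, mirroring CLIP.forward above
    image_feat = image_feat / image_feat.norm(dim=-1, keepdim=True)
    text_feat = text_feat / text_feat.norm(dim=-1, keepdim=True)
    similarity = image_feat @ text_feat.t()
```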
delta_edit/clip/simple_tokenizer.py ADDED
@@ -0,0 +1,132 @@
1
+ import gzip
2
+ import html
3
+ import os
4
+ from functools import lru_cache
5
+
6
+ import ftfy
7
+ import regex as re
8
+
9
+
10
+ @lru_cache()
11
+ def default_bpe():
12
+ return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")
13
+
14
+
15
+ @lru_cache()
16
+ def bytes_to_unicode():
17
+ """
18
+ Returns list of utf-8 byte and a corresponding list of unicode strings.
19
+ The reversible bpe codes work on unicode strings.
20
+ This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
21
+ When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
22
+ This is a significant percentage of your normal, say, 32K bpe vocab.
23
+ To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
24
+ And avoids mapping to whitespace/control characters the bpe code barfs on.
25
+ """
26
+ bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
27
+ cs = bs[:]
28
+ n = 0
29
+ for b in range(2**8):
30
+ if b not in bs:
31
+ bs.append(b)
32
+ cs.append(2**8+n)
33
+ n += 1
34
+ cs = [chr(n) for n in cs]
35
+ return dict(zip(bs, cs))
36
+
37
+
38
+ def get_pairs(word):
39
+ """Return set of symbol pairs in a word.
40
+ Word is represented as tuple of symbols (symbols being variable-length strings).
41
+ """
42
+ pairs = set()
43
+ prev_char = word[0]
44
+ for char in word[1:]:
45
+ pairs.add((prev_char, char))
46
+ prev_char = char
47
+ return pairs
48
+
49
+
50
+ def basic_clean(text):
51
+ text = ftfy.fix_text(text)
52
+ text = html.unescape(html.unescape(text))
53
+ return text.strip()
54
+
55
+
56
+ def whitespace_clean(text):
57
+ text = re.sub(r'\s+', ' ', text)
58
+ text = text.strip()
59
+ return text
60
+
61
+
62
+ class SimpleTokenizer(object):
63
+ def __init__(self, bpe_path: str = default_bpe()):
64
+ self.byte_encoder = bytes_to_unicode()
65
+ self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
66
+ merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
67
+ merges = merges[1:49152-256-2+1]
68
+ merges = [tuple(merge.split()) for merge in merges]
69
+ vocab = list(bytes_to_unicode().values())
70
+ vocab = vocab + [v+'</w>' for v in vocab]
71
+ for merge in merges:
72
+ vocab.append(''.join(merge))
73
+ vocab.extend(['<|startoftext|>', '<|endoftext|>'])
74
+ self.encoder = dict(zip(vocab, range(len(vocab))))
75
+ self.decoder = {v: k for k, v in self.encoder.items()}
76
+ self.bpe_ranks = dict(zip(merges, range(len(merges))))
77
+ self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
78
+ self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
79
+
80
+ def bpe(self, token):
81
+ if token in self.cache:
82
+ return self.cache[token]
83
+ word = tuple(token[:-1]) + ( token[-1] + '</w>',)
84
+ pairs = get_pairs(word)
85
+
86
+ if not pairs:
87
+ return token+'</w>'
88
+
89
+ while True:
90
+ bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
91
+ if bigram not in self.bpe_ranks:
92
+ break
93
+ first, second = bigram
94
+ new_word = []
95
+ i = 0
96
+ while i < len(word):
97
+ try:
98
+ j = word.index(first, i)
99
+ new_word.extend(word[i:j])
100
+ i = j
101
+ except:
102
+ new_word.extend(word[i:])
103
+ break
104
+
105
+ if word[i] == first and i < len(word)-1 and word[i+1] == second:
106
+ new_word.append(first+second)
107
+ i += 2
108
+ else:
109
+ new_word.append(word[i])
110
+ i += 1
111
+ new_word = tuple(new_word)
112
+ word = new_word
113
+ if len(word) == 1:
114
+ break
115
+ else:
116
+ pairs = get_pairs(word)
117
+ word = ' '.join(word)
118
+ self.cache[token] = word
119
+ return word
120
+
121
+ def encode(self, text):
122
+ bpe_tokens = []
123
+ text = whitespace_clean(basic_clean(text)).lower()
124
+ for token in re.findall(self.pat, text):
125
+ token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
126
+ bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
127
+ return bpe_tokens
128
+
129
+ def decode(self, tokens):
130
+ text = ''.join([self.decoder[token] for token in tokens])
131
+ text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
132
+ return text
delta_edit/datasets/__init__.py ADDED
File without changes
delta_edit/datasets/test_dataset.py ADDED
@@ -0,0 +1,40 @@
+ import numpy as np
+
+ import torch
+ from torch.utils.data import Dataset
+
+ class TestLatentsDataset(Dataset):
+     def __init__(self):
+
+         style_latents_list = []
+         clip_latents_list = []
+         wplus_latents_list = []
+
+         # change the paths here for testing other latent codes
+         # style_latents_list.append(torch.Tensor(np.load("./examples/sspace_img_feat.npy")))
+         # clip_latents_list.append(torch.Tensor(np.load("./examples/cspace_img_feat.npy")))
+         # wplus_latents_list.append(torch.Tensor(np.load("./examples/wplus_img_feat.npy")))
+
+         style_latents_list.append(torch.Tensor(np.load("/home/weiyuxiang/models/delta_edit/sspace_ffhq_feat.npy")))
+         clip_latents_list.append(torch.Tensor(np.load("/home/weiyuxiang/models/delta_edit/cspace_ffhq_feat.npy")))
+         wplus_latents_list.append(torch.Tensor(np.load("/home/weiyuxiang/models/delta_edit/wspace_ffhq_feat.npy")))
+
+         self.style_latents = torch.cat(style_latents_list, dim=0)
+         self.clip_latents = torch.cat(clip_latents_list, dim=0)
+         self.wplus_latents = torch.cat(wplus_latents_list, dim=0)
+         print(self.wplus_latents.shape)
+
+     def __len__(self):
+
+         return self.style_latents.shape[0]
+
+     def __getitem__(self, index):
+
+         latent_s1 = self.style_latents[index]
+         latent_c1 = self.clip_latents[index]
+         latent_w1 = self.wplus_latents[index]
+         latent_c1 = latent_c1 / latent_c1.norm(dim=-1, keepdim=True).float()
+
+         delta_c = torch.cat([latent_c1, latent_c1], dim=0)
+
+         return latent_s1, delta_c, latent_w1
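
For reference, a minimal sketch of iterating these test latents (the `.npy` paths hard-coded in `__init__` need to point at your own precomputed codes):

```python
# Sketch: wrap the precomputed test latents in a DataLoader.
from torch.utils.data import DataLoader
from datasets.test_dataset import TestLatentsDataset

test_dataset = TestLatentsDataset()
test_loader = DataLoader(test_dataset, batch_size=8, shuffle=False)

for latent_s, delta_c, latent_w in test_loader:
    # latent_s: S-space code, latent_w: matching W+ code,
    # delta_c: the image CLIP feature concatenated with itself (no edit direction yet)
    ...
```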
delta_edit/datasets/train_dataset.py ADDED
@@ -0,0 +1,62 @@
+ import copy
+ import random
+ import numpy as np
+
+ import torch
+ from torch.utils.data import Dataset
+
+ class TrainLatentsDataset(Dataset):
+     def __init__(self, opts, cycle=True):
+
+         style_latents_list = []
+         clip_latents_list = []
+         wplus_latents_list = []
+
+         style_latents_list.append(torch.Tensor(np.load(f"./latent_code/{opts.classname}/sspace_noise_feat.npy")))
+         clip_latents_list.append(torch.Tensor(np.load(f"./latent_code/{opts.classname}/cspace_noise_feat.npy")))
+         wplus_latents_list.append(torch.Tensor(np.load(f"./latent_code/{opts.classname}/wspace_noise_feat.npy")))
+
+         style_latents_list.append(torch.Tensor(np.load(f"./latent_code/{opts.classname}/sspace_ffhq_feat.npy")))
+         clip_latents_list.append(torch.Tensor(np.load(f"./latent_code/{opts.classname}/cspace_ffhq_feat.npy")))
+         wplus_latents_list.append(torch.Tensor(np.load(f"./latent_code/{opts.classname}/wspace_ffhq_feat.npy")))
+
+         self.style_latents = torch.cat(style_latents_list, dim=0)
+         self.clip_latents = torch.cat(clip_latents_list, dim=0)
+         self.wplus_latents = torch.cat(wplus_latents_list, dim=0)
+
+         self.style_latents = self.style_latents[:200000+58000]
+         self.clip_latents = self.clip_latents[:200000+58000]
+         self.wplus_latents = self.wplus_latents[:200000+58000]
+
+         self.dataset_size = self.style_latents.shape[0]
+         print("dataset size", self.dataset_size)
+         self.cycle = cycle
+
+     def __len__(self):
+         if self.cycle:
+             return self.style_latents.shape[0] * 50
+         else:
+             return self.style_latents.shape[0]
+
+     def __getitem__(self, index):
+         if self.cycle:
+             index = index % self.dataset_size
+
+         latent_s1 = self.style_latents[index]
+         latent_c1 = self.clip_latents[index]
+         latent_w1 = self.wplus_latents[index]
+         latent_c1 = latent_c1 / latent_c1.norm(dim=-1, keepdim=True).float()
+
+         random_index = random.randint(0, self.dataset_size - 1)
+         latent_s2 = self.style_latents[random_index]
+         latent_c2 = self.clip_latents[random_index]
+         latent_w2 = self.wplus_latents[random_index]
+         latent_c2 = latent_c2 / latent_c2.norm(dim=-1, keepdim=True).float()
+
+         delta_s1 = latent_s2 - latent_s1
+         delta_c = latent_c2 - latent_c1
+
+         delta_c = delta_c / delta_c.norm(dim=-1, keepdim=True).float().clamp(min=1e-5)
+         delta_c = torch.cat([latent_c1, delta_c], dim=0)
+
+         return latent_s1, delta_c, delta_s1
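
Each sample pairs a source latent with a randomly drawn second latent, so the mapper can be trained to regress the S-space delta from the CLIP-space delta. A rough sketch of consuming it (the real loop lives in `scripts/train.py`; the batch size here is arbitrary):

```python
# Sketch: iterate training pairs of (S code, CLIP condition, S-space delta).
from argparse import Namespace
from torch.utils.data import DataLoader
from datasets.train_dataset import TrainLatentsDataset

opts = Namespace(classname="ffhq")     # selects ./latent_code/ffhq/*.npy
train_loader = DataLoader(TrainLatentsDataset(opts), batch_size=64, shuffle=True)

for latent_s1, delta_c, delta_s1 in train_loader:
    # latent_s1: source S-space code
    # delta_c:   1024-dim condition = [CLIP(x1), normalized CLIP(x2) - CLIP(x1)]
    # delta_s1:  regression target S(x2) - S(x1)
    ...
```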
delta_edit/delta_mapper.py ADDED
@@ -0,0 +1,73 @@
+ import math
+
+ import torch
+ from torch import nn
+ from torch.nn import Module
+ import torch.nn.functional as F
+
+ from models.stylegan2.model import EqualLinear, PixelNorm
+
+ class Mapper(Module):
+
+     def __init__(self, in_channel=512, out_channel=512, norm=True, num_layers=4):
+         super(Mapper, self).__init__()
+
+         layers = [PixelNorm()] if norm else []
+
+         layers.append(EqualLinear(in_channel, out_channel, lr_mul=0.01, activation='fused_lrelu'))
+         for _ in range(num_layers-1):
+             layers.append(EqualLinear(out_channel, out_channel, lr_mul=0.01, activation='fused_lrelu'))
+         self.mapping = nn.Sequential(*layers)
+
+     def forward(self, x):
+         x = self.mapping(x)
+         return x
+
+ class DeltaMapper(Module):
+
+     def __init__(self):
+         super(DeltaMapper, self).__init__()
+
+         # Style Module (sm)
+         self.sm_coarse = Mapper(512, 512)
+         self.sm_medium = Mapper(512, 512)
+         self.sm_fine = Mapper(2464, 2464)
+
+         # Condition Module (cm)
+         self.cm_coarse = Mapper(1024, 512)
+         self.cm_medium = Mapper(1024, 512)
+         self.cm_fine = Mapper(1024, 2464)
+
+         # Fusion Module (fm)
+         self.fm_coarse = Mapper(512*2, 512, norm=False)
+         self.fm_medium = Mapper(512*2, 512, norm=False)
+         self.fm_fine = Mapper(2464*2, 2464, norm=False)
+
+     def forward(self, sspace_feat, clip_feat):
+
+         s_coarse = sspace_feat[:, :3*512].view(-1, 3, 512)
+         s_medium = sspace_feat[:, 3*512:7*512].view(-1, 4, 512)
+         s_fine = sspace_feat[:, 7*512:]  # channels: 2464
+
+         s_coarse = self.sm_coarse(s_coarse)
+         s_medium = self.sm_medium(s_medium)
+         s_fine = self.sm_fine(s_fine)
+
+         c_coarse = self.cm_coarse(clip_feat)
+         c_medium = self.cm_medium(clip_feat)
+         c_fine = self.cm_fine(clip_feat)
+
+         x_coarse = torch.cat([s_coarse, torch.stack([c_coarse]*3, dim=1)], dim=2)  # [b, 3, 1024]
+         x_medium = torch.cat([s_medium, torch.stack([c_medium]*4, dim=1)], dim=2)  # [b, 4, 1024]
+         x_fine = torch.cat([s_fine, c_fine], dim=1)  # [b, 2464*2]
+
+         x_coarse = self.fm_coarse(x_coarse)
+         x_coarse = x_coarse.view(-1, 3*512)
+
+         x_medium = self.fm_medium(x_medium)
+         x_medium = x_medium.view(-1, 4*512)
+
+         x_fine = self.fm_fine(x_fine)
+
+         out = torch.cat([x_coarse, x_medium, x_fine], dim=1)
+         return out
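
A shape check for the mapper, using dummy tensors sized to match the slicing above (3*512 coarse + 4*512 medium + 2464 fine = 6048 S-space channels, plus the 1024-dim CLIP condition). This is a sketch only and assumes the pure-PyTorch ops under `models/stylegan2/op` import cleanly on your machine:

```python
# Sketch: verify DeltaMapper input/output dimensions with random tensors.
import torch
from delta_mapper import DeltaMapper   # run from inside delta_edit/

net = DeltaMapper()
sspace_feat = torch.randn(2, 6048)     # 3*512 + 4*512 + 2464
clip_feat = torch.randn(2, 1024)       # [image CLIP feature, CLIP delta]

delta_s = net(sspace_feat, clip_feat)
print(delta_s.shape)                   # expected: torch.Size([2, 6048])
```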
delta_edit/editing_attributes.txt ADDED
@@ -0,0 +1,29 @@
+ face with big eyes,1,face with big eyes
+ face with big eyes,-1.2,face with small eyes
+ face with mouth open,1,face with mouth open
+ face with mouth open,-1.2,face with mouth closed
+ face with bushy eyebrows,3,face with bushy eyebrows
+ face with thick eyebrows,2,face with thick eyebrows
+ face with no eyebrows,1.5,face with no eyebrows
+ face with beard,2,face with beard
+ face with makeup,4,face with makeup
+ young face,4,young face
+ woman face,2,woman face
+ man face,2,man face
+ chubby face,1.5,chubby face
+ face with eyeglasses,1.5,face with eyeglasses
+ face with smile,1,face with smile
+ happy face,1,happy face
+ surprised face,1.5,surprised face
+ angry face,1.5,angry face
+ face with bangs,1.5,face with bangs
+ face with red hair,1.5,face with red hair
+ face with black hair,1.5,face with black hair
+ face with blond hair,1.5,face with blond hair
+ face with grey hair,2,face with grey hair
+ face with white hair,2,face with white hair
+ face with curly hair,1.5,face with curly hair
+ face with receding hairline,1.5,face with receding hairline
+ face with bowlcut hairstyle,1.5,face with bowlcut hairstyle
+ face with straight long hair,1.5,face with straight long hair
+ bald face,1.5,bald face
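
Each row appears to hold three comma-separated fields: the target text prompt, the edit strength, and the name used for the edited output. A small parsing sketch (the field meanings are inferred from how the inference scripts use this file, so treat them as an assumption):

```python
# Sketch: parse editing_attributes.txt into (prompt, strength, output name) triples.
with open("delta_edit/editing_attributes.txt") as f:
    for line in f:
        target, strength, name = line.strip().split(",")
        print(f"{name}: edit toward '{target}' with strength {float(strength)}")
```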
delta_edit/generate_codes.py ADDED
@@ -0,0 +1,138 @@
1
+ import os
2
+ import argparse
3
+ import clip
4
+
5
+ import random
6
+ import numpy as np
7
+ import torch
8
+ from torchvision import utils
9
+ from utils import stylespace_util
10
+ from models.stylegan2.model import Generator
11
+
12
+ def save_image_pytorch(img, name):
13
+ """Helper function to save torch tensor into an image file."""
14
+ utils.save_image(
15
+ img,
16
+ name,
17
+ nrow=1,
18
+ padding=0,
19
+ normalize=True,
20
+ range=(-1, 1),
21
+ )
22
+
23
+
24
+ def generate(args, netG, device, mean_latent):
25
+
26
+ device = "cuda" if torch.cuda.is_available() else "cpu"
27
+ model, preprocess = clip.load("ViT-B/32", device=device)
28
+ avg_pool = torch.nn.AvgPool2d(kernel_size=1024 // 32)
29
+ upsample = torch.nn.Upsample(scale_factor=7)
30
+
31
+ ind = 0
32
+ with torch.no_grad():
33
+ netG.eval()
34
+
35
+ # Generate images from a file of input noises
36
+ if args.fixed_z is not None:
37
+ sample_z = torch.load(args.fixed_z, map_location=device)
38
+ for start in range(0, sample_z.size(0), args.batch_size):
39
+ end = min(start + args.batch_size, sample_z.size(0))
40
+ z_batch = sample_z[start:end]
41
+ sample, _ = netG([z_batch], truncation=args.truncation, truncation_latent=mean_latent)
42
+ for s in sample:
43
+ save_image_pytorch(s, f'{args.save_dir}/{str(ind).zfill(6)}.png')
44
+ ind += 1
45
+ return
46
+
47
+ # Generate image by sampling input noises
48
+ w_latents_list = []
49
+ s_latents_list = []
50
+ c_latents_list = []
51
+ for start in range(0, args.samples, args.batch_size):
52
+ end = min(start + args.batch_size, args.samples)
53
+ batch_sz = end - start
54
+ print(f'current_num:{start}')
55
+ sample_z = torch.randn(batch_sz, 512, device=device)
56
+
57
+ sample, w_latents = netG([sample_z], truncation=args.truncation, truncation_latent=mean_latent,return_latents=True)
58
+ style_space, noise = stylespace_util.encoder_latent(netG, w_latents)
59
+ s_latents = torch.cat(style_space, dim=1)
60
+
61
+ tmp_imgs = stylespace_util.decoder(netG, style_space, w_latents, noise)
62
+ # for s in tmp_imgs:
63
+ # save_image_pytorch(s, f'{args.save_dir}/{str(ind).zfill(6)}.png')
64
+ # ind += 1
65
+
66
+ img_gen_for_clip = upsample(tmp_imgs)
67
+ img_gen_for_clip = avg_pool(img_gen_for_clip)
68
+ c_latents = model.encode_image(img_gen_for_clip)
69
+
70
+ w_latents_list.append(w_latents)
71
+ s_latents_list.append(s_latents)
72
+ c_latents_list.append(c_latents)
73
+ w_all_latents = torch.cat(w_latents_list, dim=0)
74
+ s_all_latents = torch.cat(s_latents_list, dim=0)
75
+ c_all_latents = torch.cat(c_latents_list, dim=0)
76
+
77
+ print(w_all_latents.size())
78
+ print(s_all_latents.size())
79
+ print(c_all_latents.size())
80
+
81
+ w_all_latents = w_all_latents.cpu().numpy()
82
+ s_all_latents = s_all_latents.cpu().numpy()
83
+ c_all_latents = c_all_latents.cpu().numpy()
84
+
85
+ os.makedirs(os.path.join(args.save_dir, args.classname), exist_ok=True)
86
+ np.save(f"{args.save_dir}/{args.classname}/wspace_noise_feat.npy", w_all_latents)
87
+ np.save(f"{args.save_dir}/{args.classname}/sspace_noise_feat.npy", s_all_latents)
88
+ np.save(f"{args.save_dir}/{args.classname}/cspace_noise_feat.npy", c_all_latents)
89
+
90
+ if __name__ == '__main__':
91
+ parser = argparse.ArgumentParser()
92
+
93
+ parser.add_argument('--classname', type=str, default='ffhq', help="name of the pretrained StyleGAN2 generator class (e.g. ffhq)")
94
+ parser.add_argument('--save_dir', type=str, default='./latent_code', help="place to save the output")
95
+ parser.add_argument('--ckpt', type=str, default='./models/pretrained_models', help="checkpoint file for the generator")
96
+ parser.add_argument('--size', type=int, default=1024, help="output size of the generator")
97
+ parser.add_argument('--fixed_z', type=str, default=None, help="expect a .pth file. If given, will use this file as the input noise for the output")
98
+ parser.add_argument('--w_shift', type=str, default=None, help="expect a .pth file. Apply a w-latent shift to the generator")
99
+ parser.add_argument('--batch_size', type=int, default=10, help="batch size used to generate outputs")
100
+ parser.add_argument('--samples', type=int, default=200000, help="number of samples to generate; overridden if --fixed_z is given")
101
+ parser.add_argument('--truncation', type=float, default=1, help="truncation strength (1 disables truncation; originally 0.5)")
102
+ parser.add_argument('--truncation_mean', type=int, default=4096, help="number of samples to calculate the mean latent for truncation")
103
+ parser.add_argument('--seed', type=int, default=None, help="if specified, use a fixed random seed")
104
+ parser.add_argument('--device', type=str, default='cuda')
105
+
106
+ args = parser.parse_args()
107
+
108
+ device = args.device
109
+ # use a fixed seed if given
110
+ if args.seed is not None:
111
+ random.seed(args.seed)
112
+ torch.manual_seed(args.seed)
113
+ torch.cuda.manual_seed_all(args.seed)
114
+
115
+ if not os.path.exists(args.save_dir):
116
+ os.makedirs(args.save_dir)
117
+
118
+ netG = Generator(args.size, 512, 8).to(device)
119
+ if args.classname == 'ffhq':
120
+ ckpt_path = os.path.join(args.ckpt,f'stylegan2-{args.classname}-config-f.pt')
121
+ else:
122
+ ckpt_path = os.path.join(args.ckpt,f'stylegan2-{args.classname}','netG.pth')
123
+ print(ckpt_path)
124
+ checkpoint = torch.load(ckpt_path, map_location='cpu')
125
+
126
+ if args.classname == 'ffhq':
127
+ netG.load_state_dict(checkpoint['g_ema'])
128
+ else:
129
+ netG.load_state_dict(checkpoint)
130
+
131
+ # get mean latent if truncation is applied
132
+ if args.truncation < 1:
133
+ with torch.no_grad():
134
+ mean_latent = netG.mean_latent(args.truncation_mean)
135
+ else:
136
+ mean_latent = None
137
+
138
+ generate(args, netG, device, mean_latent)
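
One detail worth noting: the generator outputs 1024x1024 images, and `generate` resizes them for CLIP by nearest-neighbor upsampling by a factor of 7 and then average pooling with a 32-pixel kernel, which lands exactly on the 224x224 resolution CLIP expects (1024 * 7 / 32 = 224). A stand-alone equivalent of that step, for reference:

```python
# Sketch of the 1024 -> 224 resize used before model.encode_image above.
import torch

upsample = torch.nn.Upsample(scale_factor=7)
avg_pool = torch.nn.AvgPool2d(kernel_size=1024 // 32)

img = torch.randn(1, 3, 1024, 1024)    # stand-in for a generator output batch
clip_input = avg_pool(upsample(img))
print(clip_input.shape)                 # torch.Size([1, 3, 224, 224])
```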
delta_edit/models/__init__.py ADDED
File without changes
delta_edit/models/encoders/__init__.py ADDED
File without changes
delta_edit/models/encoders/helpers.py ADDED
@@ -0,0 +1,140 @@
1
+ from collections import namedtuple
2
+ import torch
3
+ import torch.nn.functional as F
4
+ from torch.nn import Conv2d, BatchNorm2d, PReLU, ReLU, Sigmoid, MaxPool2d, AdaptiveAvgPool2d, Sequential, Module
5
+
6
+ """
7
+ ArcFace implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
8
+ """
9
+
10
+
11
+ class Flatten(Module):
12
+ def forward(self, input):
13
+ return input.view(input.size(0), -1)
14
+
15
+
16
+ def l2_norm(input, axis=1):
17
+ norm = torch.norm(input, 2, axis, True)
18
+ output = torch.div(input, norm)
19
+ return output
20
+
21
+
22
+ class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])):
23
+ """ A named tuple describing a ResNet block. """
24
+
25
+
26
+ def get_block(in_channel, depth, num_units, stride=2):
27
+ return [Bottleneck(in_channel, depth, stride)] + [Bottleneck(depth, depth, 1) for i in range(num_units - 1)]
28
+
29
+
30
+ def get_blocks(num_layers):
31
+ if num_layers == 50:
32
+ blocks = [
33
+ get_block(in_channel=64, depth=64, num_units=3),
34
+ get_block(in_channel=64, depth=128, num_units=4),
35
+ get_block(in_channel=128, depth=256, num_units=14),
36
+ get_block(in_channel=256, depth=512, num_units=3)
37
+ ]
38
+ elif num_layers == 100:
39
+ blocks = [
40
+ get_block(in_channel=64, depth=64, num_units=3),
41
+ get_block(in_channel=64, depth=128, num_units=13),
42
+ get_block(in_channel=128, depth=256, num_units=30),
43
+ get_block(in_channel=256, depth=512, num_units=3)
44
+ ]
45
+ elif num_layers == 152:
46
+ blocks = [
47
+ get_block(in_channel=64, depth=64, num_units=3),
48
+ get_block(in_channel=64, depth=128, num_units=8),
49
+ get_block(in_channel=128, depth=256, num_units=36),
50
+ get_block(in_channel=256, depth=512, num_units=3)
51
+ ]
52
+ else:
53
+ raise ValueError("Invalid number of layers: {}. Must be one of [50, 100, 152]".format(num_layers))
54
+ return blocks
55
+
56
+
57
+ class SEModule(Module):
58
+ def __init__(self, channels, reduction):
59
+ super(SEModule, self).__init__()
60
+ self.avg_pool = AdaptiveAvgPool2d(1)
61
+ self.fc1 = Conv2d(channels, channels // reduction, kernel_size=1, padding=0, bias=False)
62
+ self.relu = ReLU(inplace=True)
63
+ self.fc2 = Conv2d(channels // reduction, channels, kernel_size=1, padding=0, bias=False)
64
+ self.sigmoid = Sigmoid()
65
+
66
+ def forward(self, x):
67
+ module_input = x
68
+ x = self.avg_pool(x)
69
+ x = self.fc1(x)
70
+ x = self.relu(x)
71
+ x = self.fc2(x)
72
+ x = self.sigmoid(x)
73
+ return module_input * x
74
+
75
+
76
+ class bottleneck_IR(Module):
77
+ def __init__(self, in_channel, depth, stride):
78
+ super(bottleneck_IR, self).__init__()
79
+ if in_channel == depth:
80
+ self.shortcut_layer = MaxPool2d(1, stride)
81
+ else:
82
+ self.shortcut_layer = Sequential(
83
+ Conv2d(in_channel, depth, (1, 1), stride, bias=False),
84
+ BatchNorm2d(depth)
85
+ )
86
+ self.res_layer = Sequential(
87
+ BatchNorm2d(in_channel),
88
+ Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), PReLU(depth),
89
+ Conv2d(depth, depth, (3, 3), stride, 1, bias=False), BatchNorm2d(depth)
90
+ )
91
+
92
+ def forward(self, x):
93
+ shortcut = self.shortcut_layer(x)
94
+ res = self.res_layer(x)
95
+ return res + shortcut
96
+
97
+
98
+ class bottleneck_IR_SE(Module):
99
+ def __init__(self, in_channel, depth, stride):
100
+ super(bottleneck_IR_SE, self).__init__()
101
+ if in_channel == depth:
102
+ self.shortcut_layer = MaxPool2d(1, stride)
103
+ else:
104
+ self.shortcut_layer = Sequential(
105
+ Conv2d(in_channel, depth, (1, 1), stride, bias=False),
106
+ BatchNorm2d(depth)
107
+ )
108
+ self.res_layer = Sequential(
109
+ BatchNorm2d(in_channel),
110
+ Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
111
+ PReLU(depth),
112
+ Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
113
+ BatchNorm2d(depth),
114
+ SEModule(depth, 16)
115
+ )
116
+
117
+ def forward(self, x):
118
+ shortcut = self.shortcut_layer(x)
119
+ res = self.res_layer(x)
120
+ return res + shortcut
121
+
122
+
123
+ def _upsample_add(x, y):
124
+ """Upsample and add two feature maps.
125
+ Args:
126
+ x: (Variable) top feature map to be upsampled.
127
+ y: (Variable) lateral feature map.
128
+ Returns:
129
+ (Variable) added feature map.
130
+ Note in PyTorch, when input size is odd, the upsampled feature map
131
+ with `F.upsample(..., scale_factor=2, mode='nearest')`
132
+ may not be equal to the lateral feature map size.
133
+ e.g.
134
+ original input size: [N,_,15,15] ->
135
+ conv2d feature map size: [N,_,8,8] ->
136
+ upsampled feature map size: [N,_,16,16]
137
+ So we choose bilinear upsample which supports arbitrary output sizes.
138
+ """
139
+ _, _, H, W = y.size()
140
+ return F.interpolate(x, size=(H, W), mode='bilinear', align_corners=True) + y
delta_edit/models/encoders/model_irse.py ADDED
@@ -0,0 +1,84 @@
1
+ from torch.nn import Linear, Conv2d, BatchNorm1d, BatchNorm2d, PReLU, Dropout, Sequential, Module
2
+ from models.encoders.helpers import get_blocks, Flatten, bottleneck_IR, bottleneck_IR_SE, l2_norm
3
+
4
+ """
5
+ Modified Backbone implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
6
+ """
7
+
8
+
9
+ class Backbone(Module):
10
+ def __init__(self, input_size, num_layers, mode='ir', drop_ratio=0.4, affine=True):
11
+ super(Backbone, self).__init__()
12
+ assert input_size in [112, 224], "input_size should be 112 or 224"
13
+ assert num_layers in [50, 100, 152], "num_layers should be 50, 100 or 152"
14
+ assert mode in ['ir', 'ir_se'], "mode should be ir or ir_se"
15
+ blocks = get_blocks(num_layers)
16
+ if mode == 'ir':
17
+ unit_module = bottleneck_IR
18
+ elif mode == 'ir_se':
19
+ unit_module = bottleneck_IR_SE
20
+ self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
21
+ BatchNorm2d(64),
22
+ PReLU(64))
23
+ if input_size == 112:
24
+ self.output_layer = Sequential(BatchNorm2d(512),
25
+ Dropout(drop_ratio),
26
+ Flatten(),
27
+ Linear(512 * 7 * 7, 512),
28
+ BatchNorm1d(512, affine=affine))
29
+ else:
30
+ self.output_layer = Sequential(BatchNorm2d(512),
31
+ Dropout(drop_ratio),
32
+ Flatten(),
33
+ Linear(512 * 14 * 14, 512),
34
+ BatchNorm1d(512, affine=affine))
35
+
36
+ modules = []
37
+ for block in blocks:
38
+ for bottleneck in block:
39
+ modules.append(unit_module(bottleneck.in_channel,
40
+ bottleneck.depth,
41
+ bottleneck.stride))
42
+ self.body = Sequential(*modules)
43
+
44
+ def forward(self, x):
45
+ x = self.input_layer(x)
46
+ x = self.body(x)
47
+ x = self.output_layer(x)
48
+ return l2_norm(x)
49
+
50
+
51
+ def IR_50(input_size):
52
+ """Constructs a ir-50 model."""
53
+ model = Backbone(input_size, num_layers=50, mode='ir', drop_ratio=0.4, affine=False)
54
+ return model
55
+
56
+
57
+ def IR_101(input_size):
58
+ """Constructs a ir-101 model."""
59
+ model = Backbone(input_size, num_layers=100, mode='ir', drop_ratio=0.4, affine=False)
60
+ return model
61
+
62
+
63
+ def IR_152(input_size):
64
+ """Constructs a ir-152 model."""
65
+ model = Backbone(input_size, num_layers=152, mode='ir', drop_ratio=0.4, affine=False)
66
+ return model
67
+
68
+
69
+ def IR_SE_50(input_size):
70
+ """Constructs a ir_se-50 model."""
71
+ model = Backbone(input_size, num_layers=50, mode='ir_se', drop_ratio=0.4, affine=False)
72
+ return model
73
+
74
+
75
+ def IR_SE_101(input_size):
76
+ """Constructs a ir_se-101 model."""
77
+ model = Backbone(input_size, num_layers=100, mode='ir_se', drop_ratio=0.4, affine=False)
78
+ return model
79
+
80
+
81
+ def IR_SE_152(input_size):
82
+ """Constructs a ir_se-152 model."""
83
+ model = Backbone(input_size, num_layers=152, mode='ir_se', drop_ratio=0.4, affine=False)
84
+ return model
delta_edit/models/encoders/psp_encoders.py ADDED
@@ -0,0 +1,235 @@
1
+ from enum import Enum
2
+ import math
3
+ import numpy as np
4
+ import torch
5
+ from torch import nn
6
+ from torch.nn import Conv2d, BatchNorm2d, PReLU, Sequential, Module
7
+
8
+ from models.encoders.helpers import get_blocks, bottleneck_IR, bottleneck_IR_SE, _upsample_add
9
+ from models.stylegan2.model import EqualLinear
10
+
11
+
12
+ class ProgressiveStage(Enum):
13
+ WTraining = 0
14
+ Delta1Training = 1
15
+ Delta2Training = 2
16
+ Delta3Training = 3
17
+ Delta4Training = 4
18
+ Delta5Training = 5
19
+ Delta6Training = 6
20
+ Delta7Training = 7
21
+ Delta8Training = 8
22
+ Delta9Training = 9
23
+ Delta10Training = 10
24
+ Delta11Training = 11
25
+ Delta12Training = 12
26
+ Delta13Training = 13
27
+ Delta14Training = 14
28
+ Delta15Training = 15
29
+ Delta16Training = 16
30
+ Delta17Training = 17
31
+ Inference = 18
32
+
33
+
34
+ class GradualStyleBlock(Module):
35
+ def __init__(self, in_c, out_c, spatial):
36
+ super(GradualStyleBlock, self).__init__()
37
+ self.out_c = out_c
38
+ self.spatial = spatial
39
+ num_pools = int(np.log2(spatial))
40
+ modules = []
41
+ modules += [Conv2d(in_c, out_c, kernel_size=3, stride=2, padding=1),
42
+ nn.LeakyReLU()]
43
+ for i in range(num_pools - 1):
44
+ modules += [
45
+ Conv2d(out_c, out_c, kernel_size=3, stride=2, padding=1),
46
+ nn.LeakyReLU()
47
+ ]
48
+ self.convs = nn.Sequential(*modules)
49
+ self.linear = EqualLinear(out_c, out_c, lr_mul=1)
50
+
51
+ def forward(self, x):
52
+ x = self.convs(x)
53
+ x = x.view(-1, self.out_c)
54
+ x = self.linear(x)
55
+ return x
56
+
57
+
58
+ class GradualStyleEncoder(Module):
59
+ def __init__(self, num_layers, mode='ir', opts=None):
60
+ super(GradualStyleEncoder, self).__init__()
61
+ assert num_layers in [50, 100, 152], 'num_layers should be 50,100, or 152'
62
+ assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
63
+ blocks = get_blocks(num_layers)
64
+ if mode == 'ir':
65
+ unit_module = bottleneck_IR
66
+ elif mode == 'ir_se':
67
+ unit_module = bottleneck_IR_SE
68
+ self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
69
+ BatchNorm2d(64),
70
+ PReLU(64))
71
+ modules = []
72
+ for block in blocks:
73
+ for bottleneck in block:
74
+ modules.append(unit_module(bottleneck.in_channel,
75
+ bottleneck.depth,
76
+ bottleneck.stride))
77
+ self.body = Sequential(*modules)
78
+
79
+ self.styles = nn.ModuleList()
80
+ log_size = int(math.log(opts.stylegan_size, 2))
81
+ self.style_count = 2 * log_size - 2
82
+ self.coarse_ind = 3
83
+ self.middle_ind = 7
84
+ for i in range(self.style_count):
85
+ if i < self.coarse_ind:
86
+ style = GradualStyleBlock(512, 512, 16)
87
+ elif i < self.middle_ind:
88
+ style = GradualStyleBlock(512, 512, 32)
89
+ else:
90
+ style = GradualStyleBlock(512, 512, 64)
91
+ self.styles.append(style)
92
+ self.latlayer1 = nn.Conv2d(256, 512, kernel_size=1, stride=1, padding=0)
93
+ self.latlayer2 = nn.Conv2d(128, 512, kernel_size=1, stride=1, padding=0)
94
+
95
+ def forward(self, x):
96
+ x = self.input_layer(x)
97
+
98
+ latents = []
99
+ modulelist = list(self.body._modules.values())
100
+ for i, l in enumerate(modulelist):
101
+ x = l(x)
102
+ if i == 6:
103
+ c1 = x
104
+ elif i == 20:
105
+ c2 = x
106
+ elif i == 23:
107
+ c3 = x
108
+
109
+ for j in range(self.coarse_ind):
110
+ latents.append(self.styles[j](c3))
111
+
112
+ p2 = _upsample_add(c3, self.latlayer1(c2))
113
+ for j in range(self.coarse_ind, self.middle_ind):
114
+ latents.append(self.styles[j](p2))
115
+
116
+ p1 = _upsample_add(p2, self.latlayer2(c1))
117
+ for j in range(self.middle_ind, self.style_count):
118
+ latents.append(self.styles[j](p1))
119
+
120
+ out = torch.stack(latents, dim=1)
121
+ return out
122
+
123
+
124
+ class Encoder4Editing(Module):
125
+ def __init__(self, num_layers, stylegan_size, mode='ir'):
126
+ super(Encoder4Editing, self).__init__()
127
+ assert num_layers in [50, 100, 152], 'num_layers should be 50,100, or 152'
128
+ assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
129
+ blocks = get_blocks(num_layers)
130
+ if mode == 'ir':
131
+ unit_module = bottleneck_IR
132
+ elif mode == 'ir_se':
133
+ unit_module = bottleneck_IR_SE
134
+ self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
135
+ BatchNorm2d(64),
136
+ PReLU(64))
137
+ modules = []
138
+ for block in blocks:
139
+ for bottleneck in block:
140
+ modules.append(unit_module(bottleneck.in_channel,
141
+ bottleneck.depth,
142
+ bottleneck.stride))
143
+ self.body = Sequential(*modules)
144
+
145
+ self.styles = nn.ModuleList()
146
+ log_size = int(math.log(stylegan_size, 2))
147
+ self.style_count = 2 * log_size - 2
148
+ self.coarse_ind = 3
149
+ self.middle_ind = 7
150
+
151
+ for i in range(self.style_count):
152
+ if i < self.coarse_ind:
153
+ style = GradualStyleBlock(512, 512, 16)
154
+ elif i < self.middle_ind:
155
+ style = GradualStyleBlock(512, 512, 32)
156
+ else:
157
+ style = GradualStyleBlock(512, 512, 64)
158
+ self.styles.append(style)
159
+
160
+ self.latlayer1 = nn.Conv2d(256, 512, kernel_size=1, stride=1, padding=0)
161
+ self.latlayer2 = nn.Conv2d(128, 512, kernel_size=1, stride=1, padding=0)
162
+
163
+ self.progressive_stage = ProgressiveStage.Inference
164
+
165
+ def get_deltas_starting_dimensions(self):
166
+ ''' Get a list of the initial dimension of every delta from which it is applied '''
167
+ return list(range(self.style_count)) # Each dimension has a delta applied to it
168
+
169
+ def set_progressive_stage(self, new_stage: ProgressiveStage):
170
+ self.progressive_stage = new_stage
171
+ print('Changed progressive stage to: ', new_stage)
172
+
173
+ def forward(self, x):
174
+ x = self.input_layer(x)
175
+
176
+ modulelist = list(self.body._modules.values())
177
+ for i, l in enumerate(modulelist):
178
+ x = l(x)
179
+ if i == 6:
180
+ c1 = x
181
+ elif i == 20:
182
+ c2 = x
183
+ elif i == 23:
184
+ c3 = x
185
+
186
+ # Infer main W and duplicate it
187
+ w0 = self.styles[0](c3)
188
+ w = w0.repeat(self.style_count, 1, 1).permute(1, 0, 2)
189
+ stage = self.progressive_stage.value
190
+ features = c3
191
+ for i in range(1, min(stage + 1, self.style_count)): # Infer additional deltas
192
+ if i == self.coarse_ind:
193
+ p2 = _upsample_add(c3, self.latlayer1(c2)) # FPN's middle features
194
+ features = p2
195
+ elif i == self.middle_ind:
196
+ p1 = _upsample_add(p2, self.latlayer2(c1)) # FPN's fine features
197
+ features = p1
198
+ delta_i = self.styles[i](features)
199
+ w[:, i] += delta_i
200
+ return w
201
+
202
+
203
+ class BackboneEncoderUsingLastLayerIntoW(Module):
204
+ def __init__(self, num_layers, mode='ir', opts=None):
205
+ super(BackboneEncoderUsingLastLayerIntoW, self).__init__()
206
+ print('Using BackboneEncoderUsingLastLayerIntoW')
207
+ assert num_layers in [50, 100, 152], 'num_layers should be 50,100, or 152'
208
+ assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
209
+ blocks = get_blocks(num_layers)
210
+ if mode == 'ir':
211
+ unit_module = bottleneck_IR
212
+ elif mode == 'ir_se':
213
+ unit_module = bottleneck_IR_SE
214
+ self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
215
+ BatchNorm2d(64),
216
+ PReLU(64))
217
+ self.output_pool = torch.nn.AdaptiveAvgPool2d((1, 1))
218
+ self.linear = EqualLinear(512, 512, lr_mul=1)
219
+ modules = []
220
+ for block in blocks:
221
+ for bottleneck in block:
222
+ modules.append(unit_module(bottleneck.in_channel,
223
+ bottleneck.depth,
224
+ bottleneck.stride))
225
+ self.body = Sequential(*modules)
226
+ log_size = int(math.log(opts.stylegan_size, 2))
227
+ self.style_count = 2 * log_size - 2
228
+
229
+ def forward(self, x):
230
+ x = self.input_layer(x)
231
+ x = self.body(x)
232
+ x = self.output_pool(x)
233
+ x = x.view(-1, 512)
234
+ x = self.linear(x)
235
+ return x.repeat(self.style_count, 1, 1).permute(1, 0, 2)
delta_edit/models/stylegan2/__init__.py ADDED
File without changes
delta_edit/models/stylegan2/model.py ADDED
@@ -0,0 +1,673 @@
1
+ import math
2
+ import random
3
+
4
+ import torch
5
+ from torch import nn
6
+ from torch.nn import functional as F
7
+
8
+ from models.stylegan2.op import FusedLeakyReLU, fused_leaky_relu, upfirdn2d
9
+
10
+ class PixelNorm(nn.Module):
11
+ def __init__(self):
12
+ super().__init__()
13
+
14
+ def forward(self, input):
15
+ return input * torch.rsqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-8)
16
+
17
+
18
+ def make_kernel(k):
19
+ k = torch.tensor(k, dtype=torch.float32)
20
+
21
+ if k.ndim == 1:
22
+ k = k[None, :] * k[:, None]
23
+
24
+ k /= k.sum()
25
+
26
+ return k
27
+
28
+
29
+ class Upsample(nn.Module):
30
+ def __init__(self, kernel, factor=2):
31
+ super().__init__()
32
+
33
+ self.factor = factor
34
+ kernel = make_kernel(kernel) * (factor ** 2)
35
+ self.register_buffer('kernel', kernel)
36
+
37
+ p = kernel.shape[0] - factor
38
+
39
+ pad0 = (p + 1) // 2 + factor - 1
40
+ pad1 = p // 2
41
+
42
+ self.pad = (pad0, pad1)
43
+
44
+ def forward(self, input):
45
+ out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=self.pad)
46
+
47
+ return out
48
+
49
+
50
+ class Downsample(nn.Module):
51
+ def __init__(self, kernel, factor=2):
52
+ super().__init__()
53
+
54
+ self.factor = factor
55
+ kernel = make_kernel(kernel)
56
+ self.register_buffer('kernel', kernel)
57
+
58
+ p = kernel.shape[0] - factor
59
+
60
+ pad0 = (p + 1) // 2
61
+ pad1 = p // 2
62
+
63
+ self.pad = (pad0, pad1)
64
+
65
+ def forward(self, input):
66
+ out = upfirdn2d(input, self.kernel, up=1, down=self.factor, pad=self.pad)
67
+
68
+ return out
69
+
70
+
71
+ class Blur(nn.Module):
72
+ def __init__(self, kernel, pad, upsample_factor=1):
73
+ super().__init__()
74
+
75
+ kernel = make_kernel(kernel)
76
+
77
+ if upsample_factor > 1:
78
+ kernel = kernel * (upsample_factor ** 2)
79
+
80
+ self.register_buffer('kernel', kernel)
81
+
82
+ self.pad = pad
83
+
84
+ def forward(self, input):
85
+ out = upfirdn2d(input, self.kernel, pad=self.pad)
86
+
87
+ return out
88
+
89
+
90
+ class EqualConv2d(nn.Module):
91
+ def __init__(
92
+ self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True
93
+ ):
94
+ super().__init__()
95
+
96
+ self.weight = nn.Parameter(
97
+ torch.randn(out_channel, in_channel, kernel_size, kernel_size)
98
+ )
99
+ self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2)
100
+
101
+ self.stride = stride
102
+ self.padding = padding
103
+
104
+ if bias:
105
+ self.bias = nn.Parameter(torch.zeros(out_channel))
106
+
107
+ else:
108
+ self.bias = None
109
+
110
+ def forward(self, input):
111
+ out = F.conv2d(
112
+ input,
113
+ self.weight * self.scale,
114
+ bias=self.bias,
115
+ stride=self.stride,
116
+ padding=self.padding,
117
+ )
118
+
119
+ return out
120
+
121
+ def __repr__(self):
122
+ return (
123
+ f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]},'
124
+ f' {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})'
125
+ )
126
+
127
+
128
+ class EqualLinear(nn.Module):
129
+ def __init__(
130
+ self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None
131
+ ):
132
+ super().__init__()
133
+
134
+ self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
135
+
136
+ if bias:
137
+ self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
138
+
139
+ else:
140
+ self.bias = None
141
+
142
+ self.activation = activation
143
+
144
+ self.scale = (1 / math.sqrt(in_dim)) * lr_mul
145
+ self.lr_mul = lr_mul
146
+
147
+ def forward(self, input):
148
+ if self.activation:
149
+ out = F.linear(input, self.weight * self.scale)
150
+ out = fused_leaky_relu(out, self.bias * self.lr_mul)
151
+
152
+ else:
153
+ out = F.linear(
154
+ input, self.weight * self.scale, bias=self.bias * self.lr_mul
155
+ )
156
+
157
+ return out
158
+
159
+ def __repr__(self):
160
+ return (
161
+ f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
162
+ )
163
+
164
+
165
+ class ScaledLeakyReLU(nn.Module):
166
+ def __init__(self, negative_slope=0.2):
167
+ super().__init__()
168
+
169
+ self.negative_slope = negative_slope
170
+
171
+ def forward(self, input):
172
+ out = F.leaky_relu(input, negative_slope=self.negative_slope)
173
+
174
+ return out * math.sqrt(2)
175
+
176
+
177
+ class ModulatedConv2d(nn.Module):
178
+ def __init__(
179
+ self,
180
+ in_channel,
181
+ out_channel,
182
+ kernel_size,
183
+ style_dim,
184
+ demodulate=True,
185
+ upsample=False,
186
+ downsample=False,
187
+ blur_kernel=[1, 3, 3, 1],
188
+ ):
189
+ super().__init__()
190
+
191
+ self.eps = 1e-8
192
+ self.kernel_size = kernel_size
193
+ self.in_channel = in_channel
194
+ self.out_channel = out_channel
195
+ self.upsample = upsample
196
+ self.downsample = downsample
197
+
198
+ if upsample:
199
+ factor = 2
200
+ p = (len(blur_kernel) - factor) - (kernel_size - 1)
201
+ pad0 = (p + 1) // 2 + factor - 1
202
+ pad1 = p // 2 + 1
203
+
204
+ self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor=factor)
205
+
206
+ if downsample:
207
+ factor = 2
208
+ p = (len(blur_kernel) - factor) + (kernel_size - 1)
209
+ pad0 = (p + 1) // 2
210
+ pad1 = p // 2
211
+
212
+ self.blur = Blur(blur_kernel, pad=(pad0, pad1))
213
+
214
+ fan_in = in_channel * kernel_size ** 2
215
+ self.scale = 1 / math.sqrt(fan_in)
216
+ self.padding = kernel_size // 2
217
+
218
+ self.weight = nn.Parameter(
219
+ torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)
220
+ )
221
+
222
+ self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
223
+
224
+ self.demodulate = demodulate
225
+
226
+ def __repr__(self):
227
+ return (
228
+ f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, '
229
+ f'upsample={self.upsample}, downsample={self.downsample})'
230
+ )
231
+
232
+ def forward(self, input, style):
233
+ batch, in_channel, height, width = input.shape
234
+
235
+ style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
236
+ weight = self.scale * self.weight * style
237
+
238
+ if self.demodulate:
239
+ demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8)
240
+ weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
241
+
242
+ weight = weight.view(
243
+ batch * self.out_channel, in_channel, self.kernel_size, self.kernel_size
244
+ )
245
+
246
+ if self.upsample:
247
+ input = input.view(1, batch * in_channel, height, width)
248
+ weight = weight.view(
249
+ batch, self.out_channel, in_channel, self.kernel_size, self.kernel_size
250
+ )
251
+ weight = weight.transpose(1, 2).reshape(
252
+ batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size
253
+ )
254
+ out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch)
255
+ _, _, height, width = out.shape
256
+ out = out.view(batch, self.out_channel, height, width)
257
+ out = self.blur(out)
258
+
259
+ elif self.downsample:
260
+ input = self.blur(input)
261
+ _, _, height, width = input.shape
262
+ input = input.view(1, batch * in_channel, height, width)
263
+ out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
264
+ _, _, height, width = out.shape
265
+ out = out.view(batch, self.out_channel, height, width)
266
+
267
+ else:
268
+ input = input.view(1, batch * in_channel, height, width)
269
+ out = F.conv2d(input, weight, padding=self.padding, groups=batch)
270
+ _, _, height, width = out.shape
271
+ out = out.view(batch, self.out_channel, height, width)
272
+
273
+ return out
274
+
275
+
276
+ class NoiseInjection(nn.Module):
277
+ def __init__(self):
278
+ super().__init__()
279
+
280
+ self.weight = nn.Parameter(torch.zeros(1))
281
+
282
+ def forward(self, image, noise=None):
283
+ if noise is None:
284
+ batch, _, height, width = image.shape
285
+ noise = image.new_empty(batch, 1, height, width).normal_()
286
+
287
+ return image + self.weight * noise
288
+
289
+
290
+ class ConstantInput(nn.Module):
291
+ def __init__(self, channel, size=4):
292
+ super().__init__()
293
+
294
+ self.input = nn.Parameter(torch.randn(1, channel, size, size))
295
+
296
+ def forward(self, input):
297
+ batch = input.shape[0]
298
+ out = self.input.repeat(batch, 1, 1, 1)
299
+
300
+ return out
301
+
302
+
303
+ class StyledConv(nn.Module):
304
+ def __init__(
305
+ self,
306
+ in_channel,
307
+ out_channel,
308
+ kernel_size,
309
+ style_dim,
310
+ upsample=False,
311
+ blur_kernel=[1, 3, 3, 1],
312
+ demodulate=True,
313
+ ):
314
+ super().__init__()
315
+
316
+ self.conv = ModulatedConv2d(
317
+ in_channel,
318
+ out_channel,
319
+ kernel_size,
320
+ style_dim,
321
+ upsample=upsample,
322
+ blur_kernel=blur_kernel,
323
+ demodulate=demodulate,
324
+ )
325
+
326
+ self.noise = NoiseInjection()
327
+ # self.bias = nn.Parameter(torch.zeros(1, out_channel, 1, 1))
328
+ # self.activate = ScaledLeakyReLU(0.2)
329
+ self.activate = FusedLeakyReLU(out_channel)
330
+
331
+ def forward(self, input, style, noise=None):
332
+ out = self.conv(input, style)
333
+ out = self.noise(out, noise=noise)
334
+ # out = out + self.bias
335
+ out = self.activate(out)
336
+
337
+ return out
338
+
339
+
340
+ class ToRGB(nn.Module):
341
+ def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]):
342
+ super().__init__()
343
+
344
+ if upsample:
345
+ self.upsample = Upsample(blur_kernel)
346
+
347
+ self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate=False)
348
+ self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1))
349
+
350
+ def forward(self, input, style, skip=None):
351
+ out = self.conv(input, style)
352
+ out = out + self.bias
353
+
354
+ if skip is not None:
355
+ skip = self.upsample(skip)
356
+
357
+ out = out + skip
358
+
359
+ return out
360
+
361
+
362
+ class Generator(nn.Module):
363
+ def __init__(
364
+ self,
365
+ size,
366
+ style_dim,
367
+ n_mlp,
368
+ channel_multiplier=2,
369
+ blur_kernel=[1, 3, 3, 1],
370
+ lr_mlp=0.01,
371
+ ):
372
+ super().__init__()
373
+
374
+ self.size = size
375
+
376
+ self.style_dim = style_dim
377
+
378
+ layers = [PixelNorm()]
379
+
380
+ for i in range(n_mlp):
381
+ layers.append(
382
+ EqualLinear(
383
+ style_dim, style_dim, lr_mul=lr_mlp, activation='fused_lrelu'
384
+ )
385
+ )
386
+
387
+ self.style = nn.Sequential(*layers)
388
+
389
+ self.channels = {
390
+ 4: 512,
391
+ 8: 512,
392
+ 16: 512,
393
+ 32: 512,
394
+ 64: 256 * channel_multiplier,
395
+ 128: 128 * channel_multiplier,
396
+ 256: 64 * channel_multiplier,
397
+ 512: 32 * channel_multiplier,
398
+ 1024: 16 * channel_multiplier,
399
+ }
400
+
401
+ self.input = ConstantInput(self.channels[4])
402
+ self.conv1 = StyledConv(
403
+ self.channels[4], self.channels[4], 3, style_dim, blur_kernel=blur_kernel
404
+ )
405
+ self.to_rgb1 = ToRGB(self.channels[4], style_dim, upsample=False)
406
+
407
+ self.log_size = int(math.log(size, 2))
408
+ self.num_layers = (self.log_size - 2) * 2 + 1
409
+
410
+ self.convs = nn.ModuleList()
411
+ self.upsamples = nn.ModuleList()
412
+ self.to_rgbs = nn.ModuleList()
413
+ self.noises = nn.Module()
414
+
415
+ in_channel = self.channels[4]
416
+
417
+ for layer_idx in range(self.num_layers):
418
+ res = (layer_idx + 5) // 2
419
+ shape = [1, 1, 2 ** res, 2 ** res]
420
+ self.noises.register_buffer(f'noise_{layer_idx}', torch.randn(*shape))
421
+
422
+ for i in range(3, self.log_size + 1):
423
+ out_channel = self.channels[2 ** i]
424
+
425
+ self.convs.append(
426
+ StyledConv(
427
+ in_channel,
428
+ out_channel,
429
+ 3,
430
+ style_dim,
431
+ upsample=True,
432
+ blur_kernel=blur_kernel,
433
+ )
434
+ )
435
+
436
+ self.convs.append(
437
+ StyledConv(
438
+ out_channel, out_channel, 3, style_dim, blur_kernel=blur_kernel
439
+ )
440
+ )
441
+
442
+ self.to_rgbs.append(ToRGB(out_channel, style_dim))
443
+
444
+ in_channel = out_channel
445
+
446
+ self.n_latent = self.log_size * 2 - 2
447
+
448
+ def make_noise(self):
449
+ device = self.input.input.device
450
+
451
+ noises = [torch.randn(1, 1, 2 ** 2, 2 ** 2, device=device)]
452
+
453
+ for i in range(3, self.log_size + 1):
454
+ for _ in range(2):
455
+ noises.append(torch.randn(1, 1, 2 ** i, 2 ** i, device=device))
456
+
457
+ return noises
458
+
459
+ def mean_latent(self, n_latent):
460
+ latent_in = torch.randn(
461
+ n_latent, self.style_dim, device=self.input.input.device
462
+ )
463
+ latent = self.style(latent_in).mean(0, keepdim=True)
464
+
465
+ return latent
466
+
467
+ def get_latent(self, input):
468
+ return self.style(input)
469
+
470
+ def forward(
471
+ self,
472
+ styles,
473
+ return_latents=False,
474
+ inject_index=None,
475
+ truncation=1,
476
+ truncation_latent=None,
477
+ input_is_latent=False,
478
+ noise=None,
479
+ randomize_noise=True,
480
+ ):
481
+ if not input_is_latent:
482
+ styles = [self.style(s) for s in styles]
483
+
484
+ if noise is None:
485
+ if randomize_noise:
486
+ noise = [None] * self.num_layers
487
+ else:
488
+ noise = [
489
+ getattr(self.noises, f'noise_{i}') for i in range(self.num_layers)
490
+ ]
491
+
492
+ if truncation < 1:
493
+ style_t = []
494
+
495
+ for style in styles:
496
+ style_t.append(
497
+ truncation_latent + truncation * (style - truncation_latent)
498
+ )
499
+
500
+ styles = style_t
501
+
502
+ if len(styles) < 2:
503
+ inject_index = self.n_latent
504
+
505
+ if styles[0].ndim < 3:
506
+ latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
507
+
508
+ else:
509
+ latent = styles[0]
510
+
511
+ else:
512
+ if inject_index is None:
513
+ inject_index = random.randint(1, self.n_latent - 1)
514
+
515
+ latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
516
+ latent2 = styles[1].unsqueeze(1).repeat(1, self.n_latent - inject_index, 1)
517
+
518
+ latent = torch.cat([latent, latent2], 1)
519
+
520
+ out = self.input(latent)
521
+ out = self.conv1(out, latent[:, 0], noise=noise[0])
522
+
523
+ skip = self.to_rgb1(out, latent[:, 1])
524
+
525
+ i = 1
526
+ for conv1, conv2, noise1, noise2, to_rgb in zip(
527
+ self.convs[::2], self.convs[1::2], noise[1::2], noise[2::2], self.to_rgbs
528
+ ):
529
+ out = conv1(out, latent[:, i], noise=noise1)
530
+ out = conv2(out, latent[:, i + 1], noise=noise2)
531
+ skip = to_rgb(out, latent[:, i + 2], skip)
532
+
533
+ i += 2
534
+
535
+ image = skip
536
+
537
+ if return_latents:
538
+ return image, latent
539
+
540
+ else:
541
+ return image, None
542
+
543
+
544
+ class ConvLayer(nn.Sequential):
545
+ def __init__(
546
+ self,
547
+ in_channel,
548
+ out_channel,
549
+ kernel_size,
550
+ downsample=False,
551
+ blur_kernel=[1, 3, 3, 1],
552
+ bias=True,
553
+ activate=True,
554
+ ):
555
+ layers = []
556
+
557
+ if downsample:
558
+ factor = 2
559
+ p = (len(blur_kernel) - factor) + (kernel_size - 1)
560
+ pad0 = (p + 1) // 2
561
+ pad1 = p // 2
562
+
563
+ layers.append(Blur(blur_kernel, pad=(pad0, pad1)))
564
+
565
+ stride = 2
566
+ self.padding = 0
567
+
568
+ else:
569
+ stride = 1
570
+ self.padding = kernel_size // 2
571
+
572
+ layers.append(
573
+ EqualConv2d(
574
+ in_channel,
575
+ out_channel,
576
+ kernel_size,
577
+ padding=self.padding,
578
+ stride=stride,
579
+ bias=bias and not activate,
580
+ )
581
+ )
582
+
583
+ if activate:
584
+ if bias:
585
+ layers.append(FusedLeakyReLU(out_channel))
586
+
587
+ else:
588
+ layers.append(ScaledLeakyReLU(0.2))
589
+
590
+ super().__init__(*layers)
591
+
592
+
593
+ class ResBlock(nn.Module):
594
+ def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1]):
595
+ super().__init__()
596
+
597
+ self.conv1 = ConvLayer(in_channel, in_channel, 3)
598
+ self.conv2 = ConvLayer(in_channel, out_channel, 3, downsample=True)
599
+
600
+ self.skip = ConvLayer(
601
+ in_channel, out_channel, 1, downsample=True, activate=False, bias=False
602
+ )
603
+
604
+ def forward(self, input):
605
+ out = self.conv1(input)
606
+ out = self.conv2(out)
607
+
608
+ skip = self.skip(input)
609
+ out = (out + skip) / math.sqrt(2)
610
+
611
+ return out
612
+
613
+
614
+ class Discriminator(nn.Module):
615
+ def __init__(self, size, channel_multiplier=2, blur_kernel=[1, 3, 3, 1]):
616
+ super().__init__()
617
+
618
+ channels = {
619
+ 4: 512,
620
+ 8: 512,
621
+ 16: 512,
622
+ 32: 512,
623
+ 64: 256 * channel_multiplier,
624
+ 128: 128 * channel_multiplier,
625
+ 256: 64 * channel_multiplier,
626
+ 512: 32 * channel_multiplier,
627
+ 1024: 16 * channel_multiplier,
628
+ }
629
+
630
+ convs = [ConvLayer(3, channels[size], 1)]
631
+
632
+ log_size = int(math.log(size, 2))
633
+
634
+ in_channel = channels[size]
635
+
636
+ for i in range(log_size, 2, -1):
637
+ out_channel = channels[2 ** (i - 1)]
638
+
639
+ convs.append(ResBlock(in_channel, out_channel, blur_kernel))
640
+
641
+ in_channel = out_channel
642
+
643
+ self.convs = nn.Sequential(*convs)
644
+
645
+ self.stddev_group = 4
646
+ self.stddev_feat = 1
647
+
648
+ self.final_conv = ConvLayer(in_channel + 1, channels[4], 3)
649
+ self.final_linear = nn.Sequential(
650
+ EqualLinear(channels[4] * 4 * 4, channels[4], activation='fused_lrelu'),
651
+ EqualLinear(channels[4], 1),
652
+ )
653
+
654
+ def forward(self, input):
655
+ out = self.convs(input)
656
+
657
+ batch, channel, height, width = out.shape
658
+ group = min(batch, self.stddev_group)
659
+ stddev = out.view(
660
+ group, -1, self.stddev_feat, channel // self.stddev_feat, height, width
661
+ )
662
+ stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8)
663
+ stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2)
664
+ stddev = stddev.repeat(group, 1, height, width)
665
+ out = torch.cat([out, stddev], 1)
666
+
667
+ out = self.final_conv(out)
668
+
669
+ out = out.view(batch, -1)
670
+ out = self.final_linear(out)
671
+
672
+ return out
673
+
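The block above completes the StyleGAN2 `Generator` (and `Discriminator`) used by DeltaEdit. For orientation only, here is a minimal sampling sketch; it assumes you run it from inside `delta_edit/` (so the same `models...` imports used by the bundled scripts resolve), that the checkpoint shipped under `delta_edit/weights/` is present, and that a CUDA device is available, since the pure-PyTorch `op` fallbacks below move activations to the GPU.

```python
# Minimal sketch (not part of the upload): sampling the generator with truncation.
import torch
from models.stylegan2.model import Generator  # same import style as the bundled scripts

device = "cuda"
g_ema = Generator(size=1024, style_dim=512, n_mlp=8).to(device)
ckpt = torch.load("weights/stylegan2-ffhq-config-f.pt", map_location="cpu")
g_ema.load_state_dict(ckpt["g_ema"], strict=False)
g_ema.eval()

with torch.no_grad():
    z = torch.randn(4, 512, device=device)   # a batch of z latents
    trunc = g_ema.mean_latent(4096)          # average w used as the truncation anchor
    imgs, _ = g_ema([z], truncation=0.7, truncation_latent=trunc)
print(imgs.shape)  # torch.Size([4, 3, 1024, 1024])
```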
delta_edit/models/stylegan2/npy_ffhq/fs3.npy ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7260766e6d3d0a0298bc917f57550bed5e0684524b6ee38667e63f0926ac84c1
3
+ size 6193280
delta_edit/models/stylegan2/op/__init__.py ADDED
@@ -0,0 +1,2 @@
1
+ from .fused_act import FusedLeakyReLU, fused_leaky_relu
2
+ from .upfirdn2d import upfirdn2d
delta_edit/models/stylegan2/op/fused_act.py ADDED
@@ -0,0 +1,38 @@
1
+ import os
2
+
3
+ import torch
4
+ from torch import nn
5
+ from torch.nn import functional as F
6
+
7
+ module_path = os.path.dirname(__file__)
8
+
9
+ class FusedLeakyReLU(nn.Module):
10
+ def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5):
11
+ super().__init__()
12
+
13
+ self.bias = nn.Parameter(torch.zeros(channel))
14
+ self.negative_slope = negative_slope
15
+ self.scale = scale
16
+
17
+ def forward(self, input):
18
+ return fused_leaky_relu(input, self.bias, self.negative_slope, self.scale)
19
+
20
+
21
+ def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
22
+ rest_dim = [1] * (input.ndim - bias.ndim - 1)
23
+ input = input.cuda()
24
+ if input.ndim == 3:
25
+ return (
26
+ F.leaky_relu(
27
+ input + bias.view(1, *rest_dim, bias.shape[0]), negative_slope=negative_slope
28
+ )
29
+ * scale
30
+ )
31
+ else:
32
+ return (
33
+ F.leaky_relu(
34
+ input + bias.view(1, bias.shape[0], *rest_dim), negative_slope=negative_slope
35
+ )
36
+ * scale
37
+ )
38
+
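These `op` modules are pure-PyTorch replacements for the original custom CUDA kernels. As a quick illustration (not part of the upload), `fused_leaky_relu` is simply a bias add followed by a leaky ReLU, rescaled by sqrt(2); note that it calls `input.cuda()` unconditionally, so a GPU is assumed.

```python
# Sketch: check what fused_leaky_relu computes (assumes a CUDA device and the delta_edit/ cwd).
import torch
import torch.nn.functional as F
from models.stylegan2.op import fused_leaky_relu

x = torch.randn(2, 8, 4, 4, device="cuda")
bias = torch.randn(8, device="cuda")
out = fused_leaky_relu(x, bias)
ref = F.leaky_relu(x + bias.view(1, 8, 1, 1), negative_slope=0.2) * (2 ** 0.5)
print(torch.allclose(out, ref))  # True
```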
delta_edit/models/stylegan2/op/upfirdn2d.py ADDED
@@ -0,0 +1,52 @@
1
+ import os
2
+ import torch
3
+ from torch.nn import functional as F
4
+
5
+ module_path = os.path.dirname(__file__)
6
+
7
+ def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
8
+ out = upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1])
9
+
10
+ return out
11
+
12
+ def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1):
13
+
14
+ _, channel, in_h, in_w = input.shape
15
+ input = input.reshape(-1, in_h, in_w, 1)
16
+
17
+ _, in_h, in_w, minor = input.shape
18
+ kernel_h, kernel_w = kernel.shape
19
+
20
+ out = input.view(-1, in_h, 1, in_w, 1, minor)
21
+ out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
22
+ out = out.view(-1, in_h * up_y, in_w * up_x, minor)
23
+
24
+ out = F.pad(
25
+ out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]
26
+ )
27
+ out = out[
28
+ :,
29
+ max(-pad_y0, 0) : out.shape[1] - max(-pad_y1, 0),
30
+ max(-pad_x0, 0) : out.shape[2] - max(-pad_x1, 0),
31
+ :,
32
+ ]
33
+
34
+ out = out.permute(0, 3, 1, 2)
35
+ out = out.reshape(
36
+ [-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]
37
+ )
38
+ w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
39
+ out = F.conv2d(out, w)
40
+ out = out.reshape(
41
+ -1,
42
+ minor,
43
+ in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1,
44
+ in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1,
45
+ )
46
+ out = out.permute(0, 2, 3, 1)
47
+ out = out[:, ::down_y, ::down_x, :]
48
+
49
+ out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
50
+ out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
51
+
52
+ return out.view(-1, channel, out_h, out_w)
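This `upfirdn2d` fallback performs upsample, pad, FIR filter, then downsample with plain tensor ops (it also runs on CPU). Below is a short example, not part of the upload, of the blur-then-downsample pattern used by the generator's `Blur` layers with the usual `[1, 3, 3, 1]` kernel; the padding keeps the output at exactly half the input resolution.

```python
import torch
from models.stylegan2.op import upfirdn2d

k1d = torch.tensor([1.0, 3.0, 3.0, 1.0])
kernel = torch.outer(k1d, k1d)
kernel = kernel / kernel.sum()                  # normalized 4x4 blur kernel

x = torch.randn(1, 8, 32, 32)
p = kernel.shape[0] - 2                         # padding for a factor-2 downsample
out = upfirdn2d(x, kernel, up=1, down=2, pad=((p + 1) // 2, p // 2))
print(out.shape)  # torch.Size([1, 8, 16, 16])
```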
delta_edit/options/__init__.py ADDED
File without changes
delta_edit/options/test_options.py ADDED
@@ -0,0 +1,28 @@
1
+ from argparse import ArgumentParser
2
+
3
+ class TestOptions:
4
+
5
+ def __init__(self):
6
+ self.parser = ArgumentParser()
7
+ self.initialize()
8
+
9
+ def initialize(self):
10
+ # arguments for inference script
11
+
12
+ self.parser.add_argument('--batch_size', default=1, type=int, help='Batch size for inference')
13
+ self.parser.add_argument('--workers', default=4, type=int, help='Number of test dataloader workers')
14
+
15
+ self.parser.add_argument('--stylegan_weights', default='weights/stylegan2-ffhq-config-f.pt', type=str, help='Path to StyleGAN model weights')
16
+ self.parser.add_argument('--stylegan_size', default=1024, type=int)
17
+
18
+ self.parser.add_argument("--threshold", type=int, default=0.03)
19
+ self.parser.add_argument("--checkpoint_path", type=str, default='weights/net_face.pth')
20
+ self.parser.add_argument("--save_dir", type=str, default='output')
21
+ self.parser.add_argument("--image_dir", type=str, default='./test_imgs')
22
+ self.parser.add_argument("--num_all", type=int, default=20)
23
+
24
+ self.parser.add_argument("--target", type=str, required=True, help='Specify the target attributes to be edited')
25
+
26
+ def parse(self):
27
+ opts = self.parser.parse_args()
28
+ return opts
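For reference, here is a sketch of how these options are consumed programmatically; the argument values are examples only, and the working directory is assumed to be `delta_edit/` as with the bundled scripts.

```python
# Sketch: parse the test options the way the inference scripts do (values are illustrative).
import sys
from options.test_options import TestOptions

sys.argv = [
    "inference.py",
    "--target", "smiling face,face with eyeglasses",  # comma-separated editing targets
    "--threshold", "0.03",
    "--save_dir", "output",
]
opts = TestOptions().parse()
print(opts.target.split(","))  # the scripts build one CLIP text direction per entry
```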
delta_edit/options/train_options.py ADDED
@@ -0,0 +1,27 @@
1
+ from argparse import ArgumentParser
2
+
3
+ class TrainOptions:
4
+
5
+ def __init__(self):
6
+ self.parser = ArgumentParser()
7
+ self.initialize()
8
+
9
+ def initialize(self):
10
+
11
+ self.parser.add_argument('--batch_size', default=64, type=int, help='Batch size for training')
12
+ self.parser.add_argument('--workers', default=4, type=int, help='Number of train dataloader workers')
13
+
14
+ self.parser.add_argument('--learning_rate', default=0.5, type=float, help='Optimizer learning rate')
15
+
16
+ self.parser.add_argument('--l2_lambda', default=1.0, type=float, help='l2 loss')
17
+ self.parser.add_argument('--cos_lambda', default=1.0, type=float, help='cos loss')
18
+
19
+ self.parser.add_argument('--checkpoint_path', default='checkpoints', type=str, help='Directory to save DeltaMapper checkpoints')
20
+ self.parser.add_argument('--classname', type=str, default='ffhq', help="which specific domain for training")
21
+ self.parser.add_argument('--print_interval', default=1000, type=int, help='Interval for printing loss values during training')
22
+ self.parser.add_argument('--val_interval', default=5000, type=int, help='Validation interval')
23
+ self.parser.add_argument('--save_interval', default=10000, type=int, help='Model checkpoint interval')
24
+
25
+ def parse(self):
26
+ opts = self.parser.parse_args()
27
+ return opts
delta_edit/scripts/__init__.py ADDED
File without changes
delta_edit/scripts/inference.py ADDED
@@ -0,0 +1,124 @@
1
+ import os
2
+ import sys
3
+
4
+ sys.path.append("")
5
+ sys.path.append("..")
6
+
7
+ import copy
8
+ import clip
9
+ import numpy as np
10
+
11
+ import torch
12
+ import torchvision
13
+ from torch.utils.data import DataLoader
14
+
15
+ import torch.nn.functional as F
16
+
17
+ from datasets.test_dataset import TestLatentsDataset
18
+
19
+ from models.stylegan2.model import Generator
20
+ from delta_mapper import DeltaMapper
21
+
22
+ from options.test_options import TestOptions
23
+
24
+ from utils import map_tool
25
+ from utils import stylespace_util
26
+
27
+
28
+ def GetBoundary(fs3, dt, threshold):
29
+ tmp = np.dot(fs3, dt)
30
+
31
+ select = np.abs(tmp) < threshold
32
+ return select
33
+
34
+
35
+ def improved_ds(ds, select):
36
+ ds_imp = copy.copy(ds)
37
+ ds_imp[select] = 0
38
+ ds_imp = ds_imp.unsqueeze(0)
39
+ return ds_imp
40
+
41
+
42
+ def main(opts):
43
+ device = "cuda" if torch.cuda.is_available() else "cpu"
44
+
45
+ # Initialize test dataset
46
+ test_dataset = TestLatentsDataset()
47
+ test_dataloader = DataLoader(test_dataset,
48
+ batch_size=opts.batch_size,
49
+ shuffle=False,
50
+ num_workers=int(opts.workers),
51
+ drop_last=True)
52
+
53
+ # Initialize generator
54
+ print('Loading stylegan weights from pretrained!')
55
+ g_ema = Generator(size=opts.stylegan_size, style_dim=512, n_mlp=8)
56
+ g_ema_ckpt = torch.load(opts.stylegan_weights)
57
+ g_ema.load_state_dict(g_ema_ckpt['g_ema'], strict=False)
58
+ g_ema.eval()
59
+ g_ema = g_ema.to(device)
60
+
61
+ # load relevance matrix Rs
62
+ fs3 = np.load('./models/stylegan2/npy_ffhq/fs3.npy')
63
+ np.set_printoptions(suppress=True)
64
+
65
+ # Initialize DeltaMapper
66
+ net = DeltaMapper()
67
+ net_ckpt = torch.load(opts.checkpoint_path)
68
+ net.load_state_dict(net_ckpt)
69
+ net = net.to(device)
70
+
71
+ # Load CLIP model
72
+ clip_model, preprocess = clip.load("ViT-B/32", device=device)
73
+
74
+ os.makedirs(opts.save_dir, exist_ok=True)
75
+
76
+ neutral = 'face'
77
+ target_list = opts.target.split(',')
78
+ # print(target_list)
79
+
80
+ dt_list = []
81
+ select_list = []
82
+ for target in target_list:
83
+ classnames = [target, neutral]
84
+ dt = map_tool.GetDt(classnames, clip_model)
85
+ select = GetBoundary(fs3, dt, opts.threshold)
86
+ dt = torch.Tensor(dt).to(device)
87
+ dt = dt / dt.norm(dim=-1, keepdim=True).float().clamp(min=1e-5)
88
+
89
+ select_list.append(select)
90
+ dt_list.append(dt)
91
+
92
+ for bid, batch in enumerate(test_dataloader):
93
+ if bid == opts.num_all:
94
+ break
95
+
96
+ latent_s, delta_c, latent_w = batch
97
+ latent_s = latent_s.to(device)
98
+ delta_c = delta_c.to(device)
99
+ latent_w = latent_w.to(device)
100
+ delta_s_list = []
101
+
102
+ for i, dt in enumerate(dt_list):
103
+ delta_c[0, 512:] = dt
104
+ with torch.no_grad():
105
+ fake_delta_s = net(latent_s, delta_c)
106
+ improved_fake_delta_s = improved_ds(fake_delta_s[0], select_list[i])
107
+ delta_s_list.append(improved_fake_delta_s)
108
+
109
+ with torch.no_grad():
110
+ img_ori = stylespace_util.decoder_validate(g_ema, latent_s, latent_w)
111
+
112
+ img_list = [img_ori]
113
+ for delta_s in delta_s_list:
114
+ img_gen = stylespace_util.decoder_validate(g_ema, latent_s + delta_s, latent_w)
115
+ img_list.append(img_gen)
116
+ img_gen_all = torch.cat(img_list, dim=3)
117
+ torchvision.utils.save_image(img_gen_all, os.path.join(opts.save_dir, "%04d.jpg" % (bid + 1)),
118
+ normalize=True, value_range=(-1, 1))
119
+ print(f'completed👍! Please check results in {opts.save_dir}')
120
+
121
+
122
+ if __name__ == "__main__":
123
+ opts = TestOptions().parse()
124
+ main(opts)
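A note on the conditioning used above: `delta_c` is a 1024-d vector whose first half is a CLIP image embedding and whose second half is the normalized CLIP text direction `dt` (the loop overwrites `delta_c[0, 512:]` per target); the real-image scripts below build it the same way. Schematically, as a sketch rather than project code:

```python
# Sketch of the 1024-d conditioning vector fed to DeltaMapper.
import torch

c_img = torch.randn(1, 512)                    # stand-in for a CLIP image embedding
c_img = c_img / c_img.norm(dim=-1, keepdim=True)
dt = torch.randn(512)                          # stand-in for the CLIP text direction
dt = dt / dt.norm()
delta_c = torch.cat((c_img, dt.unsqueeze(0)), dim=1)
print(delta_c.shape)  # torch.Size([1, 1024])
```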
delta_edit/scripts/inference_laion.py ADDED
@@ -0,0 +1,230 @@
1
+ import os
2
+ import sys
3
+ sys.path.append("")
4
+ sys.path.append("..")
5
+
6
+ import copy
7
+ import clip
8
+ import numpy as np
9
+ from PIL import Image
10
+
11
+ import torch
12
+ import torchvision
13
+ from torch.utils.data import Dataset
14
+ from torch.utils.data import DataLoader
15
+ from torchvision import transforms
16
+
17
+ import torch.nn.functional as F
18
+ from tqdm import tqdm
19
+
20
+ from datasets.test_dataset import TestLatentsDataset
21
+
22
+ from models.stylegan2.model import Generator
23
+ from models.encoders import psp_encoders
24
+ from delta_mapper import DeltaMapper
25
+
26
+ from options.test_options import TestOptions
27
+
28
+ from utils import map_tool
29
+ from utils import stylespace_util
30
+
31
+
32
+
33
+ def get_keys(d, name):
34
+ if 'state_dict' in d:
35
+ d = d['state_dict']
36
+ d_filt = {k[len(name) + 1:]: v for k, v in d.items() if k[:len(name)] == name}
37
+ return d_filt
38
+
39
+ class Imagedataset(Dataset):
40
+ def __init__(self,
41
+ path,
42
+ image_size=256,
43
+ split=None):
44
+
45
+ self.path = path
46
+ self.images = os.listdir(path)
47
+ self.images = [img for img in self.images if img.endswith('jpg')]
48
+
49
+ self.image_size = image_size
50
+
51
+ self.length = len(self.images)
52
+
53
+ transform = [
54
+ transforms.Resize(image_size),
55
+ transforms.ToTensor(),
56
+ transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
57
+ ]
58
+
59
+ self.transform = transforms.Compose(transform)
60
+
61
+ def __len__(self):
62
+ return self.length
63
+
64
+ def __getitem__(self, index):
65
+ cur_name = self.images[index]
66
+ img_path = os.path.join(self.path, cur_name)
67
+
68
+ img = Image.open(img_path).convert("RGB")
69
+
70
+ if self.transform is not None:
71
+ img = self.transform(img)
72
+ return img, img_path
73
+
74
+ def encoder_latent(G, latent):
75
+ # a wrapper around G that maps a W+ latent to per-layer style-space codes
76
+ #styles = [noise]
77
+ style_space = []
78
+
79
+ #styles = [G.style(s) for s in styles]
80
+ noise = [getattr(G.noises, 'noise_{}'.format(i)) for i in range(G.num_layers)]
81
+ # inject_index = G.n_latent
82
+ #latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
83
+ style_space.append(G.conv1.conv.modulation(latent[:, 0]))
84
+
85
+ i = 1
86
+ for conv1, conv2, to_rgb in zip(
87
+ G.convs[::2], G.convs[1::2], G.to_rgbs
88
+ ):
89
+ style_space.append(conv1.conv.modulation(latent[:, i]))
90
+ style_space.append(conv2.conv.modulation(latent[:, i+1]))
91
+ i += 2
92
+
93
+ return style_space, noise
94
+
95
+ def GetBoundary(fs3,dt,threshold):
96
+ tmp=np.dot(fs3,dt)
97
+
98
+ select=np.abs(tmp)<threshold
99
+ return select
100
+
101
+ def improved_ds(ds, select):
102
+ ds_imp = copy.copy(ds)
103
+ ds_imp[select] = 0
104
+ ds_imp = ds_imp.unsqueeze(0)
105
+ return ds_imp
106
+
107
+ def main(opts):
108
+
109
+ device = "cuda" if torch.cuda.is_available() else "cpu"
110
+
111
+ # NOTE load e4e
112
+ checkpoint_path = "weights/e4e_ffhq_encode.pt"
113
+ ckpt_enc = torch.load(checkpoint_path, map_location='cpu') #dict_keys(['state_dict', 'latent_avg', 'opts'])
114
+ encoder = psp_encoders.Encoder4Editing(50, 1024, 'ir_se')
115
+ encoder.load_state_dict(get_keys(ckpt_enc, 'encoder'), strict=True)
116
+ encoder.eval()
117
+ encoder.to(device)
118
+
119
+ #Initialize generator
120
+ print('Loading stylegan weights from pretrained!')
121
+ g_ema = Generator(size=opts.stylegan_size, style_dim=512, n_mlp=8)
122
+ g_ema_ckpt = torch.load(opts.stylegan_weights)
123
+ g_ema.load_state_dict(g_ema_ckpt['g_ema'], strict=False)
124
+ g_ema.eval()
125
+ g_ema = g_ema.to(device)
126
+
127
+ #load relevance matrix Rs
128
+ fs3=np.load('./models/stylegan2/npy_ffhq/fs3.npy')
129
+ np.set_printoptions(suppress=True)
130
+
131
+ #Initialize DeltaMapper
132
+ net = DeltaMapper()
133
+ net_ckpt = torch.load(opts.checkpoint_path)
134
+ net.load_state_dict(net_ckpt)
135
+ net = net.to(device)
136
+
137
+ #Load CLIP model
138
+ clip_model, preprocess = clip.load("ViT-B/32", device=device)
139
+ avg_pool = torch.nn.AvgPool2d(kernel_size=256//32)
140
+ upsample = torch.nn.Upsample(scale_factor=7)
141
+
142
+
143
+ # neutral='face'
144
+ # target_list = opts.target.split(',')
145
+ # print(target_list)
146
+
147
+ neutral = 'face'
148
+ attr_file = './editing_attributes.txt'
149
+ target_list = open(attr_file, 'r').readlines()
150
+
151
+ dt_list = []
152
+ select_list = []
153
+ dis_list = []
154
+ prompt_list = []
155
+ for target in target_list:
156
+ tar_att, dis, tar_prompt = target.strip().split(',')
157
+
158
+ classnames = [tar_att, neutral]
159
+ dt = map_tool.GetDt(classnames, clip_model)
160
+ select = GetBoundary(fs3, dt, opts.threshold)
161
+ dt = torch.Tensor(dt).to(device)
162
+ dt = dt / dt.norm(dim=-1, keepdim=True).float().clamp(min=1e-5)
163
+
164
+ select_list.append(select)
165
+ dt_list.append(dt)
166
+ dis_list.append(float(dis))
167
+ prompt_list.append(tar_prompt)
168
+
169
+ id_dirs = os.listdir(opts.image_dir)
170
+ id_dirs.sort()
171
+
172
+ for id_dir in tqdm(id_dirs):
173
+ test_dataset = Imagedataset(os.path.join(opts.image_dir, id_dir), image_size=256)
174
+ test_dataloader = DataLoader(test_dataset,
175
+ batch_size=opts.batch_size,
176
+ shuffle=False,
177
+ num_workers=int(opts.workers),
178
+ drop_last=True)
179
+
180
+ for bid, (batch, img_path) in enumerate(test_dataloader):
181
+ # if bid == opts.num_all:
182
+ # break
183
+ image_name = os.path.basename(img_path[0])[:-4]
184
+ cur_image_save_dir = os.path.join(opts.save_dir, id_dir, image_name)
185
+
186
+ if os.path.exists(cur_image_save_dir):
187
+ continue
188
+
189
+ os.makedirs(cur_image_save_dir, exist_ok=True)
190
+
191
+ input_img = batch.to(device)
192
+ with torch.no_grad():
193
+ latent_w = encoder(input_img)
194
+ latent_avg = ckpt_enc['latent_avg'].cuda()
195
+ latent_w = latent_w + latent_avg.repeat(latent_w.shape[0], 1, 1)
196
+
197
+ style_space, noise = encoder_latent(g_ema, latent_w)
198
+ latent_s = torch.cat(style_space, dim=1)
199
+
200
+ img_gen_for_clip = upsample(input_img)
201
+ img_gen_for_clip = avg_pool(img_gen_for_clip)
202
+ c_latents = clip_model.encode_image(img_gen_for_clip)
203
+ c_latents = c_latents / c_latents.norm(dim=-1, keepdim=True).float()
204
+
205
+ delta_s_list = []
206
+
207
+ for i, dt in enumerate(dt_list):
208
+ delta_c = torch.cat((c_latents, dt.unsqueeze(0)), dim=1)
209
+ with torch.no_grad():
210
+ fake_delta_s = net(latent_s, delta_c)
211
+ improved_fake_delta_s = improved_ds(fake_delta_s[0], select_list[i])
212
+ delta_s_list.append(improved_fake_delta_s)
213
+
214
+ with torch.no_grad():
215
+ img_ori = stylespace_util.decoder_validate(g_ema, latent_s, latent_w)
216
+ torchvision.utils.save_image(img_ori, os.path.join(cur_image_save_dir, "face.jpg"), normalize=True, value_range=(-1, 1))
217
+ # torchvision.utils.save_image(img_ori, os.path.join(cur_image_save_dir, "face.jpg"), normalize=True, range=(-1, 1))
218
+
219
+ for ii, delta_s in enumerate(delta_s_list):
220
+ img_gen = stylespace_util.decoder_validate(g_ema, latent_s + delta_s * dis_list[ii], latent_w)
221
+ torchvision.utils.save_image(img_gen, os.path.join(cur_image_save_dir, "{}.jpg".format(prompt_list[ii])), normalize=True, value_range=(-1, 1))
222
+ # torchvision.utils.save_image(img_gen, os.path.join(cur_image_save_dir, "{}.jpg".format(prompt_list[ii])), normalize=True, range=(-1, 1))
223
+ # img_list.append(img_gen)
224
+ # img_gen_all = torch.cat(img_list, dim=3)
225
+ # torchvision.utils.save_image(img_gen_all, os.path.join(opts.save_dir, "%04d.jpg" %(bid+1)), normalize=True, range=(-1, 1))
226
+ print(f'completed👍! Please check results in {opts.save_dir}')
227
+
228
+ if __name__ == "__main__":
229
+ opts = TestOptions().parse()
230
+ main(opts)
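As written, `inference_laion.py` expects `--image_dir` to contain one sub-folder per identity holding aligned `.jpg` crops; for each input image it saves the e4e reconstruction as `face.jpg` plus one `<prompt>.jpg` per line of `editing_attributes.txt` (each line read as `attribute,strength,prompt`) under `--save_dir/<id>/<image_name>/`.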
delta_edit/scripts/inference_real.py ADDED
@@ -0,0 +1,210 @@
1
+ import os
2
+ import sys
3
+
4
+ sys.path.append("")
5
+ sys.path.append("..")
6
+
7
+ import copy
8
+ import clip
9
+ import numpy as np
10
+ from PIL import Image
11
+
12
+ import torch
13
+ import torchvision
14
+ from torch.utils.data import Dataset
15
+ from torch.utils.data import DataLoader
16
+ from torchvision import transforms
17
+
18
+ import torch.nn.functional as F
19
+
20
+ from datasets.test_dataset import TestLatentsDataset
21
+
22
+ from models.stylegan2.model import Generator
23
+ from models.encoders import psp_encoders
24
+ from delta_mapper import DeltaMapper
25
+
26
+ from options.test_options import TestOptions
27
+
28
+ from utils import map_tool
29
+ from utils import stylespace_util
30
+
31
+
32
+ def get_keys(d, name):
33
+ if 'state_dict' in d:
34
+ d = d['state_dict']
35
+ d_filt = {k[len(name) + 1:]: v for k, v in d.items() if k[:len(name)] == name}
36
+ return d_filt
37
+
38
+
39
+ class Imagedataset(Dataset):
40
+ def __init__(self,
41
+ path,
42
+ image_size=256,
43
+ split=None):
44
+ self.path = path
45
+ self.images = os.listdir(path)
46
+
47
+ self.image_size = image_size
48
+
49
+ self.length = len(self.images)
50
+
51
+ transform = [
52
+ transforms.Resize(image_size),
53
+ transforms.ToTensor(),
54
+ transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
55
+ ]
56
+
57
+ self.transform = transforms.Compose(transform)
58
+
59
+ def __len__(self):
60
+ return self.length
61
+
62
+ def __getitem__(self, index):
63
+ cur_name = self.images[index]
64
+ img_path = os.path.join(self.path, cur_name)
65
+
66
+ img = Image.open(img_path).convert("RGB")
67
+
68
+ if self.transform is not None:
69
+ img = self.transform(img)
70
+ return img
71
+
72
+
73
+ def encoder_latent(G, latent):
74
+ # an encoder warper for G
75
+ # styles = [noise]
76
+ style_space = []
77
+
78
+ # styles = [G.style(s) for s in styles]
79
+ noise = [getattr(G.noises, 'noise_{}'.format(i)) for i in range(G.num_layers)]
80
+ # inject_index = G.n_latent
81
+ # latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
82
+ style_space.append(G.conv1.conv.modulation(latent[:, 0]))
83
+
84
+ i = 1
85
+ for conv1, conv2, to_rgb in zip(
86
+ G.convs[::2], G.convs[1::2], G.to_rgbs
87
+ ):
88
+ style_space.append(conv1.conv.modulation(latent[:, i]))
89
+ style_space.append(conv2.conv.modulation(latent[:, i + 1]))
90
+ i += 2
91
+
92
+ return style_space, noise
93
+
94
+
95
+ def GetBoundary(fs3, dt, threshold):
96
+ tmp = np.dot(fs3, dt)
97
+
98
+ select = np.abs(tmp) < threshold
99
+ return select
100
+
101
+
102
+ def improved_ds(ds, select):
103
+ ds_imp = copy.copy(ds)
104
+ ds_imp[select] = 0
105
+ ds_imp = ds_imp.unsqueeze(0)
106
+ return ds_imp
107
+
108
+
109
+ def main(opts):
110
+ device = "cuda" if torch.cuda.is_available() else "cpu"
111
+
112
+ # NOTE load e4e
113
+ checkpoint_path = "weights/e4e_ffhq_encode.pt"  # e4e encoder shipped under delta_edit/weights/
114
+ ckpt_enc = torch.load(checkpoint_path, map_location='cpu') # dict_keys(['state_dict', 'latent_avg', 'opts'])
115
+ encoder = psp_encoders.Encoder4Editing(50, 1024, 'ir_se')
116
+ encoder.load_state_dict(get_keys(ckpt_enc, 'encoder'), strict=True)
117
+ encoder.eval()
118
+ encoder.to(device)
119
+
120
+ # Initialize test dataset
121
+ test_dataset = Imagedataset('./test_imgs', image_size=256)
122
+ test_dataloader = DataLoader(test_dataset,
123
+ batch_size=opts.batch_size,
124
+ shuffle=False,
125
+ num_workers=int(opts.workers),
126
+ drop_last=True)
127
+
128
+ # Initialize generator
129
+ print('Loading stylegan weights from pretrained!')
130
+ g_ema = Generator(size=opts.stylegan_size, style_dim=512, n_mlp=8)
131
+ g_ema_ckpt = torch.load(opts.stylegan_weights)
132
+ g_ema.load_state_dict(g_ema_ckpt['g_ema'], strict=False)
133
+ g_ema.eval()
134
+ g_ema = g_ema.to(device)
135
+
136
+ # load relevance matrix Rs
137
+ fs3 = np.load('./models/stylegan2/npy_ffhq/fs3.npy')
138
+ np.set_printoptions(suppress=True)
139
+
140
+ # Initialze DeltaMapper
141
+ net = DeltaMapper()
142
+ net_ckpt = torch.load(opts.checkpoint_path)
143
+ net.load_state_dict(net_ckpt)
144
+ net = net.to(device)
145
+
146
+ # Load CLIP model
147
+ clip_model, preprocess = clip.load("ViT-B/32", device=device)
148
+ avg_pool = torch.nn.AvgPool2d(kernel_size=256 // 32)
149
+ upsample = torch.nn.Upsample(scale_factor=7)
150
+
151
+ os.makedirs(opts.save_dir, exist_ok=True)
152
+
153
+ neutral = 'face'
154
+ target_list = opts.target.split(',')
155
+ # print(target_list)
156
+
157
+ dt_list = []
158
+ select_list = []
159
+ for target in target_list:
160
+ classnames = [target, neutral]
161
+ dt = map_tool.GetDt(classnames, clip_model)
162
+ select = GetBoundary(fs3, dt, opts.threshold)
163
+ dt = torch.Tensor(dt).to(device)
164
+ dt = dt / dt.norm(dim=-1, keepdim=True).float().clamp(min=1e-5)
165
+
166
+ select_list.append(select)
167
+ dt_list.append(dt)
168
+
169
+ for bid, batch in enumerate(test_dataloader):
170
+ if bid == opts.num_all:
171
+ break
172
+ input_img = batch.to(device)
173
+ with torch.no_grad():
174
+ latent_w = encoder(input_img)
175
+ latent_avg = ckpt_enc['latent_avg'].cuda()
176
+ latent_w = latent_w + latent_avg.repeat(latent_w.shape[0], 1, 1)
177
+
178
+ style_space, noise = encoder_latent(g_ema, latent_w)
179
+ latent_s = torch.cat(style_space, dim=1)
180
+
181
+ img_gen_for_clip = upsample(input_img)
182
+ img_gen_for_clip = avg_pool(img_gen_for_clip)
183
+ c_latents = clip_model.encode_image(img_gen_for_clip)
184
+ c_latents = c_latents / c_latents.norm(dim=-1, keepdim=True).float()
185
+
186
+ delta_s_list = []
187
+
188
+ for i, dt in enumerate(dt_list):
189
+ delta_c = torch.cat((c_latents, dt.unsqueeze(0)), dim=1)
190
+ with torch.no_grad():
191
+ fake_delta_s = net(latent_s, delta_c)
192
+ improved_fake_delta_s = improved_ds(fake_delta_s[0], select_list[i])
193
+ delta_s_list.append(improved_fake_delta_s)
194
+
195
+ with torch.no_grad():
196
+ img_ori = stylespace_util.decoder_validate(g_ema, latent_s, latent_w)
197
+
198
+ img_list = [img_ori]
199
+ for delta_s in delta_s_list:
200
+ img_gen = stylespace_util.decoder_validate(g_ema, latent_s + delta_s, latent_w)
201
+ img_list.append(img_gen)
202
+ img_gen_all = torch.cat(img_list, dim=3)
203
+ torchvision.utils.save_image(img_gen_all, os.path.join(opts.save_dir, "%04d.jpg" % (bid + 1)),
204
+ normalize=True, value_range=(-1, 1))
205
+ print(f'completed👍! Please check results in {opts.save_dir}')
206
+
207
+
208
+ if __name__ == "__main__":
209
+ opts = TestOptions().parse()
210
+ main(opts)
delta_edit/scripts/train.py ADDED
@@ -0,0 +1,63 @@
1
+ import os
2
+ import sys
3
+
4
+ import torch
5
+ from torch.utils.data import DataLoader
6
+
7
+ sys.path.append("")
8
+ sys.path.append("..")
9
+
10
+ from datasets.train_dataset import TrainLatentsDataset
11
+ from options.train_options import TrainOptions
12
+ from delta_mapper import DeltaMapper
13
+
14
+ def main(opts):
15
+
16
+ device = "cuda" if torch.cuda.is_available() else "cpu"
17
+
18
+ train_dataset = TrainLatentsDataset(opts)
19
+ train_dataloader = DataLoader(train_dataset,
20
+ batch_size=opts.batch_size,
21
+ shuffle=True,
22
+ num_workers=int(opts.workers),
23
+ drop_last=True)
24
+
25
+ #Initialize DeltaMapper
26
+ net = DeltaMapper().to(device)
27
+
28
+ #Initialize optimizer
29
+ optimizer = torch.optim.Adam(list(net.parameters()), lr=opts.learning_rate)
30
+
31
+ #Initialize loss
32
+ l2_loss = torch.nn.MSELoss().to(device)
33
+ cosine_loss = torch.nn.CosineSimilarity(dim=-1).to(device)
34
+
35
+ #save dir
36
+ os.makedirs(os.path.join(opts.checkpoint_path, opts.classname), exist_ok=True)
37
+
38
+ for batch_idx, batch in enumerate(train_dataloader):
39
+
40
+ latent_s, delta_c, delta_s = batch
41
+ latent_s = latent_s.to(device)
42
+ delta_c = delta_c.to(device)
43
+ delta_s = delta_s.to(device)
44
+
45
+ fake_delta_s = net(latent_s, delta_c)
46
+
47
+ optimizer.zero_grad()
48
+ loss_l2 = l2_loss(fake_delta_s, delta_s)
49
+ loss_cos = 1 - torch.mean(cosine_loss(fake_delta_s, delta_s))
50
+
51
+ loss = opts.l2_lambda * loss_l2 + opts.cos_lambda * loss_cos
52
+ loss.backward()
53
+ optimizer.step()
54
+
55
+ if batch_idx % opts.print_interval == 0:
56
+ print(batch_idx, loss.detach().cpu().numpy(), loss_l2.detach().cpu().numpy(), loss_cos.detach().cpu().numpy())
57
+
58
+ if batch_idx % opts.save_interval == 0:
59
+ torch.save(net.state_dict(), os.path.join(opts.checkpoint_path, opts.classname, "net_%06d.pth" % batch_idx))
60
+
61
+ if __name__ == "__main__":
62
+ opts = TrainOptions().parse()
63
+ main(opts)
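Note that the training loop above makes a single pass over `TrainLatentsDataset` (there is no epoch loop) and names checkpoints by batch index; the `(latent_s, delta_c, delta_s)` triplets it consumes are presumably the latents precomputed by `generate_codes.py` in this upload.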
delta_edit/utils/__init__.py ADDED
File without changes
delta_edit/utils/map_tool.py ADDED
@@ -0,0 +1,120 @@
1
+ import torch
2
+ import clip
3
+ import os
4
+ import numpy as np
5
+
6
+ imagenet_templates = [
7
+ 'a bad photo of a {}.',
8
+ # 'a photo of many {}.',
9
+ 'a sculpture of a {}.',
10
+ 'a photo of the hard to see {}.',
11
+ 'a low resolution photo of the {}.',
12
+ 'a rendering of a {}.',
13
+ 'graffiti of a {}.',
14
+ 'a bad photo of the {}.',
15
+ 'a cropped photo of the {}.',
16
+ 'a tattoo of a {}.',
17
+ 'the embroidered {}.',
18
+ 'a photo of a hard to see {}.',
19
+ 'a bright photo of a {}.',
20
+ 'a photo of a clean {}.',
21
+ 'a photo of a dirty {}.',
22
+ 'a dark photo of the {}.',
23
+ 'a drawing of a {}.',
24
+ 'a photo of my {}.',
25
+ 'the plastic {}.',
26
+ 'a photo of the cool {}.',
27
+ 'a close-up photo of a {}.',
28
+ 'a black and white photo of the {}.',
29
+ 'a painting of the {}.',
30
+ 'a painting of a {}.',
31
+ 'a pixelated photo of the {}.',
32
+ 'a sculpture of the {}.',
33
+ 'a bright photo of the {}.',
34
+ 'a cropped photo of a {}.',
35
+ 'a plastic {}.',
36
+ 'a photo of the dirty {}.',
37
+ 'a jpeg corrupted photo of a {}.',
38
+ 'a blurry photo of the {}.',
39
+ 'a photo of the {}.',
40
+ 'a good photo of the {}.',
41
+ 'a rendering of the {}.',
42
+ 'a {} in a video game.',
43
+ 'a photo of one {}.',
44
+ 'a doodle of a {}.',
45
+ 'a close-up photo of the {}.',
46
+ 'a photo of a {}.',
47
+ 'the origami {}.',
48
+ 'the {} in a video game.',
49
+ 'a sketch of a {}.',
50
+ 'a doodle of the {}.',
51
+ 'a origami {}.',
52
+ 'a low resolution photo of a {}.',
53
+ 'the toy {}.',
54
+ 'a rendition of the {}.',
55
+ 'a photo of the clean {}.',
56
+ 'a photo of a large {}.',
57
+ 'a rendition of a {}.',
58
+ 'a photo of a nice {}.',
59
+ 'a photo of a weird {}.',
60
+ 'a blurry photo of a {}.',
61
+ 'a cartoon {}.',
62
+ 'art of a {}.',
63
+ 'a sketch of the {}.',
64
+ 'a embroidered {}.',
65
+ 'a pixelated photo of a {}.',
66
+ 'itap of the {}.',
67
+ 'a jpeg corrupted photo of the {}.',
68
+ 'a good photo of a {}.',
69
+ 'a plushie {}.',
70
+ 'a photo of the nice {}.',
71
+ 'a photo of the small {}.',
72
+ 'a photo of the weird {}.',
73
+ 'the cartoon {}.',
74
+ 'art of the {}.',
75
+ 'a drawing of the {}.',
76
+ 'a photo of the large {}.',
77
+ 'a black and white photo of a {}.',
78
+ 'the plushie {}.',
79
+ 'a dark photo of a {}.',
80
+ 'itap of a {}.',
81
+ 'graffiti of the {}.',
82
+ 'a toy {}.',
83
+ 'itap of my {}.',
84
+ 'a photo of a cool {}.',
85
+ 'a photo of a small {}.',
86
+ 'a tattoo of the {}.',
87
+ ]
88
+
89
+ def zeroshot_classifier(classnames, templates,model):
90
+ with torch.no_grad():
91
+ zeroshot_weights = []
92
+ for classname in classnames:
93
+ texts = [template.format(classname) for template in templates] #format with class
94
+ texts = clip.tokenize(texts).cuda() #tokenize
95
+ class_embeddings = model.encode_text(texts) #embed with text encoder
96
+ class_embeddings /= class_embeddings.norm(dim=-1, keepdim=True)
97
+ class_embedding = class_embeddings.mean(dim=0)
98
+ class_embedding /= class_embedding.norm()
99
+ zeroshot_weights.append(class_embedding)
100
+ zeroshot_weights = torch.stack(zeroshot_weights, dim=1).cuda()
101
+ return zeroshot_weights
102
+
103
+ def GetDt(classnames,model):
104
+ text_features=zeroshot_classifier(classnames, imagenet_templates,model).t()
105
+
106
+ dt=text_features[0]-text_features[1]
107
+ dt=dt.cpu().numpy()
108
+
109
+ return dt
110
+
111
+
112
+ if __name__ == "__main__":
113
+ device = "cuda" if torch.cuda.is_available() else "cpu"
114
+ model, preprocess = clip.load("ViT-B/32", device=device)
115
+
116
+ neutral='face with eyes' #@param {type:"string"}
117
+ target='face with blue eyes' #@param {type:"string"}
118
+ classnames=[target,neutral]
119
+ dt = GetDt(classnames,model)
120
+ print(dt.shape)
delta_edit/utils/stylespace_util.py ADDED
@@ -0,0 +1,160 @@
1
+ import torch
2
+ import numpy as np
3
+ import matplotlib.pyplot as plt
4
+ import torchvision
5
+
6
+ from torch.nn import functional as F
7
+
8
+ index = [0,1,1,2,2,3,4,4,5,6,6,7,8,8,9,10,10,11,12,12,13,14,14,15,16,16]
9
+
10
+ def conv_warper(layer, input, style, noise):
11
+
12
+ conv = layer.conv
13
+ batch, in_channel, height, width = input.shape
14
+
15
+ style = style.view(batch, 1, in_channel, 1, 1)
16
+ weight = conv.scale * conv.weight * style
17
+
18
+ if conv.demodulate:
19
+ demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8)
20
+ weight = weight * demod.view(batch, conv.out_channel, 1, 1, 1)
21
+
22
+ weight = weight.view(
23
+ batch * conv.out_channel, in_channel, conv.kernel_size, conv.kernel_size
24
+ )
25
+
26
+ if conv.upsample:
27
+ input = input.view(1, batch * in_channel, height, width)
28
+ weight = weight.view(
29
+ batch, conv.out_channel, in_channel, conv.kernel_size, conv.kernel_size
30
+ )
31
+ weight = weight.transpose(1, 2).reshape(
32
+ batch * in_channel, conv.out_channel, conv.kernel_size, conv.kernel_size
33
+ )
34
+ out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch)
35
+ _, _, height, width = out.shape
36
+ out = out.view(batch, conv.out_channel, height, width)
37
+ out = conv.blur(out)
38
+
39
+ elif conv.downsample:
40
+ input = conv.blur(input)
41
+ _, _, height, width = input.shape
42
+ input = input.view(1, batch * in_channel, height, width)
43
+ out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
44
+ _, _, height, width = out.shape
45
+ out = out.view(batch, conv.out_channel, height, width)
46
+
47
+ else:
48
+ input = input.view(1, batch * in_channel, height, width)
49
+ out = F.conv2d(input, weight, padding=conv.padding, groups=batch)
50
+ _, _, height, width = out.shape
51
+ out = out.view(batch, conv.out_channel, height, width)
52
+
53
+ out = layer.noise(out, noise=noise)
54
+ out = layer.activate(out)
55
+
56
+ return out
57
+
58
+ def decoder(G, style_space, latent, noise):
59
+
60
+ out = G.input(latent)
61
+ out = conv_warper(G.conv1, out, style_space[0], noise[0])
62
+ skip = G.to_rgb1(out, latent[:, 1])
63
+
64
+ i = 1
65
+ for conv1, conv2, noise1, noise2, to_rgb in zip(
66
+ G.convs[::2], G.convs[1::2], noise[1::2], noise[2::2], G.to_rgbs
67
+ ):
68
+ out = conv_warper(conv1, out, style_space[i], noise=noise1)
69
+ out = conv_warper(conv2, out, style_space[i+1], noise=noise2)
70
+ skip = to_rgb(out, latent[:, i + 2], skip)
71
+
72
+ i += 2
73
+
74
+ image = skip
75
+
76
+ return image
77
+
78
+ def decoder_validate(G, style_space, latent):
79
+
80
+ style_space = split_stylespace(style_space)
81
+ noise = [getattr(G.noises, 'noise_{}'.format(i)) for i in range(G.num_layers)]
82
+
83
+ out = G.input(latent)
84
+ out = conv_warper(G.conv1, out, style_space[0], noise[0])
85
+ skip = G.to_rgb1(out, latent[:, 1])
86
+
87
+ i = 1
88
+ for conv1, conv2, noise1, noise2, to_rgb in zip(
89
+ G.convs[::2], G.convs[1::2], noise[1::2], noise[2::2], G.to_rgbs
90
+ ):
91
+ out = conv_warper(conv1, out, style_space[i], noise=noise1)
92
+ out = conv_warper(conv2, out, style_space[i+1], noise=noise2)
93
+ skip = to_rgb(out, latent[:, i + 2], skip)
94
+
95
+ i += 2
96
+
97
+ image = skip
98
+
99
+ return image
100
+
101
+ def encoder_noise(G, noise):
102
+
103
+ styles = [noise]
104
+ style_space = []
105
+
106
+ styles = [G.style(s) for s in styles]
107
+ noise = [getattr(G.noises, 'noise_{}'.format(i)) for i in range(G.num_layers)]
108
+ inject_index = G.n_latent
109
+ latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
110
+ style_space.append(G.conv1.conv.modulation(latent[:, 0]))
111
+
112
+ i = 1
113
+ for conv1, conv2, noise1, noise2, to_rgb in zip(
114
+ G.convs[::2], G.convs[1::2], noise[1::2], noise[2::2], G.to_rgbs
115
+ ):
116
+ style_space.append(conv1.conv.modulation(latent[:, i]))
117
+ style_space.append(conv2.conv.modulation(latent[:, i+1]))
118
+ i += 2
119
+
120
+ return style_space, latent, noise
121
+
122
+ def encoder_latent(G, latent):
123
+ # an encoder warper for G
124
+
125
+ style_space = []
126
+
127
+ noise = [getattr(G.noises, 'noise_{}'.format(i)) for i in range(G.num_layers)]
128
+
129
+ style_space.append(G.conv1.conv.modulation(latent[:, 0]))
130
+
131
+ i = 1
132
+ for conv1, conv2, to_rgb in zip(
133
+ G.convs[::2], G.convs[1::2], G.to_rgbs
134
+ ):
135
+ style_space.append(conv1.conv.modulation(latent[:, i]))
136
+ style_space.append(conv2.conv.modulation(latent[:, i+1]))
137
+ i += 2
138
+
139
+ return style_space, noise
140
+
141
+ def split_stylespace(style):
142
+ style_space = []
143
+
144
+ for idx in range(10):
145
+ style_space.append(style[:, idx*512 : (idx+1) * 512])
146
+
147
+ style_space.append(style[:, 10*512: 10*512 + 256])
148
+ style_space.append(style[:, 10*512 + 256: 10*512 + 256*2])
149
+ style_space.append(style[:, 10*512 + 256*2: 10*512 + 256*2 + 128])
150
+ style_space.append(style[:, 10*512 + 256*2 + 128: 10*512 + 256*2 + 128 * 2])
151
+ style_space.append(style[:, 10*512 + 256*2 + 128*2: 10*512 + 256*2 + 128*2 + 64])
152
+ style_space.append(style[:, 10*512 + 256*2 + 128*2 + 64: 10*512 + 256*2 + 128*2 + 64*2])
153
+ style_space.append(style[:, 10*512 + 256*2 + 128*2 + 64*2: 10*512 + 256*2 + 128*2 + 64*2 + 32])
154
+
155
+ return style_space
156
+
157
+ def fuse_stylespace(style):
158
+ new_s = torch.cat(style, dim=1)
159
+
160
+ return new_s
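For orientation, `split_stylespace` assumes the flat style vector used throughout these scripts is 6048-d, laid out as ten 512-d chunks followed by 256/256/128/128/64/64/32, one chunk per modulation input of the 1024-px generator (cf. `encoder_latent` above). A quick, illustrative sanity check, assuming the `delta_edit/` working directory:

```python
import torch
from utils.stylespace_util import split_stylespace

sizes = [512] * 10 + [256, 256, 128, 128, 64, 64, 32]  # 17 chunks, 6048 channels in total
flat = torch.randn(2, sum(sizes))                       # same layout as latent_s in the scripts
chunks = split_stylespace(flat)
assert [c.shape[1] for c in chunks] == sizes
print(sum(sizes))  # 6048
```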
delta_edit/weights/e4e_ffhq_encode.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2ace1d9a8c05c10a399bcd500b8dda118f759ff1aac89dbdab7435f2136a0999
3
+ size 1201649680
delta_edit/weights/net_face.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f71dc9c1e1dfc29409a80d0f8c35c11f80ab503c79e43fb7e73f7ba3c27a75e2
3
+ size 331066573
delta_edit/weights/stylegan2-ffhq-config-f.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bae494ef77e32a9cd1792a81a3c167692a0e64f6bcd8b06592ff42917e2ed46e
3
+ size 381462551
filtered_laion_faces.npy ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d87bc740b1cf34768a4fd5d0622b48873f801779bef9b98823533e40f538f70b
3
+ size 216228753
filtered_laion_faces.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:76441b2ce1b3df6b9db079e99646f479be8cefa49f2ea72cc69084d6fecee391
3
+ size 24711982