Zhongfang Zhuang committed (verified)
Commit a9dae3e · 1 Parent(s): 12e00fd

Upload folder using huggingface_hub

Files changed (6)
  1. README.md +29 -3
  2. config.json +24 -0
  3. mlp.py +269 -0
  4. model.safetensors +3 -0
  5. models_hf.py +431 -0
  6. ndlinear.py +91 -0
README.md CHANGED
@@ -1,3 +1,29 @@
- ---
- license: mit
- ---
+ # DiT Model
+
+ This repository contains the implementation of the DiT (Diffusion Transformer) model, which leverages NdLinear layers for efficient multi-dimensional linear transformations. The model is designed to be compact yet powerful, suitable for various tasks requiring high-dimensional data processing.
+
+ ## Overview
+
+ The DiT model is built using several components:
+
+ - **NdLinear**: A custom PyTorch layer for projecting tensors into multi-space representations, capturing multivariate structures.
+ - **NdMlp**: A multi-layer perceptron using NdLinear layers for enhanced feature extraction.
+ - **NdTimestepEmbedder**: Embeds scalar timesteps into vector representations using NdLinear transformations.
+
+ ## Files
+
+ - **mlp.py**: Contains the implementation of various MLP architectures, including NdMlp and GluMlp.
+ - **models_hf.py**: Defines the DiT model architecture, including the DiTBlock and FinalLayer.
+ - **ndlinear.py**: Implements the NdLinear layer, which is central to the model's ability to handle multi-dimensional data efficiently.
+
+ ## Installation
+
+ To use the DiT model, ensure you have the required dependencies installed (timm is imported by mlp.py and models_hf.py):
+
+ ```bash
+ pip install torch timm transformers==4.52.4
+ ```
+
+ ## License
+
+ This project is licensed under the MIT License. See the LICENSE file for more details.
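Note: since `DiT` in models_hf.py below subclasses `PreTrainedModel` (with `config_class = DiTConfig`), the uploaded weights can be loaded directly from a local copy of this repository. A minimal loading sketch, assuming the committed files sit together in a hypothetical local directory `./dit-ndlinear`:

```python
# Minimal loading sketch; assumes config.json, model.safetensors, mlp.py,
# models_hf.py, and ndlinear.py are together in one local directory.
from models_hf import DiT

local_dir = "./dit-ndlinear"  # hypothetical path to a local checkout of this repo
model = DiT.from_pretrained(local_dir)  # reads config.json and model.safetensors
model.eval()
print(f"{sum(p.numel() for p in model.parameters()) / 1e6:.1f}M parameters")
```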
config.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "architectures": [
+     "DiT"
+   ],
+   "class_dropout_prob": 0.1,
+   "depth": 28,
+   "hidden_size": 1152,
+   "in_channels": 4,
+   "input_size": 32,
+   "learn_sigma": true,
+   "mlp_ratio": 4.0,
+   "model_type": "ndlinear_dit",
+   "num_classes": 1000,
+   "num_heads": 16,
+   "out_channels": 8,
+   "patch_size": 2,
+   "torch_dtype": "float32",
+   "transformers_version": "4.52.4",
+   "tse_scale_factor": 8.0,
+   "use_ndmlp": true,
+   "use_ndtse": true,
+   "use_num_transforms": 20,
+   "use_variant": 4
+ }
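The derived fields line up with `DiTConfig` in models_hf.py below: with `learn_sigma: true` the model predicts both noise and sigma, so `out_channels` is twice `in_channels`, and `input_size / patch_size = 16` gives 256 patch tokens per image. A small sanity-check sketch, assuming config.json is in the working directory:

```python
from models_hf import DiTConfig

# Rebuild the config from the committed JSON and check the derived quantities.
cfg = DiTConfig.from_json_file("config.json")

assert cfg.out_channels == cfg.in_channels * 2          # learn_sigma=True doubles output channels: 4 -> 8
assert (cfg.input_size // cfg.patch_size) ** 2 == 256   # patch tokens seen by each DiTBlock
assert cfg.hidden_size % cfg.num_heads == 0             # 1152 / 16 = 72-dim attention heads
```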
mlp.py ADDED
@@ -0,0 +1,269 @@
+ """ MLP module w/ dropout and configurable activation layer
+
+ Hacked together by / Copyright 2020 Ross Wightman
+
+ Modified by Ensemble AI to use NdLinear instead of Linear. Copyright 2025
+
+ """
+ from functools import partial
+
+ from torch import nn as nn
+ from ndlinear import NdLinear
+ from timm.layers.grn import GlobalResponseNorm
+ from timm.layers.helpers import to_2tuple
+
+
+ class NdMlp(nn.Module):
+     def __init__(
+             self,
+             in_features,
+             hidden_features=None,
+             out_features=None,
+             act_layer=nn.GELU,
+             norm_layer=None,
+             bias=True,
+             drop=0.,
+             use_variant=4
+     ):
+         super().__init__()
+         out_features = out_features or in_features
+         hidden_features = hidden_features or in_features
+         bias = to_2tuple(bias)
+         self.use_variant = use_variant
+         drop_probs = to_2tuple(drop)
+         self.fc1 = NdLinear((in_features, 1), (hidden_features // 4, 1))  # (384, 1), (384, 1)
+         self.fc2 = NdLinear((in_features, 1), (hidden_features // 4, 1))
+
+         self.act = act_layer()
+         self.drop1 = nn.Dropout(drop_probs[0])
+         self.norm = norm_layer(hidden_features) if norm_layer is not None else nn.Identity()
+         self.drop2 = nn.Dropout(drop_probs[1])
+
+     def forward(self, x):
+         x_dim0, x_dim1, x_dim2 = x.shape
+         # print(f"x.shape: {x.shape}")
+         x = x.reshape(x_dim0 * x_dim1, x_dim2, 1) if self.use_variant != 9 else x
+         x = self.fc1(x)
+         x = self.act(x)
+         x = self.drop1(x)
+         # x = self.norm(x) #
+         x = self.fc2(x)
+         x = x.reshape(x_dim0, x_dim1, x_dim2) if self.use_variant != 9 else x
+         x = self.drop2(x)
+         return x
+
+ class GluMlp(nn.Module):
+     """ MLP w/ GLU style gating
+     See: https://arxiv.org/abs/1612.08083, https://arxiv.org/abs/2002.05202
+
+     NOTE: When use_conv=True, expects 2D NCHW tensors, otherwise N*C expected.
+     """
+     def __init__(
+             self,
+             in_features,
+             hidden_features=None,
+             out_features=None,
+             act_layer=nn.Sigmoid,
+             norm_layer=None,
+             bias=True,
+             drop=0.,
+             use_conv=False,
+             gate_last=True,
+     ):
+         super().__init__()
+         out_features = out_features or in_features
+         hidden_features = hidden_features or in_features
+         assert hidden_features % 2 == 0
+         bias = to_2tuple(bias)
+         drop_probs = to_2tuple(drop)
+         linear_layer = partial(nn.Conv2d, kernel_size=1) if use_conv else nn.Linear
+         self.chunk_dim = 1 if use_conv else -1
+         self.gate_last = gate_last  # use second half of width for gate
+
+         self.fc1 = linear_layer(in_features, hidden_features, bias=bias[0])
+         self.act = act_layer()
+         self.drop1 = nn.Dropout(drop_probs[0])
+         self.norm = norm_layer(hidden_features // 2) if norm_layer is not None else nn.Identity()
+         self.fc2 = linear_layer(hidden_features // 2, out_features, bias=bias[1])
+         self.drop2 = nn.Dropout(drop_probs[1])
+
+     def init_weights(self):
+         # override init of fc1 w/ gate portion set to weight near zero, bias=1
+         if self.fc1.bias is not None:
+             nn.init.ones_(self.fc1.bias[self.fc1.bias.shape[0] // 2:])
+         nn.init.normal_(self.fc1.weight[self.fc1.weight.shape[0] // 2:], std=1e-6)
+
+     def forward(self, x):
+         x = self.fc1(x)
+         x1, x2 = x.chunk(2, dim=self.chunk_dim)
+         x = x1 * self.act(x2) if self.gate_last else self.act(x1) * x2
+         x = self.drop1(x)
+         x = self.norm(x)
+         x = self.fc2(x)
+         x = self.drop2(x)
+         return x
+
+
+ SwiGLUPacked = partial(GluMlp, act_layer=nn.SiLU, gate_last=False)
+
+
+ class SwiGLU(nn.Module):
+     """ SwiGLU
+     NOTE: GluMLP above can implement SwiGLU, but this impl has split fc1 and
+     better matches some other common impl which makes mapping checkpoints simpler.
+     """
+     def __init__(
+             self,
+             in_features,
+             hidden_features=None,
+             out_features=None,
+             act_layer=nn.SiLU,
+             norm_layer=None,
+             bias=True,
+             drop=0.,
+     ):
+         super().__init__()
+         out_features = out_features or in_features
+         hidden_features = hidden_features or in_features
+         bias = to_2tuple(bias)
+         drop_probs = to_2tuple(drop)
+
+         self.fc1_g = nn.Linear(in_features, hidden_features, bias=bias[0])
+         self.fc1_x = nn.Linear(in_features, hidden_features, bias=bias[0])
+         self.act = act_layer()
+         self.drop1 = nn.Dropout(drop_probs[0])
+         self.norm = norm_layer(hidden_features) if norm_layer is not None else nn.Identity()
+         self.fc2 = nn.Linear(hidden_features, out_features, bias=bias[1])
+         self.drop2 = nn.Dropout(drop_probs[1])
+
+     def init_weights(self):
+         # override init of fc1 w/ gate portion set to weight near zero, bias=1
+         if self.fc1_g.bias is not None:
+             nn.init.ones_(self.fc1_g.bias)
+         nn.init.normal_(self.fc1_g.weight, std=1e-6)
+
+     def forward(self, x):
+         x_gate = self.fc1_g(x)
+         x = self.fc1_x(x)
+         x = self.act(x_gate) * x
+         x = self.drop1(x)
+         x = self.norm(x)
+         x = self.fc2(x)
+         x = self.drop2(x)
+         return x
+
+
+ class GatedMlp(nn.Module):
+     """ MLP as used in gMLP
+     """
+     def __init__(
+             self,
+             in_features,
+             hidden_features=None,
+             out_features=None,
+             act_layer=nn.GELU,
+             norm_layer=None,
+             gate_layer=None,
+             bias=True,
+             drop=0.,
+     ):
+         super().__init__()
+         out_features = out_features or in_features
+         hidden_features = hidden_features or in_features
+         bias = to_2tuple(bias)
+         drop_probs = to_2tuple(drop)
+
+         self.fc1 = nn.Linear(in_features, hidden_features, bias=bias[0])
+         self.act = act_layer()
+         self.drop1 = nn.Dropout(drop_probs[0])
+         if gate_layer is not None:
+             assert hidden_features % 2 == 0
+             self.gate = gate_layer(hidden_features)
+             hidden_features = hidden_features // 2  # FIXME base reduction on gate property?
+         else:
+             self.gate = nn.Identity()
+         self.norm = norm_layer(hidden_features) if norm_layer is not None else nn.Identity()
+         self.fc2 = nn.Linear(hidden_features, out_features, bias=bias[1])
+         self.drop2 = nn.Dropout(drop_probs[1])
+
+     def forward(self, x):
+         x = self.fc1(x)
+         x = self.act(x)
+         x = self.drop1(x)
+         x = self.gate(x)
+         x = self.norm(x)
+         x = self.fc2(x)
+         x = self.drop2(x)
+         return x
+
+
+ class ConvMlp(nn.Module):
+     """ MLP using 1x1 convs that keeps spatial dims (for 2D NCHW tensors)
+     """
+     def __init__(
+             self,
+             in_features,
+             hidden_features=None,
+             out_features=None,
+             act_layer=nn.ReLU,
+             norm_layer=None,
+             bias=True,
+             drop=0.,
+     ):
+         super().__init__()
+         out_features = out_features or in_features
+         hidden_features = hidden_features or in_features
+         bias = to_2tuple(bias)
+
+         self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1, bias=bias[0])
+         self.norm = norm_layer(hidden_features) if norm_layer else nn.Identity()
+         self.act = act_layer()
+         self.drop = nn.Dropout(drop)
+         self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1, bias=bias[1])
+
+     def forward(self, x):
+         x = self.fc1(x)
+         x = self.norm(x)
+         x = self.act(x)
+         x = self.drop(x)
+         x = self.fc2(x)
+         return x
+
+
+ class GlobalResponseNormMlp(nn.Module):
+     """ MLP w/ Global Response Norm (see grn.py), nn.Linear or 1x1 Conv2d
+
+     NOTE: Intended for '2D' NCHW (use_conv=True) or NHWC (use_conv=False, channels-last) tensor layouts
+     """
+     def __init__(
+             self,
+             in_features,
+             hidden_features=None,
+             out_features=None,
+             act_layer=nn.GELU,
+             bias=True,
+             drop=0.,
+             use_conv=False,
+     ):
+         super().__init__()
+         out_features = out_features or in_features
+         hidden_features = hidden_features or in_features
+         bias = to_2tuple(bias)
+         drop_probs = to_2tuple(drop)
+         linear_layer = partial(nn.Conv2d, kernel_size=1) if use_conv else nn.Linear
+
+         self.fc1 = linear_layer(in_features, hidden_features, bias=bias[0])
+         self.act = act_layer()
+         self.drop1 = nn.Dropout(drop_probs[0])
+         self.grn = GlobalResponseNorm(hidden_features, channels_last=not use_conv)
+         self.fc2 = linear_layer(hidden_features, out_features, bias=bias[1])
+         self.drop2 = nn.Dropout(drop_probs[1])
+
+     def forward(self, x):
+         x = self.fc1(x)
+         x = self.act(x)
+         x = self.drop1(x)
+         x = self.grn(x)
+         x = self.fc2(x)
+         x = self.drop2(x)
+         return x
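A quick shape check for `NdMlp` as it is used by `DiTBlock` in models_hf.py below: with `hidden_size=1152` and `mlp_ratio=4.0`, `hidden_features=4608`, so `hidden_features // 4` equals `in_features`, which is what the reshape back to the input width relies on. A minimal sketch with made-up batch and token sizes:

```python
import torch
from mlp import NdMlp

# Shapes follow the config above: hidden_size=1152, mlp_ratio=4.0 -> hidden_features=4608.
# With use_variant=4, NdMlp folds (batch, tokens, channels) into (batch*tokens, channels, 1)
# for the NdLinear layers and restores the original shape afterwards.
mlp = NdMlp(in_features=1152, hidden_features=4608, use_variant=4)

x = torch.randn(2, 256, 1152)    # (batch, patch tokens, hidden); 256 tokens = (32 / 2) ** 2
with torch.no_grad():
    y = mlp(x)
print(y.shape)                    # torch.Size([2, 256, 1152])
```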
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d8d3d3e218068ea8163743a1d4049dc3e225ac3bec897cda9816598f7ce07a19
+ size 915766976
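The checkpoint is stored via Git LFS (roughly 0.9 GB of float32 weights, per the config above). To inspect it without building the model, it can be opened directly with the `safetensors` library; a small sketch, assuming the file has been downloaded to the working directory:

```python
from safetensors.torch import load_file

# Load the raw tensors from the LFS-backed file (assumes a local model.safetensors).
state_dict = load_file("model.safetensors")
n_params = sum(t.numel() for t in state_dict.values())
print(f"{len(state_dict)} tensors, {n_params / 1e6:.1f}M parameters")
```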
models_hf.py ADDED
@@ -0,0 +1,431 @@
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
+ # All rights reserved.
+
+ # This source code is licensed under the license found in the
+ # LICENSE file in the root directory of this source tree.
+ # --------------------------------------------------------
+ # References:
+ # GLIDE: https://github.com/openai/glide-text2im
+ # MAE: https://github.com/facebookresearch/mae/blob/main/models_mae.py
+ #
+ # Modifications Copyright (c) Ensemble AI, 2025.
+ # Description of modifications: Using NdLinear in the model to
+ # make the model more compact yet with similar performance.
+ import torch
+ import torch.nn as nn
+ import torch.utils.checkpoint  # explicit import for torch.utils.checkpoint.checkpoint used in DiT.forward
+ import numpy as np
+ import math
+ from timm.models.vision_transformer import PatchEmbed, Attention, Mlp
+ from mlp import NdMlp
+ from ndlinear import NdLinear
+ from transformers import PreTrainedModel, PretrainedConfig
+
+ def modulate(x, shift, scale):
+     return x * (1 + scale.unsqueeze(1)) + shift.unsqueeze(1)
+
+ class TimestepEmbedder(nn.Module):
+     def __init__(self, hidden_size, frequency_embedding_size=256):
+         super().__init__()
+         self.mlp = nn.Sequential(
+             nn.Linear(frequency_embedding_size, hidden_size, bias=True),
+             nn.SiLU(),
+             nn.Linear(hidden_size, hidden_size, bias=True),
+         )
+         self.frequency_embedding_size = frequency_embedding_size
+
+     @staticmethod
+     def timestep_embedding(t, dim, max_period=10000):
+         half = dim // 2
+         freqs = torch.exp(
+             -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
+         ).to(device=t.device)
+         args = t[:, None].float() * freqs[None]
+         embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
+         if dim % 2:
+             embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
+         return embedding
+
+     def forward(self, t):
+         t_freq = self.timestep_embedding(t, self.frequency_embedding_size)
+         t_emb = self.mlp(t_freq)
+         return t_emb
+
+ class NdTimestepEmbedder(nn.Module):
+     def __init__(self, hidden_size, frequency_embedding_size=256, use_num_transforms=2, tse_scale_factor=1, knowledge_transfer=False, src_layers=None):
+         super().__init__()
+         self.activation = nn.SiLU()
+         self.frequency_embedding_size = frequency_embedding_size
+         self.use_num_transforms = use_num_transforms
+
+         if knowledge_transfer and not src_layers:
+             raise ValueError("Source layers must be provided for knowledge transfer.")
+
+         if use_num_transforms == 2:
+             self.ndlinear_1 = NdLinear((frequency_embedding_size // 16, 16), (int(hidden_size // tse_scale_factor // 2), 2))
+             self.ndlinear_2 = NdLinear((int(hidden_size // tse_scale_factor // 2), 2), (hidden_size, 1))
+
+         if use_num_transforms == 20:
+             self.ndlinear_1 = NdLinear((frequency_embedding_size, 1), (int(hidden_size // tse_scale_factor), 1))
+             self.ndlinear_2 = NdLinear((int(hidden_size // tse_scale_factor), 1), (hidden_size, 1))
+
+     @staticmethod
+     def timestep_embedding(t, dim, max_period=10000):
+         half = dim // 2
+         freqs = torch.exp(
+             -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
+         ).to(device=t.device)
+         args = t[:, None].float() * freqs[None]
+         embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
+         if dim % 2:
+             embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
+         return embedding
+
+     def forward(self, t):
+         t_freq = self.timestep_embedding(t, self.frequency_embedding_size)
+         if self.use_num_transforms == 2:
+             t_freq = t_freq.reshape(*t_freq.shape, 1)
+         elif self.use_num_transforms == 21:
+             t_freq = t_freq.reshape(t_freq.shape[0], 16, 16)
+         elif self.use_num_transforms == 3:
+             t_freq = t_freq.reshape(t_freq.shape[0], t_freq.shape[1] // 16, 16, 1)
+         elif self.use_num_transforms == 4:
+             t_freq = t_freq.reshape(t_freq.shape[0], t_freq.shape[1] // 16, 4, 4, 1)
+         t_emb = self.ndlinear_1(t_freq)
+         t_emb = self.activation(t_emb)
+         t_emb = self.ndlinear_2(t_emb)
+         t_emb = t_emb.squeeze()
+         return t_emb
+
+ class LabelEmbedder(nn.Module):
+     def __init__(self, num_classes, hidden_size, dropout_prob):
+         super().__init__()
+         use_cfg_embedding = dropout_prob > 0
+         self.embedding_table = nn.Embedding(num_classes + use_cfg_embedding, hidden_size)
+         self.num_classes = num_classes
+         self.dropout_prob = dropout_prob
+
+     def token_drop(self, labels, force_drop_ids=None):
+         if force_drop_ids is None:
+             drop_ids = torch.rand(labels.shape[0], device=labels.device) < self.dropout_prob
+         else:
+             drop_ids = force_drop_ids == 1
+         labels = torch.where(drop_ids, self.num_classes, labels)
+         return labels
+
+     def forward(self, labels, train, force_drop_ids=None):
+         use_dropout = self.dropout_prob > 0
+         if (train and use_dropout) or (force_drop_ids is not None):
+             labels = self.token_drop(labels, force_drop_ids)
+         embeddings = self.embedding_table(labels)
+         return embeddings
+
+ class DiTBlock(nn.Module):
+     def __init__(self, hidden_size, num_heads, mlp_ratio=4.0, use_ndmlp=False, use_variant=4, use_ndadaln=False, **block_kwargs):
+         super().__init__()
+         self.norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
+         self.attn = Attention(hidden_size, num_heads=num_heads, qkv_bias=True, **block_kwargs)
+         self.norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
+         mlp_hidden_dim = int(hidden_size * mlp_ratio)
+         approx_gelu = lambda: nn.GELU(approximate="tanh")
+         if use_ndmlp:
+             self.mlp = NdMlp(in_features=hidden_size, hidden_features=mlp_hidden_dim, act_layer=approx_gelu, drop=0, use_variant=use_variant)
+         else:
+             self.mlp = Mlp(in_features=hidden_size, hidden_features=mlp_hidden_dim, act_layer=approx_gelu, drop=0)
+         # adaLN modulation: produces the six shift/scale/gate signals consumed in forward(),
+         # following the same NdLinear / nn.Linear pattern as FinalLayer below.
+         if use_ndadaln:
+             self.adaLN_modulation = nn.Sequential(
+                 nn.SiLU(),
+                 NdLinear((hidden_size, 1), (6 * hidden_size, 1))
+             )
+         else:
+             self.adaLN_modulation = nn.Sequential(
+                 nn.SiLU(),
+                 nn.Linear(hidden_size, 6 * hidden_size, bias=True)
+             )
+
+     def forward(self, x, c):
+         shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.adaLN_modulation(c).chunk(6, dim=1)
+         x = x + gate_msa.unsqueeze(1) * self.attn(modulate(self.norm1(x), shift_msa, scale_msa))
+         modulated_x = modulate(self.norm2(x), shift_mlp, scale_mlp)
+         mlp_output = self.mlp(modulated_x)
+         gated_mlp_output = gate_mlp.unsqueeze(1) * mlp_output
+         x = x + gated_mlp_output
+         return x
+
+ class FinalLayer(nn.Module):
+     def __init__(self, hidden_size, patch_size, out_channels, use_ndadaln=False):
+         super().__init__()
+         self.norm_final = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
+         self.linear = nn.Linear(hidden_size, patch_size * patch_size * out_channels, bias=True)
+         self.use_ndadaln = use_ndadaln
+         if self.use_ndadaln:
+             self.adaLN_modulation = nn.Sequential(
+                 nn.SiLU(),
+                 NdLinear((hidden_size, 1), (2 * hidden_size, 1))
+             )
+         else:
+             self.adaLN_modulation = nn.Sequential(
+                 nn.SiLU(),
+                 nn.Linear(hidden_size, 2 * hidden_size, bias=True)
+             )
+
+     def forward(self, x, c):
+         shift, scale = self.adaLN_modulation(c).chunk(2, dim=1)
+         x = modulate(self.norm_final(x), shift, scale)
+         x = self.linear(x)
+         return x
+
+ class DiTConfig(PretrainedConfig):
+     model_type = "ndlinear_dit"
+
+     def __init__(self, input_size=32, patch_size=2, in_channels=4, hidden_size=1152, depth=28, num_heads=16, mlp_ratio=4.0, class_dropout_prob=0.1, num_classes=1000, learn_sigma=True, use_ndmlp=False, use_ndtse=False, use_variant=4, tse_scale_factor=2, use_num_transforms=2, **kwargs):
+         super().__init__(**kwargs)
+         self.input_size = input_size
+         self.patch_size = patch_size
+         self.in_channels = in_channels
+         self.out_channels = in_channels * 2 if learn_sigma else in_channels
+         self.hidden_size = hidden_size
+         self.depth = depth
+         self.num_heads = num_heads
+         self.mlp_ratio = mlp_ratio
+         self.class_dropout_prob = class_dropout_prob
+         self.num_classes = num_classes
+         self.learn_sigma = learn_sigma
+         self.use_ndmlp = use_ndmlp
+         self.use_ndtse = use_ndtse
+         self.use_variant = use_variant
+         self.tse_scale_factor = tse_scale_factor
+         self.use_num_transforms = use_num_transforms
+
+ class DiT(PreTrainedModel):
+     config_class = DiTConfig
+
+     def __init__(self, config):
+         super().__init__(config)
+         self.input_size = config.input_size
+         self.patch_size = config.patch_size
+         self.in_channels = config.in_channels
+         self.hidden_size = config.hidden_size
+         self.depth = config.depth
+         self.num_heads = config.num_heads
+         self.mlp_ratio = config.mlp_ratio
+         self.class_dropout_prob = config.class_dropout_prob
+         self.num_classes = config.num_classes
+         self.learn_sigma = config.learn_sigma
+         self.use_ndmlp = config.use_ndmlp
+         self.use_ndtse = config.use_ndtse
+         self.use_variant = config.use_variant
+         self.tse_scale_factor = config.tse_scale_factor
+         self.use_num_transforms = config.use_num_transforms
+         self.out_channels = config.out_channels
+         self.ndadaln = getattr(config, "ndadaln", False)
+
+         self.x_embedder = PatchEmbed(self.input_size, self.patch_size, self.in_channels, self.hidden_size, bias=True)
+
+         if self.use_ndtse:
+             self.t_embedder = NdTimestepEmbedder(
+                 hidden_size=self.hidden_size,
+                 frequency_embedding_size=256,
+                 use_num_transforms=self.use_num_transforms,
+                 tse_scale_factor=1,
+                 knowledge_transfer=False,
+                 src_layers=None
+             )
+         else:
+             self.t_embedder = TimestepEmbedder(self.hidden_size)
+
+         self.y_embedder = LabelEmbedder(self.num_classes, self.hidden_size, self.class_dropout_prob)
+         num_patches = self.x_embedder.num_patches
+         self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, self.hidden_size), requires_grad=False)
+
+         self.blocks = nn.ModuleList([
+             DiTBlock(self.hidden_size, self.num_heads, mlp_ratio=self.mlp_ratio,
+                      use_ndmlp=self.use_ndmlp, use_variant=self.use_variant)
+             for _ in range(self.depth)
+         ])
+         if self.use_ndmlp:
+             approx_gelu = lambda: nn.GELU(approximate="tanh")
+             for idx, layer in enumerate(self.blocks):
+                 if idx % 2 == 0:
+                     layer.mlp = NdMlp(
+                         in_features=self.hidden_size,
+                         hidden_features=self.hidden_size * 4,
+                         act_layer=approx_gelu,
+                         drop=0,
+                         use_variant=self.use_variant
+                     )
+         self.final_layer = FinalLayer(self.hidden_size, self.patch_size, self.out_channels, use_ndadaln=self.ndadaln)
+         self.initialize_weights()
+
+     def initialize_weights(self):
+         def _basic_init(module):
+             if isinstance(module, nn.Linear):
+                 torch.nn.init.xavier_uniform_(module.weight)
+                 if module.bias is not None:
+                     nn.init.constant_(module.bias, 0)
+
+         self.apply(_basic_init)
+
+         pos_embed = get_2d_sincos_pos_embed(self.pos_embed.shape[-1], int(self.x_embedder.num_patches ** 0.5))
+         self.pos_embed.data.copy_(torch.from_numpy(pos_embed).float().unsqueeze(0))
+
+         w = self.x_embedder.proj.weight.data
+         nn.init.xavier_uniform_(w.view([w.shape[0], -1]))
+         nn.init.constant_(self.x_embedder.proj.bias, 0)
+
+         nn.init.normal_(self.y_embedder.embedding_table.weight, std=0.02)
+
+         if not self.use_ndtse:
+             nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)
+             nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)
+
+         nn.init.constant_(self.final_layer.adaLN_modulation[-1].weight, 0)
+         nn.init.constant_(self.final_layer.adaLN_modulation[-1].bias, 0)
+         nn.init.constant_(self.final_layer.linear.weight, 0)
+         nn.init.constant_(self.final_layer.linear.bias, 0)
+
+     def unpatchify(self, x):
+         c = self.out_channels
+         p = self.x_embedder.patch_size[0]
+         h = w = int(x.shape[1] ** 0.5)
+         assert h * w == x.shape[1]
+
+         x = x.reshape(shape=(x.shape[0], h, w, p, p, c))
+         x = torch.einsum('nhwpqc->nchpwq', x)
+         imgs = x.reshape(shape=(x.shape[0], c, h * p, h * p))
+         return imgs
+
+     def ckpt_wrapper(self, module):
+         def ckpt_forward(*inputs):
+             outputs = module(*inputs)
+             return outputs
+
+         return ckpt_forward
+
+     def forward(self, x, t, y):
+         x = self.x_embedder(x) + self.pos_embed
+         t = self.t_embedder(t)
+         y = self.y_embedder(y, self.training)
+         c = t + y
+
+         for block in self.blocks:
+             x = torch.utils.checkpoint.checkpoint(self.ckpt_wrapper(block), x, c)
+         x = self.final_layer(x, c)
+         x = self.unpatchify(x)
+         return x
+
+     def forward_with_cfg(self, x, t, y, cfg_scale):
+         half = x[: len(x) // 2]
+         combined = torch.cat([half, half], dim=0)
+         model_out = self.forward(combined, t, y)
+         eps, rest = model_out[:, :3], model_out[:, 3:]
+         cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
+         half_eps = uncond_eps + cfg_scale * (cond_eps - uncond_eps)
+         eps = torch.cat([half_eps, half_eps], dim=0)
+         return torch.cat([eps, rest], dim=1)
+
+ def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False, extra_tokens=0):
+     grid_h = np.arange(grid_size, dtype=np.float32)
+     grid_w = np.arange(grid_size, dtype=np.float32)
+     grid = np.meshgrid(grid_w, grid_h)
+     grid = np.stack(grid, axis=0)
+
+     grid = grid.reshape([2, 1, grid_size, grid_size])
+     pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
+     if cls_token and extra_tokens > 0:
+         pos_embed = np.concatenate([np.zeros([extra_tokens, embed_dim]), pos_embed], axis=0)
+     return pos_embed
+
+ def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
+     assert embed_dim % 2 == 0
+
+     emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0])
+     emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1])
+
+     emb = np.concatenate([emb_h, emb_w], axis=1)
+     return emb
+
+ def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
+     assert embed_dim % 2 == 0
+     omega = np.arange(embed_dim // 2, dtype=np.float64)
+     omega /= embed_dim / 2.
+     omega = 1. / 10000 ** omega
+
+     pos = pos.reshape(-1)
+     out = np.einsum('m,d->md', pos, omega)
+
+     emb_sin = np.sin(out)
+     emb_cos = np.cos(out)
+
+     emb = np.concatenate([emb_sin, emb_cos], axis=1)
+     return emb
+
+ def DiT_XL_2(**kwargs):
+     config = DiTConfig(depth=28, hidden_size=1152, patch_size=2, num_heads=16, **kwargs)
+     return DiT(config)
+
+ def DiT_XL_4(**kwargs):
+     config = DiTConfig(depth=28, hidden_size=1152, patch_size=4, num_heads=16, **kwargs)
+     return DiT(config)
+
+ def DiT_XL_8(**kwargs):
+     config = DiTConfig(depth=28, hidden_size=1152, patch_size=8, num_heads=16, **kwargs)
+     return DiT(config)
+
+ def DiT_L_2(**kwargs):
+     config = DiTConfig(depth=24, hidden_size=1024, patch_size=2, num_heads=16, **kwargs)
+     return DiT(config)
+
+ def DiT_L_4(**kwargs):
+     config = DiTConfig(depth=24, hidden_size=1024, patch_size=4, num_heads=16, **kwargs)
+     return DiT(config)
+
+ def DiT_L_8(**kwargs):
+     config = DiTConfig(depth=24, hidden_size=1024, patch_size=8, num_heads=16, **kwargs)
+     return DiT(config)
+
+ def DiT_B_2(**kwargs):
+     config = DiTConfig(depth=12, hidden_size=768, patch_size=2, num_heads=12, **kwargs)
+     return DiT(config)
+
+ def DiT_B_4(**kwargs):
+     config = DiTConfig(depth=12, hidden_size=768, patch_size=4, num_heads=12, **kwargs)
+     return DiT(config)
+
+ def DiT_B_8(**kwargs):
+     config = DiTConfig(depth=12, hidden_size=768, patch_size=8, num_heads=12, **kwargs)
+     return DiT(config)
+
+ def DiT_S_2(**kwargs):
+     config = DiTConfig(depth=12, hidden_size=384, patch_size=2, num_heads=6, **kwargs)
+     return DiT(config)
+
+ def DiT_S_4(**kwargs):
+     config = DiTConfig(depth=12, hidden_size=384, patch_size=4, num_heads=6, **kwargs)
+     return DiT(config)
+
+ def DiT_S_8(**kwargs):
+     config = DiTConfig(depth=12, hidden_size=384, patch_size=8, num_heads=6, **kwargs)
+     return DiT(config)
+
+ def DiT_MS_2(**kwargs):
+     config = DiTConfig(depth=6, hidden_size=384, patch_size=2, num_heads=6, **kwargs)
+     return DiT(config)
+
+ def DiT_MS_4(**kwargs):
+     config = DiTConfig(depth=6, hidden_size=384, patch_size=4, num_heads=6, **kwargs)
+     return DiT(config)
+
+ def DiT_MS_8(**kwargs):
+     config = DiTConfig(depth=6, hidden_size=384, patch_size=8, num_heads=6, **kwargs)
+     return DiT(config)
+
+ def DiT_XS_2(**kwargs):
+     config = DiTConfig(depth=1, hidden_size=384, patch_size=2, num_heads=6, **kwargs)
+     return DiT(config)
+
+ def DiT_XS_4(**kwargs):
+     config = DiTConfig(depth=1, hidden_size=384, patch_size=4, num_heads=6, **kwargs)
+     return DiT(config)
+
+ def DiT_XS_8(**kwargs):
+     config = DiTConfig(depth=1, hidden_size=384, patch_size=8, num_heads=6, **kwargs)
+     return DiT(config)
+
+ DiT_models = {
+     'DiT-XL/2': DiT_XL_2, 'DiT-XL/4': DiT_XL_4, 'DiT-XL/8': DiT_XL_8,
+     'DiT-L/2': DiT_L_2, 'DiT-L/4': DiT_L_4, 'DiT-L/8': DiT_L_8,
+     'DiT-B/2': DiT_B_2, 'DiT-B/4': DiT_B_4, 'DiT-B/8': DiT_B_8,
+     'DiT-S/2': DiT_S_2, 'DiT-S/4': DiT_S_4, 'DiT-S/8': DiT_S_8,
+     'DiT-XS/2': DiT_XS_2, 'DiT-XS/4': DiT_XS_4, 'DiT-XS/8': DiT_XS_8,
+     'DiT-MS/2': DiT_MS_2, 'DiT-MS/4': DiT_MS_4, 'DiT-MS/8': DiT_MS_8
+ }
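The size helpers at the bottom make it easy to smoke-test the architecture without the released checkpoint. A minimal sketch using the small `DiT_S_2` variant with its default (non-Nd) embedders and dummy latents shaped per the config above (`in_channels=4`, `input_size=32`); with `learn_sigma=True` the output carries 8 channels (noise plus sigma):

```python
import torch
from models_hf import DiT_S_2

model = DiT_S_2()   # depth=12, hidden_size=384, patch_size=2, num_heads=6
model.eval()

x = torch.randn(2, 4, 32, 32)        # latent images: (batch, in_channels, input_size, input_size)
t = torch.randint(0, 1000, (2,))     # dummy diffusion timesteps
y = torch.randint(0, 1000, (2,))     # dummy ImageNet class labels
with torch.no_grad():
    out = model(x, t, y)
print(out.shape)                      # torch.Size([2, 8, 32, 32])
```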
ndlinear.py ADDED
@@ -0,0 +1,91 @@
+
+ import torch
+ import torch.nn as nn
+ import torch.optim as optim
+
+
+ class NdLinear(nn.Module):
+     def __init__(self, input_dims: tuple, hidden_size: tuple, transform_outer=True, act_func=None, use_bias=True):
+         """
+         NdLinear: A PyTorch layer for projecting tensors into multi-space representations.
+
+         Unlike conventional embedding layers that map into a single vector space, NdLinear
+         transforms tensors across a collection of vector spaces, capturing multivariate structure
+         and topical information that standard deep learning architectures typically lose.
+
+         Args:
+             input_dims (tuple): Shape of input tensor (excluding batch dimension).
+             hidden_size (tuple): Target hidden dimensions after transformation.
+         """
+         super(NdLinear, self).__init__()
+
+         if len(input_dims) != len(hidden_size):
+             raise Exception("Input shape and hidden shape do not match.")
+
+         self.input_dims = input_dims
+         self.hidden_size = hidden_size
+         self.num_layers = len(input_dims)  # Must match since dims are equal
+         # Custom activation function. Default to Identity -> Do nothing.
+         self.act_func = act_func if act_func is not None else nn.Identity()
+         self.transform_outer = transform_outer
+
+         # Define transformation layers per dimension
+         self.align_layers = nn.ModuleList([
+             nn.Linear(input_dims[i], hidden_size[i], bias=use_bias) for i in range(self.num_layers)
+         ])
+         self.initialize_weights()
+
+
+     def initialize_weights(self, mean=0.0, std=0.02):
+         for layer in self.align_layers:
+             nn.init.normal_(layer.weight, mean=mean, std=std)
+             if layer.bias is not None:
+                 nn.init.constant_(layer.bias, 0)
+
+
+     def forward(self, X):
+         """
+         Forward pass to project input tensor into a new multi-space representation.
+         - Incrementally transposes, flattens, applies linear layers, and restores shape.
+
+         Expected Input Shape: [batch_size, *input_dims]
+         Output Shape: [batch_size, *hidden_size]
+
+         Args:
+             X (torch.Tensor): Input tensor with shape [batch_size, *input_dims]
+
+         Returns:
+             torch.Tensor: Output tensor with shape [batch_size, *hidden_size]
+         """
+         num_transforms = self.num_layers  # Number of transformations
+
+         # Define iteration order
+         # transform_indices = range(num_transforms) if transform_outer else reversed(range(num_transforms))
+
+         for i in range(num_transforms):
+             if self.transform_outer:
+                 layer = self.align_layers[i]
+                 transpose_dim = i + 1
+             else:
+                 layer = self.align_layers[num_transforms - (i+1)]
+                 transpose_dim = num_transforms - i
+
+             # Transpose the selected dimension to the last position
+             X = torch.transpose(X, transpose_dim, num_transforms).contiguous()
+
+             # Store original shape before transformation
+             X_size = X.shape[:-1]
+
+             # Flatten everything except the last dimension
+             X = X.view(-1, X.shape[-1])
+
+             # Apply transformation
+             X = self.act_func(layer(X))
+
+             # Reshape back to the original spatial structure (with new embedding dim)
+             X = X.view(*X_size, X.shape[-1])
+
+             # Transpose the dimension back to its original position
+             X = torch.transpose(X, transpose_dim, num_transforms).contiguous()
+
+         return X
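As the docstring describes, `NdLinear` applies one `nn.Linear` per tensor dimension, transposing each dimension to the last position, projecting it, and transposing back. A small standalone sketch with made-up dimensions:

```python
import torch
from ndlinear import NdLinear

# Two input dimensions -> two per-dimension Linear layers: 16 -> 32 and 16 -> 8.
layer = NdLinear(input_dims=(16, 16), hidden_size=(32, 8))

x = torch.randn(4, 16, 16)   # [batch_size, *input_dims]
y = layer(x)
print(y.shape)                # torch.Size([4, 32, 8]) -- [batch_size, *hidden_size]
```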