Commit bd5376c (verified) · Luffuly committed · 1 parent: 074f529
feature_extractor/preprocessor_config.json ADDED
@@ -0,0 +1,27 @@
+{
+  "crop_size": {
+    "height": 224,
+    "width": 224
+  },
+  "do_center_crop": true,
+  "do_convert_rgb": true,
+  "do_normalize": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "image_mean": [
+    0.48145466,
+    0.4578275,
+    0.40821073
+  ],
+  "image_processor_type": "CLIPImageProcessor",
+  "image_std": [
+    0.26862954,
+    0.26130258,
+    0.27577711
+  ],
+  "resample": 3,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "shortest_edge": 224
+  }
+}
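This is the stock CLIP preprocessing: resize to a 224-pixel shortest edge, center-crop to 224x224, rescale by 1/255, and normalize with the OpenAI CLIP mean/std. A minimal usage sketch follows; the repo id is an assumption taken from the `_name_or_path` fields elsewhere in this commit and may not match the actual repository name.

```python
# Sketch only: load the image processor defined above and preprocess one image.
from PIL import Image
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor.from_pretrained(
    "Luffuly/unique3d-mvimage-diffuser",  # assumed repo id
    subfolder="feature_extractor",
)

image = Image.new("RGB", (512, 512))  # placeholder input image
pixel_values = processor(images=image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 224, 224]) after resize + center crop
```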
image_encoder/config.json ADDED
@@ -0,0 +1,23 @@
+{
+  "_name_or_path": "Luffuly/unique3d-mvimage-diffuser",
+  "architectures": [
+    "CLIPVisionModelWithProjection"
+  ],
+  "attention_dropout": 0.0,
+  "dropout": 0.0,
+  "hidden_act": "quick_gelu",
+  "hidden_size": 1024,
+  "image_size": 224,
+  "initializer_factor": 1.0,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "layer_norm_eps": 1e-05,
+  "model_type": "clip_vision_model",
+  "num_attention_heads": 16,
+  "num_channels": 3,
+  "num_hidden_layers": 24,
+  "patch_size": 14,
+  "projection_dim": 768,
+  "torch_dtype": "float16",
+  "transformers_version": "4.45.2"
+}
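These hyperparameters (hidden_size 1024, 24 layers, patch size 14, projection_dim 768) match a CLIP ViT-L/14 vision tower. A hedged loading sketch, again assuming the repo id from `_name_or_path`:

```python
# Sketch only: load the vision encoder and produce projected image embeddings.
import torch
from transformers import CLIPVisionModelWithProjection

image_encoder = CLIPVisionModelWithProjection.from_pretrained(
    "Luffuly/unique3d-mvimage-diffuser",  # assumed repo id
    subfolder="image_encoder",
    torch_dtype=torch.float16,
)

pixel_values = torch.randn(1, 3, 224, 224, dtype=torch.float16)
with torch.no_grad():
    image_embeds = image_encoder(pixel_values).image_embeds
print(image_embeds.shape)  # torch.Size([1, 768]), i.e. projection_dim
```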
image_encoder/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0dd0c1777bb75e2e9b7aa29b799b3ee8ebaaaa731e2c471a9ec589f12542cce7
+size 607980096
model_index.json ADDED
@@ -0,0 +1,29 @@
+{
+  "_class_name": "MVDiffusionImagePipeline",
+  "_diffusers_version": "0.30.3",
+  "feature_extractor": [
+    "transformers",
+    "CLIPImageProcessor"
+  ],
+  "image_encoder": [
+    "transformers",
+    "CLIPVisionModelWithProjection"
+  ],
+  "requires_safety_checker": true,
+  "safety_checker": [
+    null,
+    null
+  ],
+  "scheduler": [
+    "diffusers",
+    "DDIMScheduler"
+  ],
+  "unet": [
+    "mv_unet",
+    "UnifieldWrappedUNet"
+  ],
+  "vae": [
+    "diffusers",
+    "AutoencoderKL"
+  ]
+}
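The pipeline class (MVDiffusionImagePipeline) and the UNet (UnifieldWrappedUNet from mv_unet) are custom components shipped with the repo rather than stock diffusers classes; note also that requires_safety_checker is true while safety_checker is left null. A hedged loading sketch, assuming the repo id from `_name_or_path` and a diffusers version that can pull custom pipeline/model code from the Hub via trust_remote_code:

```python
# Sketch only: load the pipeline declared in model_index.json.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "Luffuly/unique3d-mvimage-diffuser",  # assumed repo id
    torch_dtype=torch.float16,
    trust_remote_code=True,  # needed for the custom pipeline and UNet classes
)
pipe.to("cuda")
```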
scheduler/scheduler_config.json ADDED
@@ -0,0 +1,20 @@
+{
+  "_class_name": "DDIMScheduler",
+  "_diffusers_version": "0.30.3",
+  "beta_end": 0.012,
+  "beta_schedule": "scaled_linear",
+  "beta_start": 0.00085,
+  "clip_sample": false,
+  "clip_sample_range": 1.0,
+  "dynamic_thresholding_ratio": 0.995,
+  "num_train_timesteps": 1000,
+  "prediction_type": "epsilon",
+  "rescale_betas_zero_snr": false,
+  "sample_max_value": 1.0,
+  "set_alpha_to_one": false,
+  "skip_prk_steps": true,
+  "steps_offset": 1,
+  "thresholding": false,
+  "timestep_spacing": "leading",
+  "trained_betas": null
+}
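These are the usual Stable Diffusion DDIM settings: scaled-linear betas from 0.00085 to 0.012 over 1000 training timesteps with epsilon prediction. A minimal sketch of instantiating it on its own (repo id assumed as above):

```python
# Sketch only: load the scheduler and set an inference schedule.
from diffusers import DDIMScheduler

scheduler = DDIMScheduler.from_pretrained(
    "Luffuly/unique3d-mvimage-diffuser",  # assumed repo id
    subfolder="scheduler",
)
scheduler.set_timesteps(num_inference_steps=50)
print(len(scheduler.timesteps))  # 50 DDIM steps spaced across 1000 training steps
```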
unet/config.json ADDED
@@ -0,0 +1,69 @@
+{
+  "_class_name": "UnifieldWrappedUNet",
+  "_diffusers_version": "0.30.3",
+  "_name_or_path": "outputs/vroid-mvimage-6view/checkpoint",
+  "act_fn": "silu",
+  "addition_embed_type": null,
+  "addition_embed_type_num_heads": 64,
+  "addition_time_embed_dim": null,
+  "attention_head_dim": 8,
+  "attention_type": "default",
+  "block_out_channels": [
+    320,
+    640,
+    1280,
+    1280
+  ],
+  "center_input_sample": false,
+  "class_embed_type": null,
+  "class_embeddings_concat": false,
+  "conv_in_kernel": 3,
+  "conv_out_kernel": 3,
+  "cross_attention_dim": 768,
+  "cross_attention_norm": null,
+  "down_block_types": [
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "DownBlock2D"
+  ],
+  "downsample_padding": 1,
+  "dropout": 0.0,
+  "dual_cross_attention": false,
+  "encoder_hid_dim": null,
+  "encoder_hid_dim_type": null,
+  "flip_sin_to_cos": true,
+  "freq_shift": 0,
+  "in_channels": 8,
+  "layers_per_block": 2,
+  "mid_block_only_cross_attention": null,
+  "mid_block_scale_factor": 1,
+  "mid_block_type": "UNetMidBlock2DCrossAttn",
+  "norm_eps": 1e-05,
+  "norm_num_groups": 32,
+  "num_attention_heads": null,
+  "num_class_embeds": 8,
+  "only_cross_attention": false,
+  "out_channels": 4,
+  "projection_class_embeddings_input_dim": null,
+  "resnet_out_scale_factor": 1.0,
+  "resnet_skip_time_act": false,
+  "resnet_time_scale_shift": "default",
+  "reverse_transformer_layers_per_block": null,
+  "sample_size": 64,
+  "time_cond_proj_dim": null,
+  "time_embedding_act_fn": null,
+  "time_embedding_dim": null,
+  "time_embedding_type": "positional",
+  "timestep_post_act": null,
+  "transformer_layers_per_block": 1,
+  "up_block_types": [
+    "UpBlock2D",
+    "CrossAttnUpBlock2D",
+    "CrossAttnUpBlock2D",
+    "CrossAttnUpBlock2D"
+  ],
+  "n_views": 6,
+  "upcast_attention": false,
+  "use_linear_projection": false
+}
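Compared with a stock SD 1.5 UNet, the notable fields are in_channels = 8 (the wrapped UNet concatenates 4 condition-latent channels onto the 4 noisy-latent channels, see mv_unet.py below), cross_attention_dim = 768 (matching the image encoder's projection_dim), num_class_embeds = 8, and n_views = 6. A hedged sketch for inspecting the config without instantiating the custom class (repo id assumed):

```python
# Sketch only: read the UNet config and check the channel arithmetic.
from diffusers import UNet2DConditionModel

cfg = UNet2DConditionModel.load_config(
    "Luffuly/unique3d-mvimage-diffuser",  # assumed repo id
    subfolder="unet",
)

latent_channels = 4
# 4 noisy-latent channels + 4 concatenated condition-latent channels
assert cfg["in_channels"] == 2 * latent_channels
print(cfg["in_channels"], cfg["out_channels"], cfg["n_views"])  # 8 4 6
```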
unet/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c1134886018ecde11b10f01ae71557789b24762d0c9263524c3fb2bfeff99f02
+size 3438254688
unet/mv_unet.py ADDED
@@ -0,0 +1,177 @@
+import torch
+from typing import Optional, Tuple, Union
+from diffusers import UNet2DConditionModel
+from diffusers.models.attention_processor import Attention
+from diffusers.models.unets.unet_2d_condition import UNet2DConditionOutput
+
+
+def switch_multiview_processor(model, enable_filter=lambda x:True):
+    def recursive_add_processors(name: str, module: torch.nn.Module):
+        for sub_name, child in module.named_children():
+            recursive_add_processors(f"{name}.{sub_name}", child)
+
+        if isinstance(module, Attention):
+            processor = module.get_processor()
+            if isinstance(processor, multiviewAttnProc):
+                processor.enabled = enable_filter(f"{name}.processor")
+
+    for name, module in model.named_children():
+        recursive_add_processors(name, module)
+
+
+def add_multiview_processor(model: torch.nn.Module, enable_filter=lambda x:True, **kwargs):
+    return_dict = torch.nn.ModuleDict()
+    def recursive_add_processors(name: str, module: torch.nn.Module):
+        for sub_name, child in module.named_children():
+            if "ref_unet" not in (sub_name + name):
+                recursive_add_processors(f"{name}.{sub_name}", child)
+
+        if isinstance(module, Attention):
+            new_processor = multiviewAttnProc(
+                chained_proc=module.get_processor(),
+                enabled=enable_filter(f"{name}.processor"),
+                name=f"{name}.processor",
+                hidden_states_dim=module.inner_dim,
+                **kwargs
+            )
+            module.set_processor(new_processor)
+            return_dict[f"{name}.processor".replace(".", "__")] = new_processor
+
+    for name, module in model.named_children():
+        recursive_add_processors(name, module)
+
+    return return_dict
+
+
+class multiviewAttnProc(torch.nn.Module):
+    def __init__(
+        self,
+        chained_proc,
+        enabled=False,
+        name=None,
+        hidden_states_dim=None,
+        chain_pos="parralle", # before or parralle or after
+        num_modalities=1,
+        views=4,
+        base_img_size=64,
+    ) -> None:
+        super().__init__()
+        self.enabled = enabled
+        self.chained_proc = chained_proc
+        self.name = name
+        self.hidden_states_dim = hidden_states_dim
+        self.num_modalities = num_modalities
+        self.views = views
+        self.base_img_size = base_img_size
+        self.chain_pos = chain_pos
+        self.diff_joint_attn = True
+
+    def __call__(
+        self,
+        attn: Attention,
+        hidden_states: torch.FloatTensor,
+        encoder_hidden_states: Optional[torch.FloatTensor] = None,
+        attention_mask: Optional[torch.FloatTensor] = None,
+        **kwargs
+    ) -> torch.Tensor:
+        if not self.enabled:
+            return self.chained_proc(attn, hidden_states, encoder_hidden_states, attention_mask, **kwargs)
+
+        B, L, C = hidden_states.shape
+        mv = self.views
+        hidden_states = hidden_states.reshape(B // mv, mv, L, C).reshape(-1, mv * L, C)
+        hidden_states = self.chained_proc(attn, hidden_states, encoder_hidden_states, attention_mask, **kwargs)
+        return hidden_states.reshape(B // mv, mv, L, C).reshape(-1, L, C)
+
+
+
+class UnifieldWrappedUNet(UNet2DConditionModel):
+    def __init__(
+        self,
+        sample_size: Optional[int] = None,
+        in_channels: int = 4,
+        out_channels: int = 4,
+        center_input_sample: bool = False,
+        flip_sin_to_cos: bool = True,
+        freq_shift: int = 0,
+        down_block_types: Tuple[str] = (
+            "CrossAttnDownBlock2D",
+            "CrossAttnDownBlock2D",
+            "CrossAttnDownBlock2D",
+            "DownBlock2D",
+        ),
+        mid_block_type: Optional[str] = "UNetMidBlock2DCrossAttn",
+        up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D"),
+        only_cross_attention: Union[bool, Tuple[bool]] = False,
+        block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
+        layers_per_block: Union[int, Tuple[int]] = 2,
+        downsample_padding: int = 1,
+        mid_block_scale_factor: float = 1,
+        dropout: float = 0.0,
+        act_fn: str = "silu",
+        norm_num_groups: Optional[int] = 32,
+        norm_eps: float = 1e-5,
+        cross_attention_dim: Union[int, Tuple[int]] = 1280,
+        transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple]] = 1,
+        reverse_transformer_layers_per_block: Optional[Tuple[Tuple[int]]] = None,
+        encoder_hid_dim: Optional[int] = None,
+        encoder_hid_dim_type: Optional[str] = None,
+        attention_head_dim: Union[int, Tuple[int]] = 8,
+        num_attention_heads: Optional[Union[int, Tuple[int]]] = None,
+        dual_cross_attention: bool = False,
+        use_linear_projection: bool = False,
+        class_embed_type: Optional[str] = None,
+        addition_embed_type: Optional[str] = None,
+        addition_time_embed_dim: Optional[int] = None,
+        num_class_embeds: Optional[int] = None,
+        upcast_attention: bool = False,
+        resnet_time_scale_shift: str = "default",
+        resnet_skip_time_act: bool = False,
+        resnet_out_scale_factor: float = 1.0,
+        time_embedding_type: str = "positional",
+        time_embedding_dim: Optional[int] = None,
+        time_embedding_act_fn: Optional[str] = None,
+        timestep_post_act: Optional[str] = None,
+        time_cond_proj_dim: Optional[int] = None,
+        conv_in_kernel: int = 3,
+        conv_out_kernel: int = 3,
+        projection_class_embeddings_input_dim: Optional[int] = None,
+        attention_type: str = "default",
+        class_embeddings_concat: bool = False,
+        mid_block_only_cross_attention: Optional[bool] = None,
+        cross_attention_norm: Optional[str] = None,
+        addition_embed_type_num_heads: int = 64,
+        multiview_attn_position: str = "attn1",
+        n_views: int = 4,
+        num_modalities: int = 1,
+        latent_size: int = 64,
+        multiview_chain_pose: str = "parralle",
+        **kwargs
+    ):
+        super().__init__(**{
+            k: v for k, v in locals().items() if k not in
+            ["self", "kwargs", "__class__", "multiview_attn_position", "n_views", "num_modalities", "latent_size", "multiview_chain_pose"]
+        })
+        add_multiview_processor(
+            model = self,
+            enable_filter = lambda name: name.endswith(f"{multiview_attn_position}.processor"),
+            num_modalities = num_modalities,
+            base_img_size = latent_size,
+            chain_pos = multiview_chain_pose,
+            views=n_views
+        )
+
+        switch_multiview_processor(self, enable_filter=lambda name: name.endswith(f"{multiview_attn_position}.processor"))
+
+    def __call__(
+        self,
+        sample: torch.Tensor,
+        timestep: Union[torch.Tensor, float, int],
+        encoder_hidden_states: torch.Tensor,
+        condition_latens: torch.Tensor = None,
+        class_labels: Optional[torch.Tensor] = None,
+    ) -> Union[UNet2DConditionOutput, Tuple]:
+        sample = torch.cat([sample, condition_latens], dim=1)
+        return self.forward(
+            sample, timestep, encoder_hidden_states, class_labels=class_labels,
+        )
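The key piece here is multiviewAttnProc: when enabled on the self-attention ("attn1") processors, it folds the view axis into the token axis so attention runs jointly over all views of the same object, then unfolds it again. A standalone illustration of that reshape round trip, using made-up sizes (6 views, a 64x64 latent grid, 320 channels):

```python
# Sketch only: the (B*views, L, C) <-> (B, views*L, C) reshape that
# multiviewAttnProc applies around the chained attention processor.
import torch

views, batch, L, C = 6, 2, 64 * 64, 320
hidden_states = torch.randn(batch * views, L, C)

# fold views into the sequence so self-attention sees all views at once
joint = hidden_states.reshape(batch, views, L, C).reshape(-1, views * L, C)
print(joint.shape)  # torch.Size([2, 24576, 320])

# ...the wrapped processor would run on `joint` here...

# unfold back to per-view sequences
restored = joint.reshape(batch, views, L, C).reshape(-1, L, C)
assert torch.equal(restored, hidden_states)
```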
vae/config.json ADDED
@@ -0,0 +1,38 @@
+{
+  "_class_name": "AutoencoderKL",
+  "_diffusers_version": "0.30.3",
+  "_name_or_path": "Luffuly/unique3d-mvimage-diffuser",
+  "act_fn": "silu",
+  "block_out_channels": [
+    128,
+    256,
+    512,
+    512
+  ],
+  "down_block_types": [
+    "DownEncoderBlock2D",
+    "DownEncoderBlock2D",
+    "DownEncoderBlock2D",
+    "DownEncoderBlock2D"
+  ],
+  "force_upcast": true,
+  "in_channels": 3,
+  "latent_channels": 4,
+  "latents_mean": null,
+  "latents_std": null,
+  "layers_per_block": 2,
+  "mid_block_add_attention": true,
+  "norm_num_groups": 32,
+  "out_channels": 3,
+  "sample_size": 256,
+  "scaling_factor": 0.18215,
+  "shift_factor": null,
+  "up_block_types": [
+    "UpDecoderBlock2D",
+    "UpDecoderBlock2D",
+    "UpDecoderBlock2D",
+    "UpDecoderBlock2D"
+  ],
+  "use_post_quant_conv": true,
+  "use_quant_conv": true
+}
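This is the standard Stable Diffusion AutoencoderKL (4 latent channels, 8x spatial downsampling, scaling_factor 0.18215). A hedged round-trip sketch (repo id assumed as above):

```python
# Sketch only: encode/decode with the VAE, applying the latent scaling factor.
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained(
    "Luffuly/unique3d-mvimage-diffuser",  # assumed repo id
    subfolder="vae",
)

image = torch.randn(1, 3, 256, 256)  # placeholder image in [-1, 1]
with torch.no_grad():
    latents = vae.encode(image).latent_dist.sample() * vae.config.scaling_factor
    print(latents.shape)  # torch.Size([1, 4, 32, 32]) -- 8x downsampling
    recon = vae.decode(latents / vae.config.scaling_factor).sample
print(recon.shape)  # torch.Size([1, 3, 256, 256])
```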
vae/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3e4c08995484ee61270175e9e7a072b66a6e4eeb5f0c266667fe1f45b90daf9a
+size 167335342