tuandunghcmut committed
Commit c7d7da9 · verified · 1 Parent(s): dd09ef3

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. DeepSeek-VL2/deepseek_vl2/__pycache__/__init__.cpython-311.pyc +0 -0
  2. DeepSeek-VL2/deepseek_vl2/__pycache__/__init__.cpython-312.pyc +0 -0
  3. DeepSeek-VL2/deepseek_vl2/models/__init__.py +26 -0
  4. DeepSeek-VL2/deepseek_vl2/models/__pycache__/__init__.cpython-39.pyc +0 -0
  5. DeepSeek-VL2/deepseek_vl2/models/__pycache__/configuration_deepseek.cpython-312.pyc +0 -0
  6. DeepSeek-VL2/deepseek_vl2/models/__pycache__/conversation.cpython-310.pyc +0 -0
  7. DeepSeek-VL2/deepseek_vl2/models/configuration_deepseek.py +210 -0
  8. DeepSeek-VL2/deepseek_vl2/models/conversation.py +310 -0
  9. DeepSeek-VL2/deepseek_vl2/models/processing_deepseek_vl_v2.py +675 -0
  10. DeepSeek-VL2/deepseek_vl2/serve/__init__.py +0 -0
  11. DeepSeek-VL2/images/multi_image_2.jpeg +0 -0
  12. EAGLE/README.md +355 -0
  13. EAGLE/evaluate_lmms_eval.py +345 -0
  14. Emu/README.md +55 -0
  15. LLM2CLIP/CODE_OF_CONDUCT.md +9 -0
  16. LLaVA/.gitattributes +29 -0
  17. LLaVA/.gitignore +35 -0
  18. PaddleMIX/.gitmodules +3 -0
  19. PaddleMIX/CITATION.cff +15 -0
  20. PaddleMIX/README.md +413 -0
  21. PaddleMIX/paddlemix_applications.md +244 -0
  22. VILA/Dockerfile +18 -0
  23. VILA/server.py +254 -0
  24. VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/commands/env.py +82 -0
  25. VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/experimental/README.md +5 -0
  26. VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/experimental/__init__.py +15 -0
  27. VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/loaders/__init__.py +54 -0
  28. VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/loaders/deprecate.py +63 -0
  29. VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/loaders/ip_adapter.py +196 -0
  30. VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/loaders/lora.py +1871 -0
  31. VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/loaders/lora_conversion_utils.py +283 -0
  32. VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/loaders/single_file.py +766 -0
  33. VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/loaders/textual_inversion.py +449 -0
  34. VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/loaders/unet.py +830 -0
  35. VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/loaders/utils.py +121 -0
  36. VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/models/__init__.py +113 -0
  37. VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/models/activations.py +123 -0
  38. VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/models/adapter.py +583 -0
  39. VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/models/attention.py +721 -0
  40. VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/models/autoencoder_tiny.py +363 -0
  41. VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/models/controlnet_sd3.py +422 -0
  42. VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/models/dit_llama.py +576 -0
  43. VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/models/dual_transformer_2d.py +158 -0
  44. VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/models/ema.py +109 -0
  45. VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/models/lvdm_aemodules3d.py +219 -0
  46. VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/models/lvdm_distributions.py +100 -0
  47. VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/models/lvdm_unet_3d.py +713 -0
  48. VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/models/modeling_outputs.py +44 -0
  49. VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/models/modelscope_autoencoder_img2vid.py +487 -0
  50. VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/models/paddleinfer_runtime.py +399 -0
DeepSeek-VL2/deepseek_vl2/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (659 Bytes).
 
DeepSeek-VL2/deepseek_vl2/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (609 Bytes).
 
DeepSeek-VL2/deepseek_vl2/models/__init__.py ADDED
@@ -0,0 +1,26 @@
# Copyright (c) 2023-2024 DeepSeek.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

from .processing_deepseek_vl_v2 import DeepseekVLV2Processor
from .modeling_deepseek_vl_v2 import DeepseekVLV2ForCausalLM

__all__ = [
    "DeepseekVLV2Processor",
    "DeepseekVLV2ForCausalLM",
]
DeepSeek-VL2/deepseek_vl2/models/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (346 Bytes).
 
DeepSeek-VL2/deepseek_vl2/models/__pycache__/configuration_deepseek.cpython-312.pyc ADDED
Binary file (10.2 kB).
 
DeepSeek-VL2/deepseek_vl2/models/__pycache__/conversation.cpython-310.pyc ADDED
Binary file (6.41 kB).
 
DeepSeek-VL2/deepseek_vl2/models/configuration_deepseek.py ADDED
@@ -0,0 +1,210 @@
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging

logger = logging.get_logger(__name__)

DEEPSEEK_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class DeepseekV2Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`DeepseekV2Model`]. It is used to instantiate a DeepSeek
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a configuration similar to that of DeepSeek-V2 with multi-latent attention.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 102400):
            Vocabulary size of the DeepSeek model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`DeepseekV2Model`].
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 11008):
            Dimension of the MLP representations.
        moe_intermediate_size (`int`, *optional*, defaults to 1407):
            Dimension of the MoE representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer decoder.
        n_shared_experts (`int`, *optional*, defaults to None):
            Number of shared experts; None means a dense model.
        n_routed_experts (`int`, *optional*, defaults to None):
            Number of routed experts; None means a dense model.
        routed_scaling_factor (`float`, *optional*, defaults to 1.0):
            Scaling factor for routed experts.
        topk_method (`str`, *optional*, defaults to `gready`):
            Top-k method used in the routed gate.
        n_group (`int`, *optional*, defaults to None):
            Number of groups for routed experts.
        topk_group (`int`, *optional*, defaults to None):
            Number of selected groups for each token (for each token, the selected experts are restricted to `topk_group` groups).
        num_experts_per_tok (`int`, *optional*, defaults to None):
            Number of selected experts; None means a dense model.
        moe_layer_freq (`int`, *optional*, defaults to 1):
            The frequency of the MoE layer: one expert layer for every `moe_layer_freq - 1` dense layers.
        first_k_dense_replace (`int`, *optional*, defaults to 0):
            Number of dense layers in the shallow part of the model
            (embed -> k dense layers -> moe -> moe ... -> lm_head).
        norm_topk_prob (`bool`, *optional*, defaults to False):
            Whether to normalize the weights of the routed experts.
        scoring_func (`str`, *optional*, defaults to 'softmax'):
            Method of computing expert weights.
        aux_loss_alpha (`float`, *optional*, defaults to 0.001):
            Auxiliary loss weight coefficient.
        seq_aux (`bool`, *optional*, defaults to True):
            Whether to compute the auxiliary loss for each individual sample.
        num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
            `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details checkout [this
            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
            `num_attention_heads`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 1):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 2):
            End of stream token id.
        pretraining_tp (`int`, *optional*, defaults to 1):
            Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
            document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is
            necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
            issue](https://github.com/pytorch/pytorch/issues/76232).
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings.
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`Dict`, *optional*):
            Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
            strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
            `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
            `max_position_embeddings` to the expected new maximum.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        use_mla (`bool`, *optional*, defaults to `True`): Use multi-latent attention or multi-head attention. If True,
            the model will use multi-latent attention; otherwise, it will use multi-head attention.

    ```python
    >>> from transformers import DeepseekV2Model, DeepseekV2Config

    >>> # Initializing a Deepseek-V2 style configuration
    >>> configuration = DeepseekV2Config()

    >>> # Initializing a model from the configuration
    >>> model = DeepseekV2Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "deepseek_v2"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=102400,
        hidden_size=4096,
        intermediate_size=11008,
        moe_intermediate_size=1407,
        num_hidden_layers=30,
        num_attention_heads=32,
        num_key_value_heads=32,
        n_shared_experts=None,
        n_routed_experts=None,
        ep_size=1,
        routed_scaling_factor=1.0,
        kv_lora_rank=512,
        q_lora_rank=1536,
        qk_rope_head_dim=64,
        v_head_dim=128,
        qk_nope_head_dim=128,
        topk_method='gready',
        n_group=None,
        topk_group=None,
        num_experts_per_tok=None,
        moe_layer_freq=1,
        first_k_dense_replace=0,
        norm_topk_prob=False,
        scoring_func='softmax',
        aux_loss_alpha=0.001,
        seq_aux=True,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=None,
        bos_token_id=100000,
        eos_token_id=100001,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_theta=10000.0,
        rope_scaling=None,
        attention_bias=False,
        attention_dropout=0.0,
        use_mla=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.moe_intermediate_size = moe_intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.n_shared_experts = n_shared_experts
        self.n_routed_experts = n_routed_experts
        self.ep_size = ep_size
        self.routed_scaling_factor = routed_scaling_factor
        self.kv_lora_rank = kv_lora_rank
        self.q_lora_rank = q_lora_rank
        self.qk_rope_head_dim = qk_rope_head_dim
        self.v_head_dim = v_head_dim
        self.qk_nope_head_dim = qk_nope_head_dim
        self.topk_method = topk_method
        self.n_group = n_group
        self.topk_group = topk_group
        self.num_experts_per_tok = num_experts_per_tok
        self.moe_layer_freq = moe_layer_freq
        self.first_k_dense_replace = first_k_dense_replace
        self.norm_topk_prob = norm_topk_prob
        self.scoring_func = scoring_func
        self.aux_loss_alpha = aux_loss_alpha
        self.seq_aux = seq_aux
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = float(rms_norm_eps)
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.use_mla = use_mla

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
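Since `DeepseekV2Config` is a standard `PretrainedConfig` subclass, the MoE- and MLA-related knobs above can be set, inspected, and serialized with the usual `transformers` config API. A minimal sketch follows; the field values and the import path are illustrative assumptions, not a released DeepSeek-V2 configuration:

```python
# Sketch: build a small illustrative config and round-trip it through the
# standard PretrainedConfig serialization API (values are NOT official ones).
from deepseek_vl2.models.configuration_deepseek import DeepseekV2Config  # illustrative import path

config = DeepseekV2Config(
    hidden_size=1024,          # illustrative, smaller than the 4096 default
    num_hidden_layers=4,
    n_routed_experts=16,       # non-None enables the MoE branch (None means dense)
    n_shared_experts=1,
    num_experts_per_tok=2,
    kv_lora_rank=256,          # MLA low-rank KV dimension
    q_lora_rank=512,
    use_mla=True,
)

print(config.model_type)              # "deepseek_v2"
print(config.to_json_string()[:200])  # serialization inherited from PretrainedConfig
```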
DeepSeek-VL2/deepseek_vl2/models/conversation.py ADDED
@@ -0,0 +1,310 @@
"""
From https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py
"""

import dataclasses
from enum import IntEnum, auto
from typing import Any, Dict, List


class SeparatorStyle(IntEnum):
    """Separator styles."""

    DeepSeek = auto()
    DeepSeekV2 = auto()
    PLAIN = auto()
    ALIGNMENT = auto()


@dataclasses.dataclass
class Conversation:
    """A class that manages prompt templates and keeps all conversation history."""

    # The name of this template
    name: str
    # The template of the system prompt
    system_template: str = "{system_message}"
    # The system message
    system_message: str = ""
    # The names of two roles
    roles: List[str] = (("USER", "ASSISTANT"),)
    # All messages. Each item is (role, message).
    messages: List[List[str]] = ()
    # The number of few shot examples
    offset: int = 0
    # The separator style and configurations
    sep_style: SeparatorStyle = SeparatorStyle.DeepSeek
    sep: str = "\n"
    sep2: str = None
    # Stop criteria (the default one is EOS token)
    stop_str: str = None
    # Stops generation if meeting any token in this list
    stop_token_ids: List[int] = None

    def get_prompt(self) -> str:
        """Get the prompt for generation."""
        system_prompt = self.system_template.format(system_message=self.system_message)
        if self.sep_style == SeparatorStyle.DeepSeek:
            seps = [self.sep, self.sep2]
            if system_prompt == "" or system_prompt is None:
                ret = ""
            else:
                ret = system_prompt + seps[0]
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + ": " + message + seps[i % 2]
                else:
                    ret += role + ":"
            return ret
        elif self.sep_style == SeparatorStyle.DeepSeekV2:
            seps = [self.sep, self.sep2]
            if system_prompt == "" or system_prompt is None:
                ret = ""
            else:
                ret = system_prompt + seps[0]
            for i, (role, message) in enumerate(self.messages):
                if message:
                    if role == "User":
                        ret += "<|sft▁begin|>\n" + message + self.sep  # <|sft▁begin|>User Input<|sft▁end|>\nResponse<|end▁of▁sentence|>
                    else:
                        ret += message + self.sep2
                else:
                    ret = ret
            return ret

        elif self.sep_style == SeparatorStyle.PLAIN:
            seps = [self.sep, self.sep2]
            ret = ""
            for i, (role, message) in enumerate(self.messages):
                if message:
                    if type(message) is tuple:
                        message, _, _ = message
                    if i % 2 == 0:
                        ret += message + seps[i % 2]
                    else:
                        ret += message + seps[i % 2]
                else:
                    ret += ""
            return ret
        elif self.sep_style == SeparatorStyle.ALIGNMENT:
            seps = [self.sep, self.sep2]
            ret = ""
            for i, (role, message) in enumerate(self.messages):
                if message:
                    if type(message) is tuple:
                        message, _, _ = message
                    if i % 2 == 0:
                        ret += '<image>\n' + seps[i % 2]
                    else:
                        ret += message + seps[i % 2]
                else:
                    ret += ""
            return ret
        else:
            raise ValueError(f"Invalid style: {self.sep_style}")

    def set_system_message(self, system_message: str):
        """Set the system message."""
        self.system_message = system_message

    def append_message(self, role: str, message: str):
        """Append a new message."""
        self.messages.append([role, message])

    def update_last_message(self, message: str):
        """Update the last output.

        The last message is typically set to be None when constructing the prompt,
        so we need to update it in-place after getting the response from a model.
        """
        self.messages[-1][1] = message

    def reset_message(self):
        """Reset a new message."""
        self.messages = []

    def to_gradio_chatbot(self):
        """Convert the conversation to gradio chatbot format."""
        ret = []
        for i, (role, msg) in enumerate(self.messages[self.offset:]):
            if i % 2 == 0:
                ret.append([msg, None])
            else:
                ret[-1][-1] = msg
        return ret

    def to_openai_api_messages(self):
        """Convert the conversation to OpenAI chat completion format."""
        system_prompt = self.system_template.format(system_message=self.system_message)
        ret = [{"role": "system", "content": system_prompt}]

        for i, (_, msg) in enumerate(self.messages[self.offset:]):
            if i % 2 == 0:
                ret.append({"role": "user", "content": msg})
            else:
                if msg is not None:
                    ret.append({"role": "assistant", "content": msg})
        return ret

    def copy(self):
        return Conversation(
            name=self.name,
            system_template=self.system_template,
            system_message=self.system_message,
            roles=self.roles,
            messages=[[x, y] for x, y in self.messages],
            offset=self.offset,
            sep_style=self.sep_style,
            sep=self.sep,
            sep2=self.sep2,
            stop_str=self.stop_str,
            stop_token_ids=self.stop_token_ids,
        )

    def dict(self):
        return {
            "template_name": self.name,
            "system_message": self.system_message,
            "roles": self.roles,
            "messages": self.messages,
            "offset": self.offset,
        }


# A global registry for all conversation templates
conv_templates: Dict[str, Conversation] = {}


def register_conv_template(template: Conversation, override: bool = False):
    """Register a new conversation template."""
    if not override:
        assert template.name not in conv_templates, f"{template.name} has been registered."

    conv_templates[template.name] = template


def get_conv_template(name: str) -> Conversation:
    """Get a conversation template."""
    return conv_templates[name].copy()


# register_conv_template(
#     Conversation(
#         name="deepseek",
#         system_template="{system_message}",
#         # system_message="You are a helpful assistant. Please answer truthfully and write out your "
#         # "thinking step by step to be sure you get the right answer.",
#         system_message="",
#         roles=("User", "Assistant"),
#         messages=(),
#         offset=0,
#         sep_style=SeparatorStyle.DeepSeek,
#         sep="\n\n",
#         sep2="<|end▁of▁sentence|>",
#         stop_token_ids=[100001],
#         stop_str=["User:", "<|end▁of▁sentence|>"]
#     )
# )
register_conv_template(
    Conversation(
        name="deepseek",
        system_template="{system_message}",
        # system_message="You are a helpful assistant. Please answer truthfully and write out your "
        # "thinking step by step to be sure you get the right answer.",
        system_message="",
        roles=("<|User|>", "<|Assistant|>"),
        messages=(),
        offset=0,
        sep_style=SeparatorStyle.DeepSeek,
        sep="\n\n",
        sep2="<|end▁of▁sentence|>",
        stop_token_ids=[100001],
        stop_str=["User:", "<|end▁of▁sentence|>"]
    )
)
# register_conv_template(
#     Conversation(
#         name="deepseekv2",
#         system_template="{system_message}",
#         system_message="",
#         roles=("User", "Assistant"),
#         messages=(),
#         offset=0,
#         sep_style=SeparatorStyle.DeepSeekV2,
#         sep="\n<|sft▁end|>",
#         sep2="<|end▁of▁sentence|>",
#         stop_token_ids=[100001],
#         stop_str=["User:", "<|end▁of▁sentence|>"]
#     )
# )
register_conv_template(
    Conversation(
        name="deepseekv2",
        system_template="{system_message}",
        system_message="",
        roles=("|<User>|", "|<Assistant>|"),
        messages=(),
        offset=0,
        sep_style=SeparatorStyle.DeepSeekV2,
        sep="\n<|sft▁end|>",
        sep2="<|end▁of▁sentence|>",
        stop_token_ids=[100001],
        stop_str=["User:", "<|end▁of▁sentence|>"]
    )
)


register_conv_template(
    Conversation(
        name="plain",
        system_template="",
        system_message="",
        roles=("", ""),
        messages=(),
        offset=0,
        sep_style=SeparatorStyle.PLAIN,
        sep="",
        sep2="",
        stop_token_ids=[100001],
        stop_str=['</s>'],
    )
)


register_conv_template(
    Conversation(
        name="alignment",
        system_template="",
        system_message="",
        roles=("", ""),
        messages=(),
        offset=0,
        sep_style=SeparatorStyle.ALIGNMENT,
        sep="",
        sep2="",
        stop_token_ids=[100001],
        stop_str=['</s>'],
    )
)


if __name__ == "__main__":
    print("deepseek template:")
    conv = get_conv_template("deepseek")
    conv.append_message(conv.roles[0], "Hello!")
    conv.append_message(conv.roles[1], "Hi! This is Tony.")
    conv.append_message(conv.roles[0], "Who are you?")
    conv.append_message(conv.roles[1], "I am a helpful assistant.")
    conv.append_message(conv.roles[0], "How are you?")
    conv.append_message(conv.roles[1], None)
    print(conv.get_prompt())

    print("deepseekv2 template:")
    conv = get_conv_template("deepseekv2")
    conv.append_message(conv.roles[0], "Hello!")
    conv.append_message(conv.roles[1], "Hi! This is Tony.")
    conv.append_message(conv.roles[0], "Who are you?")
    conv.append_message(conv.roles[1], "I am a helpful assistant.")
    conv.append_message(conv.roles[0], "How are you?")
    conv.append_message(conv.roles[1], None)
    print(conv.get_prompt())
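As a quick illustration of the `SeparatorStyle.DeepSeek` rendering logic defined above (a sketch, not part of the uploaded files; the import path is an assumption): each turn is joined as `role + ": " + message`, alternating between `sep` ("\n\n") and `sep2` ("<|end▁of▁sentence|>"), and an empty assistant slot ends the prompt with the bare role tag.

```python
# Sketch: render a single user turn with the "deepseek" template registered above.
from deepseek_vl2.models.conversation import get_conv_template  # illustrative import path

conv = get_conv_template("deepseek")
conv.append_message(conv.roles[0], "Describe this image.")  # roles[0] == "<|User|>"
conv.append_message(conv.roles[1], None)                    # leave the assistant slot open

# No system message, so the prompt starts directly with the user role:
# '<|User|>: Describe this image.\n\n<|Assistant|>:'
print(repr(conv.get_prompt()))
```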
DeepSeek-VL2/deepseek_vl2/models/processing_deepseek_vl_v2.py ADDED
@@ -0,0 +1,675 @@
# Copyright (c) 2023-2024 DeepSeek.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

from dataclasses import dataclass
from typing import Dict, Tuple, List, Literal, Optional
import math

import torch
from torch.nn.utils.rnn import pad_sequence
import torchvision.transforms as T
from transformers import LlamaTokenizerFast
from transformers.processing_utils import ProcessorMixin
from PIL import Image, ImageOps

from .conversation import get_conv_template


def select_best_resolution(image_size, candidate_resolutions):
    # used for cropping
    original_width, original_height = image_size
    best_fit = None
    max_effective_resolution = 0
    min_wasted_resolution = float("inf")

    for width, height in candidate_resolutions:
        scale = min(width / original_width, height / original_height)
        downscaled_width, downscaled_height = int(original_width * scale), int(original_height * scale)
        effective_resolution = min(downscaled_width * downscaled_height, original_width * original_height)
        wasted_resolution = (width * height) - effective_resolution

        if effective_resolution > max_effective_resolution or (effective_resolution == max_effective_resolution and wasted_resolution < min_wasted_resolution):
            max_effective_resolution = effective_resolution
            min_wasted_resolution = wasted_resolution
            best_fit = (width, height)

    return best_fit


class DictOutput(object):
    def keys(self):
        return self.__dict__.keys()

    def __getitem__(self, item):
        return self.__dict__[item]

    def __setitem__(self, key, value):
        self.__dict__[key] = value


# input_ids can also be kept for inference samples; they are simply never used in the end
@dataclass
class VLChatProcessorOutput(DictOutput):
    sft_format: str
    input_ids: torch.LongTensor
    target_ids: torch.LongTensor
    images: torch.Tensor
    images_seq_mask: torch.BoolTensor
    images_spatial_crop: torch.LongTensor
    num_image_tokens: List[int]

    def __len__(self):
        return len(self.input_ids)


@dataclass
class BatchCollateOutput(DictOutput):
    sft_format: List[str]
    input_ids: torch.LongTensor
    labels: torch.LongTensor
    images: torch.Tensor
    attention_mask: torch.Tensor
    images_seq_mask: torch.BoolTensor
    images_spatial_crop: torch.LongTensor
    seq_lens: List[int]

    def to(self, device, dtype=torch.bfloat16):
        self.input_ids = self.input_ids.to(device)
        self.labels = self.labels.to(device)
        self.attention_mask = self.attention_mask.to(device)
        self.images_seq_mask = self.images_seq_mask.to(device)
        self.images_spatial_crop = self.images_spatial_crop.to(device)
        self.images = self.images.to(device=device, dtype=dtype)
        return self


class ImageTransform(object):
    def __init__(
        self,
        mean: Optional[Tuple[float, float, float]] = (0.5, 0.5, 0.5),
        std: Optional[Tuple[float, float, float]] = (0.5, 0.5, 0.5),
        normalize: bool = True
    ):
        self.mean = mean
        self.std = std
        self.normalize = normalize

        transform_pipelines = [
            T.ToTensor()
        ]

        if normalize:
            transform_pipelines.append(T.Normalize(mean, std))

        self.transform = T.Compose(transform_pipelines)

    def __call__(self, pil_img: Image.Image):
        x = self.transform(pil_img)
        return x


class DeepseekVLV2Processor(ProcessorMixin):
    tokenizer_class = ("LlamaTokenizer", "LlamaTokenizerFast")
    attributes = ["tokenizer"]

    def __init__(
        self,
        tokenizer: LlamaTokenizerFast,
        candidate_resolutions: Tuple[Tuple[int, int]],
        patch_size: int,
        downsample_ratio: int,
        image_mean: Tuple[float, float, float] = (0.5, 0.5, 0.5),
        image_std: Tuple[float, float, float] = (0.5, 0.5, 0.5),
        normalize: bool = True,
        image_token: str = "<image>",
        pad_token: str = "<|▁pad▁|>",
        add_special_token: bool = False,
        sft_format: str = "deepseek",
        mask_prompt: bool = True,
        ignore_id: int = -100,
        **kwargs,
    ):

        self.candidate_resolutions = candidate_resolutions
        self.image_size = candidate_resolutions[0][0]
        self.patch_size = patch_size
        self.image_mean = image_mean
        self.image_std = image_std
        self.normalize = normalize
        self.downsample_ratio = downsample_ratio

        self.image_transform = ImageTransform(mean=image_mean, std=image_std, normalize=normalize)
        self.tokenizer = tokenizer
        self.tokenizer.padding_side = 'left'  # must set this; the padding side makes a difference in batch inference

        # add the pad_token as special token to use 'tokenizer.pad_token' and 'tokenizer.pad_token_id'
        if tokenizer.pad_token is None:
            self.tokenizer.add_special_tokens({'pad_token': pad_token})
            print(f"Add pad token = ['{pad_token}'] to the tokenizer\n"
                  f"{pad_token}:{tokenizer.encode(pad_token, add_special_tokens=False)[0]}")

        # add image token
        image_token_id = self.tokenizer.vocab.get(image_token)
        if image_token_id is None:
            special_tokens = [image_token]
            special_tokens_dict = {"additional_special_tokens": special_tokens}
            self.tokenizer.add_special_tokens(special_tokens_dict)
        self.image_token_id = self.tokenizer.vocab.get(image_token)
        print(f"Add image token = ['{image_token}'] to the tokenizer\n"
              f"{image_token}:{tokenizer.encode(image_token, add_special_tokens=False)[0]}")

        # add five special tokens for grounding-related tasks
        # <|ref|>, <|/ref|>, <|det|>, <|/det|>, <|grounding|>
        special_tokens = ['<|ref|>', '<|/ref|>', '<|det|>', '<|/det|>', '<|grounding|>']
        special_tokens_dict = {"additional_special_tokens": special_tokens}
        self.tokenizer.add_special_tokens(special_tokens_dict)
        print(f"Add grounding-related tokens = {special_tokens} to the tokenizer with input_ids\n"
              f"<|ref|>:{tokenizer.encode('<|ref|>', add_special_tokens=False)[0]}\n"
              f"<|/ref|>:{tokenizer.encode('<|/ref|>', add_special_tokens=False)[0]}\n"
              f"<|det|>:{tokenizer.encode('<|det|>', add_special_tokens=False)[0]}\n"
              f"<|/det|>:{tokenizer.encode('<|/det|>', add_special_tokens=False)[0]}\n"
              f"<|grounding|>:{tokenizer.encode('<|grounding|>', add_special_tokens=False)[0]}")

        # add special tokens for SFT data
        special_tokens = ["<|User|>", "<|Assistant|>"]
        special_tokens_dict = {"additional_special_tokens": special_tokens}
        self.tokenizer.add_special_tokens(special_tokens_dict)
        print(f"Add chat tokens = {special_tokens} to the tokenizer with input_ids\n"
              f"<|User|>:{tokenizer.encode('<|User|>', add_special_tokens=False)[0]}\n"
              f"<|Assistant|>:{tokenizer.encode('<|Assistant|>', add_special_tokens=False)[0]}\n")

        self.image_token = image_token
        self.pad_token = pad_token
        self.add_special_token = add_special_token
        self.sft_format = sft_format
        self.mask_prompt = mask_prompt
        self.ignore_id = ignore_id

        super().__init__(
            tokenizer,
            **kwargs,
        )

    def new_chat_template(self):
        conv = get_conv_template(self.sft_format)
        return conv

    def format_messages(
        self,
        conversations: List[Dict[str, str]],
        sft_format: str = "deepseek",
        system_prompt: str = "",
    ):
        """
        Applies the SFT template to conversation.

        Args:
            conversations (List[Dict]): A List of messages.
            sft_format (str, optional): The format of the SFT template to use. Defaults to "deepseek".
            system_prompt (str, optional): The system prompt to use in the SFT template. Defaults to "".

        Returns:
            sft_prompt (str): The formatted text.
        """

        conv = get_conv_template(sft_format)
        conv.set_system_message(system_prompt)
        for message in conversations:
            conv.append_message(message["role"], message["content"].strip())
        sft_prompt = conv.get_prompt().strip()

        return sft_prompt

    def format_messages_v2(self, messages, pil_images, systems=None):
        """play the role of format_messages_v2 and get_images_info in the last version"""
        tokenized_data = []
        masked_tokenized_data = []  # labels
        images_list = []
        images_seq_mask = []
        images_spatial_crop = []
        num_image_tokens = []

        image_index = 0

        conv = get_conv_template(self.sft_format)
        conv_system_message = conv.system_message

        for idx, message in enumerate(messages):
            if idx == 0:
                tokenized_data += [self.bos_id]
                masked_tokenized_data += [self.bos_id]
                images_seq_mask += [False]
                conv.system_message = conv_system_message
            else:
                conv.system_message = ''

            if message['role'] == conv.roles[0] or message['role'] == "user":
                conv.reset_message()
                conv.append_message(conv.roles[0], str(message['content']).strip())
                conv.append_message(conv.roles[1], '')
                formatted_question = conv.get_prompt()
                tokenized_str, images, seq_mask, spatial_crop, n_image_tokens = self.tokenize_with_images(
                    formatted_question,
                    pil_images[image_index: image_index + formatted_question.count(self.image_token)],
                    bos=False,
                    eos=False,
                    cropping=len(pil_images) <= 2
                )
                image_index += formatted_question.count(self.image_token)

                tokenized_data += tokenized_str
                if self.mask_prompt:
                    masked_tokenized_data += [self.ignore_id] * len(tokenized_str)
                else:
                    masked_tokenized_data += tokenized_str
                images_list += images
                images_seq_mask += seq_mask
                images_spatial_crop += spatial_crop
                num_image_tokens += n_image_tokens

            elif message['role'] == conv.roles[1] or message['role'] == "assistant":
                formatted_answer = message['content'].strip()
                assert formatted_answer.count(
                    self.image_token) == 0, f"there should be no {self.image_token} in the assistant's reply, but got {messages}"
                tokenized_str, images, seq_mask, spatial_crop, n_image_tokens = self.tokenize_with_images(
                    formatted_answer,
                    [],
                    bos=False,
                    eos=True,
                    cropping=len(pil_images) <= 2)

                tokenized_data += tokenized_str
                masked_tokenized_data += tokenized_str
                images_seq_mask += seq_mask

            elif message['role'] == 'system' or message['role'] == 'deepseekapi-sys':
                # if the messages contain a system message, it is only allowed as the first message,
                # and it overrides the template's own system message
                assert idx == 0, 'system information should only exist in the beginning of the conversation'
                formatted_system = message['content'].strip()
                tokenized_str = self.encode(formatted_system, bos=False, eos=False)
                tokenized_data += tokenized_str
                if self.mask_prompt:
                    masked_tokenized_data += [self.ignore_id] * len(tokenized_str)
                else:
                    masked_tokenized_data += tokenized_str
                seq_mask = [False] * len(tokenized_str)
                images_seq_mask += seq_mask

            else:
                assert False, f"Unknown role: {message['role']}"

        assert len(tokenized_data) == len(
            images_seq_mask), f"format_messages_v2: tokenized_str's length {len(tokenized_str)} is not equal to images_seq_mask's length {len(images_seq_mask)}"
        assert len(images_spatial_crop) == len(num_image_tokens), f"image number should be compatible"

        return tokenized_data, masked_tokenized_data, images_list, images_seq_mask, images_spatial_crop, num_image_tokens

    def format_prompts(
        self,
        prompts: str,
        sft_format: str = "deepseek",
        system_prompt: str = "",
    ):
        """
        Applies the SFT template to prompts.

        Args:
            prompts (str): the non-sft formatted prompt;
            sft_format (str, optional): The format of the SFT template to use. Defaults to "deepseek".
            system_prompt (str, optional): The system prompt to use in the SFT template. Defaults to "".

        Returns:
            sft_prompt (str): The formatted text.
        """

        conv = get_conv_template(sft_format)
        conv.set_system_message(system_prompt)
        conv.append_message(conv.roles[0], prompts.strip())
        conv.append_message(conv.roles[1], "")

        sft_prompt = conv.get_prompt().strip()

        return sft_prompt

    @property
    def bos_id(self):
        return self.tokenizer.bos_token_id

    @property
    def eos_id(self):
        return self.tokenizer.eos_token_id

    @property
    def pad_id(self):
        return self.tokenizer.pad_token_id

    def encode(self, text: str, bos: bool = True, eos: bool = False):
        t = self.tokenizer.encode(text, add_special_tokens=False)

        if bos:
            t = [self.bos_id] + t
        if eos:
            t = t + [self.eos_id]

        return t

    def decode(self, t: List[int], **kwargs) -> str:
        return self.tokenizer.decode(t, **kwargs)

    def process_one(
        self,
        prompt: str = None,
        conversations: List[Dict[str, str]] = None,
        images: List[Image.Image] = None,
        apply_sft_format: bool = False,
        inference_mode: bool = True,
        system_prompt: str = "",
        **kwargs,
    ):
        """

        Args:
            prompt (str): the formatted prompt;
            conversations (List[Dict]): conversations with a list of messages;
            images (List[ImageType]): the list of images;
            apply_sft_format (bool): if prompt is not None, then apply the SFT format to prompt;
                if conversations is not None, then it will always apply the SFT format to conversations;
            inference_mode (bool): if True, then remove the last eos token;
            system_prompt (str): the system prompt;
            **kwargs:

        Returns:
            outputs (BaseProcessorOutput): the output of the processor,
                - input_ids (torch.LongTensor): [N + image tokens]
                - target_ids (torch.LongTensor): [N + image tokens]
                - images (torch.FloatTensor): [n_images, 3, H, W]
                - image_id (int): the id of the image token
                - num_image_tokens (List[int]): the number of image tokens
        """

        assert (
            prompt is None or conversations is None
        ), "prompt and conversations cannot be used at the same time."

        if prompt is None:
            # apply sft format
            sft_format = self.format_messages(
                conversations=conversations,
                sft_format=self.sft_format,
                system_prompt=system_prompt,
            )
            tokenized_str, masked_tokenized_str, images_list, images_seq_mask, images_spatial_crop, num_image_tokens = self.format_messages_v2(
                conversations, images)
        else:
            if apply_sft_format:
                sft_format = self.format_prompts(
                    prompts=prompt,
                    sft_format=self.sft_format,
                    system_prompt=system_prompt
                )
            else:
                sft_format = prompt
            tokenized_str, images_list, images_seq_mask, images_spatial_crop, num_image_tokens = self.tokenize_with_images(
                sft_format, images, bos=True, eos=True, cropping=len(images) <= 2)
            masked_tokenized_str = []
            for token_index in tokenized_str:
                if token_index != self.image_token_id:
                    masked_tokenized_str.append(token_index)
                else:
                    masked_tokenized_str.append(self.ignore_id)

        assert len(tokenized_str) == len(images_seq_mask) == len(masked_tokenized_str), \
            (f"tokenized_str's length {len(tokenized_str)}, input_ids' length {len(masked_tokenized_str)}, "
             f"images_seq_mask's length {len(images_seq_mask)}, are not equal")

        input_ids = torch.LongTensor(tokenized_str)
        target_ids = torch.LongTensor(masked_tokenized_str)
        images_seq_mask = torch.tensor(images_seq_mask, dtype=torch.bool)

        # set input_ids < 0 | input_ids == self.image_token_id as ignore_id
        target_ids[(input_ids < 0) | (input_ids == self.image_token_id)] = self.ignore_id
        input_ids[input_ids < 0] = self.pad_id

        if inference_mode:
            # strip the trailing eos token
            assert input_ids[-1] == self.eos_id
            input_ids = input_ids[:-1]
            target_ids = target_ids[:-1]
            images_seq_mask = images_seq_mask[:-1]

        if len(images_list) == 0:
            images = torch.zeros((1, 3, self.image_size, self.image_size))
            images_spatial_crop = torch.zeros((1, 2), dtype=torch.long)
        else:
            images = torch.stack(images_list, dim=0)
            images_spatial_crop = torch.tensor(images_spatial_crop, dtype=torch.long)

        prepare = VLChatProcessorOutput(
            sft_format=sft_format,
            input_ids=input_ids,
            target_ids=target_ids,
            images=images,
            images_seq_mask=images_seq_mask,
            images_spatial_crop=images_spatial_crop,
            num_image_tokens=num_image_tokens
        )

        return prepare

    def __call__(
        self,
        *,
        prompt: str = None,
        conversations: List[Dict[str, str]] = None,
        images: List[Image.Image] = None,
        apply_sft_format: bool = False,
        force_batchify: bool = True,
        inference_mode: bool = True,
        system_prompt: str = "",
        **kwargs,
    ):
        """

        Args:
            prompt (str): the formatted prompt;
            conversations (List[Dict]): conversations with a list of messages;
            images (List[ImageType]): the list of images;
            apply_sft_format (bool): if prompt is not None, then apply the SFT format to prompt;
                if conversations is not None, then it will always apply the SFT format to conversations;
            force_batchify (bool): force batchify the inputs;
            inference_mode (bool): if True, then remove the last eos token;
            system_prompt (str): the system prompt;
            **kwargs:

        Returns:
            outputs (BaseProcessorOutput): the output of the processor,
                - input_ids (torch.LongTensor): [N + image tokens]
                - images (torch.FloatTensor): [n_images, 3, H, W]
                - image_id (int): the id of the image token
                - num_image_tokens (List[int]): the number of image tokens
        """

        prepare = self.process_one(
            prompt=prompt,
            conversations=conversations,
            images=images,
            apply_sft_format=apply_sft_format,
            inference_mode=inference_mode,
            system_prompt=system_prompt
        )

        if force_batchify:
            prepare = self.batchify([prepare])

        return prepare

    def tokenize_with_images(
        self,
        conversation: str,
        images: List[Image.Image],
        bos: bool = True,
        eos: bool = True,
        cropping: bool = True,
    ):
        """Tokenize text with <image> tags."""
        assert conversation.count(self.image_token) == len(images)
        text_splits = conversation.split(self.image_token)
        images_list, images_seq_mask, images_spatial_crop = [], [], []
        num_image_tokens = []
        tokenized_str = []
        for text_sep, image in zip(text_splits, images):
            """encode text_sep"""
            tokenized_sep = self.encode(text_sep, bos=False, eos=False)
            tokenized_str += tokenized_sep
            images_seq_mask += [False] * len(tokenized_sep)

            """select best resolution for anyres"""
            if cropping:
                best_width, best_height = select_best_resolution(image.size, self.candidate_resolutions)
            else:
                best_width, best_height = self.image_size, self.image_size
            # print(image.size, (best_width, best_height))  # check the select_best_resolution func

            """process the global view"""
            global_view = ImageOps.pad(image, (self.image_size, self.image_size),
                                       color=tuple(int(x * 255) for x in self.image_transform.mean))
            images_list.append(self.image_transform(global_view))

            """process the local views"""
            local_view = ImageOps.pad(image, (best_width, best_height),
                                      color=tuple(int(x * 255) for x in self.image_transform.mean))
            for i in range(0, best_height, self.image_size):
                for j in range(0, best_width, self.image_size):
                    images_list.append(
                        self.image_transform(local_view.crop((j, i, j + self.image_size, i + self.image_size))))

            """record height / width crop num"""
            num_width_tiles, num_height_tiles = best_width // self.image_size, best_height // self.image_size
            images_spatial_crop.append([num_width_tiles, num_height_tiles])

            """add image tokens"""
            h = w = math.ceil((self.image_size // self.patch_size) / self.downsample_ratio)
            # global views tokens h * (w + 1), 1 is for the line separator
            tokenized_image = [self.image_token_id] * h * (w + 1)
            # add a separator between global and local views
            tokenized_image += [self.image_token_id]
            # local views tokens, (num_height_tiles * h) * (num_width_tiles * w + 1)
            tokenized_image += [self.image_token_id] * (num_height_tiles * h) * (num_width_tiles * w + 1)

            tokenized_str += tokenized_image
            images_seq_mask += [True] * len(tokenized_image)
            num_image_tokens.append(len(tokenized_image))
            # print(width_crop_num, height_crop_num, len(tokenized_image))  # test the correctness of the number of image-related tokens

        """process the last text split"""
        tokenized_sep = self.encode(text_splits[-1], bos=False, eos=False)
        tokenized_str += tokenized_sep
        images_seq_mask += [False] * len(tokenized_sep)

        """add the bos and eos tokens"""
        if bos:
            tokenized_str = [self.bos_id] + tokenized_str
            images_seq_mask = [False] + images_seq_mask
        if eos:
            tokenized_str = tokenized_str + [self.eos_id]
            images_seq_mask = images_seq_mask + [False]

        assert len(tokenized_str) == len(
            images_seq_mask), f"tokenize_with_images func: tokenized_str's length {len(tokenized_str)} is not equal to images_seq_mask's length {len(images_seq_mask)}"

        return tokenized_str, images_list, images_seq_mask, images_spatial_crop, num_image_tokens

    def batchify(
        self,
        sample_list: List[VLChatProcessorOutput],
        padding: Literal["left", "right"] = "left"
    ) -> BatchCollateOutput:
        """
        Preprocesses the inputs for multimodal inference.

        Args:
            sample_list (List[VLChatProcessorOutput]): A list of VLChatProcessorOutput.
            padding (str): The padding method. Defaults to "left".

        Returns:
            BatchCollateOutput: A dictionary of the inputs to use for multimodal inference.
        """

        batched_sft_format = [sample.sft_format for sample in sample_list]
        batched_input_ids = [sample.input_ids for sample in sample_list]
        batched_labels = [sample.target_ids for sample in sample_list]
        batched_images_seq_mask = [sample["images_seq_mask"] for sample in sample_list]
        seq_lens = [len(sample) for sample in sample_list]

        """padding input_ids and images_seq_mask"""
        if padding == "left":
            # the tokenizer defaults to padding at the left
            ## TODO, You're using a LlamaTokenizerFast tokenizer.
            # Please note that with a fast tokenizer, using the `__call__` method is faster than
            # using a method to encode the text followed by a call to the `pad` method to get a padded encoding.
            padded_input_ids = self.tokenizer.pad({"input_ids": batched_input_ids})
            batched_input_ids, batched_attention_mask = padded_input_ids["input_ids"], padded_input_ids[
                "attention_mask"].bool()
            batched_labels = self.tokenizer.pad({"input_ids": batched_labels})["input_ids"]
            batched_labels[batched_labels == self.pad_id] = self.ignore_id  # labels normally never contain pad_id, so no extra guard is needed
            batched_images_seq_mask = self.tokenizer.pad({"input_ids": batched_images_seq_mask})["input_ids"]
            batched_images_seq_mask[batched_images_seq_mask == self.pad_id] = False
        else:
            batched_input_ids = pad_sequence(batched_input_ids, batch_first=True, padding_value=self.pad_id)
            batched_labels = pad_sequence(batched_labels, batch_first=True, padding_value=self.ignore_id)
            batched_images_seq_mask = pad_sequence(batched_images_seq_mask, batch_first=True, padding_value=0)
            batched_attention_mask = batched_input_ids != self.pad_id

        """padding images to max_patch_num"""
        max_n_patches = max(sample["images"].shape[0] for sample in sample_list)
        batched_images = []
        for sample in sample_list:
            images = sample["images"]
            n_pads = max_n_patches - images.shape[0]
            if n_pads > 0:
                pad_images = torch.zeros((n_pads, *images.shape[1:]), dtype=images.dtype)
                images = torch.cat([images, pad_images], dim=0)
            batched_images.append(images)
        batched_images = torch.stack(batched_images, dim=0)

        """padding images_spatial_crop to max_n_images"""
        max_n_images = max(sample["images_spatial_crop"].shape[0] for sample in sample_list)
        batched_images_spatial_crop = []
        for sample in sample_list:
            images_spatial_crop = sample["images_spatial_crop"]
            n_pads = max_n_images - sample["images_spatial_crop"].shape[0]
            if n_pads > 0:
                pad_images_spatial_crop = torch.full((n_pads, 2), 0, dtype=images_spatial_crop.dtype)
                images_spatial_crop = torch.cat([images_spatial_crop, pad_images_spatial_crop], dim=0)
            batched_images_spatial_crop.append(images_spatial_crop)
        batched_images_spatial_crop = torch.stack(batched_images_spatial_crop, dim=0)

        batched_samples = BatchCollateOutput(
            input_ids=batched_input_ids,
            attention_mask=batched_attention_mask,
            labels=batched_labels,
            images=batched_images,
            images_seq_mask=batched_images_seq_mask,
            images_spatial_crop=batched_images_spatial_crop,
            sft_format=batched_sft_format,
            seq_lens=seq_lens
        )

        return batched_samples
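A minimal end-to-end usage sketch for the processor above, assuming it is loaded via the inherited `ProcessorMixin.from_pretrained`; the checkpoint id and image path are illustrative assumptions and not part of this upload:

```python
# Sketch: tokenize one <image> conversation with DeepseekVLV2Processor.
from PIL import Image
from deepseek_vl2.models import DeepseekVLV2Processor

# Hypothetical checkpoint id; any repo that ships this processor's config would do.
processor = DeepseekVLV2Processor.from_pretrained("deepseek-ai/deepseek-vl2-tiny")

conversation = [
    {"role": "<|User|>", "content": "<image>\nDescribe this image."},
    {"role": "<|Assistant|>", "content": ""},
]
images = [Image.open("multi_image_2.jpeg").convert("RGB")]  # placeholder image path

# Returns a BatchCollateOutput (force_batchify=True by default) with left-padded
# input_ids, attention_mask, images, images_seq_mask and images_spatial_crop.
batch = processor(conversations=conversation, images=images, inference_mode=True)
print(batch.input_ids.shape, batch.images.shape)
```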
DeepSeek-VL2/deepseek_vl2/serve/__init__.py ADDED
File without changes
DeepSeek-VL2/images/multi_image_2.jpeg ADDED
EAGLE/README.md ADDED
@@ -0,0 +1,355 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <p align="center" width="100%">
2
+ <img src="assets/Logo.png" width="80%" height="80%">
3
+ </p>
4
+ <!-- # EAGLE: Exploring The Design Space for Multimodal LLMs with Mixture of Encoders -->
5
+
6
+ ---
7
+
8
+ [![Code License](https://img.shields.io/badge/Code%20License-Apache_2.0-green.svg)](https://github.com/tatsu-lab/stanford_alpaca/blob/main/LICENSE)
9
+ [![Model License](https://img.shields.io/badge/MODEL%20License-CC%20By%20NC%204.0-red.svg)](MODEL_LICENSE)
10
+
11
+ [[arXiv](https://arxiv.org/pdf/2408.15998)] [[HuggingFace](https://huggingface.co/papers/2408.15998)] [[Demo](https://huggingface.co/spaces/NVEagle/Eagle-X5-13B-Chat)]
12
+ [[Model Zoo](https://huggingface.co/NVEagle)] [[Data](https://huggingface.co/datasets/shi-labs/Eagle-1.8M)]
13
+
14
+
15
+ ## Introduction
16
+
17
+ Eagle is a family of Vision-Centric High-Resolution Multimodal LLMs. It presents a thorough exploration to strengthen multimodal LLM perception with a mixture of vision encoders and different input resolutions. The model contains a channel-concatenation-based "CLIP+X" fusion for vision experts with different architectures (ViT/ConvNets) and knowledge (detection/segmentation/OCR/SSL). The resulting family of Eagle models support up to over 1K input resolution and obtain strong results on multimodal LLM benchmarks, especially resolution-sensitive tasks such as optical character recognition and document understanding.
18
+
19
+ <div align="center">
20
+ <img src="assets/fig-teaser.jpg" width="90%">
21
+ </div>
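To make the channel-concatenation fusion described above concrete, here is a hedged PyTorch sketch. The encoder channel widths, the shared grid size, and the projector dimension below are illustrative assumptions, not Eagle's actual configuration:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class ChannelConcatFusion(nn.Module):
    """Fuse features from multiple vision experts by channel concatenation.

    Each expert yields a (B, C_i, H_i, W_i) feature map; maps are resampled to a
    shared spatial grid, concatenated along channels, and projected to the LLM width.
    """
    def __init__(self, expert_channels, llm_dim, grid_size=24):
        super().__init__()
        self.grid_size = grid_size
        self.proj = nn.Linear(sum(expert_channels), llm_dim)

    def forward(self, expert_features):
        aligned = [
            F.interpolate(f, size=(self.grid_size, self.grid_size),
                          mode="bilinear", align_corners=False)
            for f in expert_features
        ]
        fused = torch.cat(aligned, dim=1)          # (B, sum(C_i), H, W)
        tokens = fused.flatten(2).transpose(1, 2)  # (B, H*W, sum(C_i))
        return self.proj(tokens)                   # (B, H*W, llm_dim)

# Two hypothetical experts, e.g. a CLIP-like ViT and a ConvNet detector backbone.
fusion = ChannelConcatFusion(expert_channels=[1024, 768], llm_dim=4096)
feats = [torch.randn(1, 1024, 24, 24), torch.randn(1, 768, 48, 48)]
print(fusion(feats).shape)  # torch.Size([1, 576, 4096])
```

The key design point is that experts with different native resolutions are first resampled to a common spatial grid so their features can be concatenated channel-wise and projected into the LLM embedding space.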
22
+
23
+
24
+ ## Updates
25
+ - [TODO] Vision encoder model weights with pre-alignment.
26
+ - [2024/09] 🔥 Release models trained on the [Cambrian-1](https://huggingface.co/collections/nyu-visionx/cambrian-data-6667ce801e179b4fbe774e11) data.
27
+ - [2024/09] Provide an [example](#evaluation) for evaluation.
28
+ - [2024/08] Release the Eagle-X5-13B-Chat online [demo](https://huggingface.co/spaces/NVEagle/Eagle-X5-13B-Chat).
29
+ - [2024/08] Release the [Eagle-SFT-1.8M](https://huggingface.co/datasets/shi-labs/Eagle-1.8m) data.
30
+ - [2024/08] 🔥 Release models trained on the [LLaVA-1.5 Pre-train](https://huggingface.co/datasets/liuhaotian/LLaVA-Pretrain) and [Eagle-SFT-1.8M](https://huggingface.co/datasets/shi-labs/Eagle-1.8m) data.
31
+ - [2024/08] Release training and inference code of Eagle.
32
+ - [2024/06] 🏆 Winning the 2nd Place in CVPR24 Challenge on Driving with Language [Website](https://opendrivelab.com/challenge2024/#driving_with_language).
33
+ - [2024/05] Serving as the 2D VLM pre-training for [OmniDrive](https://github.com/NVlabs/OmniDrive).
34
+
35
+
36
+ ## Contents
37
+ - [Models & Performance](#models--performance)
38
+ - [Visual Examples](#visual-examples)
39
+ - [Install](#install)
40
+ - [Training Data](#training-data)
41
+ - [Checkpoint Preparation](#checkpoint-preparation)
42
+ - [Training](#training)
43
+ - [Inference](#inference)
44
+ - [Evaluation](#evaluation)
45
+ - [Gradio Demo](#gradio-demo)
46
+
47
+
48
+ ## Models & Performance
49
+ Models trained on the [LLaVA-1.5 Pre-train](https://huggingface.co/datasets/liuhaotian/LLaVA-Pretrain) and [Eagle-SFT-1.8M](https://huggingface.co/datasets/shi-labs/Eagle-1.8m) data are available to download here.
50
+ | Model&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; | LLM&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; | Pretrain&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; | SFT | GQA | MME | MMMU(Val) | OCR | SQA(I) | POPE | TextVQA | InfoVQA | VizWiz | SEED(I) | VQAv2 | MathVista | MMBench | ChartQA | DocVQA |
51
+ |------|------|------|------|:----:|:----:|:----:|:----:|:----:|:----:|:----:|:----:|:----:|:----:|:----:|:----:|:----:|:----:|:----:|
52
+ | [Eagle-X4-7B](https://huggingface.co/NVEagle/Eagle-X4-7B) | Vicuna-7B | LLaVA-v1.5 | 1.8M | 64.8 | 1561 | 34.9 | 540 | 70.5 | 88.4 | 70.9 | 47.4 | 50.8 | 73.4 | 83.4 | 37.3 | 67.8 | 67.5 | 78.8 |
53
+ | [Eagle-X5-7B](https://huggingface.co/NVEagle/Eagle-X5-7B) | Vicuna-7B | LLaVA-v1.5 | 1.8M | 64.9 | 1528 | 36.3 | 529 | 69.8 | 88.8 | 71.2 | 47.4 | 54.4 | 73.9 | 83.4 | 37.0 | 68.4 | 67.8 | 78.6 |
54
+ | [Eagle-X4-13B](https://huggingface.co/NVEagle/Eagle-X4-13B) | Vicuna-13B | LLaVA-v1.5 | 1.8M | 66.3 | 1627 | 36.9 | 561 | 73.1 | 87.7 | 73.9 | 50.7 | 56.2 | 74.4 | 83.8 | 37.6 | 69.9 | 70.5 | 79.9 |
55
+ | [Eagle-X5-13B](https://huggingface.co/NVEagle/Eagle-X5-13B) | Vicuna-13B | LLaVA-v1.5 | 1.8M | 66.2 | 1609 | 36.6 | 574 | 72.8 | 87.8 | 74.2 | 51.8 | 59.3 | 74.1 | 83.8 | 38.8 | 69.2 | 69.9 | 79.4 |
56
+
57
+ Models trained on the [Cambrian-1](https://huggingface.co/collections/nyu-visionx/cambrian-data-6667ce801e179b4fbe774e11) data are available to download here.
58
+ | | | Knowledge | | | | | General | | | | | Document | | | | | Vision | | |
59
+ |------|------|:----:|:----:|:----:|:----:|:----:|:----:|:----:|:----:|:----:|:----:|:----:|:----:|:----:|:----:|:----:|:----:|:----:|:----:|
60
+ | LLM&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; | Model&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; | Avg | SQA(I) | MMMU(Val) | MathVista | AI2D | Avg | MME | MMB | SEED(I) | GQA | Avg | ChartQA | OCR | TextVQA | DocVQA | Avg | MMVP | RWQA |
61
+ | Llama 3-8B | Mini-Gemini-HD | 55.7 | 75.1 | 37.3 | 37.0 | 73.5 | 72.7 | 1606 | 72.7 | 73.2 | 64.5 | 62.9 | 59.1 | 47.7 | 70.2 | 74.6 | 40.4 | 18.7 | 62.1 |
62
+ | | LLaVA-NeXT | 55.6 | 72.8 | 41.7 | 36.3 | 71.6 | 72.5 | 1604 | 72.1 | 72.7 | 65.2 | 63.9 | 69.5 | 49.0 | 64.6 | 72.6 | 49.4 | 38.7 | 60.1 |
63
+ | | Cambrian-1 | 61.3 | 80.4 | 42.7 | 49.0 | 73.0 | 73.1 | 1547 | 75.9 | 74.7 | 64.6 | 71.3 | 73.3 | 62.4 | 71.7 | 77.8 | 57.6 | 51.3 | 64.2 |
64
+ | | [Eagle-X4-8B-Plus](https://huggingface.co/NVEagle/Eagle-X4-8B-Plus) | 64.2 | 84.3 | 43.4 | 52.7 | 76.1 | 73.8 | 1559 | 75.9 | 76.3 | 64.9 | 76.6 | 80.1 | 62.6 | 77.1 | 86.6 | 69.1 | 71.6 | 66.5 |
65
+ | Vicuna-13B | Mini-Gemini-HD | 54.1 | 71.9 | 37.3 | 37.0 | 70.1 | 70.7 | 1597 | 68.6 | 70.6 | 63.7 | 60.8 | 56.6 | 46.6 | 70.2 | 69.8 | 38.4 | 19.3 | 57.5 |
66
+ | | LLaVA-NeXT | 53.7 | 73.5 | 36.2 | 35.1 | 70.0 | 69.9 | 1575 | 70.0 | 65.6 | 65.4 | 62.9 | 62.2 | 51.4 | 67.1 | 70.9 | 47.6 | 36.0 | 59.1 |
67
+ | | Cambrian-1 | 60.2 | 79.3 | 40.0 | 48.0 | 73.6 | 73.7 | 1610 | 75.7 | 74.4 | 64.3 | 71.3 | 73.8 | 61.9 | 72.8 | 76.8 | 52.2 | 41.3 | 63.0 |
68
+ | | [Eagle-X4-13B-Plus](https://huggingface.co/NVEagle/Eagle-X4-13B-Plus) | 63.0 | 82.0 | 41.0 | 54.4 | 74.0 | 74.6 | 1651 | 75.7 | 74.8 | 65.3 | 75.1 | 77.6 | 61.9 | 75.5 | 85.4 | 61.4 | 58.0 | 64.8 |
69
+ | Yi-34B | Mini-Gemini-HD | 62.4 | 77.7 | 48.0 | 43.4 | 80.5 | 76.2 | 1659 | 80.6 | 75.3 | 65.8 | 68.1 | 67.6 | 51.8 | 74.1 | 78.9 | 52.3 | 37.3 | 67.2 |
70
+ | | LLaVA-NeXT | 62.5 | 81.8 | 46.7 | 46.5 | 74.9 | 76.0 | 1633 | 79.3 | 75.9 | 67.1 | 67.7 | 68.7 | 54.5 | 69.5 | 78.1 | 54.2 | 47.3 | 61.0 |
71
+ | | Cambrian-1 | 67.0 | 85.6 | 49.7 | 53.2 | 79.7 | 76.8 | 1689 | 81.4 | 75.3 | 65.8 | 71.9 | 75.6 | 60.0 | 76.7 | 75.5 | 60.3 | 52.7 | 67.8 |
72
+ | | [Eagle-X5-34B-Plus](https://huggingface.co/NVEagle/Eagle-X5-34B-Plus) | 68.6 | 85.5 | 51.8 | 57.9 | 79.1 | 76.3 | 1677 | 81.0 | 75.6 | 64.9 | 75.4 | 77.2 | 62.4 | 78.8 | 83.0 | 68.3 | 67.0 | 69.5 |
73
+
74
+
75
+ ## Visual Examples
76
+
77
+ ### Knowledge & General VQA
78
+
79
+ <div align="center">
80
+ <img src="assets/visual/VQA1.png" width="80%">
81
+ </div><br>
82
+
83
+ <div align="center">
84
+ <img src="assets/visual/VQA2.png" width="80%">
85
+ </div><br>
86
+
87
+ <div align="center">
88
+ <img src="assets/visual/VQA3.png" width="80%">
89
+ </div>
90
+
91
+ ### Autonomous Driving
92
+
93
+ <div align="center">
94
+ <img src="assets/visual/AV1.png" width="90%">
95
+ </div><br>
96
+
97
+ <div align="center">
98
+ <img src="assets/visual/AV2.png" width="90%">
99
+ </div>
100
+
101
+ ### Infographic, Chart, OCR & Document Understanding
102
+
103
+ <div align="center">
104
+ <img src="assets/visual/Doc1.png" width="80%">
105
+ </div><br>
106
+
107
+ <div align="center">
108
+ <img src="assets/visual/Doc2.png" width="80%">
109
+ </div><br>
110
+
111
+ <div align="center">
112
+ <img src="assets/visual/Doc3.png" width="80%">
113
+ </div>
114
+
115
+
116
+ ## Install
117
+ Please follow the guide below to prepare the environment on **Linux OS**.
118
+ <!-- currently does not support windows and MacOS -->
119
+
120
+ 1. Clone this repository
121
+ ```bash
122
+ git clone https://github.com/NVlabs/EAGLE.git
123
+ cd EAGLE
124
+ ```
125
+
126
+ 2. Create environment and install package
127
+ ```Shell
128
+ conda create -n eagle python=3.10 -y
129
+ conda activate eagle
130
+ pip install --upgrade pip # enable PEP 660 support
131
+ pip install -r requirements.txt
132
+ pip install .
133
+ ```
134
+
135
+ 3. Install additional packages for training cases
136
+ ```
137
+ pip install flash-attn --no-build-isolation
138
+ ```
139
+ If you have any questions about the environment setup, please follow the instruction [video](https://www.youtube.com/watch?si=20yjQlthlKPTC87s&v=0-md0S9GDJA&feature=youtu.be).
140
+
141
+
142
+ ## Training Data
143
+
144
+ ### Pre-training
145
+ We use the same pretraining data as LLaVA v1.5, please download the data from [here](https://huggingface.co/datasets/liuhaotian/LLaVA-CC3M-Pretrain-595K).
146
+
147
+ ### Supervised Fine-tuning
148
+ We have compiled all the data and images used in our supervised fine-tuning together. Please download the data from [here](https://huggingface.co/datasets/shi-labs/Eagle-1.8M).
149
+ After cloning this dataset, please run the following commands to extract all the images:
150
+ ```
151
+ cd Eagle-1.8M
152
+ cat images.tar.part_* > images.tar.gz
153
+ tar -xvf images.tar.gz
154
+ ```
155
+
156
+ Please note that while the images have been packaged for convenience, the original dataset licenses remain unchanged. By downloading our data, you agree to the licensing terms of each source dataset. A detailed list of the data sources used in our fine-tuning data mixture is provided below:
157
+
158
+ | Version | Dataset Name | Sample Number | Note |
159
+ |-----------------|-----------------|---------------|--------------------------------------|
160
+ | | LLaVA v1.5 | 665k | Multi-modal conversation |
161
+ | | DocVQA | 39k | Document understanding |
162
+ | | synDog-EN | 50k | OCR |
163
+ | | ChartQA | 28k | Chart understanding |
164
+ | | DVQA | 25k | Chart understanding |
165
+ | | AI2D | 15k | Diagram Understanding |
166
+ | | ShareGPT-4V | 100k | Detailed caption generated by GPT-4V |
167
+ | | laion-gpt4v * | 11k | Detailed caption generated by GPT-4V |
168
+ | | LVIS-Instruct4V | 220k | Multi-modal conversation |
169
+ | | LRV-Instruct | 150k | Multi-modal conversation |
170
+ | | Geo170k | 120k | Math |
171
+ | | LLaVAR | 20k | OCR |
172
+ | | Visual7W | 70k | Visual Question Answering |
173
+ | | Open-Hermes 2.5 | 300k | Text |
174
+ | Initial Version | Total | 1.8M | |
175
+
176
+ \* We have done manual inspection to ensure that the dataset does not contain any CSAM content.
177
+
178
+ To pretrain or fine-tune our model on the [Cambrian-1](https://huggingface.co/collections/nyu-visionx/cambrian-data-6667ce801e179b4fbe774e11) dataset, please prepare the data according to their instructions. Then, convert the `jsonl` files into the `json` file by running the following python code:
179
+ ```python
180
+ import json
181
+
182
+ source_file = "Cambrian7M_withsystemprompt.jsonl"
183
+ dst_file = "Cambrian7M_withsystemprompt.json"
184
+
185
+ annotations = []
186
+ with open(source_file, "r") as f:
187
+ for line in f:
188
+ annotations.append(json.loads(line))
189
+
190
+ with open(dst_file, "w") as f:
191
+ json.dump(annotations, f)
192
+ ```
193
+
194
+
195
+
196
+ ## Checkpoint Preparation
197
+ Please prepare the pretrained weights for the EVA-02 vision tower (pretrained on a detection task). You can download the checkpoint [here](https://huggingface.co/Yuxin-CV/EVA-02/blob/main/eva02/det/eva02_L_coco_det_sys_o365.pth) and place it in the `checkpoints/pretrained_models/` directory.
198
+
199
+ The weights of the other models, including Vicuna, the Segment Anything Model, Pix2Struct, ConvNeXt, and CLIP, will be downloaded automatically from Hugging Face during the first run.
200
+
201
+
202
+ ## Training
203
+
204
+ The training process for Eagle follows a standard two-stage approach: pretraining and supervised fine-tuning. In the first stage, only the projector's weights are updated. In the second stage, all parameters are fine-tuned. The batch sizes for the pretraining and fine-tuning stages are 256 and 128, respectively. All settings and hyperparameters are identical to those in LLaVA-v1.5, except that we unfreeze the vision tower's parameters during the second stage.
205
+
206
+ By default we use 32 NVIDIA A100 80GB GPUs for training. Please adjust `per_device_train_batch_size` and `gradient_accumulation_steps` if you are using a different number of GPUs.
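As a concrete illustration of how these two flags interact: the effective global batch size equals `per_device_train_batch_size × gradient_accumulation_steps × num_gpus`. With 32 GPUs, the pretraining global batch of 256 could correspond to `per_device_train_batch_size=8` and `gradient_accumulation_steps=1`; on 8 GPUs you would keep the product at 256, e.g. `per_device_train_batch_size=8` and `gradient_accumulation_steps=4`. The per-device values here are assumptions for illustration; consult the provided scripts for the exact defaults.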
207
+
208
+ ### Pre-training
209
+ If you are using a slurm cluster, please use the following command to submit a job.
210
+
211
+ ```
212
+ srun \
213
+ --partition $your_partition \
214
+ --gres "gpu:8" \
215
+ --ntasks-per-node 1 \
216
+ -N 4 \
217
+ --job-name $RUN_NAME \
218
+ "bash $CMD $RUN_NAME"
219
+ ```
220
+
221
+ You can specify the `RUN_NAME` and `CMD` variables to run different models according to the following table:
222
+ | Model | Language Model | Script |
223
+ |----------|----------------|-------------------------------------------|
224
+ | Eagle-X4 | Vicuna-7B | `scripts/pretrain-eagle-x4-vicuna-7b.sh` |
225
+ | Eagle-X4 | Vicuna-13B | `scripts/pretrain-eagle-x4-vicuna-13b.sh` |
226
+ | Eagle-X5 | Vicuna-7B | `scripts/pretrain-eagle-x5-vicuna-7b.sh` |
227
+ | Eagle-X5 | Vicuna-13B | `scripts/pretrain-eagle-x5-vicuna-13b.sh` |
228
+
229
+ Remember to set `$PATH_TO_PRETRAINING_DATA` in each script to the path of the downloaded pretraining data. After you have completed pretraining, you will find a file named `mm_projector.bin` in the checkpoint folder.
230
+
231
+
232
+ ### Supervised Fine-tuning
233
+ After pretraining is complete, the projector weights file `mm_projector.bin` will be saved in the checkpoint directory. Please set `$PATH_TO_PRETRAINED_PROJECTOR` to the path of these projector weights.
234
+
235
+ You can use the same submission command as in pretraining, and use the scripts in the following table to launch supervised fine-tuning.
236
+ | Model | Language Model | Script |
237
+ |----------|----------------|------------------------------------------------|
238
+ | Eagle-X4 | Vicuna-7B | `scripts/finetune-eagle-x4-vicuna-7b-1.8m.sh` |
239
+ | Eagle-X4 | Vicuna-13B | `scripts/finetune-eagle-x4-vicuna-13b-1.8m.sh` |
240
+ | Eagle-X5 | Vicuna-7B | `scripts/finetune-eagle-x5-vicuna-7b-1.8m.sh` |
241
+ | Eagle-X5 | Vicuna-13B | `scripts/finetune-eagle-x5-vicuna-13b-1.8m.sh` |
242
+
243
+ Before submitting the job, make sure `$PATH_TO_SFT_DATA` and `$PATH_TO_PRETRAINED_PROJECTOR` are set correctly in each script.
244
+
245
+ ### Notes
246
+ If you have limited GPU resources or memory, please consider the following:
247
+
248
+ - use `scripts/zero3.json` or `scripts/zero3_offload.json` as the Deepspeed training config instead of the default `zero2.json`
249
+ - use gradient accumulation and reduce the per-device batch size
250
+
251
+
252
+ ## Inference
253
+ Our inference code is [here](https://github.com/NVlabs/EAGLE/tree/main/predict_demo.py). You can set your own `image_path` [here](https://github.com/NVlabs/EAGLE/tree/main/predict_demo.py/#L38) and `question` [here](https://github.com/NVlabs/EAGLE/tree/main/predict_demo.py/#L39).
254
+ ```python
255
+ import os
256
+ import torch
257
+ import numpy as np
258
+ from eagle import conversation as conversation_lib
259
+ from eagle.constants import DEFAULT_IMAGE_TOKEN
260
+ from eagle.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
261
+ from eagle.conversation import conv_templates, SeparatorStyle
262
+ from eagle.model.builder import load_pretrained_model
263
+ from eagle.utils import disable_torch_init
264
+ from eagle.mm_utils import tokenizer_image_token, get_model_name_from_path, process_images, KeywordsStoppingCriteria
265
+ from PIL import Image
266
+ import argparse
267
+ from transformers import TextIteratorStreamer
268
+ from threading import Thread
269
+
270
+ model_path = "NVEagle/Eagle-X5-13B-Chat"
271
+ conv_mode = "vicuna_v1"
272
+ image_path = "assets/georgia-tech.jpeg"
273
+ input_prompt = "Describe this image."
274
+
275
+ model_name = get_model_name_from_path(model_path)
276
+ tokenizer, model, image_processor, context_len = load_pretrained_model(model_path,None,model_name,False,False)
277
+ if model.config.mm_use_im_start_end:
278
+ input_prompt = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + input_prompt
279
+ else:
280
+ input_prompt = DEFAULT_IMAGE_TOKEN + '\n' + input_prompt
281
+
282
+ conv = conv_templates[conv_mode].copy()
283
+ conv.append_message(conv.roles[0], input_prompt)
284
+ conv.append_message(conv.roles[1], None)
285
+ prompt = conv.get_prompt()
286
+
287
+ image = Image.open(image_path).convert('RGB')
288
+ image_tensor = process_images([image], image_processor, model.config)[0]
289
+ input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt')
290
+
291
+ input_ids = input_ids.to(device='cuda', non_blocking=True)
292
+ image_tensor = image_tensor.to(dtype=torch.float16, device='cuda', non_blocking=True)
293
+
294
+ with torch.inference_mode():
295
+ output_ids = model.generate(
296
+ input_ids.unsqueeze(0),
297
+ images=image_tensor.unsqueeze(0),
298
+ image_sizes=[image.size],
299
+ do_sample=True,
300
+ temperature=0.2,
301
+ top_p=0.5,
302
+ num_beams=1,
303
+ max_new_tokens=256,
304
+ use_cache=True)
305
+
306
+ outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip()
307
+ print(f"Image:{image_path} \nPrompt:{input_prompt} \nOutput:{outputs}")
308
+ ```
309
+
310
+
311
+ ## Evaluation
312
+
313
+ ### Evaluation with LMMs-Eval
314
+ We evaluate MME, MMBench, SEED, MathVista, POPE, ScienceQA, GQA, OCRBench, TextVQA, and ChartQA using [LMMs-Eval](https://github.com/EvolvingLMMs-Lab/lmms-eval). For better reproducibility, we have included the specific version we used in this repository. Please follow their guidelines and use the following commands to perform the evaluation:
315
+
316
+ ```bash
317
+ bash scripts/eval_lmms_eval/eval-mme-seed-mmmu-pope-sqa-gqa-ocrbench-textvqa-chartqa.sh $REPO_ID_OR_LOCAL_PATH $MODEL_NAME $CONV_MODE
318
+ # MODEL_NAME can be any name, just to distinguish different runs.
319
+ # CONV_MODE should be the name of the conversation template used during training, e.g., "vicuna_v1" for Vicuna, "llama3" for Llama 3, and "yi_34b_chatml_direct" for Yi-34B.
320
+ ```
321
+
322
+
323
+ ## Gradio Demo
324
+ We set up an online demo [here](https://huggingface.co/spaces/NVEagle/Eagle-X5-13B-Chat). You can also run this demo on your own machine by running:
325
+ ```bash
326
+ python gradio_demo.py \
327
+ --model-path ${MODEL_CKPT} \
328
+ --conv-mode vicuna_v1
329
+ ```
330
+
331
+
332
+ ## Citation
333
+ If you find this project useful, please cite our work:
334
+ ```
335
+ @article{shi2024eagle,
336
+ title = {Eagle: Exploring The Design Space for Multimodal LLMs with Mixture of Encoders},
337
+ author={Min Shi and Fuxiao Liu and Shihao Wang and Shijia Liao and Subhashree Radhakrishnan and De-An Huang and Hongxu Yin and Karan Sapra and Yaser Yacoob and Humphrey Shi and Bryan Catanzaro and Andrew Tao and Jan Kautz and Zhiding Yu and Guilin Liu},
338
+ journal={arXiv:2408.15998},
339
+ year={2024}
340
+ }
341
+ ```
342
+
343
+
344
+ ## License
345
+ [![Code License](https://img.shields.io/badge/Code%20License-Apache_2.0-green.svg)](https://github.com/tatsu-lab/stanford_alpaca/blob/main/LICENSE)
346
+ **Usage and License Notices**: This project utilizes certain datasets and checkpoints that are subject to their respective original licenses. Users must comply with all terms and conditions of these original licenses, including but not limited to the [OpenAI Terms of Use](https://openai.com/policies/terms-of-use) for the dataset and the specific licenses for base language models for checkpoints trained using the dataset (e.g. [Llama community license](https://ai.meta.com/llama/license/) for Llama-2, Llama-3, and Vicuna-v1.5). This project does not impose any additional constraints beyond those stipulated in the original licenses. Furthermore, users are reminded to ensure that their use of the dataset and checkpoints is in compliance with all applicable laws and regulations.
347
+
348
+
349
+ ## Acknowledgement
350
+
351
+ - [LLaVA](https://github.com/haotian-liu/LLaVA): the codebase we built upon. Thanks for the great pioneer open-source project!
352
+ - [LMMs-Eval](https://github.com/EvolvingLMMs-Lab/lmms-eval): many thanks to the LMMs-Lab for their wonderful and easy-to-use evaluation tools!
353
+ - [LLaVA-HR](https://github.com/luogen1996/LLaVA-HR): we borrow some code on flexible input CLIP encoder from LLaVA-HR!
354
+ - [Cambrian-1](https://cambrian-mllm.github.io): thanks Cambrian project contributors for their efforts in organizing open-source data for us!
355
+ - Thanks to the [VILA](https://github.com/NVlabs/VILA) team and the [RADIO](https://github.com/NVlabs/RADIO) team for their helps and discussions. Check out these awesome works from NVIDIA!
EAGLE/evaluate_lmms_eval.py ADDED
@@ -0,0 +1,345 @@
1
+ import os
2
+ import yaml
3
+ import sys
4
+ import copy
5
+ import json
6
+ import logging
7
+ import traceback
8
+ import argparse
9
+ import torch
10
+ import numpy as np
11
+ import datetime
12
+
13
+ import warnings
14
+ import traceback
15
+
16
+ warnings.simplefilter("ignore", category=DeprecationWarning)
17
+
18
+ from accelerate import Accelerator
19
+ from accelerate.utils import InitProcessGroupKwargs
20
+ from pathlib import Path
21
+ from typing import Union
22
+ import hashlib
23
+
24
+ from lmms_eval import evaluator, utils
25
+ from lmms_eval.tasks import initialize_tasks, include_path, get_task_dict
26
+ from lmms_eval.api.registry import ALL_TASKS
27
+ from lmms_eval.logging_utils import WandbLogger
28
+ from lmms_eval.utils import PathFormatter
29
+
30
+
31
+ eval_logger = logging.getLogger("lmms-eval")
32
+
33
+
34
+ def _handle_non_serializable(o):
35
+ if isinstance(o, np.int64) or isinstance(o, np.int32):
36
+ return int(o)
37
+ elif isinstance(o, set):
38
+ return list(o)
39
+ else:
40
+ return str(o)
41
+
42
+
43
+ def parse_eval_args() -> argparse.Namespace:
44
+ parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
45
+ parser.add_argument("--config", default="", help="Path to a yaml file specifying all eval arguments, will ignore cli arguments if specified")
46
+ parser.add_argument("--model", default="hf", help="Name of model e.g. `hf`")
47
+ parser.add_argument(
48
+ "--tasks",
49
+ default=None,
50
+ help="To get full list of tasks, use the command lmms-eval --tasks list",
51
+ )
52
+ parser.add_argument(
53
+ "--model_args",
54
+ default="",
55
+ help="String arguments for model, e.g. `pretrained=EleutherAI/pythia-160m,dtype=float32`",
56
+ )
57
+ parser.add_argument(
58
+ "--num_fewshot",
59
+ type=int,
60
+ default=None,
61
+ help="Number of examples in few-shot context",
62
+ )
63
+ parser.add_argument("--batch_size", type=str, default=1)
64
+ parser.add_argument(
65
+ "--device",
66
+ type=str,
67
+ default=None,
68
+ help="Device to use (e.g. cuda, cuda:0, cpu)",
69
+ )
70
+ parser.add_argument(
71
+ "--output_path",
72
+ default=None,
73
+ type=str,
74
+ metavar="= [dir/file.jsonl] [DIR]",
75
+ help="The path to the output file where the result metrics will be saved. If the path is a directory and log_samples is true, the results will be saved in the directory. Else the parent directory will be used.",
76
+ )
77
+ parser.add_argument(
78
+ "--limit",
79
+ type=float,
80
+ default=None,
81
+ help="Limit the number of examples per task. " "If <1, limit is a percentage of the total number of examples.",
82
+ )
83
+ parser.add_argument(
84
+ "--check_integrity",
85
+ action="store_true",
86
+ help="Whether to run the relevant part of the test suite for the tasks",
87
+ )
88
+ parser.add_argument(
89
+ "--show_task_to_terminal",
90
+ action="store_true",
91
+ default=False,
92
+ help="Prints the prompt for the first few documents",
93
+ )
94
+ parser.add_argument(
95
+ "--log_samples",
96
+ action="store_true",
97
+ default=False,
98
+ help="If True, write out all model outputs and documents for per-sample measurement and post-hoc analysis",
99
+ )
100
+ parser.add_argument(
101
+ "--wandb_log_samples",
102
+ action="store_true",
103
+ default=False,
104
+ help="If True, write out all model outputs and documents for per-sample measurement and post-hoc analysis to Weights and Biases",
105
+ )
106
+ parser.add_argument(
107
+ "--log_samples_suffix",
108
+ type=str,
109
+ default="",
110
+ help="Specify a suffix for the log_samples file name.",
111
+ )
112
+ parser.add_argument(
113
+ "--show_config",
114
+ action="store_true",
115
+ default=False,
116
+ help="If True, shows the full config of all tasks at the end of the evaluation.",
117
+ )
118
+ parser.add_argument(
119
+ "--include_path",
120
+ type=str,
121
+ default=None,
122
+ help="Additional path to include if there are external tasks to include.",
123
+ )
124
+ parser.add_argument(
125
+ "--gen_kwargs",
126
+ default="",
127
+ help=("String arguments for model generation on greedy_until tasks," " e.g. `temperature=0,top_k=0,top_p=0`"),
128
+ )
129
+ parser.add_argument(
130
+ "--verbosity",
131
+ type=str,
132
+ default="INFO",
133
+ help="Log error when tasks are not registered.",
134
+ )
135
+ parser.add_argument(
136
+ "--wandb_args",
137
+ default="",
138
+ help="Comma separated string arguments passed to wandb.init, e.g. `project=lmms-eval,job_type=eval`",
139
+ )
140
+ parser.add_argument(
141
+ "--timezone",
142
+ default="Asia/Singapore",
143
+ help="Timezone for datetime string, e.g. Asia/Singapore, America/New_York, America/Los_Angeles",
144
+ )
145
+ args = parser.parse_args()
146
+ return args
147
+
148
+
149
+ def cli_evaluate(args: Union[argparse.Namespace, None] = None) -> None:
150
+ if not args:
151
+ args = parse_eval_args()
152
+
153
+ # Check if no arguments were passed after parsing
154
+ if len(sys.argv) == 1:
155
+ print("┌───────────────────────────────────────────────────────────────────────────────┐")
156
+ print("│ Please provide arguments to evaluate the model. e.g. │")
157
+ print("│ `lmms-eval --model llava --model_path liuhaotian/llava-v1.6-7b --tasks okvqa` │")
158
+ print("│ Use `lmms-eval --help` for more information. │")
159
+ print("└───────────────────────────────────────────────────────────────────────────────┘")
160
+ sys.exit(1)
161
+
162
+ set_loggers(args)
163
+ eval_logger = logging.getLogger("lmms-eval")
164
+ eval_logger.setLevel(getattr(logging, f"{args.verbosity}"))
165
+ eval_logger.info(f"Verbosity set to {args.verbosity}")
166
+ os.environ["TOKENIZERS_PARALLELISM"] = "false"
167
+
168
+ args_list = []
169
+ results_list = []
170
+ if args.config:
171
+ if not os.path.exists(args.config):
172
+ raise ValueError(f"Config file does not exist: {args.config}")
173
+
174
+ with open(args.config, "r") as file:
175
+ config_args = yaml.safe_load(file)
176
+ config_args = [config_args] if type(config_args) != list else config_args
177
+ # multiple configs, create args list first
178
+ for config in config_args:
179
+ args_copy = argparse.Namespace(**vars(args))
180
+ for key, value in config.items():
181
+ setattr(args_copy, key, value)
182
+ args_list.append(args_copy)
183
+ else:
184
+ args_list.append(args)
185
+
186
+ # initialize Accelerator
187
+ kwargs_handler = InitProcessGroupKwargs(timeout=datetime.timedelta(seconds=60000))
188
+ accelerator = Accelerator(kwargs_handlers=[kwargs_handler])
189
+ if accelerator.is_main_process:
190
+ is_main_process = True
191
+ else:
192
+ is_main_process = False
193
+
194
+ for args in args_list:
195
+ try:
196
+ if is_main_process and args.wandb_args: # thoughtfully we should only init wandb once, instead of multiple ranks to avoid network traffics and unwanted behaviors.
197
+ wandb_logger = WandbLogger(args)
198
+
199
+ # TO REMOVE
200
+ print(args)
201
+ results, samples = cli_evaluate_single(args)
202
+ results_list.append(results)
203
+
204
+ accelerator.wait_for_everyone()
205
+ if is_main_process and args.wandb_args:
206
+ wandb_logger.post_init(results)
207
+ wandb_logger.log_eval_result()
208
+ if args.wandb_log_samples and samples is not None:
209
+ wandb_logger.log_eval_samples(samples)
210
+
211
+ wandb_logger.finish()
212
+
213
+ except Exception as e:
214
+ traceback.print_exc()
215
+ eval_logger.error(f"Error during evaluation: {e}")
216
+ traceback.print_exc()
217
+ results_list.append(None)
218
+
219
+ for args, results in zip(args_list, results_list):
220
+ # cli_evaluate will return none if the process is not the main process (rank 0)
221
+ if results is not None:
222
+ print_results(args, results)
223
+
224
+
225
+ def cli_evaluate_single(args: Union[argparse.Namespace, None] = None) -> None:
226
+ eval_logger = logging.getLogger("lmms-eval")
227
+ eval_logger.setLevel(getattr(logging, f"{args.verbosity}"))
228
+ eval_logger.info(f"Verbosity set to {args.verbosity}")
229
+ os.environ["TOKENIZERS_PARALLELISM"] = "false"
230
+
231
+ initialize_tasks(args.verbosity)
232
+
233
+ if args.limit:
234
+ eval_logger.warning(" --limit SHOULD ONLY BE USED FOR TESTING." "REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT.")
235
+ if args.include_path is not None:
236
+ eval_logger.info(f"Including path: {args.include_path}")
237
+ include_path(args.include_path)
238
+
239
+ if args.tasks is None:
240
+ task_names = ALL_TASKS
241
+ elif args.tasks == "list":
242
+ eval_logger.info("Available Tasks:\n - {}".format(f"\n - ".join(sorted(ALL_TASKS))))
243
+ sys.exit()
244
+ elif args.tasks == "list_with_num":
245
+ log_message = (
246
+ "\n" + "=" * 70 + "\n" + "\n\tYou are trying to check all the numbers in each task." + "\n\tThis action will download the complete dataset." + "\n\tIf the results are not clear initially, call this again." + "\n\n" + "=" * 70
247
+ )
248
+ eval_logger.info(log_message)
249
+ task_dict = get_task_dict([task for task in sorted(ALL_TASKS)], model_name="llava")
250
+ for task_name in task_dict.keys():
251
+ task_obj = task_dict[task_name]
252
+ if type(task_obj) == tuple:
253
+ group, task_obj = task_obj
254
+ if task_obj is None:
255
+ continue
256
+ eval_logger.info(f"\nTask : {task_obj.config.task}\n - #num : {len(task_obj.test_docs()) if task_obj.has_test_docs() else len(task_obj.validation_docs())}")
257
+ sys.exit()
258
+ else:
259
+ tasks_list = args.tasks.split(",")
260
+ eval_logger.info(f"Evaluating on {len(tasks_list)} tasks.")
261
+ task_names = utils.pattern_match(tasks_list, ALL_TASKS)
262
+ task_missing = [task for task in tasks_list if task not in task_names and "*" not in task] # we don't want errors if a wildcard ("*") task name was used
263
+
264
+ if task_missing:
265
+ missing = ", ".join(task_missing)
266
+ eval_logger.error(
267
+ f"Tasks were not found: {missing}. Try `lmms-eval --tasks list` for list of available tasks",
268
+ )
269
+ # eval_logger.warn(f"Tasks {missing} were not found. Try `lmms-eval --tasks list` for list of available tasks.")
270
+
271
+ eval_logger.info(f"Selected Tasks: {task_names}")
272
+
273
+ # set datetime before evaluation
274
+ datetime_str = utils.get_datetime_str(timezone=args.timezone)
275
+ if args.output_path:
276
+ hash_input = f"{args.model_args}".encode("utf-8")
277
+ hash_output = hashlib.sha256(hash_input).hexdigest()[:6]
278
+ path = Path(args.output_path)
279
+ path = path.expanduser().resolve().joinpath(f"{datetime_str}_{args.log_samples_suffix}_{args.model}_model_args_{hash_output}")
280
+ args.output_path = path
281
+
282
+ elif args.log_samples and not args.output_path:
283
+ assert args.output_path, "Specify --output_path"
284
+
285
+ results = evaluator.simple_evaluate(
286
+ model=args.model,
287
+ model_args=args.model_args,
288
+ tasks=task_names,
289
+ num_fewshot=args.num_fewshot,
290
+ batch_size=args.batch_size,
291
+ device=args.device,
292
+ limit=args.limit,
293
+ check_integrity=args.check_integrity,
294
+ show_task_to_terminal=args.show_task_to_terminal,
295
+ log_samples=args.log_samples,
296
+ gen_kwargs=args.gen_kwargs,
297
+ cli_args=args,
298
+ )
299
+
300
+ if results is not None:
301
+ if args.log_samples:
302
+ samples = results.pop("samples")
303
+ else:
304
+ samples = None
305
+ dumped = json.dumps(results, indent=4, default=_handle_non_serializable)
306
+ if args.show_config:
307
+ print(dumped)
308
+
309
+ if args.output_path:
310
+ args.output_path.mkdir(parents=True, exist_ok=True)
311
+ result_file_path = path.joinpath("results.json")
312
+ if result_file_path.exists():
313
+ eval_logger.warning(f"Output file {result_file_path} already exists and will be overwritten.")
314
+
315
+ result_file_path.open("w").write(dumped)
316
+ if args.log_samples:
317
+ for task_name, config in results["configs"].items():
318
+ filename = args.output_path.joinpath(f"{task_name}.json")
319
+ # Structure the data with 'args' and 'logs' keys
320
+ data_to_dump = {"args": vars(args), "model_configs": config, "logs": sorted(samples[task_name], key=lambda x: x["doc_id"])} # Convert Namespace to dict
321
+ samples_dumped = json.dumps(data_to_dump, indent=4, default=_handle_non_serializable)
322
+ filename.open("w").write(samples_dumped)
323
+ eval_logger.info(f"Saved samples to {filename}")
324
+
325
+ return results, samples
326
+ return None, None
327
+
328
+
329
+ def print_results(args, results):
330
+ print(f"{args.model} ({args.model_args}),\ngen_kwargs: ({args.gen_kwargs}),\nlimit: {args.limit},\nnum_fewshot: {args.num_fewshot},\nbatch_size: {args.batch_size}")
331
+ print(evaluator.make_table(results))
332
+ if "groups" in results:
333
+ print(evaluator.make_table(results, "groups"))
334
+
335
+
336
+ def set_loggers(args):
337
+ eval_logger = logging.getLogger("lmms-eval")
338
+ ch = logging.StreamHandler()
339
+ formatter = PathFormatter("%(asctime)s [%(pathname)s:%(lineno)d] %(levelname)s %(message)s", "%m-%d %H:%M:%S", timezone=args.timezone)
340
+ ch.setFormatter(formatter)
341
+ eval_logger.addHandler(ch)
342
+
343
+
344
+ if __name__ == "__main__":
345
+ cli_evaluate()
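The `--config` option above accepts a YAML file containing either a single mapping or a list of mappings, each of which overrides the CLI defaults for one evaluation run. A small illustrative sketch of generating such a file from Python follows; the model and task values are assumptions for illustration, not defaults shipped with the script:

```python
import yaml

# Two evaluation runs; each mapping overrides the argparse defaults of evaluate_lmms_eval.py.
configs = [
    {"model": "llava", "model_args": "pretrained=liuhaotian/llava-v1.5-7b", "tasks": "gqa", "batch_size": 1},
    {"model": "llava", "model_args": "pretrained=liuhaotian/llava-v1.5-13b", "tasks": "textvqa", "batch_size": 1},
]

with open("eval_config.yaml", "w") as f:
    yaml.safe_dump(configs, f, sort_keys=False)

# The script would then be launched as:
#   python evaluate_lmms_eval.py --config eval_config.yaml
```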
Emu/README.md ADDED
@@ -0,0 +1,55 @@
1
+ <div align="center">
2
+
3
+ <h2>Emu: Generative Multimodal Models from BAAI</h2>
4
+
5
+
6
+ </div>
7
+
8
+ ---
9
+
10
+ <div align='center'>
11
+ <img src="./Emu2/assets/short_teaser.png" class="interpolation-image" alt="comparison_fewshot." height="80%" width="80%" />
12
+ </div>
13
+
14
+
15
+
16
+ - [**Emu1**](Emu1) (ICLR 2024, 2023/07) - Generative Pretraining in Multimodality
17
+
18
+ - [**Emu2**](Emu2) (CVPR 2024, 2023/12) - Generative Multimodal Models are In-Context Learners
19
+
20
+ - [**Emu3**](https://github.com/baaivision/Emu3) (arXiv 2024, 2024/09) - Next-Token Prediction is All You Need 🔥🔥🔥
21
+
22
+ ## News
23
+ - 2024.9 We introduce **Emu3**, a new suite of state-of-the-art multimodal models trained solely with next-token prediction. 🔥🔥🔥
24
+ - 2024.2 **Emu1 and Emu2 are accepted by ICLR 2024 and CVPR 2024 respectively! 🎉**
25
+ - 2023.12 Inference code, model and demo of Emu2 are available. Enjoy the [demo](http://218.91.113.230:9002/).
26
+ - 2023.12 We have released Emu2, the largest open generative multimodal models to date, which achieve new state-of-the-art results on multimodal understanding and generation tasks.
27
+ - 2023.7 Inference code and model of Emu are available.
28
+ - 2023.7 We have released Emu, a multimodal generalist that can seamlessly generate images and texts in multimodal context.
29
+
30
+
31
+ ## Highlights
32
+ - State-of-the-art performance
33
+ - Next-generation capabilities
34
+ - A base model for diverse tasks
35
+
36
+ We hope to foster the growth of our community through open-sourcing and promoting collaboration👬. Let's step towards multimodal intelligence together🍻.
37
+
38
+
39
+ ## Contact
40
+ - **We are hiring** at all levels at BAAI Vision Team, including full-time researchers, engineers and interns.
41
+ If you are interested in working with us on **foundation model, visual perception and multimodal learning**, please contact [Xinlong Wang](https://www.xloong.wang/) (`[email protected]`).
42
+
43
+
44
+ ## Misc
45
+
46
+ <div align="center">
47
+
48
+ [![Stargazers repo roster for @baaivision/Emu](https://bytecrank.com/nastyox/reporoster/php/stargazersSVG.php?user=baaivision&repo=Emu)](https://github.com/baaivision/Emu/stargazers)
49
+
50
+
51
+ [![Forkers repo roster for @baaivision/Emu](https://bytecrank.com/nastyox/reporoster/php/forkersSVG.php?user=baaivision&repo=Emu)](https://github.com/baaivision/Emu/network/members)
52
+
53
+ [![Star History Chart](https://api.star-history.com/svg?repos=baaivision/Emu&type=Date)](https://star-history.com/#baaivision/Emu&Date)
54
+
55
+ </div>
LLM2CLIP/CODE_OF_CONDUCT.md ADDED
@@ -0,0 +1,9 @@
1
+ # Microsoft Open Source Code of Conduct
2
+
3
+ This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
4
+
5
+ Resources:
6
+
7
+ - [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/)
8
+ - [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/)
9
+ - Contact [[email protected]](mailto:[email protected]) with questions or concerns
LLaVA/.gitattributes ADDED
@@ -0,0 +1,29 @@
1
+ # https://git-scm.com/docs/gitattributes
2
+
3
+ # Set the default behavior, in case people don't have core.autocrlf set.
4
+ # https://git-scm.com/docs/gitattributes#_end_of_line_conversion
5
+ * text=auto
6
+
7
+ # common python attributes, taken from https://github.com/alexkaratarakis/gitattributes/blob/710900479a2bedeec7003d381719521ffbb18bf8/Python.gitattributes
8
+ # Source files
9
+ # ============
10
+ *.pxd text diff=python
11
+ *.py text diff=python
12
+ *.py3 text diff=python
13
+ *.pyw text diff=python
14
+ *.pyx text diff=python
15
+ *.pyz text diff=python
16
+ *.pyi text diff=python
17
+
18
+ # Binary files
19
+ # ============
20
+ *.db binary
21
+ *.p binary
22
+ *.pkl binary
23
+ *.pickle binary
24
+ *.pyc binary export-ignore
25
+ *.pyo binary export-ignore
26
+ *.pyd binary
27
+
28
+ # Jupyter notebook
29
+ *.ipynb text eol=lf
LLaVA/.gitignore ADDED
@@ -0,0 +1,35 @@
1
+ # Python
2
+ __pycache__
3
+ *.pyc
4
+ *.egg-info
5
+ dist
6
+
7
+ # Log
8
+ *.log
9
+ *.log.*
10
+ *.json
11
+ *.jsonl
12
+
13
+ # Data
14
+ !**/alpaca-data-conversation.json
15
+
16
+ # Editor
17
+ .idea
18
+ *.swp
19
+
20
+ # Other
21
+ .DS_Store
22
+ wandb
23
+ output
24
+
25
+ checkpoints
26
+ ckpts*
27
+
28
+ .ipynb_checkpoints
29
+ *.ipynb
30
+
31
+ # DevContainer
32
+ !.devcontainer/*
33
+
34
+ # Demo
35
+ serve_images/
PaddleMIX/.gitmodules ADDED
@@ -0,0 +1,3 @@
1
+ [submodule "PaddleNLP"]
2
+ path = PaddleNLP
3
+ url = https://github.com/PaddlePaddle/PaddleNLP
PaddleMIX/CITATION.cff ADDED
@@ -0,0 +1,15 @@
1
+ # This CITATION.cff file was generated with cffinit.
2
+ # Visit https://bit.ly/cffinit to generate yours today!
3
+
4
+ cff-version: 1.2.0
5
+ title: Paddle Multimodal Integration and eXploration
6
+ message: >-
7
+ If you use this repository, please cite it using the metadata from this file.
8
+ type: software
9
+ authors:
10
+ - given-names: PaddleMIX Authors
11
+ repository-code: 'https://github.com/PaddlePaddle/PaddleMIX'
12
+ repository: 'https://github.com/PaddlePaddle/PaddleMIX'
13
+ keywords:
14
+ - paddlemix
15
+ license: Apache-2.0
PaddleMIX/README.md ADDED
@@ -0,0 +1,413 @@
1
+ 简体中文 | [English](README_EN.md)
2
+
3
+ <p align="center">
4
+ <img src="https://github.com/PaddlePaddle/PaddleMIX/assets/22989727/2cd19298-1c52-4d73-a0f7-dcdab6a8ec90" align="middle" width = "600" />
5
+ </p>
6
+
7
+ <p align="center">
8
+ <a href="https://github.com/PaddlePaddle/PaddleMix/releases"><img src="https://img.shields.io/github/v/release/PaddlePaddle/PaddleMix?color=ffa"></a>
9
+ <a href="./LICENSE"><img src="https://img.shields.io/badge/license-Apache%202-dfd.svg"></a>
10
+ <a href=""><img src="https://img.shields.io/badge/python-3.7+-aff.svg"></a>
11
+ <a href=""><img src="https://img.shields.io/badge/os-linux%2C%20win%2C%20mac-pink.svg"></a>
12
+ <a href="#📌社区交流"><img src="https://img.shields.io/badge/微信-小助手加群-green?logo=wechat&amp"></a>
13
+ <a href="https://github.com/PaddlePaddle/PaddleMIX/stargazers"><img src="https://img.shields.io/github/stars/PaddlePaddle/PaddleMIX?color=ccf"></a>
14
+
15
+ </p>
16
+ </div>
17
+
18
+ ## 💌目录
19
+ - [💌目录](#目录)
20
+ - [📰新闻](#新闻)
21
+ - [📣最新进展](#最新进展)
22
+ - [🌈简介](#简介)
23
+ - [✨主要特性](#主要特性)
24
+ - [📱丰富的多模态功能](#丰富的多模态功能)
25
+ - [🧩简洁的开发体验](#简洁的开发体验)
26
+ - [💡高性能分布式训推能力](#高性能分布式训推能力)
27
+ - [🔧特色功能与工具](#特色功能与工具)
28
+ - [🔍安装](#安装)
29
+ - [🔥教程](#教程)
30
+ - [🤔FAQ](#faq)
31
+ - [📱模型库](#模型库)
32
+ - [📝许可证书](#许可证书)
33
+ - [📌社区交流](#社区交流)
34
+
35
+
36
+ ## 📰新闻
37
+ **🔥2024.11.21日 - 2024.12.22日 PaddleMIX开发项目挑战(已结束)**
38
+
39
+ - ✨「体验官招募」PaddleMIX开发项目挑战
40
+ 点击链接报名🔗:https://aistudio.baidu.com/activitydetail/1503019366
41
+ 🏆投稿至飞桨星河社区项目大厅,加精获得PaddleMIX体验官认证证书及京东卡激励
42
+ 欢迎大家投稿~
43
+ <details>
44
+ <summary>点击展开活动海报</summary>
45
+ <p align="center">
46
+ <img src='https://github.com/user-attachments/assets/27e0bbe3-0ff8-49ef-bd39-81a31a2b288b' width="25%">
47
+ </p>
48
+ </details>
49
+
50
+ ## 📣最新进展
51
+
52
+ <!-- 📚《飞桨多模态大模型开发套件PaddleMIX 2.1 震撼发布》,图文音视频场景全覆盖,多模态高效助力产业创新。超大规模训练支持,覆盖图文预训练、文生图、跨模态视觉任务,覆盖金融、教育、电商、医疗等产业场景。8月8日(周四)20:00 带你直播了解多模态大模型最新架构,深度解析PaddleMIX高性能模型库,手把手演示LLaVA模型训推全流程。[报名链接](https://www.wjx.top/vm/wKqysjx.aspx?udsid=449688) -->
53
+
54
+
55
+ **🎉 2024.12.17 支持[GOT-OCR2_0](./paddlemix/examples/GOT_OCR_2_0)推理和训练**
56
+
57
+ **🎉 2024.12.17 支持[InternVL2_5(1B、2B、4B、8B)](./paddlemix/examples/internvl2)推理**
58
+
59
+ **🎉 2024.11.27 支持[Janus/JanusFlow](./paddlemix/examples/janus)推理**
60
+
61
+ **🎉 2024.11.21 支持[MiniCPM-V-2_6](./paddlemix/examples/minicpm-v-2_6)推理**
62
+
63
+ **🎉 2024.11.8 支持[DenseConnector](./paddlemix/examples/llava_denseconnector)和[Aquila-VL-2B-llava-qwen](./paddlemix/examples/llava_onevision/)推理**
64
+
65
+ **🎉 2024.11.1 支持[LLaVA-OneVision](./paddlemix/examples/llava_onevision/)和[LLaVA-Critic](./paddlemix/examples/llava_critic/)推理**
66
+
67
+ **🎉 2024.10.31 喜迎外部开发者的[创作教程页面](paddlemix_applications.md)更新**
68
+
69
+ * 🌟 自9月6日发起大模型套件精品项目征集活动以来,我们收到了30个优质开发者项目,其中25个精品项目已通过平台评估并成功加精。
70
+
71
+ * 🙏 衷心感谢各位开发者基于套件的精彩创作!🚀 诚挚邀请您也来分享您的创意 - 欢迎将教程发布到公开网页或[飞桨AI Studio](https://aistudio.baidu.com/aistudio/community/multimodal?from=singlemessage)社区!
72
+
73
+ <details>
74
+ <summary>点击展开更多</summary>
75
+
76
+ **🔥2024.10.11 发布PaddleMIX v2.1**
77
+ * 支持[PaddleNLP 3.0 beta](https://github.com/PaddlePaddle/PaddleNLP/releases/tag/v3.0.0-beta0)版本,抢先体验其最新功能。
78
+ * 新增[Qwen2-VL](./paddlemix/examples/qwen2_vl/)、[InternVL2](./paddlemix/examples/internvl2/)、[Stable Diffusion 3 (SD3)](https://github.com/PaddlePaddle/PaddleMIX/blob/develop/ppdiffusers/examples/dreambooth/README_sd3.md)等前沿模型。
79
+ * 发布自研多模数据能力标签模型[PP-InsCapTagger](./paddlemix/datacopilot/example/pp_inscaptagger/);可用于数据的分析和过滤,试验案例表明在保持模型效果的条件下可减少50%的数据量,大幅提高训练效率。
80
+
81
+ * 多模态大模型InternVL2、LLaVA、SD3、SDXL适配昇腾910B,提供国产计算芯片上的训推能力。
82
+
83
+
84
+ **2024.07.25 发布PaddleMIX v2.0**
85
+ * 多模态理解:新增LLaVA系列,Qwen-VL等;新增Auto模块统一SFT训练流程;新增mixtoken训练策略,SFT吞吐量提升5.6倍。
86
+ * 多模态生成:发布[PPDiffusers 0.24.1](./ppdiffusers/README.md)版本,支持视频生成能力,文生图模型新增LCM。新增飞桨版peft,accelerate后端。提供基于飞桨开发的ComfyUI插件。
87
+ * 多模态数据处理工具箱[DataCopilot](./paddlemix/datacopilot/):支持自定义数据结构,数据转换,离线格式检查;支持基本的统计信息,数据可视化功能。
88
+
89
+ **2023.10.7 发布 PaddleMIX v1.0**
90
+ * 新增图文预训练模型分布式训练能力,BLIP-2支持千亿规模训练
91
+ * 新增跨模态应用流水线[AppFlow](./applications/README.md),一键支持自动标注,图像编辑,音生图等11种跨模态应用
92
+ * [PPDiffusers](./ppdiffusers/README.md)发布 0.19.3 版本,新增SDXL及相关任务
93
+
94
+ </details>
95
+
96
+ ---
97
+
98
+ ## 🌈简介
99
+
100
+ PaddleMIX是基于飞桨的多模态大模型开发套件,聚合图像、文本、视频等多种模态,覆盖视觉语言预训练,微调,文生图,文生视频,多模态理解等丰富的多模态任务。它提供开箱即用的开发体验,同时支持灵活定制,满足不同需求,助力探索通用人工智能。
101
+
102
+ <p align="center">
103
+ <img src="https://github.com/user-attachments/assets/764b32a4-3933-4ef8-a0b2-dd425af49ef8" align="middle" width = 100% />
104
+ </p>
105
+
106
+ PaddleMIX工具链包括数据处理、模型开发、预训练、精调和推理部署,支持主流多模态模型如 EVA-CLIP、BLIP-2、Stable Diffusion 等。通过跨模态任务流水线 AppFlow 和文生图应用 pipeline,开发者可以快速构建多模态应用。
107
+
108
+ ### 多模态理解效果示例如下:
109
+
110
+ <img src="https://github.com/user-attachments/assets/4c9a0427-57c7-4e1b-80f0-428c03119cc3"></img>
111
+
112
+
113
+ 多模态理解🤝融合了视觉👀和语言💬处理能力。包含基础感知、细粒度图像理解和复杂视觉推理🧠等功能。我们的[模型库](#模型库)调用提供了单图、多图和视频推理的功能实际应用,功能包括自然图像摘要📝、问答🤔、OCR🔍、情感识别❤️😢、专业图像分析🔬和代码解析💻。这些技术可应用于教育📚、医疗🏥、工业🏭等多个领域,实现从静态图像🖼️到动态视频🎥的全面智能分析。欢迎您的体验和探索~
114
+
115
+ ### 多模态生成效果示例如下:
116
+ <div style="display: flex; justify-content: center; gap: 5px;">
117
+ <img src="https://github.com/user-attachments/assets/f4768f08-f7a3-45e0-802c-c91554dc5dfc" style="height: 250px; object-fit: fill;">
118
+ <img src="https://github.com/user-attachments/assets/9bf4a333-af57-4ddd-a514-617dea8da435" style="height: 250px; object-fit: fill;">
119
+ </div>
120
+
121
+
122
+ 多模态生成✍️融合了文本💬与视觉👀的创造能力。涵盖了从文字生成图像🖼️到文字生成视频🎥的各类技术,包括 Stable Diffusion 3、Open-Sora等先进模型。我们在[ppdiffusers](ppdiffusers/README.md)提供了单图生成、多图合成和视频生成的实际应用,功能涉及艺术创作🎨、动画制作📽️、内容生成📝等。通过这些技术,可以在教育📚、娱乐🎮、广告📺等领域实现从静态图像到动态视频的创意生成。欢迎您的体验和探索~
123
+
124
+ ### 特色应用效果示例如下(点击标题可快速跳转在线体验):
125
+ | [**ComfyUI创作工作流**](https://aistudio.baidu.com/community/app/106043) | [**艺术风格二维码模型**](https://aistudio.baidu.com/community/app/1339) | [**Mix叠图**](https://aistudio.baidu.com/community/app/1340) |
126
+ | :--------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------: |
127
+ | <img src='https://github.com/PaddlePaddle/PaddleMIX/assets/35400185/36ba7261-1744-41a4-b1cb-c9e99f6931f2' width="300px"> | <img src='https://github.com/PaddlePaddle/Paddle/assets/22989727/ba091291-a1ee-49dc-a1af-fc501c62bfc8' width="300px"> | <img src='https://github.com/PaddlePaddle/Paddle/assets/22989727/a71be5a0-b0f3-4aa8-bc20-740ea8ae6785' width="300px"> |
128
+ | [**二次元文生图**](https://aistudio.baidu.com/community/app/2/webUI?source=appCenter) | [**AI绘画|50+Lora风格叠加**](https://aistudio.baidu.com/community/app/2848/webUI?source=appCenter) | [**ControlNet|图片局部重绘**](https://aistudio.baidu.com/community/app/1981/webUI?source=appCenter) |
129
+ | <img src='https://github.com/user-attachments/assets/a4af8f8a-08c7-4da7-8575-9dbfedaba56c' width="200px"> | <img src='https://github.com/user-attachments/assets/fa92c229-a885-46a1-b23f-a076855c93ec' width="200px"> | <img src='https://github.com/user-attachments/assets/78625876-d8ec-4c15-ae96-655c50f562ab' width="200px"> |
130
+
131
+
132
+
133
+
134
+
135
+ -----
136
+
137
+
138
+
139
+
140
+
141
+
142
+
143
+
144
+ ## ✨主要特性
145
+
146
+ ### 📱丰富的多模态功能
147
+ PaddleMIX支持大量最新主流的算法基准以及预训练模型,覆盖图文预训练,文生图,跨模态视觉任务,实现图像编辑、图像描述、数据标注等多样功能。`传送门`:[📱模型库](#模型库)
148
+
149
+ ### 🧩简洁的开发体验
150
+ PaddleMIX 提供统一的模型开发接口,支持开发者快速集成和定制模型。借助 Auto 模块,用户可以高效加载预训练模型、实现 Tokenization,并通过简化的 API 轻松完成模型的训练、微调(SFT)、推理与部署。此外,Auto 模块支持开发者自定义模型的自动化集成,确保灵活性与可扩展性,同时提升开发效率。
151
+
152
+ ### 💡高性能分布式训推能力
153
+ PaddleMIX提供高性能分布式训练与推理能力,融合✨Fused Linear✨、✨Flash Attention✨等加速算子,支持🌀BF16混合精度训练和4D混合并行策略,并通过优化推理性能,包括卷积布局、GroupNorm融合及旋转位置编码优化,显著提升大规模预训练和高效推理性能。
154
+
155
+ <img src="https://github.com/user-attachments/assets/9ab9540a-fa89-41cb-838d-95df86e33382" width = 100% />
156
+
157
+
158
+
159
+ ### 🔧特色功能与工具
160
+ 多模态数据处理工具箱DataCopilot,加速模型迭代升级。让开发者根据特定任务以低代码量实现数据的基本操作。`传送门`:[🏆特色模型|工具](#特色模型工具)
161
+
162
+
163
+ ## 🔍安装
164
+ ### 1. 克隆PaddleMIX仓库
165
+ ```
166
+ git clone https://github.com/PaddlePaddle/PaddleMIX
167
+ cd PaddleMIX
168
+ ```
169
+
170
+ ### 2. 创建虚拟环境
171
+ ```
172
+ conda create -n paddlemix python=3.10 -y
173
+ conda activate paddlemix
174
+ ```
175
+ ### 3. ‼️安装PaddlePaddle
176
+
177
+ #### 方法 1: 一键安装(GPU/CPU推荐)
178
+
179
+ - CUDA 11.x或12.3
180
+ - PaddlePaddle 3.0.0b1
181
+ ```
182
+ sh build_paddle_env.sh
183
+ ```
184
+
185
+ #### 方法 2: 手动安装
186
+ 关于PaddlePaddle安装的详细教程请查看[Installation](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/develop/install/pip/linux-pip.html)。
187
+
188
+ ### 4. ‼️安装依赖
189
+
190
+ #### 方法 1: 一键安装(推荐)
191
+
192
+ 运行以下命令来自动安装所有必要的依赖:
193
+ ```
194
+ sh build_env.sh
195
+ ```
196
+
197
+ #### 方法 2: 手动安装
198
+ ```bash
199
+ # 安装 PaddleMIX
200
+ pip install -e .
201
+ # 安装 ppdiffusers
202
+ cd ppdiffusers
203
+ pip install -e .
204
+ cd ..
205
+ ```
206
+ ### 5. ‼️验证安装
207
+ ```bash
208
+ sh check_env.sh
209
+
210
+ 环境和依赖推荐版本:
211
+ - paddlepaddle: 3.0.0b2或develop版本
212
+ - paddlenlp: 3.0.0b2
213
+ - ppdiffusers: 0.29.0
214
+ - huggingface_hub: 0.23.0
215
+ ```
216
+
217
+ ### 6. 安装自定义算子(可选)
218
+ * 部分模型需要安装自定义算子(FastLayerNorm、FusedLayerNorm),例如EVA-CLIP、DIT_LLAMA等。
219
+ * 非CUDA环境(例如昇腾环境)则跳过
220
+ ```bash
221
+ cd paddlemix/external_ops
222
+ python setup.py install
223
+ ```
224
+
225
+ ## 🔥教程
226
+
227
+ **快速开始**
228
+ - [多模态理解:新手入门体验 [示例:InternVL2模型]](paddlemix/examples/internvl2/README.md)
229
+ - [多模态生成:零基础上手指南 [示例:Stable Diffusion模型]](ppdiffusers/examples/stable_diffusion/README.md)
230
+ - [跨模态任务流水线:快速开始](applications/README.md/#快速开始)
231
+
232
+ **实操演练&范例**
233
+ - [LLaVA模型:从训练到推理的全流程实践](https://aistudio.baidu.com/projectdetail/7917712)
234
+ - [SDXL应用:打造专属奥运海报生成器](https://aistudio.baidu.com/projectdetail/8251202)
235
+ - [飞桨PaddleMIX跨模态AI应用:项目分类汇总](./paddlemix_applications.md)
236
+
237
+ **多硬件使用**
238
+ - 昇腾910B支持的模型列表和使用方式,可以参考[昇腾硬件使用](./docs/hardware_support/ascend_usage.md)
239
+
240
+
241
+ **数据准备&训练微调**
242
+ - [模型训练与微调技巧](paddlemix/tools/README.md)
243
+
244
+ **推理部署**
245
+ - [部署指南:从开发到生产环境](deploy/README.md)
246
+
247
+
248
+ ## 📱模型库
249
+ <table align="center">
250
+ <tbody>
251
+ <tr align="center" valign="center">
252
+ <td>
253
+ <b>多模态理解</b>
254
+ </td>
255
+ <td>
256
+ <b>多模态生成</b>
257
+ </td>
258
+ <td>
259
+ <b>多模态大一统</b>
260
+ </td>
261
+ </tr>
262
+ <tr valign="top">
263
+ <td>
264
+ <ul>
265
+ </ul>
266
+ <li><b>图文预训练</b></li>
267
+ <ul>
268
+ <li><a href="paddlemix/examples/clip">CLIP</a></li>
269
+ <li><a href="paddlemix/examples/evaclip">EVA-CLIP</a></li>
270
+ <li><a href="paddlemix/examples/llava">LLaVA-1.5</a></li>
271
+ <li><a href="paddlemix/examples/llava">LLaVA-1.6</a></li>
272
+ <li><a href="paddlemix/examples/llava">LLaVA-NeXT</a></li>
273
+ <li><a href="paddlemix/examples/llava_onevision">LLaVA-onevision</a></li>
274
+ <li><a href="paddlemix/examples/llava_onevision">Aquila-VL-2B-llava-qwen</a></li>
275
+ <li><a href="paddlemix/examples/llava_critic">LLaVA-Critic</a></li>
276
+ <li><a href="paddlemix/examples/llava_denseconnector">LLaVA-DenseConnector</a></li>
277
+ <li><a href="paddlemix/examples/qwen_vl">Qwen-VL</a></li>
278
+ <li><a href="paddlemix/examples/qwen2_vl">Qwen2-VL</a></li>
279
+ <li><a href="paddlemix/examples/internvl2">InternVL2</a></li>
280
+ <li><a href="paddlemix/examples/minimonkey">Mini-Monkey</a></li>
281
+ <li><a href="paddlemix/examples/coca">CoCa</a></li>
282
+ <li><a href="paddlemix/examples/blip2">BLIP-2</a></li>
283
+ <li><a href="paddlemix/examples/minigpt4">miniGPT-4</a></li>
284
+ <li><a href="paddlemix/examples/visualglm">VIsualGLM</a></li>
285
+ <li><a href="paddlemix/examples/cogvlm">CogVLM && CogAgent</a></li>
286
+ <li><a href="paddlemix/examples/internlm_xcomposer2">InternLM-XComposer2</a></li>
287
+ </ul>
288
+ </ul>
289
+ <li><b>开放世界视觉模型</b></li>
290
+ <ul>
291
+ <li><a href="paddlemix/examples/groundingdino">Grounding DINO</a></li>
292
+ <li><a href="paddlemix/examples/sam">SAM</a></li>
293
+ <li><a href="paddlemix/examples/YOLO-World">YOLO-World</a></li>
294
+ </ul>
295
+ </ul>
296
+ <li><b>更多模态预训练模型</b></li>
297
+ <ul>
298
+ <li><a href="paddlemix/examples/imagebind">ImageBind</a></li>
299
+ </ul>
300
+ </ul>
301
+ <li><b>数据分析</b></li>
302
+ <ul>
303
+ <li><a href="./paddlemix/datacopilot/example/pp_inscaptagger/">PP-InsCapTagger</a></li>
304
+ </ul>
305
+ </td>
306
+ <td>
307
+ <ul>
308
+ </ul>
309
+ <li><b>文生图</b></li>
310
+ <ul>
311
+ <li><a href="ppdiffusers/examples/stable_diffusion">Stable Diffusion</a></li>
312
+ <li><a href="ppdiffusers/ppdiffusers/pipelines/stable_diffusion_xl">SDXL</a></li>
313
+ <li><a href="ppdiffusers/examples/dreambooth/README_sd3.md">Stable Diffusion 3 (SD3)</a></li>
314
+ <li><a href="ppdiffusers/examples/controlnet">ControlNet</a></li>
315
+ <li><a href="ppdiffusers/examples/t2i-adapter">T2I-Adapter</a></li>
316
+ <li><a href="ppdiffusers/examples/text_to_image_laion400m">LDM</a></li>
317
+ <li><a href="ppdiffusers/ppdiffusers/pipelines/consistency_models">Consistency Models</a></li>
318
+ <li><a href="ppdiffusers/ppdiffusers/pipelines/deepfloyd_if">DeepFloyd IF</a></li>
319
+ <li><a href="ppdiffusers/ppdiffusers/pipelines/shap_e">Shap-E</a></li>
320
+ <li><a href="ppdiffusers/examples/kandinsky2_2">Kandinsky-2</a></li>
321
+ <li><a href="ppdiffusers/ppdiffusers/pipelines/wuerstchen">Würstchen</a></li>
322
+ <li><a href="ppdiffusers/ppdiffusers/pipelines/hotshot_xl">Hotshot-XL</a></li>
323
+ <li><a href="ppdiffusers/ppdiffusers/pipelines/latent_consistency_models">LCMs</a></li>
324
+ <li><a href="ppdiffusers/ppdiffusers/pipelines/unidiffuser">Unidiffuser</a></li>
325
+ <li><a href="ppdiffusers/examples/class_conditional_image_generation/DiT">DiT</a></li>
326
+ <li><a href="ppdiffusers/examples/HunyuanDiT">HunyuanDiT</a></li>
327
+ </ul>
328
+ </ul>
329
+ <li><b>文生视频</b></li>
330
+ <ul>
331
+ <li><a href="ppdiffusers/examples/text_to_video_lvdm">LVDM</a></li>
332
+ <li><a href="ppdiffusers/examples/stable_video_diffusion">SVD</a></li>
333
+ <li><a href="ppdiffusers/examples/AnimateAnyone">AnimateAnyone</a></li>
334
+ <li><a href="ppdiffusers/examples/Open-Sora">OpenSora</a></li>
335
+ <li><a href="ppdiffusers/ppdiffusers/pipelines/animatediff">AnimateDiff</a></li>
336
+ <li><a href="ppdiffusers/ppdiffusers/pipelines/text_to_video_synthesis">zeroscope_v2_XL</a></li>
337
+ <li><a href="ppdiffusers/examples/cogvideo">CogVideoX</a></li>
338
+ </ul>
339
+ </ul>
340
+ <li><b>音频生成</b></li>
341
+ <ul>
342
+ <li><a href="ppdiffusers/ppdiffusers/pipelines/audioldm">AudioLDM</a></li>
343
+ <li><a href="ppdiffusers/ppdiffusers/pipelines/audioldm2">AudioLDM2</a></li>
344
+ </ul>
345
+ </td>
346
+ <td>
347
+ <ul>
348
+ </ul>
349
+ <li><b>统一多模态模型</b></li>
350
+ <ul>
351
+ <li><a href="paddlemix/examples/janus">Janus</a></li>
352
+ </ul>
353
+ </td>
354
+ </tr>
355
+ </tbody>
356
+ </table>
357
+
358
+ 更多模型能力,可参考[模型能力矩阵](./paddlemix/examples/README.md)
359
+
360
+
361
+ ## 🏆特色模型|工具
362
+
363
+ ### 💎跨模态任务流水线AppFlow
364
+ <details>
365
+ <summary><b> 简介(点击展开)</b></summary>
366
+
367
+ AppFlow作为PaddleMIX的跨模态应用任务流水线,具备强大的功能与易用性。通过接入LLaVA、Stable Diffusion等前沿算法,AppFlow已全面覆盖图像、文本、音频、视频等多种模态,并通过流水线式的灵活组合,构建了10余种多模态应用,涵盖图文生成、文本视频生成、文本音频生成、图像理解等多个方面,为用户提供丰富的demo示例。AppFlow的特色在于其一键预测功能,用户无需繁琐训练与大量编码,只需简单命令即可完成模型推理,极大地降低了使用门槛。同时,AppFlow充分利用飞桨框架动静统一优势,用户只需设置简单参数,即可自动完成模型的动转静导出及高性能推理,提高工作效率并优化模型性能,实现一站式应用部署。
368
+
369
+ `传送门`:[应用文档示例](applications/README.md/#快速开始)。
370
+
371
+ </details>
372
+
373
+ ### 💎多模态数据处理工具箱DataCopilot
374
+ <details>
375
+ <summary><b> 简介(点击展开)</b></summary>
376
+
377
+ 在真实的应用场景有大量使用专有数据微调多模态大模型来提升模型效果的需求,此过程中数据要素成为核心。基于此PaddleMIX提供了数据处理和分析的工具DataCopilot,使开发者可在PaddleMIX套件完成端到端的开发体验。
378
+
379
+ PP-InsCapTagger(Instance Capability Tagger) 是 DataCopilot 基于 PaddleMIX 实现的数据集能力标签模型,用于为多模态数据实例能力打标,通过实例能力分布对数据集进行优化,可以提高模型训练效率,为数据集分析和评价提供了一种高效的方案。 结合模型推理打标结果对LLaVA SFT数据集进行优化,可以**提高LLaVA模型SFT阶段50%的训练效率。**
380
+
381
+ `传送门`:[应用文档示例](paddlemix/datacopilot/readme.md)。
382
+
383
+ </details>
384
+ 
+ <details>
+ <summary><b> PP-InsCapTagger (click to expand)</b></summary>
+ 
+ | Model | ScienceQA | TextVQA | VQAv2 | GQA | MMMU | MME |
+ |-------|-----------|---------|-------|-----|------|-----|
+ | llava-1.5-7b (origin) | 66.8 | 58.2 | 78.5 | 62 | - | - |
+ | llava-1.5-7b (rerun) | 69.01 | 57.6 | 79 | 62.95 | 36.89 | 1521<br>323 |
+ | llava-1.5-7b (random 50%) | 67.31 | 55.6 | 76.89 | 61.01 | 34.67 | 1421<br>286 |
+ | **llava-1.5-7b (our 50%)** | **70.24** *(+2.93)* | **57.12** *(+1.52)* | **78.32** *(+1.43)* | **62.14** *(+1.13)* | **37.11** *(+2.44)* | **1476** *(+55)*<br>**338** *(+52)* |
+ 
+ 
+ `Quick link`: [application documentation and examples](paddlemix/datacopilot/example/pp_inscaptagger/readme.md).
+ </details>
+ 
+ 
+ ## 🤔 FAQ
+ For answers to common questions about this project, see the [FAQ](docs/FAQ.md). If your question is not answered there, feel free to raise it in [Issues](https://github.com/PaddlePaddle/PaddleMIX/issues).
+ 
+ 
+ ## 📝 License
+ 
+ This project is released under the [Apache 2.0 license](LICENSE).
+ 
+ ## 📌 Community
+ 
+ - Scan the QR code on WeChat and fill in the questionnaire to join the discussion group and connect with community developers and the official team.
+ <div align="center">
+ <img src="https://github.com/user-attachments/assets/ecf292da-9ac6-41cb-84b6-df726ef4522d" width="300" height="300" />
+ </div>
PaddleMIX/paddlemix_applications.md ADDED
@@ -0,0 +1,244 @@
1
+ # 🎨 飞桨PaddleMIX项目分类汇总
2
+
3
+ ## 🎯 一、多模态创作类
4
+
5
+ 1. [PaddleMIX开学季海报生成](https://aistudio.baidu.com/projectdetail/8240734)
6
+ - 🌇 类型:文生图
7
+ - 🎓 功能:开学季和教师节主题海报生成
8
+ - 🔥 热度:🔥🔥🔥
9
+
10
+ 2. [PaddleMIX黑神话悟空海报生成](https://aistudio.baidu.com/projectdetail/8555550)
11
+ - 🎨 类型:文生图
12
+ - 🎮 功能:黑神话悟空主题海报生成
13
+ - 🔥 热度:🔥🔥🔥
14
+
15
+ 3. [AI智能节日海报](https://aistudio.baidu.com/projectdetail/8293232)
16
+ - 🎨 类型:文生图
17
+ - 🎉 功能:文心大模型与PaddleMIX驱动的AI智能海报生成
18
+ - 🔥 热度:🔥🔥
19
+
20
+ 4. [AI建筑设计助手](https://aistudio.baidu.com/projectdetail/8288584)
21
+ - 🏗️ 类型:图像渲染
22
+ - 🎯 功能:建筑设计图快速渲染解决方案
23
+ - 🔥 热度:🔥🔥
24
+
25
+ 5. [PaddleMIX教师节文图生成器](https://aistudio.baidu.com/projectdetail/8530911)
26
+ - 📚 类型:文生图
27
+ - 🎓 功能:教师节主题创意生成
28
+ - 🔥 热度:🔥🔥
29
+
30
+ 6. [文本+音乐生成图片](https://aistudio.baidu.com/projectdetail/8314111)
31
+ - 🎵 类型:多模态生成
32
+ - 🖼️ 功能:基于文本和音乐的图像生成
33
+ - 🔥 热度:🔥🔥
34
+
35
+ 7. [个性化GIF生成](https://aistudio.baidu.com/projectdetail/8321341)
36
+ - 🌠 类型:文生图
37
+ - 🍡 功能:基于Hotshot-XL模型的GIF生成
38
+ - 🔥 热度:🔥🔥
39
+
40
+ 8. [绿心智绘:环保创意海报](https://aistudio.baidu.com/projectdetail/8362605)
41
+ - 🌻 类型:文生图
42
+ - 🍀 功能:环保创意海报生成
43
+ - 🔥 热度:🔥🔥
44
+
45
+ 9. [墨韵融生:多模态水墨创艺](https://aistudio.baidu.com/projectdetail/8515323)
46
+ - 🌉 类型:文生图
47
+ - 🎨 功能:水墨画作品生成
48
+ - 🔥 热度:🔥
49
+
50
+ 10. [PaddleMIX的PPT生成器](https://aistudio.baidu.com/projectdetail/8545186)
51
+ - 💻 类型:幻灯片生成
52
+ - 💡 功能:内容生成、图片设计和PPT文件渲染
53
+ - 🔥 热度:🔥
54
+
55
+ 11. [那兔海报](https://aistudio.baidu.com/projectdetail/8331103)
56
+ - 🐰 类型:文生图
57
+ - 🔅 功能:那兔海报生成
58
+ - 🔥 热度:🔥
59
+
60
+ ## 🎬 二、视频创作类
61
+ 12. [PaddleMIX AnimateAnyone模型应用](https://aistudio.baidu.com/projectdetail/8320311)
62
+ - 🎥 类型:图生视频
63
+ - 🎭 功能:依据图片创作视频
64
+ - 🔥 热度:🔥🔥
65
+
66
+ 13. [OpenSora模型应用](https://aistudio.baidu.com/projectdetail/8534189)
67
+ - 🎦 类型:文生视频/图生视频
68
+ - 🎪 功能:多模态视频生成
69
+ - 🔥 热度:🔥🔥
70
+
71
+ 14. [openSora和SVD视频渲染引擎](https://aistudio.baidu.com/projectdetail/8535627)
72
+ - 🎮 类型:图像转视频
73
+ - 🎨 功能:图像转换视频渲染
74
+ - 🔥 热度:🔥🔥
75
+
76
+ ## 🎵 三、音乐与音频类
77
+ 15. [PaddleMIX音乐快速生成器](https://aistudio.baidu.com/projectdetail/8556362)
78
+ - 🎧 类型:音乐生成
79
+ - 🎺 功能:基于文本的音乐生成
80
+ - 🔥 热度:🔥🔥🔥
81
+
82
+ 16. [PaddleMIX照片配音生成](https://aistudio.baidu.com/projectdetail/7454890)
83
+ - 🎸 类型:音乐生成
84
+ - 🎼 功能:照片的配音生成
85
+ - 🔥 热度:🔥🔥🔥
86
+
87
+ 17. [智能视频配乐](https://aistudio.baidu.com/projectdetail/8351571)
88
+ - 🎼 类型:音频生成
89
+ - 🎹 功能:视频氛围分析与背景音乐生成
90
+ - 🔥 热度:🔥🔥
91
+
92
+ 18. [国庆节音乐贺卡](https://aistudio.baidu.com/projectdetail/8536882)
93
+ - 🎸 类型:音乐生成
94
+ - 🎺 功能:国庆专属电子音乐贺卡
95
+ - 🔥 热度:🔥🔥
96
+
97
+ 19. [月圆之夜:音乐与中秋的邂逅](https://aistudio.baidu.com/projectdetail/8311291)
98
+ - 🌕 类型:音乐生成
99
+ - 🎶 功能:中秋节主题音乐创作
100
+ - 🔥 热度:🔥🔥
101
+
102
+ 20. [音画交响:图片配乐新纪元](https://aistudio.baidu.com/projectdetail/8398422)
103
+ - 🎻 类型:音乐生成
104
+ - 🎺 功能:照片的配乐生成
105
+ - 🔥 热度:🔥
106
+
107
+ 21. [国庆庆典:音乐与国庆辉煌交响](https://aistudio.baidu.com/projectdetail/8356701)
108
+ - 🎹 类型:音乐生成
109
+ - 🎺 功能:基于文本和照片的配乐生成
110
+ - 🔥 热度:🔥
111
+
112
+ ## 🎊 四、节日主题创作
113
+ 22. [国庆写真生成器](https://aistudio.baidu.com/projectdetail/8334575)
114
+ - 🎀 类型:图像生成
115
+ - 👗 功能:专属国庆节写真生成
116
+ - 🔥 热度:🔥🔥🔥
117
+
118
+ 23. [中秋民俗舞火龙](https://aistudio.baidu.com/projectdetail/8318522)
119
+ - 🐉 类型:图像生成
120
+ - 🔥 功能:舞火龙庆典图绘制
121
+ - 🔥 热度:🔥🔥🔥
122
+
123
+ 24. [兔绘爱国情:童心海报梦工厂](https://aistudio.baidu.com/projectdetail/8288021)
124
+ - 🐰 类型:图像生成
125
+ - 🎨 功能:爱国主题童趣海报
126
+ - 🔥 热度:🔥🔥
127
+
128
+ 25. [创艺·幻绘月饼艺术图](https://aistudio.baidu.com/projectdetail/8540279)
129
+ - 🥮 类型:图像生成
130
+ - 🎨 功能:月饼艺术创作
131
+ - 🔥 热度:🔥🔥
132
+
133
+ 26. [PaddleMIX中秋贺卡生成器](https://aistudio.baidu.com/projectdetail/8541174)
134
+ - 🌙 类型:图像生成
135
+ - ✉️ 功能:中秋节贺卡定制
136
+ - 🔥 热度:🔥🔥
137
+
138
+ 27. [国庆英雄颂](https://aistudio.baidu.com/projectdetail/8352450)
139
+ - 🇨🇳 类型:多模态创作
140
+ - 📣 功能:历史的英雄故事以生动的声音和视觉形式呈现
141
+ - 🔥 热度:🔥🔥
142
+
143
+ 28. [国庆烟火绘梦](https://aistudio.baidu.com/projectdetail/8352778)
144
+ - 🎆 类型:文生图
145
+ - 🎇 功能:数字烟花的生成
146
+ - 🔥 热度:🔥🔥
147
+
148
+ 29. [绘梦国庆-缤纷国庆贺卡](https://aistudio.baidu.com/projectdetail/8542328)
149
+ - 🎉 类型:图像生成
150
+ - 🎊 功能:个性化国庆贺卡创作
151
+ - 🔥 热度:🔥
152
+
153
+ 30. [感恩有你-感恩节海报设计](https://aistudio.baidu.com/projectdetail/8500805)
154
+ - 🍔 类型:图像生成
155
+ - 🍖 功能:感恩节海报生成
156
+ - 🔥 热度:🔥
157
+
158
+ 31. [魔境创想-万圣节贺卡生成](https://aistudio.baidu.com/projectdetail/8500805)
159
+ - 🎃 类型:图像生成
160
+ - 👻 功能:个性化的万圣节贺卡生成
161
+ - 🔥 热度:🔥
162
+
163
+ ## 🤖 五、智能助手类
164
+ 32. [炉石传说卡牌设计助手](https://aistudio.baidu.com/projectdetail/8543362)
165
+ - 🎮 类型:多模态设计
166
+ - 🃏 功能:基于ErnieSDK的游戏卡牌设计
167
+ - 🔥 热度:🔥🔥
168
+
169
+ 33. [智能宠物识别与养护助手](https://aistudio.baidu.com/projectdetail/8299163)
170
+ - 🐾 类型:图像识别/分析
171
+ - 🐱 功能:基于QwenVL的宠物护理
172
+ - 🔥 热度:🔥🔥
173
+
174
+ ## 💡 六、创新应用类
175
+ 34. [多模态数字艺术创生](https://aistudio.baidu.com/projectdetail/8382636)
176
+ - 🎨 类型:艺术创作
177
+ - 🖼️ 功能:数字艺术创作与鉴赏
178
+ - 🔥 热度:🔥🔥
179
+
180
+ 35. [教学辅助多模态图文生成](https://aistudio.baidu.com/projectdetail/8372762)
181
+ - 📚 类型:教育应用
182
+ - 🎓 功能:教学辅助创作工具
183
+ - 🔥 热度:🔥🔥
184
+
185
+ 36. ["海参纪念币"文创设计](https://aistudio.baidu.com/projectdetail/8386127)
186
+ - 💰 类型:文创设计
187
+ - 🎨 功能:纪念币艺术设计
188
+ - 🔥 热度:🔥🔥
189
+
190
+ 37. [艺术风格迁移](https://aistudio.baidu.com/projectdetail/8549441)
191
+ - 🎨 类型:风格迁移
192
+ - 🖼️ 功能:多模态艺术风格迁移
193
+ - 🔥 热度:🔥🔥
194
+
195
+ 38. [爱国卡通角色故事创作](https://aistudio.baidu.com/projectdetail/8356273)
196
+ - 📚 类型:多模态创作
197
+ - 🎭 功能:基于ERNIE SDK的角色故事创作
198
+ - 🔥 热度:🔥
199
+
200
+ 39. [云顶天宫:盗墓笔记场景重现](https://aistudio.baidu.com/projectdetail/8486711)
201
+ - 📚 类型:多模态创作
202
+ - 🎭 功能:将小说中场景深度还原
203
+ - 🔥 热度:🔥
204
+
205
+ 40. [绘梦长空·风筝韵事](https://aistudio.baidu.com/projectdetail/8404865)
206
+ - 🎉 类型:图像理解
207
+ - 📚 功能:讲述中国传统风筝背后的动人故事与制作工艺
208
+ - 🔥 热度:🔥
209
+
210
+ ## 七、其他
211
+ 41. [基于PaddleMIX的数据集行为标签分类器训推实例](https://aistudio.baidu.com/projectdetail/7917712)
212
+ - 🎓 类型:图像理解
213
+ - 🌋 功能:基于LLaVA的行为分类
214
+ - 🔥 热度:🔥🔥🔥
215
+
216
+ 42. [PaddleMIX入门AIGC应用](https://aistudio.baidu.com/projectdetail/7583868)
217
+ - 📟 类型:多模态生成
218
+ - 📷 功能:图像生成、音频生成、图像变换、图像超分辨率
219
+ - 🔥 热度:🔥🔥
220
+
221
+ 43. [PaddleMIX DiT高性能推理实战](https://aistudio.baidu.com/projectdetail/8261962)
222
+ - 🎨 类型:图像生成
223
+ - 📚 功能:基于DiT结构的图像生成
224
+ - 🔥 热度:🔥🔥
225
+
226
+ 44. [PaddleMIX玩转Stable Diffusion 3](https://aistudio.baidu.com/projectdetail/8494762)
227
+ - 🔔 类型:图像生成
228
+ - 🎮 功能:基于Stable Diffusion 3的图像生成
229
+ - 🔥 热度:🔥🔥
230
+
231
+ 45. [基于PaddleMIX的数据集行为标签分类器训推实例](https://aistudio.baidu.com/projectdetail/8229498)
232
+ - 🎈 类型:图像理解
233
+ - 💎 功能:LLaVA模型训练与使用
234
+ - 🔥 热度:🔥🔥
235
+
236
+ 46. [PaddleMIX入门视频生成](https://aistudio.baidu.com/projectdetail/8221041)
237
+ - 🎥 类型:视频生成
238
+ - 📺 功能:基于SVD和OpenSora的视频生成
239
+ - 🔥 热度:🔥
240
+
241
+ 47. [基于PaddleMIX的高速文生图推理](https://aistudio.baidu.com/projectdetail/8232803)
242
+ - 🎓 类型:图像生成
243
+ - 🎨 功能:LCM模型的快速图像生成
244
+ - 🔥 热度:🔥
VILA/Dockerfile ADDED
@@ -0,0 +1,18 @@
+ FROM nvcr.io/nvidia/pytorch:24.06-py3
+ 
+ WORKDIR /app
+ 
+ RUN curl https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -o ~/miniconda.sh \
+ && sh ~/miniconda.sh -b -p /opt/conda \
+ && rm ~/miniconda.sh
+ 
+ ENV PATH /opt/conda/bin:$PATH
+ COPY pyproject.toml pyproject.toml
+ COPY llava llava
+ 
+ COPY environment_setup.sh environment_setup.sh
+ RUN bash environment_setup.sh vila
+ 
+ 
+ COPY server.py server.py
+ CMD ["conda", "run", "-n", "vila", "--no-capture-output", "python", "-u", "-W", "ignore", "server.py"]
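A hedged usage note for the image above: from the VILA repository root, something like `docker build -t vila-server .` builds it, and `docker run --gpus all -p 8000:8000 -e VILA_MODEL_PATH=Efficient-Large-Model/VILA1.5-3B vila-server` starts the API server defined in `server.py` (shown next) on its default port 8000. The image tag and flags are illustrative; the `VILA_*` environment variables the server honors are listed at the bottom of `server.py`.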
VILA/server.py ADDED
@@ -0,0 +1,254 @@
1
+ import argparse
2
+ import base64
3
+ import os
4
+ import re
5
+ import time
6
+ import uuid
7
+ from contextlib import asynccontextmanager
8
+ from io import BytesIO
9
+ from typing import List, Literal, Optional, Union, get_args
10
+
11
+ import requests
12
+ import torch
13
+ import uvicorn
14
+ from fastapi import FastAPI
15
+ from fastapi.responses import JSONResponse
16
+ from PIL import Image as PILImage
17
+ from PIL.Image import Image
18
+ from pydantic import BaseModel
19
+
20
+ from llava.constants import (
21
+ DEFAULT_IM_END_TOKEN,
22
+ DEFAULT_IM_START_TOKEN,
23
+ DEFAULT_IMAGE_TOKEN,
24
+ IMAGE_PLACEHOLDER,
25
+ IMAGE_TOKEN_INDEX,
26
+ )
27
+ from llava.conversation import SeparatorStyle, conv_templates
28
+ from llava.mm_utils import KeywordsStoppingCriteria, get_model_name_from_path, process_images, tokenizer_image_token
29
+ from llava.model.builder import load_pretrained_model
30
+ from llava.utils import disable_torch_init
31
+
32
+
33
+ class TextContent(BaseModel):
34
+ type: Literal["text"]
35
+ text: str
36
+
37
+
38
+ class ImageURL(BaseModel):
39
+ url: str
40
+
41
+
42
+ class ImageContent(BaseModel):
43
+ type: Literal["image_url"]
44
+ image_url: ImageURL
45
+
46
+
47
+ IMAGE_CONTENT_BASE64_REGEX = re.compile(r"^data:image/(png|jpe?g);base64,(.*)$")
48
+
49
+
50
+ class ChatMessage(BaseModel):
51
+ role: Literal["user", "assistant"]
52
+ content: Union[str, List[Union[TextContent, ImageContent]]]
53
+
54
+
55
+ class ChatCompletionRequest(BaseModel):
56
+ model: Literal[
57
+ "VILA1.5-3B",
58
+ "VILA1.5-3B-AWQ",
59
+ "VILA1.5-3B-S2",
60
+ "VILA1.5-3B-S2-AWQ",
61
+ "Llama-3-VILA1.5-8B",
62
+ "Llama-3-VILA1.5-8B-AWQ",
63
+ "VILA1.5-13B",
64
+ "VILA1.5-13B-AWQ",
65
+ "VILA1.5-40B",
66
+ "VILA1.5-40B-AWQ",
67
+ ]
68
+ messages: List[ChatMessage]
69
+ max_tokens: Optional[int] = 512
70
+ top_p: Optional[float] = 0.9
71
+ temperature: Optional[float] = 0.2
72
+ stream: Optional[bool] = False
73
+ use_cache: Optional[bool] = True
74
+ num_beams: Optional[int] = 1
75
+
76
+
77
+ model = None
78
+ model_name = None
79
+ tokenizer = None
80
+ image_processor = None
81
+ context_len = None
82
+
83
+
84
+ def load_image(image_url: str) -> Image:
85
+ if image_url.startswith("http") or image_url.startswith("https"):
86
+ response = requests.get(image_url)
87
+ image = PILImage.open(BytesIO(response.content)).convert("RGB")
88
+ else:
89
+ match_results = IMAGE_CONTENT_BASE64_REGEX.match(image_url)
90
+ if match_results is None:
91
+ raise ValueError(f"Invalid image url: {image_url}")
92
+ image_base64 = match_results.groups()[1]
93
+ image = PILImage.open(BytesIO(base64.b64decode(image_base64))).convert("RGB")
94
+ return image
95
+
96
+
97
+ def get_literal_values(cls, field_name: str):
98
+ field_type = cls.__annotations__.get(field_name)
99
+ if field_type is None:
100
+ raise ValueError(f"{field_name} is not a valid field name")
101
+ if hasattr(field_type, "__origin__") and field_type.__origin__ is Literal:
102
+ return get_args(field_type)
103
+ raise ValueError(f"{field_name} is not a Literal type")
104
+
105
+
106
+ VILA_MODELS = get_literal_values(ChatCompletionRequest, "model")
107
+
108
+
109
+ def normalize_image_tags(qs: str) -> str:
110
+ image_token_se = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN
111
+ if IMAGE_PLACEHOLDER in qs:
112
+ if model.config.mm_use_im_start_end:
113
+ qs = re.sub(IMAGE_PLACEHOLDER, image_token_se, qs)
114
+ else:
115
+ qs = re.sub(IMAGE_PLACEHOLDER, DEFAULT_IMAGE_TOKEN, qs)
116
+
117
+ if DEFAULT_IMAGE_TOKEN not in qs:
118
+ print("No image was found in input messages. Continuing with text only prompt.")
119
+ return qs
120
+
121
+
122
+ @asynccontextmanager
123
+ async def lifespan(app: FastAPI):
124
+ global model, model_name, tokenizer, image_processor, context_len
125
+ disable_torch_init()
126
+ model_path = app.args.model_path
127
+ model_name = get_model_name_from_path(model_path)
128
+ tokenizer, model, image_processor, context_len = load_pretrained_model(model_path, model_name, None)
129
+ print(f"Model {model_name} loaded successfully. Context length: {context_len}")
130
+ yield
131
+
132
+
133
+ app = FastAPI(lifespan=lifespan)
134
+
135
+
136
+ # Load model upon startup
137
+ @app.post("/chat/completions")
138
+ async def chat_completions(request: ChatCompletionRequest):
139
+ try:
140
+ global model, tokenizer, image_processor, context_len
141
+
142
+ if request.model != model_name:
143
+ raise ValueError(
144
+ f"The endpoint is configured to use the model {model_name}, "
145
+ f"but the request model is {request.model}"
146
+ )
147
+ max_tokens = request.max_tokens
148
+ temperature = request.temperature
149
+ top_p = request.top_p
150
+ use_cache = request.use_cache
151
+ num_beams = request.num_beams
152
+
153
+ messages = request.messages
154
+ conv_mode = app.args.conv_mode
155
+
156
+ images = []
157
+
158
+ conv = conv_templates[conv_mode].copy()
159
+ user_role = conv.roles[0]
160
+ assistant_role = conv.roles[1]
161
+
162
+ for message in messages:
163
+ if message.role == "user":
164
+ prompt = ""
165
+
166
+ if isinstance(message.content, str):
167
+ prompt += message.content
168
+ if isinstance(message.content, list):
169
+ for content in message.content:
170
+ if content.type == "text":
171
+ prompt += content.text
172
+ if content.type == "image_url":
173
+ image = load_image(content.image_url.url)
174
+ images.append(image)
175
+ prompt += IMAGE_PLACEHOLDER
176
+
177
+ normalized_prompt = normalize_image_tags(prompt)
178
+ conv.append_message(user_role, normalized_prompt)
179
+ if message.role == "assistant":
180
+ prompt = message.content
181
+ conv.append_message(assistant_role, prompt)
182
+
183
+ prompt_text = conv.get_prompt()
184
+ print("Prompt input: ", prompt_text)
185
+
186
+ # support generation with text only inputs
187
+ if len(images) == 0:
188
+ images_input = None
189
+ else:
190
+ images_tensor = process_images(images, image_processor, model.config).to(model.device, dtype=torch.float16)
191
+ images_input = [images_tensor]
192
+
193
+ input_ids = (
194
+ tokenizer_image_token(prompt_text, tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt")
195
+ .unsqueeze(0)
196
+ .to(model.device)
197
+ )
198
+
199
+ stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
200
+ keywords = [stop_str]
201
+ stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids)
202
+
203
+ with torch.inference_mode():
204
+ output_ids = model.generate(
205
+ input_ids,
206
+ images=images_input,
207
+ do_sample=True if temperature > 0 else False,
208
+ temperature=temperature,
209
+ top_p=top_p,
210
+ num_beams=num_beams,
211
+ max_new_tokens=max_tokens,
212
+ use_cache=use_cache,
213
+ stopping_criteria=[stopping_criteria],
214
+ )
215
+
216
+ outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0]
217
+ outputs = outputs.strip()
218
+ if outputs.endswith(stop_str):
219
+ outputs = outputs[: -len(stop_str)]
220
+ outputs = outputs.strip()
221
+ print("\nAssistant: ", outputs)
222
+
223
+ resp_content = [TextContent(type="text", text=outputs)]
224
+ return {
225
+ "id": uuid.uuid4().hex,
226
+ "object": "chat.completion",
227
+ "created": time.time(),
228
+ "model": request.model,
229
+ "choices": [{"message": ChatMessage(role="assistant", content=resp_content)}],
230
+ }
231
+ except Exception as e:
232
+ return JSONResponse(
233
+ status_code=500,
234
+ content={"error": str(e)},
235
+ )
236
+
237
+
238
+ if __name__ == "__main__":
239
+
240
+ host = os.getenv("VILA_HOST", "0.0.0.0")
241
+ port = os.getenv("VILA_PORT", 8000)
242
+ model_path = os.getenv("VILA_MODEL_PATH", "Efficient-Large-Model/VILA1.5-3B")
243
+ conv_mode = os.getenv("VILA_CONV_MODE", "vicuna_v1")
244
+ workers = os.getenv("VILA_WORKERS", 1)
245
+
246
+ parser = argparse.ArgumentParser()
247
+ parser.add_argument("--host", type=str, default=host)
248
+ parser.add_argument("--port", type=int, default=port)
249
+ parser.add_argument("--model-path", type=str, default=model_path)
250
+ parser.add_argument("--conv-mode", type=str, default=conv_mode)
251
+ parser.add_argument("--workers", type=int, default=workers)
252
+ app.args = parser.parse_args()
253
+
254
+ uvicorn.run(app, host=host, port=port, workers=workers)
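A minimal client sketch for the `/chat/completions` endpoint above, assuming the server is running locally on the default port with the default `VILA1.5-3B` model; the image URL is a placeholder. The payload fields mirror `ChatCompletionRequest`, and the response shape mirrors the dict returned by `chat_completions`.

```python
# Minimal client for the /chat/completions endpoint defined in server.py above.
import requests

payload = {
    "model": "VILA1.5-3B",
    "messages": [
        {
            "role": "user",
            "content": [
                # Placeholder image URL; a data:image/...;base64 URL also works.
                {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}},
                {"type": "text", "text": "Describe this image in one sentence."},
            ],
        }
    ],
    "max_tokens": 256,
    "temperature": 0.2,
}

resp = requests.post("http://localhost:8000/chat/completions", json=payload, timeout=300)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"][0]["text"])
```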
VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/commands/env.py ADDED
@@ -0,0 +1,82 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import platform
16
+ from argparse import ArgumentParser
17
+
18
+ from ..utils import is_paddle_available, is_paddlenlp_available, is_ppxformers_available
19
+ from ..version import VERSION as version
20
+ from . import BasePPDiffusersCLICommand
21
+
22
+
23
+ def info_command_factory(_):
24
+ return EnvironmentCommand()
25
+
26
+
27
+ class EnvironmentCommand(BasePPDiffusersCLICommand):
28
+ @staticmethod
29
+ def register_subcommand(parser: ArgumentParser):
30
+ download_parser = parser.add_parser("env")
31
+ download_parser.set_defaults(func=info_command_factory)
32
+
33
+ def run(self):
34
+ import huggingface_hub
35
+
36
+ hub_version = huggingface_hub.__version__
37
+
38
+ pd_version = "not installed"
39
+ pd_cuda_available = "NA"
40
+ if is_paddle_available():
41
+ import paddle
42
+
43
+ pd_version = paddle.__version__
44
+ pd_cuda_available = paddle.device.is_compiled_with_cuda()
45
+
46
+ paddlenlp_version = "not installed"
47
+ if is_paddlenlp_available():
48
+ import paddlenlp
49
+
50
+ paddlenlp_version = paddlenlp.__version__
51
+
52
+ from ppdiffusers.accelerate import __version__ as accelerate_version
53
+ from ppdiffusers.peft import __version__ as peft_version
54
+
55
+ xformers_commit_id = "not installed"
56
+ if is_ppxformers_available():
57
+ import paddle
58
+
59
+ xformers_commit_id = paddle.__git_commit__
60
+
61
+ info = {
62
+ "`ppdiffusers` version": version,
63
+ "Platform": platform.platform(),
64
+ "Python version": platform.python_version(),
65
+ "Paddle version (GPU?)": f"{pd_version} ({pd_cuda_available})",
66
+ "Huggingface_hub version": hub_version,
67
+ "PaddleNLP version": paddlenlp_version,
68
+ "PP-Accelerate version": accelerate_version,
69
+ "PP-Peft version": peft_version,
70
+ "PP-xFormers commit id": xformers_commit_id,
71
+ "Using GPU in script?": "<fill in>",
72
+ "Using distributed or parallel set-up in script?": "<fill in>",
73
+ }
74
+
75
+ print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
76
+ print(self.format_dict(info))
77
+
78
+ return info
79
+
80
+ @staticmethod
81
+ def format_dict(d):
82
+ return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
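The command class above can also be exercised directly, without the CLI registration; the console entry point name (likely `ppdiffusers-cli env`, mirroring `diffusers-cli`) is an assumption to confirm against the package setup.

```python
# Prints the environment report produced by the `env` subcommand and returns it as a dict.
from ppdiffusers.commands.env import EnvironmentCommand

info = EnvironmentCommand().run()
print(sorted(info.keys()))
```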
VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/experimental/README.md ADDED
@@ -0,0 +1,5 @@
+ # 🧨 Diffusers Experimental
+ 
+ We are adding experimental code to support novel applications and usages of the Diffusers library.
+ Currently, the following experiments are supported:
+ * Reinforcement learning via an implementation of the [Diffuser](https://arxiv.org/abs/2205.09991) model.
VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/experimental/__init__.py ADDED
@@ -0,0 +1,15 @@
+ # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ 
+ from .rl import ValueGuidedRLPipeline
VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/loaders/__init__.py ADDED
@@ -0,0 +1,54 @@
1
+ # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ..utils import PPDIFFUSERS_SLOW_IMPORT, _LazyModule
18
+ from ..utils.import_utils import is_paddle_available, is_paddlenlp_available
19
+
20
+ _import_structure = {}
21
+
22
+ if is_paddle_available():
23
+ _import_structure["single_file"] = ["FromOriginalControlnetMixin", "FromOriginalVAEMixin"]
24
+ _import_structure["unet"] = ["UNet2DConditionLoadersMixin"]
25
+ _import_structure["utils"] = ["AttnProcsLayers"]
26
+
27
+ if is_paddlenlp_available():
28
+ _import_structure["single_file"].extend(["FromSingleFileMixin", "FromCkptMixin"])
29
+ _import_structure["lora"] = ["LoraLoaderMixin", "StableDiffusionXLLoraLoaderMixin", "SD3LoraLoaderMixin"]
30
+ _import_structure["textual_inversion"] = ["TextualInversionLoaderMixin"]
31
+ _import_structure["ip_adapter"] = ["IPAdapterMixin"]
32
+ # NOTE: this will removed in the future
33
+ _import_structure["deprecate"] = ["text_encoder_lora_state_dict", "text_encoder_attn_modules"]
34
+
35
+ if TYPE_CHECKING or PPDIFFUSERS_SLOW_IMPORT:
36
+ if is_paddle_available():
37
+ from .single_file import FromOriginalControlnetMixin, FromOriginalVAEMixin
38
+ from .unet import UNet2DConditionLoadersMixin
39
+ from .utils import AttnProcsLayers
40
+
41
+ if is_paddlenlp_available():
42
+ # NOTE: this will removed in the future
43
+ from .deprecate import (
44
+ text_encoder_attn_modules,
45
+ text_encoder_lora_state_dict,
46
+ )
47
+ from .ip_adapter import IPAdapterMixin
48
+ from .lora import LoraLoaderMixin, SD3LoraLoaderMixin, StableDiffusionXLLoraLoaderMixin
49
+ from .single_file import FromCkptMixin, FromSingleFileMixin
50
+ from .textual_inversion import TextualInversionLoaderMixin
51
+ else:
52
+ import sys
53
+
54
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
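Because of the `_LazyModule` indirection above, importing the package only registers the name table; the heavy submodules are imported on first attribute access. A small illustration (assuming paddle and paddlenlp are installed so the LoRA names are registered):

```python
import ppdiffusers.loaders as loaders  # cheap: only the lazy name table is built

# First attribute access triggers the real import of ppdiffusers.loaders.lora.
print(loaders.LoraLoaderMixin.__module__)
```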
VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/loaders/deprecate.py ADDED
@@ -0,0 +1,63 @@
1
+ # Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from ..utils import deprecate
16
+ from ..utils.import_utils import is_paddlenlp_available
17
+
18
+
19
+ def text_encoder_lora_state_dict(text_encoder):
20
+ deprecate(
21
+ "text_encoder_load_state_dict in `models`",
22
+ "0.45.0",
23
+ "`text_encoder_lora_state_dict` is deprecated and will be removed in 0.27.0. Make sure to retrieve the weights using `get_peft_model`. See https://huggingface.co/docs/peft/v0.6.2/en/quicktour#peftmodel for more information.",
24
+ )
25
+ state_dict = {}
26
+
27
+ for name, module in text_encoder_attn_modules(text_encoder):
28
+ for k, v in module.q_proj.lora_linear_layer.state_dict().items():
29
+ state_dict[f"{name}.q_proj.lora_linear_layer.{k}"] = v
30
+
31
+ for k, v in module.k_proj.lora_linear_layer.state_dict().items():
32
+ state_dict[f"{name}.k_proj.lora_linear_layer.{k}"] = v
33
+
34
+ for k, v in module.v_proj.lora_linear_layer.state_dict().items():
35
+ state_dict[f"{name}.v_proj.lora_linear_layer.{k}"] = v
36
+
37
+ for k, v in module.out_proj.lora_linear_layer.state_dict().items():
38
+ state_dict[f"{name}.out_proj.lora_linear_layer.{k}"] = v
39
+
40
+ return state_dict
41
+
42
+
43
+ if is_paddlenlp_available():
44
+
45
+ def text_encoder_attn_modules(text_encoder):
46
+ deprecate(
47
+ "text_encoder_attn_modules in `models`",
48
+ "0.45.0",
49
+ "`text_encoder_lora_state_dict` is deprecated and will be removed in 0.27.0. Make sure to retrieve the weights using `get_peft_model`. See https://huggingface.co/docs/peft/v0.6.2/en/quicktour#peftmodel for more information.",
50
+ )
51
+ from ppdiffusers.transformers import CLIPTextModel, CLIPTextModelWithProjection
52
+
53
+ attn_modules = []
54
+
55
+ if isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection)):
56
+ for i, layer in enumerate(text_encoder.text_model.encoder.layers):
57
+ name = f"text_model.encoder.layers.{i}.self_attn"
58
+ mod = layer.self_attn
59
+ attn_modules.append((name, mod))
60
+ else:
61
+ raise ValueError(f"do not know how to get attention modules for: {text_encoder.__class__.__name__}")
62
+
63
+ return attn_modules
VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/loaders/ip_adapter.py ADDED
@@ -0,0 +1,196 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import os
15
+ from typing import Dict, Union
16
+
17
+ import paddle
18
+ from safetensors import safe_open
19
+
20
+ from ..utils import (
21
+ DIFFUSERS_CACHE,
22
+ FROM_AISTUDIO,
23
+ FROM_DIFFUSERS,
24
+ FROM_HF_HUB,
25
+ HF_HUB_OFFLINE,
26
+ PPDIFFUSERS_CACHE,
27
+ _get_model_file,
28
+ is_paddlenlp_available,
29
+ logging,
30
+ smart_load,
31
+ )
32
+
33
+ if is_paddlenlp_available():
34
+ from ppdiffusers.transformers import (
35
+ CLIPImageProcessor,
36
+ CLIPVisionModelWithProjection,
37
+ )
38
+
39
+ from ..models.attention_processor import (
40
+ IPAdapterAttnProcessor,
41
+ IPAdapterAttnProcessor2_5,
42
+ )
43
+
44
+ logger = logging.get_logger(__name__)
45
+
46
+
47
+ class IPAdapterMixin:
48
+ """Mixin for handling IP Adapters."""
49
+
50
+ def load_ip_adapter(
51
+ self,
52
+ pretrained_model_name_or_path_or_dict: Union[str, Dict[str, paddle.Tensor]],
53
+ subfolder: str = "",
54
+ weight_name: str = None,
55
+ **kwargs,
56
+ ):
57
+ """
58
+ Parameters:
59
+ pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
60
+ Can be either:
61
+
62
+ - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
63
+ the Hub.
64
+ - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
65
+ with [`ModelMixin.save_pretrained`].
66
+ - A [torch state
67
+ dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
68
+
69
+ cache_dir (`Union[str, os.PathLike]`, *optional*):
70
+ Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
71
+ is not used.
72
+ force_download (`bool`, *optional*, defaults to `False`):
73
+ Whether or not to force the (re-)download of the model weights and configuration files, overriding the
74
+ cached versions if they exist.
75
+ resume_download (`bool`, *optional*, defaults to `False`):
76
+ Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
77
+ incompletely downloaded files are deleted.
78
+ proxies (`Dict[str, str]`, *optional*):
79
+ A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
80
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
81
+ local_files_only (`bool`, *optional*, defaults to `False`):
82
+ Whether to only load local model weights and configuration files or not. If set to `True`, the model
83
+ won't be downloaded from the Hub.
84
+ use_auth_token (`str` or *bool*, *optional*):
85
+ The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
86
+ `diffusers-cli login` (stored in `~/.huggingface`) is used.
87
+ revision (`str`, *optional*, defaults to `"main"`):
88
+ The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
89
+ allowed by Git.
90
+ subfolder (`str`, *optional*, defaults to `""`):
91
+ The subfolder location of a model file within a larger model repository on the Hub or locally.
92
+ """
93
+
94
+ # Load the main state dict first.
95
+ from_hf_hub = kwargs.pop("from_hf_hub", FROM_HF_HUB)
96
+ from_aistudio = kwargs.pop("from_aistudio", FROM_AISTUDIO)
97
+ cache_dir = kwargs.pop("cache_dir", None)
98
+ if cache_dir is None:
99
+ if from_aistudio:
100
+ cache_dir = None # TODO, check aistudio cache
101
+ elif from_hf_hub:
102
+ cache_dir = DIFFUSERS_CACHE
103
+ else:
104
+ cache_dir = PPDIFFUSERS_CACHE
105
+ from_diffusers = kwargs.pop("from_diffusers", FROM_DIFFUSERS)
106
+ force_download = kwargs.pop("force_download", False)
107
+ resume_download = kwargs.pop("resume_download", False)
108
+ proxies = kwargs.pop("proxies", None)
109
+ local_files_only = kwargs.pop("local_files_only", HF_HUB_OFFLINE)
110
+ use_auth_token = kwargs.pop("use_auth_token", None)
111
+ revision = kwargs.pop("revision", None)
112
+ if subfolder is None:
113
+ subfolder = ""
114
+ user_agent = {
115
+ "file_type": "attn_procs_weights",
116
+ "framework": "pytorch" if from_diffusers else "paddle",
117
+ }
118
+ if not isinstance(pretrained_model_name_or_path_or_dict, dict):
119
+ model_file = _get_model_file(
120
+ pretrained_model_name_or_path_or_dict,
121
+ weights_name=weight_name,
122
+ cache_dir=cache_dir,
123
+ force_download=force_download,
124
+ resume_download=resume_download,
125
+ proxies=proxies,
126
+ local_files_only=local_files_only,
127
+ use_auth_token=use_auth_token,
128
+ revision=revision,
129
+ subfolder=subfolder,
130
+ user_agent=user_agent,
131
+ from_hf_hub=from_hf_hub,
132
+ from_aistudio=from_aistudio,
133
+ )
134
+ if weight_name.endswith(".safetensors"):
135
+ state_dict = {"image_proj": {}, "ip_adapter": {}}
136
+ with safe_open(model_file, framework="np") as f:
137
+ metadata = f.metadata()
138
+ if metadata is None:
139
+ metadata = {}
140
+ if metadata.get("format", "pt") not in ["pt", "pd", "np"]:
141
+ raise OSError(
142
+ f"The safetensors archive passed at {model_file} does not contain the valid metadata. Make sure "
143
+ "you save your model with the `save_pretrained` method."
144
+ )
145
+ data_format = metadata.get("format", "pt")
146
+ if data_format == "pt" and not from_diffusers:
147
+ logger.warning(
148
+ "Detect the weight is in diffusers format, but currently, `from_diffusers` is set to `False`. To proceed, we will change the value of `from_diffusers` to `True`!"
149
+ )
150
+ from_diffusers = True
151
+ for key in f.keys():
152
+ if key.startswith("image_proj."):
153
+ state_dict["image_proj"][key.replace("image_proj.", "")] = f.get_tensor(key)
154
+ elif key.startswith("ip_adapter."):
155
+ state_dict["ip_adapter"][key.replace("ip_adapter.", "")] = f.get_tensor(key)
156
+ else:
157
+ state_dict = smart_load(model_file, return_numpy=True, return_is_torch_weight=True)
158
+ is_torch_weight = state_dict.pop("is_torch_weight", False)
159
+ if not from_diffusers and is_torch_weight:
160
+ logger.warning(
161
+ "Detect the weight is in diffusers format, but currently, `from_diffusers` is set to `False`. To proceed, we will change the value of `from_diffusers` to `True`!"
162
+ )
163
+ from_diffusers = True
164
+ else:
165
+ state_dict = pretrained_model_name_or_path_or_dict
166
+
167
+ keys = list(state_dict.keys())
168
+ if sorted(keys) != ["image_proj", "ip_adapter"]:
169
+ raise ValueError("Required keys are (`image_proj` and `ip_adapter`) missing from the state dict.")
170
+
171
+ # load CLIP image encoer here if it has not been registered to the pipeline yet
172
+ if hasattr(self, "image_encoder") and getattr(self, "image_encoder", None) is None:
173
+ if not isinstance(pretrained_model_name_or_path_or_dict, dict):
174
+ logger.info(f"loading image_encoder from {pretrained_model_name_or_path_or_dict}")
175
+ image_encoder = CLIPVisionModelWithProjection.from_pretrained(
176
+ pretrained_model_name_or_path_or_dict,
177
+ subfolder=os.path.join(subfolder, "image_encoder"),
178
+ from_hf_hub=from_hf_hub,
179
+ from_aistudio=from_aistudio,
180
+ # from_diffusers=from_diffusers, # we must disable this !
181
+ ).to(dtype=self.dtype)
182
+ self.image_encoder = image_encoder
183
+ else:
184
+ raise ValueError("`image_encoder` cannot be None when using IP Adapters.")
185
+
186
+ # create feature extractor if it has not been registered to the pipeline yet
187
+ if hasattr(self, "feature_extractor") and getattr(self, "feature_extractor", None) is None:
188
+ self.feature_extractor = CLIPImageProcessor()
189
+
190
+ # load ip-adapter into unet
191
+ self.unet._load_ip_adapter_weights(state_dict, from_diffusers=from_diffusers)
192
+
193
+ def set_ip_adapter_scale(self, scale):
194
+ for attn_processor in self.unet.attn_processors.values():
195
+ if isinstance(attn_processor, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_5)):
196
+ attn_processor.scale = scale
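A sketch of how a pipeline that mixes in `IPAdapterMixin` would call the two methods defined above. The repo id, subfolder, and weight name follow the common upstream IP-Adapter release, and `load_image` plus the `ip_adapter_image` call argument mirror the diffusers API; treat all of these as assumptions to verify for this ppdiffusers version.

```python
# Illustrative only: load IP-Adapter weights into the UNet attention processors,
# set the image-prompt strength, then condition generation on a reference image.
from ppdiffusers import StableDiffusionPipeline
from ppdiffusers.utils import load_image  # assumed helper, as in diffusers

pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models",
                     weight_name="ip-adapter_sd15.safetensors")
pipe.set_ip_adapter_scale(0.6)  # 0 = ignore the image prompt, 1 = follow it closely

ref = load_image("https://example.com/reference.png")  # placeholder URL
image = pipe(prompt="best quality, wearing sunglasses", ip_adapter_image=ref).images[0]
image.save("ip_adapter_out.png")
```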
VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/loaders/lora.py ADDED
@@ -0,0 +1,1871 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import os
15
+ from contextlib import nullcontext
16
+ from functools import partial
17
+ from typing import Callable, Dict, List, Optional, Union
18
+ from pathlib import Path
19
+
20
+ import numpy as np
21
+ import paddle
22
+ from huggingface_hub import model_info
23
+ from packaging import version
24
+ from paddle import nn
25
+
26
+ from ..models.modeling_pytorch_paddle_utils import (
27
+ convert_paddle_state_dict_to_pytorch,
28
+ convert_pytorch_state_dict_to_paddle,
29
+ )
30
+ from ..models.modeling_utils import faster_set_state_dict
31
+ from ..utils import (
32
+ DIFFUSERS_CACHE,
33
+ FROM_AISTUDIO,
34
+ FROM_DIFFUSERS,
35
+ FROM_HF_HUB,
36
+ HF_HUB_OFFLINE,
37
+ LOW_CPU_MEM_USAGE_DEFAULT,
38
+ PPDIFFUSERS_CACHE,
39
+ TO_DIFFUSERS,
40
+ USE_PEFT_BACKEND,
41
+ _get_model_file,
42
+ convert_state_dict_to_peft,
43
+ convert_state_dict_to_ppdiffusers,
44
+ convert_unet_state_dict_to_peft,
45
+ delete_adapter_layers,
46
+ deprecate,
47
+ get_adapter_name,
48
+ get_peft_kwargs,
49
+ is_paddlenlp_available,
50
+ is_safetensors_available,
51
+ is_torch_available,
52
+ logging,
53
+ recurse_remove_peft_layers,
54
+ scale_lora_layers,
55
+ set_adapter_layers,
56
+ set_weights_and_activate_adapters,
57
+ )
58
+ from ..version import VERSION as __version__
59
+ from .lora_conversion_utils import (
60
+ _convert_kohya_lora_to_diffusers,
61
+ _maybe_map_sgm_blocks_to_diffusers,
62
+ )
63
+
64
+ if is_safetensors_available():
65
+ from safetensors.numpy import save_file as np_safe_save_file
66
+
67
+ if is_torch_available():
68
+ from safetensors.torch import save_file as torch_safe_save_file
69
+
70
+ if is_torch_available():
71
+ import torch
72
+
73
+ if is_paddlenlp_available():
74
+ from paddlenlp.transformers import PretrainedModel
75
+
76
+ from ..models.lora import (
77
+ PatchedLoraProjection,
78
+ text_encoder_attn_modules,
79
+ text_encoder_mlp_modules,
80
+ )
81
+
82
+
83
+ logger = logging.get_logger(__name__)
84
+
85
+ TEXT_ENCODER_NAME = "text_encoder"
86
+ UNET_NAME = "unet"
87
+ TRANSFORMER_NAME = "transformer"
88
+
89
+ TORCH_LORA_WEIGHT_NAME = "pytorch_lora_weights.bin"
90
+ TORCH_LORA_WEIGHT_NAME_SAFE = "pytorch_lora_weights.safetensors"
91
+
92
+ PADDLE_LORA_WEIGHT_NAME = "paddle_lora_weights.pdparams"
93
+ PADDLE_LORA_WEIGHT_NAME_SAFE = "paddle_lora_weights.safetensors"
94
+
95
+
96
+ LORA_DEPRECATION_MESSAGE = "You are using an old version of LoRA backend. This will be deprecated in the next releases in favor of PEFT make sure to install the latest PEFT and transformers packages in the future."
97
+
98
+
99
+ class LoraLoaderMixin:
100
+ r"""
101
+ Load LoRA layers into [`UNet2DConditionModel`] and
102
+ [`CLIPTextModel`](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel).
103
+ """
104
+
105
+ text_encoder_name = TEXT_ENCODER_NAME
106
+ unet_name = UNET_NAME
107
+ num_fused_loras = 0
108
+
109
+ def load_lora_weights(
110
+ self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, paddle.Tensor]], adapter_name=None, **kwargs
111
+ ):
112
+ """
113
+ Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.unet` and
114
+ `self.text_encoder`.
115
+
116
+ All kwargs are forwarded to `self.lora_state_dict`.
117
+
118
+ See [`~loaders.LoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded.
119
+
120
+ See [`~loaders.LoraLoaderMixin.load_lora_into_unet`] for more details on how the state dict is loaded into
121
+ `self.unet`.
122
+
123
+ See [`~loaders.LoraLoaderMixin.load_lora_into_text_encoder`] for more details on how the state dict is loaded
124
+ into `self.text_encoder`.
125
+
126
+ Parameters:
127
+ pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
128
+ See [`~loaders.LoraLoaderMixin.lora_state_dict`].
129
+ kwargs (`dict`, *optional*):
130
+ See [`~loaders.LoraLoaderMixin.lora_state_dict`].
131
+ adapter_name (`str`, *optional*):
132
+ Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
133
+ `default_{i}` where i is the total number of adapters being loaded.
134
+ """
135
+ # First, ensure that the checkpoint is a compatible one and can be successfully loaded.
136
+ state_dict, network_alphas, from_diffusers = self.lora_state_dict(
137
+ pretrained_model_name_or_path_or_dict, **kwargs
138
+ )
139
+
140
+ is_correct_format = all("lora" in key for key in state_dict.keys())
141
+ if not is_correct_format:
142
+ raise ValueError("Invalid LoRA checkpoint.")
143
+
144
+ low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", LOW_CPU_MEM_USAGE_DEFAULT)
145
+
146
+ self.load_lora_into_unet(
147
+ state_dict,
148
+ network_alphas=network_alphas,
149
+ unet=getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet,
150
+ low_cpu_mem_usage=low_cpu_mem_usage,
151
+ adapter_name=adapter_name,
152
+ _pipeline=self,
153
+ from_diffusers=from_diffusers,
154
+ )
155
+ self.load_lora_into_text_encoder(
156
+ state_dict,
157
+ network_alphas=network_alphas,
158
+ text_encoder=getattr(self, self.text_encoder_name)
159
+ if not hasattr(self, "text_encoder")
160
+ else self.text_encoder,
161
+ lora_scale=self.lora_scale,
162
+ low_cpu_mem_usage=low_cpu_mem_usage,
163
+ adapter_name=adapter_name,
164
+ _pipeline=self,
165
+ from_diffusers=from_diffusers,
166
+ )
167
+
168
+ @classmethod
169
+ def lora_state_dict(
170
+ cls,
171
+ pretrained_model_name_or_path_or_dict: Union[str, Dict[str, paddle.Tensor]],
172
+ **kwargs,
173
+ ):
174
+ r"""
175
+ Return state dict for lora weights and the network alphas.
176
+
177
+ <Tip warning={true}>
178
+
179
+ We support loading A1111 formatted LoRA checkpoints in a limited capacity.
180
+
181
+ This function is experimental and might change in the future.
182
+
183
+ </Tip>
184
+
185
+ Parameters:
186
+ pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
187
+ Can be either:
188
+
189
+ - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
190
+ the Hub.
191
+ - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
192
+ with [`ModelMixin.save_pretrained`].
193
+ - A [torch state
194
+ dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
195
+
196
+ cache_dir (`Union[str, os.PathLike]`, *optional*):
197
+ Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
198
+ is not used.
199
+ force_download (`bool`, *optional*, defaults to `False`):
200
+ Whether or not to force the (re-)download of the model weights and configuration files, overriding the
201
+ cached versions if they exist.
202
+ resume_download (`bool`, *optional*, defaults to `False`):
203
+ Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
204
+ incompletely downloaded files are deleted.
205
+ proxies (`Dict[str, str]`, *optional*):
206
+ A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
207
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
208
+ local_files_only (`bool`, *optional*, defaults to `False`):
209
+ Whether to only load local model weights and configuration files or not. If set to `True`, the model
210
+ won't be downloaded from the Hub.
211
+ use_auth_token (`str` or *bool*, *optional*):
212
+ The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
213
+ `diffusers-cli login` (stored in `~/.huggingface`) is used.
214
+ revision (`str`, *optional*, defaults to `"main"`):
215
+ The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
216
+ allowed by Git.
217
+ subfolder (`str`, *optional*, defaults to `""`):
218
+ The subfolder location of a model file within a larger model repository on the Hub or locally.
219
+ low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
220
+ Speed up model loading only loading the pretrained weights and not initializing the weights. This also
221
+ tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
222
+ Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this
223
+ argument to `True` will raise an error.
224
+ mirror (`str`, *optional*):
225
+ Mirror source to resolve accessibility issues if you're downloading a model in China. We do not
226
+ guarantee the timeliness or safety of the source, and you should refer to the mirror site for more
227
+ information.
228
+
229
+ """
230
+ # Load the main state dict first which has the LoRA layers for either of
231
+ # UNet and text encoder or both.
232
+ from_hf_hub = kwargs.pop("from_hf_hub", FROM_HF_HUB)
233
+ from_aistudio = kwargs.pop("from_aistudio", FROM_AISTUDIO)
234
+ cache_dir = kwargs.pop("cache_dir", None)
235
+ if cache_dir is None:
236
+ if from_aistudio:
237
+ cache_dir = None # TODO, check aistudio cache
238
+ elif from_hf_hub:
239
+ cache_dir = DIFFUSERS_CACHE
240
+ else:
241
+ cache_dir = PPDIFFUSERS_CACHE
242
+ from_diffusers = kwargs.pop("from_diffusers", FROM_DIFFUSERS)
243
+ force_download = kwargs.pop("force_download", False)
244
+ resume_download = kwargs.pop("resume_download", False)
245
+ proxies = kwargs.pop("proxies", None)
246
+ local_files_only = kwargs.pop("local_files_only", HF_HUB_OFFLINE)
247
+ use_auth_token = kwargs.pop("use_auth_token", None)
248
+ revision = kwargs.pop("revision", None)
249
+ subfolder = kwargs.pop("subfolder", None)
250
+ weight_name = kwargs.pop("weight_name", None)
251
+ unet_config = kwargs.pop("unet_config", None)
252
+ use_safetensors = kwargs.pop("use_safetensors", None)
253
+
254
+ if use_safetensors is None:
255
+ use_safetensors = True
256
+
257
+ user_agent = {
258
+ "file_type": "attn_procs_weights",
259
+ "framework": "pytorch" if from_diffusers else "paddle",
260
+ }
261
+
262
+ model_file = None
263
+ state_dict = {}
264
+ if not isinstance(pretrained_model_name_or_path_or_dict, dict):
265
+ # Let's first try to load .safetensors weights
266
+ if (use_safetensors and weight_name is None) or (
267
+ weight_name is not None and weight_name.endswith(".safetensors")
268
+ ):
269
+ # Here we're relaxing the loading check to enable more Inference API
270
+ # friendliness where sometimes, it's not at all possible to automatically
271
+ # determine `weight_name`.
272
+ # if weight_name is None:
273
+ # weight_name = cls._best_guess_weight_name(
274
+ # pretrained_model_name_or_path_or_dict, file_extension=".safetensors"
275
+ # )
276
+ try:
277
+ model_file = _get_model_file(
278
+ pretrained_model_name_or_path_or_dict,
279
+ weights_name=(weight_name or TORCH_LORA_WEIGHT_NAME_SAFE)
280
+ if from_diffusers
281
+ else ((weight_name or PADDLE_LORA_WEIGHT_NAME_SAFE)),
282
+ cache_dir=cache_dir,
283
+ force_download=force_download,
284
+ resume_download=resume_download,
285
+ proxies=proxies,
286
+ local_files_only=local_files_only,
287
+ use_auth_token=use_auth_token,
288
+ revision=revision,
289
+ subfolder=subfolder,
290
+ user_agent=user_agent,
291
+ from_aistudio=from_aistudio,
292
+ from_hf_hub=from_hf_hub,
293
+ )
294
+ except Exception:
295
+ model_file = None
296
+
297
+ if model_file is None:
298
+ # if weight_name is None:
299
+ # weight_name = cls._best_guess_weight_name(
300
+ # pretrained_model_name_or_path_or_dict, file_extension=".bin"
301
+ # )
302
+ if model_file is None:
303
+ model_file = _get_model_file(
304
+ pretrained_model_name_or_path_or_dict,
305
+ weights_name=(weight_name or TORCH_LORA_WEIGHT_NAME)
306
+ if from_diffusers
307
+ else ((weight_name or PADDLE_LORA_WEIGHT_NAME)),
308
+ cache_dir=cache_dir,
309
+ force_download=force_download,
310
+ resume_download=resume_download,
311
+ proxies=proxies,
312
+ local_files_only=local_files_only,
313
+ use_auth_token=use_auth_token,
314
+ revision=revision,
315
+ subfolder=subfolder,
316
+ user_agent=user_agent,
317
+ from_aistudio=from_aistudio,
318
+ from_hf_hub=from_hf_hub,
319
+ )
320
+
321
+ assert model_file is not None, "Could not find the model file!"
322
+ # TODO, check this
323
+ from ppdiffusers.utils import smart_load
324
+
325
+ state_dict = smart_load(model_file, return_is_torch_weight=True)
326
+ is_torch_weight = state_dict.pop("is_torch_weight", False)
327
+ if not from_diffusers and is_torch_weight:
328
+ logger.warning(
329
+ "Detect the weight is in diffusers format, but currently, `from_diffusers` is set to `False`. To proceed, we will change the value of `from_diffusers` to `True`!"
330
+ )
331
+ from_diffusers = True
332
+ else:
333
+ state_dict = pretrained_model_name_or_path_or_dict
334
+
335
+ network_alphas = None
336
+ # TODO: replace it with a method from `state_dict_utils`
337
+ if all(
338
+ (
339
+ k.startswith("lora_te_")
340
+ or k.startswith("lora_unet_")
341
+ or k.startswith("lora_te1_")
342
+ or k.startswith("lora_te2_")
343
+ )
344
+ for k in state_dict.keys()
345
+ ):
346
+ # Map SDXL blocks correctly.
347
+ if unet_config is not None:
348
+ # use unet config to remap block numbers
349
+ state_dict = _maybe_map_sgm_blocks_to_diffusers(state_dict, unet_config)
350
+ state_dict, network_alphas = _convert_kohya_lora_to_diffusers(state_dict)
351
+
352
+ return state_dict, network_alphas, from_diffusers
353
+
354
+ @classmethod
355
+ def _best_guess_weight_name(cls, pretrained_model_name_or_path_or_dict, file_extension=".safetensors"):
356
+ targeted_files = []
357
+
358
+ if os.path.isfile(pretrained_model_name_or_path_or_dict):
359
+ return
360
+ elif os.path.isdir(pretrained_model_name_or_path_or_dict):
361
+ targeted_files = [
362
+ f for f in os.listdir(pretrained_model_name_or_path_or_dict) if f.endswith(file_extension)
363
+ ]
364
+ else:
365
+ files_in_repo = model_info(pretrained_model_name_or_path_or_dict).siblings
366
+ targeted_files = [f.rfilename for f in files_in_repo if f.rfilename.endswith(file_extension)]
367
+ if len(targeted_files) == 0:
368
+ return
369
+
370
+ # "scheduler" does not correspond to a LoRA checkpoint.
371
+ # "optimizer" does not correspond to a LoRA checkpoint
372
+ # only top-level checkpoints are considered and not the other ones, hence "checkpoint".
373
+ unallowed_substrings = {"scheduler", "optimizer", "checkpoint"}
374
+ targeted_files = list(
375
+ filter(lambda x: all(substring not in x for substring in unallowed_substrings), targeted_files)
376
+ )
377
+
378
+ if any(f.endswith(TORCH_LORA_WEIGHT_NAME) for f in targeted_files):
379
+ targeted_files = list(filter(lambda x: x.endswith(TORCH_LORA_WEIGHT_NAME), targeted_files))
380
+ elif any(f.endswith(TORCH_LORA_WEIGHT_NAME_SAFE) for f in targeted_files):
381
+ targeted_files = list(filter(lambda x: x.endswith(TORCH_LORA_WEIGHT_NAME_SAFE), targeted_files))
382
+
383
+ if len(targeted_files) > 1:
384
+ raise ValueError(
385
+ f"Provided path contains more than one weights file in the {file_extension} format. Either specify `weight_name` in `load_lora_weights` or make sure there's only one `.safetensors` or `.bin` file in {pretrained_model_name_or_path_or_dict}."
386
+ )
387
+ weight_name = targeted_files[0]
388
+ return weight_name
389
+
390
+ @classmethod
391
+ def _optionally_disable_offloading(cls, _pipeline):
392
+ """
393
+ Optionally removes offloading in case the pipeline has been already sequentially offloaded to CPU.
394
+
395
+ Args:
396
+ _pipeline (`DiffusionPipeline`):
397
+ The pipeline to disable offloading for.
398
+
399
+ Returns:
400
+ tuple:
401
+ A tuple indicating if `is_model_cpu_offload` or `is_sequential_cpu_offload` is True.
402
+ """
403
+ pass
404
+
405
+ @classmethod
406
+ def load_lora_into_unet(
407
+ cls,
408
+ state_dict,
409
+ network_alphas,
410
+ unet,
411
+ low_cpu_mem_usage=None,
412
+ adapter_name=None,
413
+ _pipeline=None,
414
+ from_diffusers=None,
415
+ ):
416
+ """
417
+ This will load the LoRA layers specified in `state_dict` into `unet`.
418
+
419
+ Parameters:
420
+ state_dict (`dict`):
421
+ A standard state dict containing the lora layer parameters. The keys can either be indexed directly
422
+ into the unet or prefixed with an additional `unet` which can be used to distinguish between text
423
+ encoder lora layers.
424
+ network_alphas (`Dict[str, float]`):
425
+ See `LoRALinearLayer` for more details.
426
+ unet (`UNet2DConditionModel`):
427
+ The UNet model to load the LoRA layers into.
428
+ low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
429
+ Speed up model loading by only loading the pretrained weights and not initializing the weights. This also
430
+ tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
431
+ Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this
432
+ argument to `True` will raise an error.
433
+ adapter_name (`str`, *optional*):
434
+ Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
435
+ `default_{i}` where i is the total number of adapters being loaded.
436
+ """
437
+ if from_diffusers is None:
438
+ from_diffusers = FROM_DIFFUSERS
439
+ low_cpu_mem_usage = low_cpu_mem_usage if low_cpu_mem_usage is not None else LOW_CPU_MEM_USAGE_DEFAULT
440
+ # If the serialization format is new (introduced in https://github.com/huggingface/diffusers/pull/2918),
441
+ # then the `state_dict` keys should have `cls.unet_name` and/or `cls.text_encoder_name` as
442
+ # their prefixes.
443
+ keys = list(state_dict.keys())
444
+
445
+ if all(key.startswith(cls.unet_name) or key.startswith(cls.text_encoder_name) for key in keys):
446
+ # Load the layers corresponding to UNet.
447
+ logger.info(f"Loading {cls.unet_name}.")
448
+
449
+ unet_keys = [k for k in keys if k.startswith(cls.unet_name)]
450
+ state_dict = {k.replace(f"{cls.unet_name}.", ""): v for k, v in state_dict.items() if k in unet_keys}
451
+
452
+ if network_alphas is not None:
453
+ alpha_keys = [k for k in network_alphas.keys() if k.startswith(cls.unet_name)]
454
+ network_alphas = {
455
+ k.replace(f"{cls.unet_name}.", ""): v for k, v in network_alphas.items() if k in alpha_keys
456
+ }
457
+
458
+ else:
459
+ # Otherwise, we're dealing with the old format. This means the `state_dict` should only
460
+ # contain the module names of the `unet` as its keys WITHOUT any prefix.
461
+ warn_message = "You have saved the LoRA weights using the old format. To convert the old LoRA weights to the new format, you can first load them in a dictionary and then create a new dictionary like the following: `new_state_dict = {f'unet.{module_name}': params for module_name, params in old_state_dict.items()}`."
462
+ logger.warn(warn_message)
463
+
464
+ if USE_PEFT_BACKEND and len(state_dict.keys()) > 0:
465
+ from ppdiffusers.peft import (
466
+ LoraConfig,
467
+ inject_adapter_in_model,
468
+ set_peft_model_state_dict,
469
+ )
470
+
471
+ if adapter_name in getattr(unet, "peft_config", {}):
472
+ raise ValueError(
473
+ f"Adapter name {adapter_name} already in use in the Unet - please select a new adapter name."
474
+ )
475
+
476
+ state_dict = convert_unet_state_dict_to_peft(state_dict)
477
+
478
+ if network_alphas is not None:
479
+ # The alphas state dict has the same structure as the Unet's, thus we convert it to peft format using
480
+ # `convert_unet_state_dict_to_peft` method.
481
+ network_alphas = convert_unet_state_dict_to_peft(network_alphas)
482
+
483
+ rank = {}
484
+ index = 1 if from_diffusers else 0
485
+ for key, val in state_dict.items():
486
+ if "lora_B" in key:
487
+ rank[key] = val.shape[index]
488
+
489
+ lora_config_kwargs = get_peft_kwargs(rank, network_alphas, state_dict, is_unet=True)
490
+ lora_config = LoraConfig(**lora_config_kwargs)
491
+
492
+ # adapter_name
493
+ if adapter_name is None:
494
+ adapter_name = get_adapter_name(unet)
495
+
496
+ # In case the pipeline has already been offloaded to CPU - temporarily remove the hooks
497
+ # otherwise loading LoRA weights will lead to an error
498
+
499
+ inject_adapter_in_model(lora_config, unet, adapter_name=adapter_name)
500
+ incompatible_keys = set_peft_model_state_dict(
501
+ unet, state_dict, adapter_name, from_diffusers=from_diffusers
502
+ ) # NOTE: new add from_diffusers
503
+
504
+ if incompatible_keys is not None:
505
+ # check only for unexpected keys
506
+ unexpected_keys = getattr(incompatible_keys, "unexpected_keys", None)
507
+ if unexpected_keys:
508
+ logger.warning(
509
+ f"Loading adapter weights from state_dict led to unexpected keys not found in the model: "
510
+ f" {unexpected_keys}. "
511
+ )
512
+
513
+ unet.load_attn_procs(
514
+ state_dict,
515
+ network_alphas=network_alphas,
516
+ low_cpu_mem_usage=low_cpu_mem_usage,
517
+ _pipeline=_pipeline,
518
+ from_diffusers=from_diffusers,
519
+ )
520
+
521
+ @classmethod
522
+ def load_lora_into_text_encoder(
523
+ cls,
524
+ state_dict,
525
+ network_alphas,
526
+ text_encoder,
527
+ prefix=None,
528
+ lora_scale=1.0,
529
+ low_cpu_mem_usage=None,
530
+ adapter_name=None,
531
+ _pipeline=None,
532
+ from_diffusers=None,
533
+ ):
534
+ """
535
+ This will load the LoRA layers specified in `state_dict` into `text_encoder`
536
+
537
+ Parameters:
538
+ state_dict (`dict`):
539
+ A standard state dict containing the lora layer parameters. The key should be prefixed with an
540
+ additional `text_encoder` to distinguish between unet lora layers.
541
+ network_alphas (`Dict[str, float]`):
542
+ See `LoRALinearLayer` for more details.
543
+ text_encoder (`CLIPTextModel`):
544
+ The text encoder model to load the LoRA layers into.
545
+ prefix (`str`):
546
+ Expected prefix of the `text_encoder` in the `state_dict`.
547
+ lora_scale (`float`):
548
+ How much to scale the output of the lora linear layer before it is added with the output of the regular
549
+ linear layer.
550
+ low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
551
+ Speed up model loading by only loading the pretrained weights and not initializing the weights. This also
552
+ tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
553
+ Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this
554
+ argument to `True` will raise an error.
555
+ adapter_name (`str`, *optional*):
556
+ Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
557
+ `default_{i}` where i is the total number of adapters being loaded.
558
+ """
559
+ if from_diffusers is None:
560
+ from_diffusers = FROM_DIFFUSERS
561
+ low_cpu_mem_usage = low_cpu_mem_usage if low_cpu_mem_usage is not None else LOW_CPU_MEM_USAGE_DEFAULT
562
+
563
+ # If the serialization format is new (introduced in https://github.com/huggingface/diffusers/pull/2918),
564
+ # then the `state_dict` keys should have `self.unet_name` and/or `self.text_encoder_name` as
565
+ # their prefixes.
566
+ keys = list(state_dict.keys())
567
+ prefix = cls.text_encoder_name if prefix is None else prefix
568
+
569
+ # Safe prefix to check with.
570
+ if any(cls.text_encoder_name in key for key in keys):
571
+ # Load the layers corresponding to text encoder and make necessary adjustments.
572
+ text_encoder_keys = [k for k in keys if k.startswith(prefix) and k.split(".")[0] == prefix]
573
+ text_encoder_lora_state_dict = {
574
+ k.replace(f"{prefix}.", ""): v for k, v in state_dict.items() if k in text_encoder_keys
575
+ }
576
+
577
+ if len(text_encoder_lora_state_dict) > 0:
578
+ logger.info(f"Loading {prefix}.")
579
+ rank = {}
580
+ text_encoder_lora_state_dict = convert_state_dict_to_ppdiffusers(text_encoder_lora_state_dict)
581
+ index = 1 if from_diffusers else 0
582
+ if USE_PEFT_BACKEND:
583
+ # convert state dict
584
+ text_encoder_lora_state_dict = convert_state_dict_to_peft(text_encoder_lora_state_dict)
585
+
586
+ for name, _ in text_encoder_attn_modules(text_encoder):
587
+ rank_key = f"{name}.out_proj.lora_B.weight"
588
+ rank[rank_key] = text_encoder_lora_state_dict[rank_key].shape[index]
589
+
590
+ patch_mlp = any(".mlp." in key for key in text_encoder_lora_state_dict.keys())
591
+ if patch_mlp:
592
+ for name, _ in text_encoder_mlp_modules(text_encoder):
593
+ rank_key_fc1 = f"{name}.fc1.lora_B.weight"
594
+ rank_key_fc2 = f"{name}.fc2.lora_B.weight"
595
+
596
+ rank[rank_key_fc1] = text_encoder_lora_state_dict[rank_key_fc1].shape[index]
597
+ rank[rank_key_fc2] = text_encoder_lora_state_dict[rank_key_fc2].shape[index]
598
+ else:
599
+ for name, _ in text_encoder_attn_modules(text_encoder):
600
+ rank_key = f"{name}.out_proj.lora_linear_layer.up.weight"
601
+ rank.update({rank_key: text_encoder_lora_state_dict[rank_key].shape[index]})
602
+
603
+ patch_mlp = any(".mlp." in key for key in text_encoder_lora_state_dict.keys())
604
+ if patch_mlp:
605
+ for name, _ in text_encoder_mlp_modules(text_encoder):
606
+ rank_key_fc1 = f"{name}.fc1.lora_linear_layer.up.weight"
607
+ rank_key_fc2 = f"{name}.fc2.lora_linear_layer.up.weight"
608
+ rank[rank_key_fc1] = text_encoder_lora_state_dict[rank_key_fc1].shape[index]
609
+ rank[rank_key_fc2] = text_encoder_lora_state_dict[rank_key_fc2].shape[index]
610
+ if network_alphas is not None:
611
+ alpha_keys = [
612
+ k for k in network_alphas.keys() if k.startswith(prefix) and k.split(".")[0] == prefix
613
+ ]
614
+ network_alphas = {
615
+ k.replace(f"{prefix}.", ""): v for k, v in network_alphas.items() if k in alpha_keys
616
+ }
617
+
618
+ if USE_PEFT_BACKEND:
619
+ from ppdiffusers.peft import LoraConfig
620
+
621
+ lora_config_kwargs = get_peft_kwargs(
622
+ rank, network_alphas, text_encoder_lora_state_dict, is_unet=False
623
+ )
624
+
625
+ lora_config = LoraConfig(**lora_config_kwargs)
626
+
627
+ # adapter_name
628
+ if adapter_name is None:
629
+ adapter_name = get_adapter_name(text_encoder)
630
+
631
+ # inject LoRA layers and load the state dict
632
+ # in transformers we automatically check whether the adapter name is already in use or not
633
+ text_encoder.load_adapter(
634
+ adapter_name=adapter_name,
635
+ adapter_state_dict=text_encoder_lora_state_dict,
636
+ peft_config=lora_config,
637
+ from_diffusers=from_diffusers, # new add from diffusers
638
+ )
639
+
640
+ # scale LoRA layers with `lora_scale`
641
+ scale_lora_layers(text_encoder, weight=lora_scale)
642
+ else:
643
+ cls._modify_text_encoder(
644
+ text_encoder,
645
+ lora_scale,
646
+ network_alphas,
647
+ rank=rank,
648
+ patch_mlp=patch_mlp,
649
+ low_cpu_mem_usage=low_cpu_mem_usage,
650
+ )
651
+
652
+ if from_diffusers:
653
+ convert_pytorch_state_dict_to_paddle(text_encoder, text_encoder_lora_state_dict)
654
+ faster_set_state_dict(text_encoder, text_encoder_lora_state_dict)
655
+
656
+ text_encoder.to(dtype=text_encoder.dtype)
657
+
658
+ @property
659
+ def lora_scale(self) -> float:
660
+ # property function that returns the lora scale which can be set at run time by the pipeline.
661
+ # if _lora_scale has not been set, return 1
662
+ return self._lora_scale if hasattr(self, "_lora_scale") else 1.0
663
+
664
+ def _remove_text_encoder_monkey_patch(self):
665
+ if USE_PEFT_BACKEND:
666
+ remove_method = recurse_remove_peft_layers
667
+ else:
668
+ remove_method = self._remove_text_encoder_monkey_patch_classmethod
669
+
670
+ if hasattr(self, "text_encoder"):
671
+ remove_method(self.text_encoder)
672
+
673
+ # In case the text encoder has no LoRA attached
674
+ if USE_PEFT_BACKEND and getattr(self.text_encoder, "peft_config", None) is not None:
675
+ del self.text_encoder.peft_config
676
+ self.text_encoder._pp_peft_config_loaded = None
677
+ if hasattr(self, "text_encoder_2"):
678
+ remove_method(self.text_encoder_2)
679
+ if USE_PEFT_BACKEND:
680
+ del self.text_encoder_2.peft_config
681
+ self.text_encoder_2._pp_peft_config_loaded = None
682
+
683
+ @classmethod
684
+ def _remove_text_encoder_monkey_patch_classmethod(cls, text_encoder):
685
+ if version.parse(__version__) > version.parse("0.23"):
686
+ deprecate("_remove_text_encoder_monkey_patch_classmethod", "0.25", LORA_DEPRECATION_MESSAGE)
687
+
688
+ for _, attn_module in text_encoder_attn_modules(text_encoder):
689
+ if isinstance(attn_module.q_proj, PatchedLoraProjection):
690
+ attn_module.q_proj.lora_linear_layer = None
691
+ attn_module.k_proj.lora_linear_layer = None
692
+ attn_module.v_proj.lora_linear_layer = None
693
+ attn_module.out_proj.lora_linear_layer = None
694
+
695
+ for _, mlp_module in text_encoder_mlp_modules(text_encoder):
696
+ if isinstance(mlp_module.fc1, PatchedLoraProjection):
697
+ mlp_module.fc1.lora_linear_layer = None
698
+ mlp_module.fc2.lora_linear_layer = None
699
+
700
+ @classmethod
701
+ def _modify_text_encoder(
702
+ cls,
703
+ text_encoder,
704
+ lora_scale=1,
705
+ network_alphas=None,
706
+ rank: Union[Dict[str, int], int] = 4,
707
+ dtype=None,
708
+ patch_mlp=False,
709
+ low_cpu_mem_usage=False,
710
+ ):
711
+ r"""
712
+ Monkey-patches the forward passes of attention modules of the text encoder.
713
+ """
714
+ if version.parse(__version__) > version.parse("0.23"):
715
+ deprecate("_modify_text_encoder", "0.25", LORA_DEPRECATION_MESSAGE)
716
+
717
+ def create_patched_linear_lora(model, network_alpha, rank, dtype, lora_parameters):
718
+ linear_layer = model.regular_linear_layer if isinstance(model, PatchedLoraProjection) else model
719
+ ctx = paddle.LazyGuard if low_cpu_mem_usage else nullcontext
720
+ with ctx():
721
+ model = PatchedLoraProjection(linear_layer, lora_scale, network_alpha, rank, dtype=dtype)
722
+
723
+ lora_parameters.extend(model.lora_linear_layer.parameters())
724
+ return model
725
+
726
+ # First, remove any monkey-patch that might have been applied before
727
+ cls._remove_text_encoder_monkey_patch_classmethod(text_encoder)
728
+
729
+ lora_parameters = []
730
+ network_alphas = {} if network_alphas is None else network_alphas
731
+ is_network_alphas_populated = len(network_alphas) > 0
732
+
733
+ for name, attn_module in text_encoder_attn_modules(text_encoder):
734
+ query_alpha = network_alphas.pop(name + ".to_q_lora.down.weight.alpha", None)
735
+ key_alpha = network_alphas.pop(name + ".to_k_lora.down.weight.alpha", None)
736
+ value_alpha = network_alphas.pop(name + ".to_v_lora.down.weight.alpha", None)
737
+ out_alpha = network_alphas.pop(name + ".to_out_lora.down.weight.alpha", None)
738
+
739
+ if isinstance(rank, dict):
740
+ current_rank = rank.pop(f"{name}.out_proj.lora_linear_layer.up.weight")
741
+ else:
742
+ current_rank = rank
743
+
744
+ attn_module.q_proj = create_patched_linear_lora(
745
+ attn_module.q_proj, query_alpha, current_rank, dtype, lora_parameters
746
+ )
747
+ attn_module.k_proj = create_patched_linear_lora(
748
+ attn_module.k_proj, key_alpha, current_rank, dtype, lora_parameters
749
+ )
750
+ attn_module.v_proj = create_patched_linear_lora(
751
+ attn_module.v_proj, value_alpha, current_rank, dtype, lora_parameters
752
+ )
753
+ attn_module.out_proj = create_patched_linear_lora(
754
+ attn_module.out_proj, out_alpha, current_rank, dtype, lora_parameters
755
+ )
756
+
757
+ if patch_mlp:
758
+ for name, mlp_module in text_encoder_mlp_modules(text_encoder):
759
+ fc1_alpha = network_alphas.pop(name + ".fc1.lora_linear_layer.down.weight.alpha", None)
760
+ fc2_alpha = network_alphas.pop(name + ".fc2.lora_linear_layer.down.weight.alpha", None)
761
+
762
+ current_rank_fc1 = rank.pop(f"{name}.fc1.lora_linear_layer.up.weight")
763
+ current_rank_fc2 = rank.pop(f"{name}.fc2.lora_linear_layer.up.weight")
764
+
765
+ mlp_module.fc1 = create_patched_linear_lora(
766
+ mlp_module.fc1, fc1_alpha, current_rank_fc1, dtype, lora_parameters
767
+ )
768
+ mlp_module.fc2 = create_patched_linear_lora(
769
+ mlp_module.fc2, fc2_alpha, current_rank_fc2, dtype, lora_parameters
770
+ )
771
+
772
+ if is_network_alphas_populated and len(network_alphas) > 0:
773
+ raise ValueError(
774
+ f"The `network_alphas` has to be empty at this point but has the following keys \n\n {', '.join(network_alphas.keys())}"
775
+ )
776
+
777
+ return lora_parameters
778
+
779
+ @classmethod
780
+ def save_lora_weights(
781
+ cls,
782
+ save_directory: Union[str, os.PathLike],
783
+ unet_lora_layers: Dict[str, Union[nn.Layer, paddle.Tensor]] = None,
784
+ text_encoder_lora_layers: Dict[str, nn.Layer] = None,
785
+ is_main_process: bool = True,
786
+ weight_name: str = None,
787
+ save_function: Callable = None,
788
+ safe_serialization: bool = True,
789
+ to_diffusers=None,
790
+ ):
791
+ r"""
792
+ Save the LoRA parameters corresponding to the UNet and text encoder.
793
+
794
+ Arguments:
795
+ save_directory (`str` or `os.PathLike`):
796
+ Directory to save LoRA parameters to. Will be created if it doesn't exist.
797
+ unet_lora_layers (`Dict[str, nn.Layer]` or `Dict[str, paddle.Tensor]`):
798
+ State dict of the LoRA layers corresponding to the `unet`.
799
+ text_encoder_lora_layers (`Dict[str, nn.Layer]` or `Dict[str, paddle.Tensor]`):
800
+ State dict of the LoRA layers corresponding to the `text_encoder`. Must explicitly pass the text
801
+ encoder LoRA state dict because it comes from 🤗 Transformers.
802
+ is_main_process (`bool`, *optional*, defaults to `True`):
803
+ Whether the process calling this is the main process or not. Useful during distributed training and you
804
+ need to call this function on all processes. In this case, set `is_main_process=True` only on the main
805
+ process to avoid race conditions.
806
+ save_function (`Callable`):
807
+ The function to use to save the state dictionary. Useful during distributed training when you need to
808
+ replace `torch.save` with another method. Can be configured with the environment variable
809
+ `DIFFUSERS_SAVE_MODE`.
810
+ safe_serialization (`bool`, *optional*, defaults to `True`):
811
+ Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
812
+ """
813
+ if to_diffusers is None:
814
+ to_diffusers = TO_DIFFUSERS
815
+ # Create a flat dictionary.
816
+ state_dict = {}
817
+
818
+ # Populate the dictionary.
819
+ if unet_lora_layers is not None:
820
+ weights = unet_lora_layers.state_dict() if isinstance(unet_lora_layers, nn.Layer) else unet_lora_layers
821
+ if to_diffusers and isinstance(unet_lora_layers, nn.Layer):
822
+ convert_paddle_state_dict_to_pytorch(unet_lora_layers, weights)
823
+
824
+ unet_lora_state_dict = {f"{cls.unet_name}.{module_name}": param for module_name, param in weights.items()}
825
+
826
+ state_dict.update(unet_lora_state_dict)
827
+
828
+ if text_encoder_lora_layers is not None:
829
+ weights = (
830
+ text_encoder_lora_layers.state_dict()
831
+ if isinstance(text_encoder_lora_layers, nn.Layer)
832
+ else text_encoder_lora_layers
833
+ )
834
+ if to_diffusers and isinstance(text_encoder_lora_layers, nn.Layer):
835
+ convert_paddle_state_dict_to_pytorch(text_encoder_lora_layers, weights)
836
+
837
+ text_encoder_lora_state_dict = {
838
+ f"{cls.text_encoder_name}.{module_name}": param for module_name, param in weights.items()
839
+ }
840
+ state_dict.update(text_encoder_lora_state_dict)
841
+
842
+ # Save the model
843
+ cls.write_lora_layers(
844
+ state_dict=state_dict,
845
+ save_directory=save_directory,
846
+ is_main_process=is_main_process,
847
+ weight_name=weight_name,
848
+ save_function=save_function,
849
+ safe_serialization=safe_serialization,
850
+ to_diffusers=to_diffusers, # only change save weights name and save function
851
+ )
852
+
853
+ @staticmethod
854
+ def write_lora_layers(
855
+ state_dict: Dict[str, paddle.Tensor],
856
+ save_directory: str,
857
+ is_main_process: bool,
858
+ weight_name: str,
859
+ save_function: Callable,
860
+ safe_serialization: bool,
861
+ to_diffusers=None,
862
+ ):
863
+ if to_diffusers is None:
864
+ to_diffusers = TO_DIFFUSERS
865
+ if os.path.isfile(save_directory):
866
+ logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
867
+ return
868
+
869
+ os.makedirs(save_directory, exist_ok=True)
870
+
871
+ if weight_name is None:
872
+ if to_diffusers:
873
+ if safe_serialization:
874
+ weight_name = TORCH_LORA_WEIGHT_NAME_SAFE
875
+ else:
876
+ weight_name = TORCH_LORA_WEIGHT_NAME
877
+ else:
878
+ if safe_serialization:
879
+ weight_name = PADDLE_LORA_WEIGHT_NAME_SAFE
880
+ else:
881
+ weight_name = PADDLE_LORA_WEIGHT_NAME
882
+ else:
883
+ if "paddle" in weight_name.lower() or "pdparams" in weight_name.lower():
884
+ to_diffusers = False
885
+ elif "torch" in weight_name.lower() or "bin" in weight_name.lower():
886
+ to_diffusers = True
887
+
888
+ # choose save_function
889
+ if save_function is None:
890
+ if to_diffusers:
891
+ if not is_torch_available() and not safe_serialization:
892
+ safe_serialization = True
893
+ logger.warning(
894
+ "PyTorch is not installed, and `safe_serialization` is currently set to `False`. "
895
+ "To ensure proper model saving, we will automatically set `safe_serialization=True`. "
896
+ "If you want to keep `safe_serialization=False`, please make sure PyTorch is installed."
897
+ )
898
+ if safe_serialization:
899
+ if is_torch_available():
900
+ save_function = partial(torch_safe_save_file, metadata={"format": "pt"})
901
+ else:
902
+ save_function = partial(np_safe_save_file, metadata={"format": "pt"})
903
+ else:
904
+ save_function = torch.save
905
+ else:
906
+ if safe_serialization:
907
+ state_dict = {k: np.ascontiguousarray(v) for k, v in state_dict.items()}
908
+ save_function = partial(np_safe_save_file, metadata={"format": "pd"})
909
+ else:
910
+ save_function = paddle.save
911
+
912
+ # we have transposed the state_dict!
913
+
914
+ save_function(state_dict, os.path.join(save_directory, weight_name))
915
+ logger.info(f"Model weights saved in {os.path.join(save_directory, weight_name)}")
916
+
917
+ def unload_lora_weights(self):
918
+ """
919
+ Unloads the LoRA parameters.
920
+
921
+ Examples:
922
+
923
+ ```python
924
+ >>> # Assuming `pipeline` is already loaded with the LoRA parameters.
925
+ >>> pipeline.unload_lora_weights()
926
+ >>> ...
927
+ ```
928
+ """
929
+ if not USE_PEFT_BACKEND:
930
+ if version.parse(__version__) > version.parse("0.23"):
931
+ logger.warn(
932
+ "You are using `unload_lora_weights` to disable and unload lora weights. If you want to iteratively enable and disable adapter weights,"
933
+ "you can use `pipe.enable_lora()` or `pipe.disable_lora()`. After installing the latest version of PEFT."
934
+ )
935
+
936
+ for _, module in self.unet.named_sublayers(include_self=True):
937
+ if hasattr(module, "set_lora_layer"):
938
+ module.set_lora_layer(None)
939
+ else:
940
+ recurse_remove_peft_layers(self.unet)
941
+ if hasattr(self.unet, "peft_config"):
942
+ del self.unet.peft_config
943
+
944
+ # Safe to call the following regardless of LoRA.
945
+ self._remove_text_encoder_monkey_patch()
946
+
947
+ def fuse_lora(
948
+ self,
949
+ fuse_unet: bool = True,
950
+ fuse_text_encoder: bool = True,
951
+ lora_scale: float = 1.0,
952
+ safe_fusing: bool = False,
953
+ ):
954
+ r"""
955
+ Fuses the LoRA parameters into the original parameters of the corresponding blocks.
956
+
957
+ <Tip warning={true}>
958
+
959
+ This is an experimental API.
960
+
961
+ </Tip>
962
+
963
+ Args:
964
+ fuse_unet (`bool`, defaults to `True`): Whether to fuse the UNet LoRA parameters.
965
+ fuse_text_encoder (`bool`, defaults to `True`):
966
+ Whether to fuse the text encoder LoRA parameters. If the text encoder wasn't monkey-patched with the
967
+ LoRA parameters then it won't have any effect.
968
+ lora_scale (`float`, defaults to 1.0):
969
+ Controls how much to influence the outputs with the LoRA parameters.
970
+ safe_fusing (`bool`, defaults to `False`):
971
+ Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them.
972
+ """
973
+ if fuse_unet or fuse_text_encoder:
974
+ self.num_fused_loras += 1
975
+ if self.num_fused_loras > 1:
976
+ logger.warn(
977
+ "The current API is supported for operating with a single LoRA file. You are trying to load and fuse more than one LoRA which is not well-supported.",
978
+ )
979
+
980
+ if fuse_unet:
981
+ self.unet.fuse_lora(lora_scale, safe_fusing=safe_fusing)
982
+
983
+ if USE_PEFT_BACKEND:
984
+ from ppdiffusers.peft.tuners.tuners_utils import BaseTunerLayer
985
+
986
+ def fuse_text_encoder_lora(text_encoder, lora_scale=1.0, safe_fusing=False):
987
+ # TODO(Patrick, Younes): enable "safe" fusing
988
+ for module in text_encoder.sublayers(include_self=True):
989
+ if isinstance(module, BaseTunerLayer):
990
+ if lora_scale != 1.0:
991
+ module.scale_layer(lora_scale)
992
+
993
+ module.merge()
994
+
995
+ else:
996
+ if version.parse(__version__) > version.parse("0.23"):
997
+ deprecate("fuse_text_encoder_lora", "0.25", LORA_DEPRECATION_MESSAGE)
998
+
999
+ def fuse_text_encoder_lora(text_encoder, lora_scale=1.0, safe_fusing=False):
1000
+ for _, attn_module in text_encoder_attn_modules(text_encoder):
1001
+ if isinstance(attn_module.q_proj, PatchedLoraProjection):
1002
+ attn_module.q_proj._fuse_lora(lora_scale, safe_fusing)
1003
+ attn_module.k_proj._fuse_lora(lora_scale, safe_fusing)
1004
+ attn_module.v_proj._fuse_lora(lora_scale, safe_fusing)
1005
+ attn_module.out_proj._fuse_lora(lora_scale, safe_fusing)
1006
+
1007
+ for _, mlp_module in text_encoder_mlp_modules(text_encoder):
1008
+ if isinstance(mlp_module.fc1, PatchedLoraProjection):
1009
+ mlp_module.fc1._fuse_lora(lora_scale, safe_fusing)
1010
+ mlp_module.fc2._fuse_lora(lora_scale, safe_fusing)
1011
+
1012
+ if fuse_text_encoder:
1013
+ if hasattr(self, "text_encoder"):
1014
+ fuse_text_encoder_lora(self.text_encoder, lora_scale, safe_fusing)
1015
+ if hasattr(self, "text_encoder_2"):
1016
+ fuse_text_encoder_lora(self.text_encoder_2, lora_scale, safe_fusing)
1017
+
1018
+ def unfuse_lora(self, unfuse_unet: bool = True, unfuse_text_encoder: bool = True):
1019
+ r"""
1020
+ Reverses the effect of
1021
+ [`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraLoaderMixin.fuse_lora).
1022
+
1023
+ <Tip warning={true}>
1024
+
1025
+ This is an experimental API.
1026
+
1027
+ </Tip>
1028
+
1029
+ Args:
1030
+ unfuse_unet (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters.
1031
+ unfuse_text_encoder (`bool`, defaults to `True`):
1032
+ Whether to unfuse the text encoder LoRA parameters. If the text encoder wasn't monkey-patched with the
1033
+ LoRA parameters then it won't have any effect.
1034
+ """
1035
+ if unfuse_unet:
1036
+ if not USE_PEFT_BACKEND:
1037
+ self.unet.unfuse_lora()
1038
+ else:
1039
+ from ppdiffusers.peft.tuners.tuners_utils import BaseTunerLayer
1040
+
1041
+ for module in self.unet.sublayers(include_self=True):
1042
+ if isinstance(module, BaseTunerLayer):
1043
+ module.unmerge()
1044
+
1045
+ if USE_PEFT_BACKEND:
1046
+ from ppdiffusers.peft.tuners.tuners_utils import BaseTunerLayer
1047
+
1048
+ def unfuse_text_encoder_lora(text_encoder):
1049
+ for module in text_encoder.sublayers(include_self=True):
1050
+ if isinstance(module, BaseTunerLayer):
1051
+ module.unmerge()
1052
+
1053
+ else:
1054
+ if version.parse(__version__) > version.parse("0.23"):
1055
+ deprecate("unfuse_text_encoder_lora", "0.25", LORA_DEPRECATION_MESSAGE)
1056
+
1057
+ def unfuse_text_encoder_lora(text_encoder):
1058
+ for _, attn_module in text_encoder_attn_modules(text_encoder):
1059
+ if isinstance(attn_module.q_proj, PatchedLoraProjection):
1060
+ attn_module.q_proj._unfuse_lora()
1061
+ attn_module.k_proj._unfuse_lora()
1062
+ attn_module.v_proj._unfuse_lora()
1063
+ attn_module.out_proj._unfuse_lora()
1064
+
1065
+ for _, mlp_module in text_encoder_mlp_modules(text_encoder):
1066
+ if isinstance(mlp_module.fc1, PatchedLoraProjection):
1067
+ mlp_module.fc1._unfuse_lora()
1068
+ mlp_module.fc2._unfuse_lora()
1069
+
1070
+ if unfuse_text_encoder:
1071
+ if hasattr(self, "text_encoder"):
1072
+ unfuse_text_encoder_lora(self.text_encoder)
1073
+ if hasattr(self, "text_encoder_2"):
1074
+ unfuse_text_encoder_lora(self.text_encoder_2)
1075
+
1076
+ self.num_fused_loras -= 1
1077
+
1078
+ def set_adapters_for_text_encoder(
1079
+ self,
1080
+ adapter_names: Union[List[str], str],
1081
+ text_encoder: Optional["PretrainedModel"] = None, # noqa: F821
1082
+ text_encoder_weights: List[float] = None,
1083
+ ):
1084
+ """
1085
+ Sets the adapter layers for the text encoder.
1086
+
1087
+ Args:
1088
+ adapter_names (`List[str]` or `str`):
1089
+ The names of the adapters to use.
1090
+ text_encoder (`nn.Layer`, *optional*):
1091
+ The text encoder module to set the adapter layers for. If `None`, it will try to get the `text_encoder`
1092
+ attribute.
1093
+ text_encoder_weights (`List[float]`, *optional*):
1094
+ The weights to use for the text encoder. If `None`, the weights are set to `1.0` for all the adapters.
1095
+ """
1096
+ if not USE_PEFT_BACKEND:
1097
+ raise ValueError("PEFT backend is required for this method.")
1098
+
1099
+ def process_weights(adapter_names, weights):
1100
+ if weights is None:
1101
+ weights = [1.0] * len(adapter_names)
1102
+ elif isinstance(weights, float):
1103
+ weights = [weights]
1104
+
1105
+ if len(adapter_names) != len(weights):
1106
+ raise ValueError(
1107
+ f"Length of adapter names {len(adapter_names)} is not equal to the length of the weights {len(weights)}"
1108
+ )
1109
+ return weights
1110
+
1111
+ adapter_names = [adapter_names] if isinstance(adapter_names, str) else adapter_names
1112
+ text_encoder_weights = process_weights(adapter_names, text_encoder_weights)
1113
+ text_encoder = text_encoder or getattr(self, "text_encoder", None)
1114
+ if text_encoder is None:
1115
+ raise ValueError(
1116
+ "The pipeline does not have a default `pipe.text_encoder` class. Please make sure to pass a `text_encoder` instead."
1117
+ )
1118
+ set_weights_and_activate_adapters(text_encoder, adapter_names, text_encoder_weights)
1119
+
1120
+ def disable_lora_for_text_encoder(self, text_encoder: Optional["PretrainedModel"] = None):
1121
+ """
1122
+ Disables the LoRA layers for the text encoder.
1123
+
1124
+ Args:
1125
+ text_encoder (`nn.Layer`, *optional*):
1126
+ The text encoder module to disable the LoRA layers for. If `None`, it will try to get the
1127
+ `text_encoder` attribute.
1128
+ """
1129
+ if not USE_PEFT_BACKEND:
1130
+ raise ValueError("PEFT backend is required for this method.")
1131
+
1132
+ text_encoder = text_encoder or getattr(self, "text_encoder", None)
1133
+ if text_encoder is None:
1134
+ raise ValueError("Text Encoder not found.")
1135
+ set_adapter_layers(text_encoder, enabled=False)
1136
+
1137
+ def enable_lora_for_text_encoder(self, text_encoder: Optional["PretrainedModel"] = None):
1138
+ """
1139
+ Enables the LoRA layers for the text encoder.
1140
+
1141
+ Args:
1142
+ text_encoder (`nn.Layer`, *optional*):
1143
+ The text encoder module to enable the LoRA layers for. If `None`, it will try to get the `text_encoder`
1144
+ attribute.
1145
+ """
1146
+ if not USE_PEFT_BACKEND:
1147
+ raise ValueError("PEFT backend is required for this method.")
1148
+ text_encoder = text_encoder or getattr(self, "text_encoder", None)
1149
+ if text_encoder is None:
1150
+ raise ValueError("Text Encoder not found.")
1151
+ set_adapter_layers(text_encoder, enabled=True)
1152
+
1153
+ def set_adapters(
1154
+ self,
1155
+ adapter_names: Union[List[str], str],
1156
+ adapter_weights: Optional[List[float]] = None,
1157
+ ):
1158
+ # Handle the UNET
1159
+ self.unet.set_adapters(adapter_names, adapter_weights)
1160
+
1161
+ # Handle the Text Encoder
1162
+ if hasattr(self, "text_encoder"):
1163
+ self.set_adapters_for_text_encoder(adapter_names, self.text_encoder, adapter_weights)
1164
+ if hasattr(self, "text_encoder_2"):
1165
+ self.set_adapters_for_text_encoder(adapter_names, self.text_encoder_2, adapter_weights)
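+ # Illustrative sketch for `set_adapters` (assumes two adapters, "toy" and "style",
+ # were loaded earlier via `load_lora_weights(..., adapter_name=...)`; both names
+ # are placeholders):
+ #
+ #   pipe.set_adapters(["toy", "style"], adapter_weights=[0.8, 0.4])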
1166
+
1167
+ def disable_lora(self):
1168
+ if not USE_PEFT_BACKEND:
1169
+ raise ValueError("PEFT backend is required for this method.")
1170
+
1171
+ # Disable unet adapters
1172
+ self.unet.disable_lora()
1173
+
1174
+ # Disable text encoder adapters
1175
+ if hasattr(self, "text_encoder"):
1176
+ self.disable_lora_for_text_encoder(self.text_encoder)
1177
+ if hasattr(self, "text_encoder_2"):
1178
+ self.disable_lora_for_text_encoder(self.text_encoder_2)
1179
+
1180
+ def enable_lora(self):
1181
+ if not USE_PEFT_BACKEND:
1182
+ raise ValueError("PEFT backend is required for this method.")
1183
+
1184
+ # Enable unet adapters
1185
+ self.unet.enable_lora()
1186
+
1187
+ # Enable text encoder adapters
1188
+ if hasattr(self, "text_encoder"):
1189
+ self.enable_lora_for_text_encoder(self.text_encoder)
1190
+ if hasattr(self, "text_encoder_2"):
1191
+ self.enable_lora_for_text_encoder(self.text_encoder_2)
1192
+
1193
+ def delete_adapters(self, adapter_names: Union[List[str], str]):
1194
+ """
1195
+ Deletes the LoRA layers of `adapter_name` for the unet and text-encoder(s).
+
+ Args:
+ adapter_names (`Union[List[str], str]`):
+ The names of the adapter to delete. Can be a single string or a list of strings.
1199
+ """
1200
+ if not USE_PEFT_BACKEND:
1201
+ raise ValueError("PEFT backend is required for this method.")
1202
+
1203
+ if isinstance(adapter_names, str):
1204
+ adapter_names = [adapter_names]
1205
+
1206
+ # Delete unet adapters
1207
+ self.unet.delete_adapters(adapter_names)
1208
+
1209
+ for adapter_name in adapter_names:
1210
+ # Delete text encoder adapters
1211
+ if hasattr(self, "text_encoder"):
1212
+ delete_adapter_layers(self.text_encoder, adapter_name)
1213
+ if hasattr(self, "text_encoder_2"):
1214
+ delete_adapter_layers(self.text_encoder_2, adapter_name)
1215
+
1216
+ def get_active_adapters(self) -> List[str]:
1217
+ """
1218
+ Gets the list of the current active adapters.
1219
+
1220
+ Example:
1221
+
1222
+ ```python
1223
+ from ppdiffusers import DiffusionPipeline
1224
+
1225
+ pipeline = DiffusionPipeline.from_pretrained(
1226
+ "stabilityai/stable-diffusion-xl-base-1.0",
1227
+ )
1228
+ pipeline.load_lora_weights("CiroN2022/toy-face", weight_name="toy_face_sdxl.safetensors", adapter_name="toy")
1229
+ pipeline.get_active_adapters()
1230
+ ```
1231
+ """
1232
+ if not USE_PEFT_BACKEND:
1233
+ raise ValueError(
1234
+ "PEFT backend is required for this method. Please install the latest version of PEFT `pip install -U peft`"
1235
+ )
1236
+
1237
+ from ppdiffusers.peft.tuners.tuners_utils import BaseTunerLayer
1238
+
1239
+ active_adapters = []
1240
+
1241
+ for module in self.unet.sublayers(include_self=True):
1242
+ if isinstance(module, BaseTunerLayer):
1243
+ active_adapters = module.active_adapters
1244
+ break
1245
+
1246
+ return active_adapters
1247
+
1248
+ def get_list_adapters(self) -> Dict[str, List[str]]:
1249
+ """
1250
+ Gets the current list of all available adapters in the pipeline.
1251
+ """
1252
+ if not USE_PEFT_BACKEND:
1253
+ raise ValueError(
1254
+ "PEFT backend is required for this method. Please install the latest version of PEFT `pip install -U peft`"
1255
+ )
1256
+
1257
+ set_adapters = {}
1258
+
1259
+ if hasattr(self, "text_encoder") and hasattr(self.text_encoder, "peft_config"):
1260
+ set_adapters["text_encoder"] = list(self.text_encoder.peft_config.keys())
1261
+
1262
+ if hasattr(self, "text_encoder_2") and hasattr(self.text_encoder_2, "peft_config"):
1263
+ set_adapters["text_encoder_2"] = list(self.text_encoder_2.peft_config.keys())
1264
+
1265
+ if hasattr(self, "unet") and hasattr(self.unet, "peft_config"):
1266
+ set_adapters["unet"] = list(self.unet.peft_config.keys())
1267
+
1268
+ return set_adapters
1269
+
1270
+ def set_lora_device(self, adapter_names: List[str], device: Union[str, int]) -> None:
1271
+ """
1272
+ Moves the LoRAs listed in `adapter_names` to a target device. Useful for offloading the LoRA to the CPU in case
1273
+ you want to load multiple adapters and free some GPU memory.
1274
+
1275
+ Args:
1276
+ adapter_names (`List[str]`):
1277
+ List of adapters to send to the target device.
1278
+ device (`Union[str, int]`):
1279
+ Device to send the adapters to. Can be either a device string or an integer.
1280
+ """
1281
+ if not USE_PEFT_BACKEND:
1282
+ raise ValueError("PEFT backend is required for this method.")
1283
+
1284
+ # from ppdiffusers.peft.tuners.tuners_utils import BaseTunerLayer
1285
+
1286
+ # Handle the UNET
1287
+ # for unet_module in self.unet.sublayers(include_self=True):
1288
+ # if isinstance(unet_module, BaseTunerLayer):
1289
+ # for adapter_name in adapter_names:
1290
+ # unet_module.lora_A[adapter_name].to(device)
1291
+ # unet_module.lora_B[adapter_name].to(device)
1292
+
1293
+ # Handle the text encoder
1294
+ # modules_to_process = []
1295
+ # if hasattr(self, "text_encoder"):
1296
+ # modules_to_process.append(self.text_encoder)
1297
+
1298
+ # if hasattr(self, "text_encoder_2"):
1299
+ # modules_to_process.append(self.text_encoder_2)
1300
+
1301
+ # for text_encoder in modules_to_process:
1302
+ # # loop over submodules
1303
+ # for text_encoder_module in text_encoder.sublayers(include_self=True):
1304
+ # if isinstance(text_encoder_module, BaseTunerLayer):
1305
+ # for adapter_name in adapter_names:
1306
+ # text_encoder_module.lora_A[adapter_name].to(device)
1307
+ # text_encoder_module.lora_B[adapter_name].to(device)
1308
+
1309
+
1310
+ class StableDiffusionXLLoraLoaderMixin(LoraLoaderMixin):
1311
+ """This class overrides `LoraLoaderMixin` with LoRA loading/saving code that's specific to SDXL"""
1312
+
1313
+ # Override to properly handle the loading and unloading of the additional text encoder.
1314
+ def load_lora_weights(
1315
+ self,
1316
+ pretrained_model_name_or_path_or_dict: Union[str, Dict[str, paddle.Tensor]],
1317
+ adapter_name: Optional[str] = None,
1318
+ **kwargs,
1319
+ ):
1320
+ """
1321
+ Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.unet` and
1322
+ `self.text_encoder`.
1323
+
1324
+ All kwargs are forwarded to `self.lora_state_dict`.
1325
+
1326
+ See [`~loaders.LoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded.
1327
+
1328
+ See [`~loaders.LoraLoaderMixin.load_lora_into_unet`] for more details on how the state dict is loaded into
1329
+ `self.unet`.
1330
+
1331
+ See [`~loaders.LoraLoaderMixin.load_lora_into_text_encoder`] for more details on how the state dict is loaded
1332
+ into `self.text_encoder`.
1333
+
1334
+ Parameters:
1335
+ pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
1336
+ See [`~loaders.LoraLoaderMixin.lora_state_dict`].
1337
+ adapter_name (`str`, *optional*):
1338
+ Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
1339
+ `default_{i}` where i is the total number of adapters being loaded.
1340
+ kwargs (`dict`, *optional*):
1341
+ See [`~loaders.LoraLoaderMixin.lora_state_dict`].
1342
+ """
1343
+ # We could have accessed the unet config from `lora_state_dict()` too. We pass
1344
+ # it here explicitly to be able to tell that it's coming from an SDXL
1345
+ # pipeline.
1346
+
1347
+ # First, ensure that the checkpoint is a compatible one and can be successfully loaded.
1348
+ state_dict, network_alphas, from_diffusers = self.lora_state_dict(
1349
+ pretrained_model_name_or_path_or_dict,
1350
+ unet_config=self.unet.config,
1351
+ **kwargs,
1352
+ )
1353
+ is_correct_format = all("lora" in key for key in state_dict.keys())
1354
+ if not is_correct_format:
1355
+ raise ValueError("Invalid LoRA checkpoint.")
1356
+
1357
+ self.load_lora_into_unet(
1358
+ state_dict,
1359
+ network_alphas=network_alphas,
1360
+ unet=self.unet,
1361
+ adapter_name=adapter_name,
1362
+ _pipeline=self,
1363
+ from_diffusers=from_diffusers,
1364
+ )
1365
+ text_encoder_state_dict = {k: v for k, v in state_dict.items() if "text_encoder." in k}
1366
+ if len(text_encoder_state_dict) > 0:
1367
+ self.load_lora_into_text_encoder(
1368
+ text_encoder_state_dict,
1369
+ network_alphas=network_alphas,
1370
+ text_encoder=self.text_encoder,
1371
+ prefix="text_encoder",
1372
+ lora_scale=self.lora_scale,
1373
+ adapter_name=adapter_name,
1374
+ _pipeline=self,
1375
+ from_diffusers=from_diffusers,
1376
+ )
1377
+
1378
+ text_encoder_2_state_dict = {k: v for k, v in state_dict.items() if "text_encoder_2." in k}
1379
+ if len(text_encoder_2_state_dict) > 0:
1380
+ self.load_lora_into_text_encoder(
1381
+ text_encoder_2_state_dict,
1382
+ network_alphas=network_alphas,
1383
+ text_encoder=self.text_encoder_2,
1384
+ prefix="text_encoder_2",
1385
+ lora_scale=self.lora_scale,
1386
+ adapter_name=adapter_name,
1387
+ _pipeline=self,
1388
+ from_diffusers=from_diffusers,
1389
+ )
1390
+
1391
+ @classmethod
1392
+ def save_lora_weights(
1393
+ cls,
1394
+ save_directory: Union[str, os.PathLike],
1395
+ unet_lora_layers: Dict[str, Union[nn.Layer, paddle.Tensor]] = None,
1396
+ text_encoder_lora_layers: Dict[str, Union[nn.Layer, paddle.Tensor]] = None,
1397
+ text_encoder_2_lora_layers: Dict[str, Union[nn.Layer, paddle.Tensor]] = None,
1398
+ is_main_process: bool = True,
1399
+ weight_name: str = None,
1400
+ save_function: Callable = None,
1401
+ safe_serialization: bool = True,
1402
+ to_diffusers: bool = None,
1403
+ ):
1404
+ r"""
1405
+ Save the LoRA parameters corresponding to the UNet and text encoder.
1406
+
1407
+ Arguments:
1408
+ save_directory (`str` or `os.PathLike`):
1409
+ Directory to save LoRA parameters to. Will be created if it doesn't exist.
1410
+ unet_lora_layers (`Dict[str, nn.Layer]` or `Dict[str, paddle.Tensor]`):
1411
+ State dict of the LoRA layers corresponding to the `unet`.
1412
+ text_encoder_lora_layers (`Dict[str, nn.Layer]` or `Dict[str, paddle.Tensor]`):
1413
+ State dict of the LoRA layers corresponding to the `text_encoder`. Must explicitly pass the text
1414
+ encoder LoRA state dict because it comes from 🤗 Transformers.
1415
+ is_main_process (`bool`, *optional*, defaults to `True`):
1416
+ Whether the process calling this is the main process or not. Useful during distributed training and you
1417
+ need to call this function on all processes. In this case, set `is_main_process=True` only on the main
1418
+ process to avoid race conditions.
1419
+ save_function (`Callable`):
1420
+ The function to use to save the state dictionary. Useful during distributed training when you need to
1421
+ replace `torch.save` with another method. Can be configured with the environment variable
1422
+ `DIFFUSERS_SAVE_MODE`.
1423
+ safe_serialization (`bool`, *optional*, defaults to `True`):
1424
+ Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
1425
+ """
1426
+ if to_diffusers is None:
1427
+ to_diffusers = TO_DIFFUSERS
1428
+
1429
+ state_dict = {}
1430
+
1431
+ def pack_weights(layers, prefix):
1432
+ layers_weights = layers.state_dict() if isinstance(layers, nn.Layer) else layers
1433
+ if to_diffusers and isinstance(layers, nn.Layer):
1434
+ convert_paddle_state_dict_to_pytorch(layers, layers_weights)
1435
+ layers_state_dict = {f"{prefix}.{module_name}": param for module_name, param in layers_weights.items()}
1436
+ return layers_state_dict
1437
+
1438
+ if not (unet_lora_layers or text_encoder_lora_layers or text_encoder_2_lora_layers):
1439
+ raise ValueError(
1440
+ "You must pass at least one of `unet_lora_layers`, `text_encoder_lora_layers` or `text_encoder_2_lora_layers`."
1441
+ )
1442
+
1443
+ if unet_lora_layers:
1444
+ state_dict.update(pack_weights(unet_lora_layers, "unet"))
1445
+
1446
+ if text_encoder_lora_layers and text_encoder_2_lora_layers:
1447
+ state_dict.update(pack_weights(text_encoder_lora_layers, "text_encoder"))
1448
+ state_dict.update(pack_weights(text_encoder_2_lora_layers, "text_encoder_2"))
1449
+
1450
+ cls.write_lora_layers(
1451
+ state_dict=state_dict,
1452
+ save_directory=save_directory,
1453
+ is_main_process=is_main_process,
1454
+ weight_name=weight_name,
1455
+ save_function=save_function,
1456
+ safe_serialization=safe_serialization,
1457
+ to_diffusers=to_diffusers,
1458
+ )
1459
+
1460
+ def _remove_text_encoder_monkey_patch(self):
1461
+ if USE_PEFT_BACKEND:
1462
+ recurse_remove_peft_layers(self.text_encoder)
1463
+ # TODO: @younesbelkada handle this in transformers side
1464
+ if getattr(self.text_encoder, "peft_config", None) is not None:
1465
+ del self.text_encoder.peft_config
1466
+ self.text_encoder._pp_peft_config_loaded = None
1467
+
1468
+ recurse_remove_peft_layers(self.text_encoder_2)
1469
+ if getattr(self.text_encoder_2, "peft_config", None) is not None:
1470
+ del self.text_encoder_2.peft_config
1471
+ self.text_encoder_2._pp_peft_config_loaded = None
1472
+ else:
1473
+ self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder)
1474
+ self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder_2)
1475
+
1476
+
1477
+ class SD3LoraLoaderMixin:
1478
+ r"""
1479
+ Load LoRA layers into [`SD3Transformer2DModel`].
1480
+ """
1481
+
1482
+ transformer_name = TRANSFORMER_NAME
1483
+ num_fused_loras = 0
1484
+
1485
+ def load_lora_weights(
1486
+ self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, paddle.Tensor]], adapter_name=None, **kwargs
1487
+ ):
1488
+ """
1489
+ Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.unet` and
1490
+ `self.text_encoder`.
1491
+ All kwargs are forwarded to `self.lora_state_dict`.
1492
+ See [`~loaders.LoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded.
1493
+ See [`~loaders.LoraLoaderMixin.load_lora_into_transformer`] for more details on how the state dict is loaded
1494
+ into `self.transformer`.
1495
+ Parameters:
1496
+ pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
1497
+ See [`~loaders.LoraLoaderMixin.lora_state_dict`].
1498
+ kwargs (`dict`, *optional*):
1499
+ See [`~loaders.LoraLoaderMixin.lora_state_dict`].
1500
+ adapter_name (`str`, *optional*):
1501
+ Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
1502
+ `default_{i}` where i is the total number of adapters being loaded.
1503
+ """
1504
+ if not USE_PEFT_BACKEND:
1505
+ raise ValueError("PEFT backend is required for this method.")
1506
+
1507
+ # if a dict is passed, copy it instead of modifying it inplace
1508
+ if isinstance(pretrained_model_name_or_path_or_dict, dict):
1509
+ pretrained_model_name_or_path_or_dict = pretrained_model_name_or_path_or_dict.copy()
1510
+
1511
+ # First, ensure that the checkpoint is a compatible one and can be successfully loaded.
1512
+ state_dict, from_diffusers = self.lora_state_dict(pretrained_model_name_or_path_or_dict, **kwargs)
1513
+
1514
+ is_correct_format = all("lora" in key or "dora_scale" in key for key in state_dict.keys())
1515
+ if not is_correct_format:
1516
+ raise ValueError("Invalid LoRA checkpoint.")
1517
+
1518
+ self.load_lora_into_transformer(
1519
+ state_dict,
1520
+ transformer=getattr(self, self.transformer_name) if not hasattr(self, "transformer") else self.transformer,
1521
+ adapter_name=adapter_name,
1522
+ _pipeline=self,
1523
+ from_diffusers=from_diffusers,
1524
+ )
1525
+
1526
+ @classmethod
1527
+ def lora_state_dict(
1528
+ cls,
1529
+ pretrained_model_name_or_path_or_dict: Union[str, Dict[str, paddle.Tensor]],
1530
+ **kwargs,
1531
+ ):
1532
+ r"""
1533
+ Return state dict for lora weights and the network alphas.
1534
+ <Tip warning={true}>
1535
+ We support loading A1111 formatted LoRA checkpoints in a limited capacity.
1536
+ This function is experimental and might change in the future.
1537
+ </Tip>
1538
+ Parameters:
1539
+ pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
1540
+ Can be either:
1541
+ - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
1542
+ the Hub.
1543
+ - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
1544
+ with [`ModelMixin.save_pretrained`].
1545
+ - A [torch state
1546
+ dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
1547
+ cache_dir (`Union[str, os.PathLike]`, *optional*):
1548
+ Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
1549
+ is not used.
1550
+ force_download (`bool`, *optional*, defaults to `False`):
1551
+ Whether or not to force the (re-)download of the model weights and configuration files, overriding the
1552
+ cached versions if they exist.
1553
+ resume_download (`bool`, *optional*, defaults to `False`):
1554
+ Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
1555
+ incompletely downloaded files are deleted.
1556
+ proxies (`Dict[str, str]`, *optional*):
1557
+ A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
1558
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
1559
+ local_files_only (`bool`, *optional*, defaults to `False`):
1560
+ Whether to only load local model weights and configuration files or not. If set to `True`, the model
1561
+ won't be downloaded from the Hub.
1562
+ revision (`str`, *optional*, defaults to `"main"`):
1563
+ The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
1564
+ allowed by Git.
1565
+ subfolder (`str`, *optional*, defaults to `""`):
1566
+ The subfolder location of a model file within a larger model repository on the Hub or locally.
1567
+ """
1568
+ # Load the main state dict first which has the LoRA layers for either of
1569
+ # UNet and text encoder or both.
1570
+ from_hf_hub = kwargs.pop("from_hf_hub", FROM_HF_HUB)
1571
+ from_aistudio = kwargs.pop("from_aistudio", FROM_AISTUDIO)
1572
+ cache_dir = kwargs.pop("cache_dir", None)
1573
+ if cache_dir is None:
1574
+ if from_aistudio:
1575
+ cache_dir = None # TODO, check aistudio cache
1576
+ elif from_hf_hub:
1577
+ cache_dir = DIFFUSERS_CACHE
1578
+ else:
1579
+ cache_dir = PPDIFFUSERS_CACHE
1580
+ from_diffusers = kwargs.pop("from_diffusers", FROM_DIFFUSERS)
1581
+ force_download = kwargs.pop("force_download", False)
1582
+ resume_download = kwargs.pop("resume_download", False)
1583
+ proxies = kwargs.pop("proxies", None)
1584
+ local_files_only = kwargs.pop("local_files_only", None)
1585
+ use_auth_token = kwargs.pop("use_auth_token", None)
1586
+ revision = kwargs.pop("revision", None)
1587
+ subfolder = kwargs.pop("subfolder", None)
1588
+ weight_name = kwargs.pop("weight_name", None)
1589
+ use_safetensors = kwargs.pop("use_safetensors", None)
1590
+
1591
+ allow_pickle = False
1592
+ if use_safetensors is None:
1593
+ use_safetensors = True
1594
+ allow_pickle = True
1595
+
1596
+ user_agent = {
1597
+ "file_type": "attn_procs_weights",
1598
+ "framework": "pytorch" if from_diffusers else "paddle",
1599
+ }
1600
+
1601
+ model_file = None
1602
+ if not isinstance(pretrained_model_name_or_path_or_dict, dict):
1603
+ # Let's first try to load .safetensors weights
1604
+ if (use_safetensors and weight_name is None) or (
1605
+ weight_name is not None and weight_name.endswith(".safetensors")
1606
+ ):
1607
+ try:
1608
+ model_file = _get_model_file(
1609
+ pretrained_model_name_or_path_or_dict,
1610
+ weights_name=(weight_name or TORCH_LORA_WEIGHT_NAME_SAFE)
1611
+ if from_diffusers
1612
+ else ((weight_name or PADDLE_LORA_WEIGHT_NAME_SAFE)),
1613
+ cache_dir=cache_dir,
1614
+ force_download=force_download,
1615
+ resume_download=resume_download,
1616
+ proxies=proxies,
1617
+ local_files_only=local_files_only,
1618
+ use_auth_token=use_auth_token,
1619
+ revision=revision,
1620
+ subfolder=subfolder,
1621
+ user_agent=user_agent,
1622
+ from_aistudio=from_aistudio,
1623
+ from_hf_hub=from_hf_hub,
1624
+ )
1625
+ except Exception:
1626
+ model_file = None
1627
+
1628
+ if model_file is None:
1629
+ model_file = _get_model_file(
1630
+ pretrained_model_name_or_path_or_dict,
1631
+ weights_name=(weight_name or TORCH_LORA_WEIGHT_NAME)
1632
+ if from_diffusers
1633
+ else ((weight_name or PADDLE_LORA_WEIGHT_NAME)),
1634
+ cache_dir=cache_dir,
1635
+ force_download=force_download,
1636
+ resume_download=resume_download,
1637
+ proxies=proxies,
1638
+ local_files_only=local_files_only,
1639
+ use_auth_token=use_auth_token,
1640
+ revision=revision,
1641
+ subfolder=subfolder,
1642
+ user_agent=user_agent,
1643
+ from_aistudio=from_aistudio,
1644
+ from_hf_hub=from_hf_hub,
1645
+ )
1646
+
1647
+ assert model_file is not None, "Could not find the model file!"
1648
+ # TODO, check this
1649
+ from ppdiffusers.utils import smart_load
1650
+
1651
+ state_dict = smart_load(model_file, return_is_torch_weight=True)
1652
+ is_torch_weight = state_dict.pop("is_torch_weight", False)
1653
+ if not from_diffusers and is_torch_weight:
1654
+ logger.warning(
1655
+ "Detect the weight is in diffusers format, but currently, `from_diffusers` is set to `False`. To proceed, we will change the value of `from_diffusers` to `True`!"
1656
+ )
1657
+ from_diffusers = True
1658
+ else:
1659
+ state_dict = pretrained_model_name_or_path_or_dict
1660
+
1661
+ return state_dict, from_diffusers
1662
+
1663
+ @classmethod
1664
+ def load_lora_into_transformer(cls, state_dict, transformer, adapter_name=None, _pipeline=None, from_diffusers=None):
1665
+ """
1666
+ This will load the LoRA layers specified in `state_dict` into `transformer`.
1667
+ Parameters:
1668
+ state_dict (`dict`):
1669
+ A standard state dict containing the lora layer parameters. The keys can either be indexed directly
1670
+ into the unet or prefixed with an additional `unet` which can be used to distinguish between text
1671
+ encoder lora layers.
1672
+ transformer (`SD3Transformer2DModel`):
1673
+ The Transformer model to load the LoRA layers into.
1674
+ adapter_name (`str`, *optional*):
1675
+ Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
1676
+ `default_{i}` where i is the total number of adapters being loaded.
1677
+ """
1678
+ if from_diffusers is None:
1679
+ from_diffusers = FROM_DIFFUSERS
1680
+
1681
+ from ppdiffusers.peft import LoraConfig, inject_adapter_in_model, set_peft_model_state_dict
1682
+
1683
+ keys = list(state_dict.keys())
1684
+
1685
+ transformer_keys = [k for k in keys if k.startswith(cls.transformer_name)]
1686
+ state_dict = {
1687
+ k.replace(f"{cls.transformer_name}.", ""): v for k, v in state_dict.items() if k in transformer_keys
1688
+ }
1689
+
1690
+ if len(state_dict.keys()) > 0:
1691
+ if adapter_name in getattr(transformer, "peft_config", {}):
1692
+ raise ValueError(
1693
+ f"Adapter name {adapter_name} already in use in the transformer - please select a new adapter name."
1694
+ )
1695
+
1696
+ rank = {}
1697
+ for key, val in state_dict.items():
1698
+ if "lora_B" in key:
1699
+ rank[key] = val.shape[1]
1700
+
1701
+ lora_config_kwargs = get_peft_kwargs(rank, network_alpha_dict=None, peft_state_dict=state_dict)
1702
+ if "use_dora" in lora_config_kwargs:
1703
+ raise ValueError(
1704
+ "ppdiffusers.peft does not support dora yet"
1705
+ )
1706
+ lora_config = LoraConfig(**lora_config_kwargs)
1707
+
1708
+ # adapter_name
1709
+ if adapter_name is None:
1710
+ adapter_name = get_adapter_name(transformer)
1711
+
1712
+ inject_adapter_in_model(lora_config, transformer, adapter_name=adapter_name)
1713
+ incompatible_keys = set_peft_model_state_dict(transformer, state_dict, adapter_name)
1714
+
1715
+ if incompatible_keys is not None:
1716
+ # check only for unexpected keys
1717
+ unexpected_keys = getattr(incompatible_keys, "unexpected_keys", None)
1718
+ if unexpected_keys:
1719
+ logger.warning(
1720
+ f"Loading adapter weights from state_dict led to unexpected keys not found in the model: "
1721
+ f" {unexpected_keys}. "
1722
+ )
1723
+
1724
+ # Unsafe code />
1725
+
1726
+ @classmethod
1727
+ def save_lora_weights(
1728
+ cls,
1729
+ save_directory: Union[str, os.PathLike],
1730
+ transformer_lora_layers: Dict[str, paddle.nn.Layer] = None,
1731
+ is_main_process: bool = True,
1732
+ weight_name: str = None,
1733
+ save_function: Callable = None,
1734
+ safe_serialization: bool = True,
1735
+ to_diffusers=None,
1736
+ ):
1737
+ r"""
1738
+ Save the LoRA parameters corresponding to the UNet and text encoder.
1739
+ Arguments:
1740
+ save_directory (`str` or `os.PathLike`):
1741
+ Directory to save LoRA parameters to. Will be created if it doesn't exist.
1742
+ transformer_lora_layers (`Dict[str, paddle.nn.Layer]` or `Dict[str, paddle.Tensor]`):
1743
+ State dict of the LoRA layers corresponding to the `transformer`.
1744
+ is_main_process (`bool`, *optional*, defaults to `True`):
1745
+ Whether the process calling this is the main process or not. Useful during distributed training when you
1746
+ need to call this function on all processes. In this case, set `is_main_process=True` only on the main
1747
+ process to avoid race conditions.
1748
+ save_function (`Callable`):
1749
+ The function to use to save the state dictionary. Useful during distributed training when you need to
1750
+ replace `torch.save` or `paddle.save` with another method. Can be configured with the environment variable
1751
+ `DIFFUSERS_SAVE_MODE`.
1752
+ safe_serialization (`bool`, *optional*, defaults to `True`):
1753
+ Whether to save the model using `safetensors` or the traditional way.
1754
+ """
1755
+ if to_diffusers is None:
1756
+ to_diffusers = TO_DIFFUSERS
1757
+
1758
+ state_dict = {}
1759
+
1760
+ def pack_weights(layers, prefix):
1761
+ layers_weights = layers.state_dict() if isinstance(layers, nn.Layer) else layers
1762
+ layers_state_dict = {f"{prefix}.{module_name}": param for module_name, param in layers_weights.items()}
1763
+ return layers_state_dict
1764
+
1765
+ if not transformer_lora_layers:
1766
+ raise ValueError("You must pass `transformer_lora_layers`.")
1767
+
1768
+ if transformer_lora_layers:
1769
+ state_dict.update(pack_weights(transformer_lora_layers, cls.transformer_name))
1770
+
1771
+ # Save the model
1772
+ cls.write_lora_layers(
1773
+ state_dict=state_dict,
1774
+ save_directory=save_directory,
1775
+ is_main_process=is_main_process,
1776
+ weight_name=weight_name,
1777
+ save_function=save_function,
1778
+ safe_serialization=safe_serialization,
1779
+ to_diffusers=to_diffusers, # only change save weights name and save function
1780
+ )
1781
+
1782
+ @staticmethod
1783
+ def write_lora_layers(
1784
+ state_dict: Dict[str, paddle.Tensor],
1785
+ save_directory: str,
1786
+ is_main_process: bool,
1787
+ weight_name: str,
1788
+ save_function: Callable,
1789
+ safe_serialization: bool,
1790
+ to_diffusers=None,
1791
+ ):
1792
+ if to_diffusers is None:
1793
+ to_diffusers = TO_DIFFUSERS
1794
+
1795
+ if os.path.isfile(save_directory):
1796
+ logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
1797
+ return
1798
+
1799
+ if save_function is None:
1800
+ if to_diffusers:
1801
+ if not is_torch_available() and not safe_serialization:
1802
+ safe_serialization = True
1803
+ logger.warning(
1804
+ "PyTorch is not installed, and `safe_serialization` is currently set to `False`. "
1805
+ "To ensure proper model saving, we will automatically set `safe_serialization=True`. "
1806
+ "If you want to keep `safe_serialization=False`, please make sure PyTorch is installed."
1807
+ )
1808
+ if safe_serialization:
1809
+ if is_torch_available():
1810
+ save_function = partial(torch_safe_save_file, metadata={"format": "pt"})
1811
+ else:
1812
+ save_function = partial(np_safe_save_file, metadata={"format": "pt"})
1813
+ else:
1814
+ save_function = torch.save
1815
+ else:
1816
+ if safe_serialization:
1817
+ state_dict = {k: np.ascontiguousarray(v) for k, v in state_dict.items()}
1818
+ save_function = partial(np_safe_save_file, metadata={"format": "pd"})
1819
+ else:
1820
+ save_function = paddle.save
1821
+
1822
+ os.makedirs(save_directory, exist_ok=True)
1823
+
1824
+ if weight_name is None:
1825
+ if to_diffusers:
1826
+ if safe_serialization:
1827
+ weight_name = TORCH_LORA_WEIGHT_NAME_SAFE
1828
+ else:
1829
+ weight_name = TORCH_LORA_WEIGHT_NAME
1830
+ else:
1831
+ if safe_serialization:
1832
+ weight_name = PADDLE_LORA_WEIGHT_NAME_SAFE
1833
+ else:
1834
+ weight_name = PADDLE_LORA_WEIGHT_NAME
1835
+ else:
1836
+ if "paddle" in weight_name.lower() or "pdparams" in weight_name.lower():
1837
+ to_diffusers = False
1838
+ elif "torch" in weight_name.lower() or "bin" in weight_name.lower():
1839
+ to_diffusers = True
1840
+
1841
+ save_path = Path(save_directory, weight_name).as_posix()
1842
+ save_function(state_dict, save_path)
1843
+ logger.info(f"Model weights saved in {save_path}")
1844
+
1845
+ def unload_lora_weights(self):
1846
+ """
1847
+ Unloads the LoRA parameters.
1848
+ Examples:
1849
+ ```python
1850
+ >>> # Assuming `pipeline` is already loaded with the LoRA parameters.
1851
+ >>> pipeline.unload_lora_weights()
1852
+ >>> ...
1853
+ ```
1854
+ """
1855
+ transformer = getattr(self, self.transformer_name) if not hasattr(self, "transformer") else self.transformer
1856
+ recurse_remove_peft_layers(transformer)
1857
+ if hasattr(transformer, "peft_config"):
1858
+ del transformer.peft_config
1859
+
1860
+ @classmethod
1861
+ def _optionally_disable_offloading(cls, _pipeline):
1862
+ """
1863
+ Optionally removes offloading in case the pipeline has already been sequentially offloaded to CPU.
1864
+ Args:
1865
+ _pipeline (`DiffusionPipeline`):
1866
+ The pipeline to disable offloading for.
1867
+ Returns:
1868
+ tuple:
1869
+ A tuple indicating if `is_model_cpu_offload` or `is_sequential_cpu_offload` is True.
1870
+ """
1871
+ pass
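
For reference, a minimal usage sketch of the LoRA loader mixin defined above. This is not part of the diff: the pipeline class name, checkpoint path, and weight file name are illustrative assumptions, and `load_lora_weights` is assumed to be the entry point this mixin defines alongside `lora_state_dict` and `load_lora_into_transformer`.

```python
# Hedged sketch (assumptions: an SD3-style ppdiffusers pipeline that uses the mixin
# above; repo id, LoRA path, and weight file name are placeholders).
import paddle
from ppdiffusers import StableDiffusion3Pipeline  # assumed pipeline class

pipe = StableDiffusion3Pipeline.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers", paddle_dtype=paddle.float16
)

# `from_diffusers` is normally inferred from the checkpoint format (see the
# `is_torch_weight` handling above) but can also be forced explicitly.
pipe.load_lora_weights("path/to/lora", weight_name="pytorch_lora_weights.safetensors")
image = pipe("an astronaut riding a horse").images[0]

# Remove the injected adapter layers again.
pipe.unload_lora_weights()
```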
VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/loaders/lora_conversion_utils.py ADDED
@@ -0,0 +1,283 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import re
16
+
17
+ from ..utils import logging
18
+
19
+ logger = logging.get_logger(__name__)
20
+
21
+
22
+ def _maybe_map_sgm_blocks_to_diffusers(state_dict, unet_config, delimiter="_", block_slice_pos=5):
23
+ # 1. get all state_dict_keys
24
+ all_keys = list(state_dict.keys())
25
+ sgm_patterns = ["input_blocks", "middle_block", "output_blocks"]
26
+
27
+ # 2. check if needs remapping, if not return original dict
28
+ is_in_sgm_format = False
29
+ for key in all_keys:
30
+ if any(p in key for p in sgm_patterns):
31
+ is_in_sgm_format = True
32
+ break
33
+
34
+ if not is_in_sgm_format:
35
+ return state_dict
36
+
37
+ # 3. Else remap from SGM patterns
38
+ new_state_dict = {}
39
+ inner_block_map = ["resnets", "attentions", "upsamplers"]
40
+
41
+ # Retrieves # of down, mid and up blocks
42
+ input_block_ids, middle_block_ids, output_block_ids = set(), set(), set()
43
+
44
+ for layer in all_keys:
45
+ if "text" in layer:
46
+ new_state_dict[layer] = state_dict.pop(layer)
47
+ else:
48
+ layer_id = int(layer.split(delimiter)[:block_slice_pos][-1])
49
+ if sgm_patterns[0] in layer:
50
+ input_block_ids.add(layer_id)
51
+ elif sgm_patterns[1] in layer:
52
+ middle_block_ids.add(layer_id)
53
+ elif sgm_patterns[2] in layer:
54
+ output_block_ids.add(layer_id)
55
+ else:
56
+ raise ValueError(f"Checkpoint not supported because layer {layer} not supported.")
57
+
58
+ input_blocks = {
59
+ layer_id: [key for key in state_dict if f"input_blocks{delimiter}{layer_id}" in key]
60
+ for layer_id in input_block_ids
61
+ }
62
+ middle_blocks = {
63
+ layer_id: [key for key in state_dict if f"middle_block{delimiter}{layer_id}" in key]
64
+ for layer_id in middle_block_ids
65
+ }
66
+ output_blocks = {
67
+ layer_id: [key for key in state_dict if f"output_blocks{delimiter}{layer_id}" in key]
68
+ for layer_id in output_block_ids
69
+ }
70
+
71
+ # Rename keys accordingly
72
+ for i in input_block_ids:
73
+ block_id = (i - 1) // (unet_config.layers_per_block + 1)
74
+ layer_in_block_id = (i - 1) % (unet_config.layers_per_block + 1)
75
+
76
+ for key in input_blocks[i]:
77
+ inner_block_id = int(key.split(delimiter)[block_slice_pos])
78
+ inner_block_key = inner_block_map[inner_block_id] if "op" not in key else "downsamplers"
79
+ inner_layers_in_block = str(layer_in_block_id) if "op" not in key else "0"
80
+ new_key = delimiter.join(
81
+ key.split(delimiter)[: block_slice_pos - 1]
82
+ + [str(block_id), inner_block_key, inner_layers_in_block]
83
+ + key.split(delimiter)[block_slice_pos + 1 :]
84
+ )
85
+ new_state_dict[new_key] = state_dict.pop(key)
86
+
87
+ for i in middle_block_ids:
88
+ key_part = None
89
+ if i == 0:
90
+ key_part = [inner_block_map[0], "0"]
91
+ elif i == 1:
92
+ key_part = [inner_block_map[1], "0"]
93
+ elif i == 2:
94
+ key_part = [inner_block_map[0], "1"]
95
+ else:
96
+ raise ValueError(f"Invalid middle block id {i}.")
97
+
98
+ for key in middle_blocks[i]:
99
+ new_key = delimiter.join(
100
+ key.split(delimiter)[: block_slice_pos - 1] + key_part + key.split(delimiter)[block_slice_pos:]
101
+ )
102
+ new_state_dict[new_key] = state_dict.pop(key)
103
+
104
+ for i in output_block_ids:
105
+ block_id = i // (unet_config.layers_per_block + 1)
106
+ layer_in_block_id = i % (unet_config.layers_per_block + 1)
107
+
108
+ for key in output_blocks[i]:
109
+ inner_block_id = int(key.split(delimiter)[block_slice_pos])
110
+ inner_block_key = inner_block_map[inner_block_id]
111
+ inner_layers_in_block = str(layer_in_block_id) if inner_block_id < 2 else "0"
112
+ new_key = delimiter.join(
113
+ key.split(delimiter)[: block_slice_pos - 1]
114
+ + [str(block_id), inner_block_key, inner_layers_in_block]
115
+ + key.split(delimiter)[block_slice_pos + 1 :]
116
+ )
117
+ new_state_dict[new_key] = state_dict.pop(key)
118
+
119
+ if len(state_dict) > 0:
120
+ raise ValueError("At this point all state dict entries have to be converted.")
121
+
122
+ return new_state_dict
123
+
124
+
125
+ def _convert_kohya_lora_to_diffusers(state_dict, unet_name="unet", text_encoder_name="text_encoder"):
126
+ unet_state_dict = {}
127
+ te_state_dict = {}
128
+ te2_state_dict = {}
129
+ network_alphas = {}
130
+
131
+ # every down weight has a corresponding up weight and potentially an alpha weight
132
+ lora_keys = [k for k in state_dict.keys() if k.endswith("lora_down.weight")]
133
+ for key in lora_keys:
134
+ lora_name = key.split(".")[0]
135
+ lora_name_up = lora_name + ".lora_up.weight"
136
+ lora_name_alpha = lora_name + ".alpha"
137
+
138
+ if lora_name.startswith("lora_unet_"):
139
+ diffusers_name = key.replace("lora_unet_", "").replace("_", ".")
140
+
141
+ if "input.blocks" in diffusers_name:
142
+ diffusers_name = diffusers_name.replace("input.blocks", "down_blocks")
143
+ else:
144
+ diffusers_name = diffusers_name.replace("down.blocks", "down_blocks")
145
+
146
+ if "middle.block" in diffusers_name:
147
+ diffusers_name = diffusers_name.replace("middle.block", "mid_block")
148
+ else:
149
+ diffusers_name = diffusers_name.replace("mid.block", "mid_block")
150
+ if "output.blocks" in diffusers_name:
151
+ diffusers_name = diffusers_name.replace("output.blocks", "up_blocks")
152
+ else:
153
+ diffusers_name = diffusers_name.replace("up.blocks", "up_blocks")
154
+
155
+ diffusers_name = diffusers_name.replace("transformer.blocks", "transformer_blocks")
156
+ diffusers_name = diffusers_name.replace("to.q.lora", "to_q_lora")
157
+ diffusers_name = diffusers_name.replace("to.k.lora", "to_k_lora")
158
+ diffusers_name = diffusers_name.replace("to.v.lora", "to_v_lora")
159
+ diffusers_name = diffusers_name.replace("to.out.0.lora", "to_out_lora")
160
+ diffusers_name = diffusers_name.replace("proj.in", "proj_in")
161
+ diffusers_name = diffusers_name.replace("proj.out", "proj_out")
162
+ diffusers_name = diffusers_name.replace("emb.layers", "time_emb_proj")
163
+
164
+ # SDXL specificity.
165
+ if "emb" in diffusers_name and "time.emb.proj" not in diffusers_name:
166
+ pattern = r"\.\d+(?=\D*$)"
167
+ diffusers_name = re.sub(pattern, "", diffusers_name, count=1)
168
+ if ".in." in diffusers_name:
169
+ diffusers_name = diffusers_name.replace("in.layers.2", "conv1")
170
+ if ".out." in diffusers_name:
171
+ diffusers_name = diffusers_name.replace("out.layers.3", "conv2")
172
+ if "downsamplers" in diffusers_name or "upsamplers" in diffusers_name:
173
+ diffusers_name = diffusers_name.replace("op", "conv")
174
+ if "skip" in diffusers_name:
175
+ diffusers_name = diffusers_name.replace("skip.connection", "conv_shortcut")
176
+
177
+ # LyCORIS specificity.
178
+ if "time.emb.proj" in diffusers_name:
179
+ diffusers_name = diffusers_name.replace("time.emb.proj", "time_emb_proj")
180
+ if "conv.shortcut" in diffusers_name:
181
+ diffusers_name = diffusers_name.replace("conv.shortcut", "conv_shortcut")
182
+
183
+ # General coverage.
184
+ if "transformer_blocks" in diffusers_name:
185
+ if "attn1" in diffusers_name or "attn2" in diffusers_name:
186
+ diffusers_name = diffusers_name.replace("attn1", "attn1.processor")
187
+ diffusers_name = diffusers_name.replace("attn2", "attn2.processor")
188
+ unet_state_dict[diffusers_name] = state_dict.pop(key)
189
+ unet_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
190
+ elif "ff" in diffusers_name:
191
+ unet_state_dict[diffusers_name] = state_dict.pop(key)
192
+ unet_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
193
+ elif any(key in diffusers_name for key in ("proj_in", "proj_out")):
194
+ unet_state_dict[diffusers_name] = state_dict.pop(key)
195
+ unet_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
196
+ else:
197
+ unet_state_dict[diffusers_name] = state_dict.pop(key)
198
+ unet_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
199
+
200
+ elif lora_name.startswith("lora_te_"):
201
+ diffusers_name = key.replace("lora_te_", "").replace("_", ".")
202
+ diffusers_name = diffusers_name.replace("text.model", "text_model")
203
+ diffusers_name = diffusers_name.replace("self.attn", "self_attn")
204
+ diffusers_name = diffusers_name.replace("q.proj.lora", "to_q_lora")
205
+ diffusers_name = diffusers_name.replace("k.proj.lora", "to_k_lora")
206
+ diffusers_name = diffusers_name.replace("v.proj.lora", "to_v_lora")
207
+ diffusers_name = diffusers_name.replace("out.proj.lora", "to_out_lora")
208
+ if "self_attn" in diffusers_name:
209
+ te_state_dict[diffusers_name] = state_dict.pop(key)
210
+ te_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
211
+ elif "mlp" in diffusers_name:
212
+ # Be aware that this is the new diffusers convention and the rest of the code might
213
+ # not utilize it yet.
214
+ diffusers_name = diffusers_name.replace(".lora.", ".lora_linear_layer.")
215
+ te_state_dict[diffusers_name] = state_dict.pop(key)
216
+ te_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
217
+
218
+ # (sayakpaul): Duplicate code. Needs to be cleaned.
219
+ elif lora_name.startswith("lora_te1_"):
220
+ diffusers_name = key.replace("lora_te1_", "").replace("_", ".")
221
+ diffusers_name = diffusers_name.replace("text.model", "text_model")
222
+ diffusers_name = diffusers_name.replace("self.attn", "self_attn")
223
+ diffusers_name = diffusers_name.replace("q.proj.lora", "to_q_lora")
224
+ diffusers_name = diffusers_name.replace("k.proj.lora", "to_k_lora")
225
+ diffusers_name = diffusers_name.replace("v.proj.lora", "to_v_lora")
226
+ diffusers_name = diffusers_name.replace("out.proj.lora", "to_out_lora")
227
+ if "self_attn" in diffusers_name:
228
+ te_state_dict[diffusers_name] = state_dict.pop(key)
229
+ te_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
230
+ elif "mlp" in diffusers_name:
231
+ # Be aware that this is the new diffusers convention and the rest of the code might
232
+ # not utilize it yet.
233
+ diffusers_name = diffusers_name.replace(".lora.", ".lora_linear_layer.")
234
+ te_state_dict[diffusers_name] = state_dict.pop(key)
235
+ te_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
236
+
237
+ # (sayakpaul): Duplicate code. Needs to be cleaned.
238
+ elif lora_name.startswith("lora_te2_"):
239
+ diffusers_name = key.replace("lora_te2_", "").replace("_", ".")
240
+ diffusers_name = diffusers_name.replace("text.model", "text_model")
241
+ diffusers_name = diffusers_name.replace("self.attn", "self_attn")
242
+ diffusers_name = diffusers_name.replace("q.proj.lora", "to_q_lora")
243
+ diffusers_name = diffusers_name.replace("k.proj.lora", "to_k_lora")
244
+ diffusers_name = diffusers_name.replace("v.proj.lora", "to_v_lora")
245
+ diffusers_name = diffusers_name.replace("out.proj.lora", "to_out_lora")
246
+ if "self_attn" in diffusers_name:
247
+ te2_state_dict[diffusers_name] = state_dict.pop(key)
248
+ te2_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
249
+ elif "mlp" in diffusers_name:
250
+ # Be aware that this is the new diffusers convention and the rest of the code might
251
+ # not utilize it yet.
252
+ diffusers_name = diffusers_name.replace(".lora.", ".lora_linear_layer.")
253
+ te2_state_dict[diffusers_name] = state_dict.pop(key)
254
+ te2_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
255
+
256
+ # Rename the alphas so that they can be mapped appropriately.
257
+ if lora_name_alpha in state_dict:
258
+ alpha = state_dict.pop(lora_name_alpha).item()
259
+ if lora_name_alpha.startswith("lora_unet_"):
260
+ prefix = "unet."
261
+ elif lora_name_alpha.startswith(("lora_te_", "lora_te1_")):
262
+ prefix = "text_encoder."
263
+ else:
264
+ prefix = "text_encoder_2."
265
+ new_name = prefix + diffusers_name.split(".lora.")[0] + ".alpha"
266
+ network_alphas.update({new_name: alpha})
267
+
268
+ if len(state_dict) > 0:
269
+ raise ValueError(f"The following keys have not been correctly be renamed: \n\n {', '.join(state_dict.keys())}")
270
+
271
+ logger.info("Kohya-style checkpoint detected.")
272
+ unet_state_dict = {f"{unet_name}.{module_name}": params for module_name, params in unet_state_dict.items()}
273
+ te_state_dict = {f"{text_encoder_name}.{module_name}": params for module_name, params in te_state_dict.items()}
274
+ te2_state_dict = (
275
+ {f"text_encoder_2.{module_name}": params for module_name, params in te2_state_dict.items()}
276
+ if len(te2_state_dict) > 0
277
+ else None
278
+ )
279
+ if te2_state_dict is not None:
280
+ te_state_dict.update(te2_state_dict)
281
+
282
+ new_state_dict = {**unet_state_dict, **te_state_dict}
283
+ return new_state_dict, network_alphas
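
As a quick sanity check of the Kohya-to-diffusers key mapping implemented above, the toy example below traces a single `to_q` LoRA down/up pair through the renaming rules. It is illustrative only: `_convert_kohya_lora_to_diffusers` is an internal helper, the import path is assumed from the file location, and the tensor shapes are arbitrary.

```python
# Toy illustration of the Kohya -> diffusers key mapping (assumed import path).
import paddle
from ppdiffusers.loaders.lora_conversion_utils import _convert_kohya_lora_to_diffusers

kohya_sd = {
    "lora_unet_down_blocks_0_attentions_0_transformer_blocks_0_attn1_to_q.lora_down.weight": paddle.zeros([4, 320]),
    "lora_unet_down_blocks_0_attentions_0_transformer_blocks_0_attn1_to_q.lora_up.weight": paddle.zeros([320, 4]),
}

new_sd, network_alphas = _convert_kohya_lora_to_diffusers(kohya_sd)
print(list(new_sd.keys()))
# ['unet.down_blocks.0.attentions.0.transformer_blocks.0.attn1.processor.to_q_lora.down.weight',
#  'unet.down_blocks.0.attentions.0.transformer_blocks.0.attn1.processor.to_q_lora.up.weight']
print(network_alphas)  # {} -- this toy checkpoint carries no `.alpha` entries
```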
VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/loaders/single_file.py ADDED
@@ -0,0 +1,766 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import copy
15
+ import os
16
+ from io import BytesIO
17
+ from pathlib import Path
18
+
19
+ import paddle
20
+ import requests
21
+ from huggingface_hub import hf_hub_download
22
+ from huggingface_hub.file_download import _request_wrapper, hf_raise_for_status
23
+
24
+ from ..utils import (
25
+ BACKENDS_MAPPING,
26
+ DIFFUSERS_CACHE,
27
+ HF_HUB_OFFLINE,
28
+ PPDIFFUSERS_CACHE,
29
+ deprecate,
30
+ is_omegaconf_available,
31
+ logging,
32
+ ppdiffusers_url_download,
33
+ smart_load,
34
+ )
35
+
36
+ try:
37
+ from paddlenlp.transformers.model_utils import no_init_weights
38
+ except ImportError:
39
+ from ..utils.paddle_utils import no_init_weights
40
+
41
+ from ..models.modeling_utils import ContextManagers, faster_set_state_dict
42
+
43
+
44
+ def http_file_name(
45
+ url: str,
46
+ *,
47
+ proxies=None,
48
+ headers=None,
49
+ timeout=10.0,
50
+ max_retries=0,
51
+ ):
52
+ """
53
+ Get a remote file name.
54
+ """
55
+ headers = copy.deepcopy(headers) or {}
56
+ r = _request_wrapper(
57
+ method="GET",
58
+ url=url,
59
+ stream=True,
60
+ proxies=proxies,
61
+ headers=headers,
62
+ timeout=timeout,
63
+ # max_retries=max_retries,
64
+ )
65
+ hf_raise_for_status(r)
66
+ displayed_name = url.split("/")[-1]
67
+ content_disposition = r.headers.get("Content-Disposition")
68
+ if content_disposition is not None and "filename=" in content_disposition:
69
+ # Means file is on CDN
70
+ displayed_name = content_disposition.split("filename=")[-1]
71
+ return displayed_name
72
+
73
+
74
+ logger = logging.get_logger(__name__)
75
+
76
+
77
+ class FromSingleFileMixin:
78
+ """
79
+ Load model weights saved in the `.ckpt` format into a [`DiffusionPipeline`].
80
+ """
81
+
82
+ @classmethod
83
+ def from_ckpt(cls, *args, **kwargs):
84
+ deprecation_message = "The function `from_ckpt` is deprecated in favor of `from_single_file` and will be removed in ppdiffusers v.0.21. Please make sure to use `StableDiffusionPipeline.from_single_file(...)` instead."
85
+ deprecate("from_ckpt", "0.21.0", deprecation_message, standard_warn=False)
86
+ return cls.from_single_file(*args, **kwargs)
87
+
88
+ @classmethod
89
+ def from_single_file(cls, pretrained_model_link_or_path, **kwargs):
90
+ r"""
91
+ Instantiate a [`DiffusionPipeline`] from pretrained pipeline weights saved in the `.ckpt` or `.safetensors`
92
+ format. The pipeline is set in evaluation mode (`model.eval()`) by default.
93
+
94
+ Parameters:
95
+ pretrained_model_link_or_path (`str` or `os.PathLike`, *optional*):
96
+ Can be either:
97
+ - A link to the `.ckpt` file (for example
98
+ `"https://huggingface.co/<repo_id>/blob/main/<path_to_file>.ckpt"`) on the Hub.
99
+ - A path to a *file* containing all pipeline weights.
100
+ paddle_dtype (`str` or `paddle.dtype`, *optional*):
101
+ Override the default `paddle.dtype` and load the model with another dtype. If `"auto"` is passed, the
102
+ dtype is automatically derived from the model's weights.
103
+ force_download (`bool`, *optional*, defaults to `False`):
104
+ Whether or not to force the (re-)download of the model weights and configuration files, overriding the
105
+ cached versions if they exist.
106
+ cache_dir (`Union[str, os.PathLike]`, *optional*):
107
+ Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
108
+ is not used.
109
+ resume_download (`bool`, *optional*, defaults to `False`):
110
+ Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
111
+ incompletely downloaded files are deleted.
112
+ proxies (`Dict[str, str]`, *optional*):
113
+ A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
114
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
115
+ local_files_only (`bool`, *optional*, defaults to `False`):
116
+ Whether to only load local model weights and configuration files or not. If set to `True`, the model
117
+ won't be downloaded from the Hub.
118
+ use_auth_token (`str` or *bool*, *optional*):
119
+ The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
120
+ `diffusers-cli login` (stored in `~/.huggingface`) is used.
121
+ revision (`str`, *optional*, defaults to `"main"`):
122
+ The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
123
+ allowed by Git.
124
+ use_safetensors (`bool`, *optional*, defaults to `None`):
125
+ If set to `None`, the safetensors weights are downloaded if they're available **and** if the
126
+ safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors
127
+ weights. If set to `False`, safetensors weights are not loaded.
128
+ extract_ema (`bool`, *optional*, defaults to `False`):
129
+ Whether to extract the EMA weights or not. Pass `True` to extract the EMA weights which usually yield
130
+ higher quality images for inference. Non-EMA weights are usually better for continuing finetuning.
131
+ upcast_attention (`bool`, *optional*, defaults to `None`):
132
+ Whether the attention computation should always be upcasted.
133
+ image_size (`int`, *optional*, defaults to 512):
134
+ The image size the model was trained on. Use 512 for all Stable Diffusion v1 models and the Stable
135
+ Diffusion v2 base model. Use 768 for Stable Diffusion v2.
136
+ prediction_type (`str`, *optional*):
137
+ The prediction type the model was trained on. Use `'epsilon'` for all Stable Diffusion v1 models and
138
+ the Stable Diffusion v2 base model. Use `'v_prediction'` for Stable Diffusion v2.
139
+ num_in_channels (`int`, *optional*, defaults to `None`):
140
+ The number of input channels. If `None`, it is automatically inferred.
141
+ scheduler_type (`str`, *optional*, defaults to `"pndm"`):
142
+ Type of scheduler to use. Should be one of `["pndm", "lms", "heun", "euler", "euler-ancestral", "dpm",
143
+ "ddim"]`.
144
+ load_safety_checker (`bool`, *optional*, defaults to `True`):
145
+ Whether to load the safety checker or not.
146
+ text_encoder ([`~transformers.CLIPTextModel`], *optional*, defaults to `None`):
147
+ An instance of `CLIPTextModel` to use, specifically the
148
+ [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. If this
149
+ parameter is `None`, the function loads a new instance of `CLIPTextModel` by itself if needed.
150
+ vae (`AutoencoderKL`, *optional*, defaults to `None`):
151
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. If
152
+ this parameter is `None`, the function will load a new instance of [CLIP] by itself, if needed.
153
+ tokenizer ([`~transformers.CLIPTokenizer`], *optional*, defaults to `None`):
154
+ An instance of `CLIPTokenizer` to use. If this parameter is `None`, the function loads a new instance
155
+ of `CLIPTokenizer` by itself if needed.
156
+ original_config_file (`str`):
157
+ Path to `.yaml` config file corresponding to the original architecture. If `None`, will be
158
+ automatically inferred by looking for a key that only exists in SD2.0 models.
159
+ kwargs (remaining dictionary of keyword arguments, *optional*):
160
+ Can be used to overwrite load and saveable variables (for example the pipeline components of the
161
+ specific pipeline class). The overwritten components are directly passed to the pipelines `__init__`
162
+ method. See example below for more information.
163
+
164
+ Examples:
165
+
166
+ ```py
167
+ >>> from ppdiffusers import StableDiffusionPipeline
168
+
169
+ >>> # Download pipeline from huggingface.co and cache.
170
+ >>> pipeline = StableDiffusionPipeline.from_single_file(
171
+ ... "https://huggingface.co/WarriorMama777/OrangeMixs/blob/main/Models/AbyssOrangeMix/AbyssOrangeMix.safetensors"
172
+ ... )
173
+
174
+ >>> # Download pipeline from local file
175
+ >>> # file is downloaded under ./v1-5-pruned-emaonly.ckpt
176
+ >>> pipeline = StableDiffusionPipeline.from_single_file("./v1-5-pruned-emaonly")
177
+
178
+ >>> # Enable float16
179
+ >>> pipeline = StableDiffusionPipeline.from_single_file(
180
+ ... "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.ckpt",
181
+ ... paddle_dtype=paddle.float16,
182
+ ... )
183
+ ```
184
+ """
185
+ # import here to avoid circular dependency
186
+ from ..pipelines.stable_diffusion.convert_from_ckpt import (
187
+ download_from_original_stable_diffusion_ckpt,
188
+ )
189
+
190
+ from_hf_hub = any(p in pretrained_model_link_or_path for p in ["huggingface.co", "hf.co", "hf-mirror"])
191
+ cache_dir = (
192
+ kwargs.pop("cache_dir", DIFFUSERS_CACHE) if from_hf_hub else kwargs.pop("cache_dir", PPDIFFUSERS_CACHE)
193
+ )
194
+ original_config_file = kwargs.pop("original_config_file", None)
195
+ config_files = kwargs.pop("config_files", None)
196
+ resume_download = kwargs.pop("resume_download", False)
197
+ force_download = kwargs.pop("force_download", False)
198
+ proxies = kwargs.pop("proxies", None)
199
+ local_files_only = kwargs.pop("local_files_only", HF_HUB_OFFLINE)
200
+ use_auth_token = kwargs.pop("use_auth_token", None)
201
+ revision = kwargs.pop("revision", None)
202
+ extract_ema = kwargs.pop("extract_ema", False)
203
+ image_size = kwargs.pop("image_size", None)
204
+ scheduler_type = kwargs.pop("scheduler_type", "pndm")
205
+ num_in_channels = kwargs.pop("num_in_channels", None)
206
+ upcast_attention = kwargs.pop("upcast_attention", None)
207
+ load_safety_checker = kwargs.pop("load_safety_checker", False)
208
+ prediction_type = kwargs.pop("prediction_type", None)
209
+ text_encoder = kwargs.pop("text_encoder", None)
210
+ vae = kwargs.pop("vae", None)
211
+ controlnet = kwargs.pop("controlnet", None)
212
+ adapter = kwargs.pop("adapter", None)
213
+ tokenizer = kwargs.pop("tokenizer", None)
214
+
215
+ paddle_dtype = kwargs.pop("paddle_dtype", None)
216
+
217
+ use_safetensors = kwargs.pop("use_safetensors", None)
218
+
219
+ pipeline_name = cls.__name__
220
+ file_extension = pretrained_model_link_or_path.rsplit(".", 1)[-1]
221
+ from_safetensors = file_extension == "safetensors"
222
+
223
+ if from_safetensors and use_safetensors is False:
224
+ raise ValueError("Make sure to install `safetensors` with `pip install safetensors`.")
225
+
226
+ # TODO: For now we only support stable diffusion
227
+ stable_unclip = None
228
+ model_type = None
229
+
230
+ if pipeline_name in [
231
+ "StableDiffusionControlNetPipeline",
232
+ "StableDiffusionControlNetImg2ImgPipeline",
233
+ "StableDiffusionControlNetInpaintPipeline",
234
+ ]:
235
+ from ..models.controlnet import ControlNetModel
236
+ from ..pipelines.controlnet.multicontrolnet import MultiControlNetModel
237
+
238
+ # list/tuple or a single instance of ControlNetModel or MultiControlNetModel
239
+ if not (
240
+ isinstance(controlnet, (ControlNetModel, MultiControlNetModel))
241
+ or isinstance(controlnet, (list, tuple))
242
+ and isinstance(controlnet[0], ControlNetModel)
243
+ ):
244
+ raise ValueError("ControlNet needs to be passed if loading from ControlNet pipeline.")
245
+ elif "StableDiffusion" in pipeline_name:
246
+ # Model type will be inferred from the checkpoint.
247
+ pass
248
+ elif pipeline_name == "StableUnCLIPPipeline":
249
+ model_type = "FrozenOpenCLIPEmbedder"
250
+ stable_unclip = "txt2img"
251
+ elif pipeline_name == "StableUnCLIPImg2ImgPipeline":
252
+ model_type = "FrozenOpenCLIPEmbedder"
253
+ stable_unclip = "img2img"
254
+ elif pipeline_name == "PaintByExamplePipeline":
255
+ model_type = "PaintByExample"
256
+ elif pipeline_name == "LDMTextToImagePipeline":
257
+ model_type = "LDMTextToImage"
258
+ else:
259
+ raise ValueError(f"Unhandled pipeline class: {pipeline_name}")
260
+
261
+ pretrained_model_link_or_path = str(pretrained_model_link_or_path)
262
+ if os.path.isfile(pretrained_model_link_or_path):
263
+ checkpoint_path = pretrained_model_link_or_path
264
+ elif pretrained_model_link_or_path.startswith("http://") or pretrained_model_link_or_path.startswith(
265
+ "https://"
266
+ ):
267
+ if from_hf_hub:
268
+ # remove huggingface url
269
+ has_valid_url_prefix = False
270
+ valid_url_prefixes = [
271
+ "https://huggingface.co/",
272
+ "huggingface.co/",
273
+ "hf.co/",
274
+ "https://hf.co/",
275
+ "hf-mirror.com/",
276
+ "https://hf-mirror.com/",
277
+ ]
278
+ for prefix in valid_url_prefixes:
279
+ if pretrained_model_link_or_path.startswith(prefix):
280
+ pretrained_model_link_or_path = pretrained_model_link_or_path[len(prefix) :]
281
+ has_valid_url_prefix = True
282
+
283
+ # Code based on diffusers.pipelines.pipeline_utils.DiffusionPipeline.from_pretrained
284
+ ckpt_path = Path(pretrained_model_link_or_path)
285
+ if not ckpt_path.is_file():
286
+ if not has_valid_url_prefix:
287
+ raise ValueError(
288
+ f"The provided path is either not a file or a valid huggingface URL was not provided. Valid URLs begin with {', '.join(valid_url_prefixes)}"
289
+ )
290
+
291
+ # get repo_id and (potentially nested) file path of ckpt in repo
292
+ repo_id = "/".join(ckpt_path.parts[:2])
293
+ file_path = "/".join(ckpt_path.parts[2:])
294
+
295
+ if file_path.startswith("blob/"):
296
+ file_path = file_path[len("blob/") :]
297
+
298
+ if file_path.startswith("main/"):
299
+ file_path = file_path[len("main/") :]
300
+
301
+ checkpoint_path = hf_hub_download(
302
+ repo_id,
303
+ filename=file_path,
304
+ cache_dir=cache_dir,
305
+ resume_download=resume_download,
306
+ proxies=proxies,
307
+ local_files_only=local_files_only,
308
+ token=use_auth_token,
309
+ revision=revision,
310
+ force_download=force_download,
311
+ )
312
+ else:
313
+ checkpoint_path = ckpt_path
314
+ else:
315
+ checkpoint_path = ppdiffusers_url_download(
316
+ pretrained_model_link_or_path,
317
+ cache_dir=cache_dir,
318
+ filename=http_file_name(pretrained_model_link_or_path).strip('"'),
319
+ force_download=force_download,
320
+ resume_download=resume_download,
321
+ )
322
+ else:
323
+ checkpoint_path = pretrained_model_link_or_path
324
+
325
+ pipe = download_from_original_stable_diffusion_ckpt(
326
+ checkpoint_path,
327
+ pipeline_class=cls,
328
+ model_type=model_type,
329
+ stable_unclip=stable_unclip,
330
+ controlnet=controlnet,
331
+ adapter=adapter,
332
+ from_safetensors=from_safetensors,
333
+ extract_ema=extract_ema,
334
+ image_size=image_size,
335
+ scheduler_type=scheduler_type,
336
+ num_in_channels=num_in_channels,
337
+ upcast_attention=upcast_attention,
338
+ load_safety_checker=load_safety_checker,
339
+ prediction_type=prediction_type,
340
+ paddle_dtype=paddle_dtype,
341
+ text_encoder=text_encoder,
342
+ vae=vae,
343
+ tokenizer=tokenizer,
344
+ original_config_file=original_config_file,
345
+ config_files=config_files,
346
+ local_files_only=local_files_only,
347
+ )
348
+
349
+ return pipe
350
+
351
+
352
+ class FromOriginalVAEMixin:
353
+ """
354
+ Load pretrained ControlNet weights saved in the `.ckpt` or `.safetensors` format into an [`AutoencoderKL`].
355
+ """
356
+
357
+ @classmethod
358
+ def from_single_file(cls, pretrained_model_link_or_path, **kwargs):
359
+ r"""
360
+ Instantiate an [`AutoencoderKL`] from pretrained VAE weights saved in the original `.ckpt` or
361
+ `.safetensors` format. The pipeline is set in evaluation mode (`model.eval()`) by default.
362
+
363
+ Parameters:
364
+ pretrained_model_link_or_path (`str` or `os.PathLike`, *optional*):
365
+ Can be either:
366
+ - A link to the `.ckpt` file (for example
367
+ `"https://huggingface.co/<repo_id>/blob/main/<path_to_file>.ckpt"`) on the Hub.
368
+ - A path to a *file* containing all pipeline weights.
369
+ paddle_dtype (`str` or `paddle.dtype`, *optional*):
370
+ Override the default `paddle.dtype` and load the model with another dtype. If `"auto"` is passed, the
371
+ dtype is automatically derived from the model's weights.
372
+ force_download (`bool`, *optional*, defaults to `False`):
373
+ Whether or not to force the (re-)download of the model weights and configuration files, overriding the
374
+ cached versions if they exist.
375
+ cache_dir (`Union[str, os.PathLike]`, *optional*):
376
+ Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
377
+ is not used.
378
+ resume_download (`bool`, *optional*, defaults to `False`):
379
+ Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
380
+ incompletely downloaded files are deleted.
381
+ proxies (`Dict[str, str]`, *optional*):
382
+ A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
383
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
384
+ local_files_only (`bool`, *optional*, defaults to `False`):
385
+ Whether to only load local model weights and configuration files or not. If set to True, the model
386
+ won't be downloaded from the Hub.
387
+ use_auth_token (`str` or *bool*, *optional*):
388
+ The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
389
+ `diffusers-cli login` (stored in `~/.huggingface`) is used.
390
+ revision (`str`, *optional*, defaults to `"main"`):
391
+ The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
392
+ allowed by Git.
393
+ image_size (`int`, *optional*, defaults to 512):
394
+ The image size the model was trained on. Use 512 for all Stable Diffusion v1 models and the Stable
395
+ Diffusion v2 base model. Use 768 for Stable Diffusion v2.
396
+ use_safetensors (`bool`, *optional*, defaults to `None`):
397
+ If set to `None`, the safetensors weights are downloaded if they're available **and** if the
398
+ safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors
399
+ weights. If set to `False`, safetensors weights are not loaded.
400
+ upcast_attention (`bool`, *optional*, defaults to `None`):
401
+ Whether the attention computation should always be upcasted.
402
+ scaling_factor (`float`, *optional*, defaults to 0.18215):
403
+ The component-wise standard deviation of the trained latent space computed using the first batch of the
404
+ training set. This is used to scale the latent space to have unit variance when training the diffusion
405
+ model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
406
+ diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z
407
+ = 1 / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution
408
+ Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper.
409
+ kwargs (remaining dictionary of keyword arguments, *optional*):
410
+ Can be used to overwrite load and saveable variables (for example the pipeline components of the
411
+ specific pipeline class). The overwritten components are directly passed to the pipelines `__init__`
412
+ method. See example below for more information.
413
+
414
+ <Tip warning={true}>
415
+
416
+ Make sure to pass both `image_size` and `scaling_factor` to `from_single_file()` if you're loading
417
+ a VAE from SDXL or a Stable Diffusion v2 model or higher.
418
+
419
+ </Tip>
420
+
421
+ Examples:
422
+
423
+ ```py
424
+ from ppdiffusers import AutoencoderKL
425
+
426
+ url = "https://huggingface.co/stabilityai/sd-vae-ft-mse-original/blob/main/vae-ft-mse-840000-ema-pruned.safetensors" # can also be local file
427
+ model = AutoencoderKL.from_single_file(url)
428
+ ```
429
+ """
430
+ from_hf_hub = any(p in pretrained_model_link_or_path for p in ["huggingface.co", "hf.co", "hf-mirror"])
431
+ cache_dir = (
432
+ kwargs.pop("cache_dir", DIFFUSERS_CACHE) if from_hf_hub else kwargs.pop("cache_dir", PPDIFFUSERS_CACHE)
433
+ )
434
+
435
+ if not is_omegaconf_available():
436
+ raise ValueError(BACKENDS_MAPPING["omegaconf"][1])
437
+
438
+ from omegaconf import OmegaConf
439
+
440
+ from ..models import AutoencoderKL
441
+
442
+ # import here to avoid circular dependency
443
+ from ..pipelines.stable_diffusion.convert_from_ckpt import (
444
+ convert_diffusers_vae_unet_to_ppdiffusers,
445
+ convert_ldm_vae_checkpoint,
446
+ create_vae_diffusers_config,
447
+ )
448
+
449
+ config_file = kwargs.pop("config_file", None)
450
+ resume_download = kwargs.pop("resume_download", False)
451
+ force_download = kwargs.pop("force_download", False)
452
+ proxies = kwargs.pop("proxies", None)
453
+ local_files_only = kwargs.pop("local_files_only", HF_HUB_OFFLINE)
454
+ use_auth_token = kwargs.pop("use_auth_token", None)
455
+ revision = kwargs.pop("revision", None)
456
+ image_size = kwargs.pop("image_size", None)
457
+ scaling_factor = kwargs.pop("scaling_factor", None)
458
+ kwargs.pop("upcast_attention", None)
459
+
460
+ paddle_dtype = kwargs.pop("paddle_dtype", None)
461
+
462
+ use_safetensors = kwargs.pop("use_safetensors", None)
463
+
464
+ file_extension = pretrained_model_link_or_path.rsplit(".", 1)[-1]
465
+ from_safetensors = file_extension == "safetensors"
466
+
467
+ if from_safetensors and use_safetensors is False:
468
+ raise ValueError("Make sure to install `safetensors` with `pip install safetensors`.")
469
+
470
+ pretrained_model_link_or_path = str(pretrained_model_link_or_path)
471
+ if os.path.isfile(pretrained_model_link_or_path):
472
+ checkpoint_path = pretrained_model_link_or_path
473
+ elif pretrained_model_link_or_path.startswith("http://") or pretrained_model_link_or_path.startswith(
474
+ "https://"
475
+ ):
476
+ if from_hf_hub:
477
+ # remove huggingface url
478
+ has_valid_url_prefix = False
479
+ valid_url_prefixes = [
480
+ "https://huggingface.co/",
481
+ "huggingface.co/",
482
+ "hf.co/",
483
+ "https://hf.co/",
484
+ "hf-mirror.com/",
485
+ "https://hf-mirror.com/",
486
+ ]
487
+ for prefix in valid_url_prefixes:
488
+ if pretrained_model_link_or_path.startswith(prefix):
489
+ pretrained_model_link_or_path = pretrained_model_link_or_path[len(prefix) :]
490
+ has_valid_url_prefix = True
491
+
492
+ # Code based on diffusers.pipelines.pipeline_utils.DiffusionPipeline.from_pretrained
493
+ ckpt_path = Path(pretrained_model_link_or_path)
494
+ if not ckpt_path.is_file():
495
+ if not has_valid_url_prefix:
496
+ raise ValueError(
497
+ f"The provided path is either not a file or a valid huggingface URL was not provided. Valid URLs begin with {', '.join(valid_url_prefixes)}"
498
+ )
499
+ # get repo_id and (potentially nested) file path of ckpt in repo
500
+ repo_id = "/".join(ckpt_path.parts[:2])
501
+ file_path = "/".join(ckpt_path.parts[2:])
502
+
503
+ if file_path.startswith("blob/"):
504
+ file_path = file_path[len("blob/") :]
505
+
506
+ if file_path.startswith("main/"):
507
+ file_path = file_path[len("main/") :]
508
+
509
+ checkpoint_path = hf_hub_download(
510
+ repo_id,
511
+ filename=file_path,
512
+ cache_dir=cache_dir,
513
+ resume_download=resume_download,
514
+ proxies=proxies,
515
+ local_files_only=local_files_only,
516
+ token=use_auth_token,
517
+ revision=revision,
518
+ force_download=force_download,
519
+ )
520
+ else:
521
+ checkpoint_path = ckpt_path
522
+
523
+ else:
524
+ checkpoint_path = ppdiffusers_url_download(
525
+ pretrained_model_link_or_path,
526
+ cache_dir=cache_dir,
527
+ filename=http_file_name(pretrained_model_link_or_path).strip('"'),
528
+ force_download=force_download,
529
+ resume_download=resume_download,
530
+ )
531
+ else:
532
+ checkpoint_path = pretrained_model_link_or_path
533
+ checkpoint = smart_load(checkpoint_path, return_numpy=True)
534
+
535
+ if "state_dict" in checkpoint:
536
+ checkpoint = checkpoint["state_dict"]
537
+
538
+ if config_file is None:
539
+ config_url = "https://paddlenlp.bj.bcebos.com/models/community/junnyu/develop/v1-inference.yaml"
540
+ config_file = BytesIO(requests.get(config_url).content)
541
+
542
+ original_config = OmegaConf.load(config_file)
543
+
544
+ # default to sd-v1-5
545
+ image_size = image_size or 512
546
+
547
+ vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
548
+ converted_vae_checkpoint = convert_ldm_vae_checkpoint(checkpoint, vae_config)
549
+
550
+ if scaling_factor is None:
551
+ if (
552
+ "model" in original_config
553
+ and "params" in original_config.model
554
+ and "scale_factor" in original_config.model.params
555
+ ):
556
+ vae_scaling_factor = original_config.model.params.scale_factor
557
+ else:
558
+ vae_scaling_factor = 0.18215 # default SD scaling factor
559
+
560
+ vae_config["scaling_factor"] = vae_scaling_factor
561
+
562
+ init_contexts = []
563
+ init_contexts.append(paddle.dtype_guard(paddle.float32))
564
+ init_contexts.append(no_init_weights(_enable=True))
565
+ if hasattr(paddle, "LazyGuard"):
566
+ init_contexts.append(paddle.LazyGuard())
567
+ with ContextManagers(init_contexts):
568
+ vae = AutoencoderKL(**vae_config)
569
+
570
+ # we must transpose linear layer
571
+ faster_set_state_dict(vae, convert_diffusers_vae_unet_to_ppdiffusers(vae, converted_vae_checkpoint))
572
+
573
+ if paddle_dtype is not None:
574
+ vae.to(paddle_dtype=paddle_dtype)
575
+
576
+ return vae
577
+
578
+
579
+ class FromOriginalControlnetMixin:
580
+ """
581
+ Load pretrained ControlNet weights saved in the `.ckpt` or `.safetensors` format into a [`ControlNetModel`].
582
+ """
583
+
584
+ @classmethod
585
+ def from_single_file(cls, pretrained_model_link_or_path, **kwargs):
586
+ r"""
587
+ Instantiate a [`ControlNetModel`] from pretrained ControlNet weights saved in the original `.ckpt` or
588
+ `.safetensors` format. The pipeline is set in evaluation mode (`model.eval()`) by default.
589
+
590
+ Parameters:
591
+ pretrained_model_link_or_path (`str` or `os.PathLike`, *optional*):
592
+ Can be either:
593
+ - A link to the `.ckpt` file (for example
594
+ `"https://huggingface.co/<repo_id>/blob/main/<path_to_file>.ckpt"`) on the Hub.
595
+ - A path to a *file* containing all pipeline weights.
596
+ paddle_dtype (`str` or `paddle.dtype`, *optional*):
597
+ Override the default `paddle.dtype` and load the model with another dtype. If `"auto"` is passed, the
598
+ dtype is automatically derived from the model's weights.
599
+ force_download (`bool`, *optional*, defaults to `False`):
600
+ Whether or not to force the (re-)download of the model weights and configuration files, overriding the
601
+ cached versions if they exist.
602
+ cache_dir (`Union[str, os.PathLike]`, *optional*):
603
+ Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
604
+ is not used.
605
+ resume_download (`bool`, *optional*, defaults to `False`):
606
+ Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
607
+ incompletely downloaded files are deleted.
608
+ proxies (`Dict[str, str]`, *optional*):
609
+ A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
610
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
611
+ local_files_only (`bool`, *optional*, defaults to `False`):
612
+ Whether to only load local model weights and configuration files or not. If set to True, the model
613
+ won't be downloaded from the Hub.
614
+ use_auth_token (`str` or *bool*, *optional*):
615
+ The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
616
+ `diffusers-cli login` (stored in `~/.huggingface`) is used.
617
+ revision (`str`, *optional*, defaults to `"main"`):
618
+ The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
619
+ allowed by Git.
620
+ use_safetensors (`bool`, *optional*, defaults to `None`):
621
+ If set to `None`, the safetensors weights are downloaded if they're available **and** if the
622
+ safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors
623
+ weights. If set to `False`, safetensors weights are not loaded.
624
+ image_size (`int`, *optional*, defaults to 512):
625
+ The image size the model was trained on. Use 512 for all Stable Diffusion v1 models and the Stable
626
+ Diffusion v2 base model. Use 768 for Stable Diffusion v2.
627
+ upcast_attention (`bool`, *optional*, defaults to `None`):
628
+ Whether the attention computation should always be upcasted.
629
+ kwargs (remaining dictionary of keyword arguments, *optional*):
630
+ Can be used to overwrite load and saveable variables (for example the pipeline components of the
631
+ specific pipeline class). The overwritten components are directly passed to the pipelines `__init__`
632
+ method. See example below for more information.
633
+
634
+ Examples:
635
+
636
+ ```py
637
+ from ppdiffusers import StableDiffusionControlNetPipeline, ControlNetModel
638
+
639
+ url = "https://huggingface.co/lllyasviel/ControlNet-v1-1/blob/main/control_v11p_sd15_canny.pth" # can also be a local path
640
+ controlnet = ControlNetModel.from_single_file(url)
641
+
642
+ url = "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned.safetensors" # can also be a local path
643
+ pipe = StableDiffusionControlNetPipeline.from_single_file(url, controlnet=controlnet)
644
+ ```
645
+ """
646
+ # import here to avoid circular dependency
647
+ from ..pipelines.stable_diffusion.convert_from_ckpt import (
648
+ download_controlnet_from_original_ckpt,
649
+ )
650
+
651
+ from_hf_hub = any(p in pretrained_model_link_or_path for p in ["huggingface.co", "hf.co", "hf-mirror"])
652
+ cache_dir = (
653
+ kwargs.pop("cache_dir", DIFFUSERS_CACHE) if from_hf_hub else kwargs.pop("cache_dir", PPDIFFUSERS_CACHE)
654
+ )
655
+
656
+ config_file = kwargs.pop("config_file", None)
657
+ resume_download = kwargs.pop("resume_download", False)
658
+ force_download = kwargs.pop("force_download", False)
659
+ proxies = kwargs.pop("proxies", None)
660
+ local_files_only = kwargs.pop("local_files_only", HF_HUB_OFFLINE)
661
+ use_auth_token = kwargs.pop("use_auth_token", None)
662
+ num_in_channels = kwargs.pop("num_in_channels", None)
663
+ use_linear_projection = kwargs.pop("use_linear_projection", None)
664
+ revision = kwargs.pop("revision", None)
665
+ extract_ema = kwargs.pop("extract_ema", False)
666
+ image_size = kwargs.pop("image_size", None)
667
+ upcast_attention = kwargs.pop("upcast_attention", None)
668
+
669
+ paddle_dtype = kwargs.pop("paddle_dtype", None)
670
+
671
+ use_safetensors = kwargs.pop("use_safetensors", None)
672
+
673
+ file_extension = pretrained_model_link_or_path.rsplit(".", 1)[-1]
674
+ from_safetensors = file_extension == "safetensors"
675
+
676
+ if from_safetensors and use_safetensors is False:
677
+ raise ValueError("Make sure to install `safetensors` with `pip install safetensors`.")
678
+
679
+ pretrained_model_link_or_path = str(pretrained_model_link_or_path)
680
+ if os.path.isfile(pretrained_model_link_or_path):
681
+ checkpoint_path = pretrained_model_link_or_path
682
+ elif pretrained_model_link_or_path.startswith("http://") or pretrained_model_link_or_path.startswith(
683
+ "https://"
684
+ ):
685
+ if from_hf_hub:
686
+ # remove huggingface url
687
+ has_valid_url_prefix = False
688
+ valid_url_prefixes = [
689
+ "https://huggingface.co/",
690
+ "huggingface.co/",
691
+ "hf.co/",
692
+ "https://hf.co/",
693
+ "hf-mirror.com/",
694
+ "https://hf-mirror.com/",
695
+ ]
696
+ for prefix in valid_url_prefixes:
697
+ if pretrained_model_link_or_path.startswith(prefix):
698
+ pretrained_model_link_or_path = pretrained_model_link_or_path[len(prefix) :]
699
+ has_valid_url_prefix = True
700
+
701
+ # Code based on diffusers.pipelines.pipeline_utils.DiffusionPipeline.from_pretrained
702
+ ckpt_path = Path(pretrained_model_link_or_path)
703
+ if not ckpt_path.is_file():
704
+ if not has_valid_url_prefix:
705
+ raise ValueError(
706
+ f"The provided path is either not a file or a valid huggingface URL was not provided. Valid URLs begin with {', '.join(valid_url_prefixes)}"
707
+ )
708
+ # get repo_id and (potentially nested) file path of ckpt in repo
709
+ repo_id = "/".join(ckpt_path.parts[:2])
710
+ file_path = "/".join(ckpt_path.parts[2:])
711
+
712
+ if file_path.startswith("blob/"):
713
+ file_path = file_path[len("blob/") :]
714
+
715
+ if file_path.startswith("main/"):
716
+ file_path = file_path[len("main/") :]
717
+
718
+ checkpoint_path = hf_hub_download(
719
+ repo_id,
720
+ filename=file_path,
721
+ cache_dir=cache_dir,
722
+ resume_download=resume_download,
723
+ proxies=proxies,
724
+ local_files_only=local_files_only,
725
+ token=use_auth_token,
726
+ revision=revision,
727
+ force_download=force_download,
728
+ )
729
+ else:
730
+ checkpoint_path = ckpt_path
731
+
732
+ else:
733
+ checkpoint_path = ppdiffusers_url_download(
734
+ pretrained_model_link_or_path,
735
+ cache_dir=cache_dir,
736
+ filename=http_file_name(pretrained_model_link_or_path).strip('"'),
737
+ force_download=force_download,
738
+ resume_download=resume_download,
739
+ )
740
+ else:
741
+ checkpoint_path = pretrained_model_link_or_path
742
+
743
+ if config_file is None:
744
+ config_url = "https://paddlenlp.bj.bcebos.com/models/community/junnyu/develop/cldm_v15.yaml"
745
+ config_file = BytesIO(requests.get(config_url).content)
746
+
747
+ image_size = image_size or 512
748
+
749
+ controlnet = download_controlnet_from_original_ckpt(
750
+ checkpoint_path,
751
+ original_config_file=config_file,
752
+ image_size=image_size,
753
+ extract_ema=extract_ema,
754
+ num_in_channels=num_in_channels,
755
+ upcast_attention=upcast_attention,
756
+ from_safetensors=from_safetensors,
757
+ use_linear_projection=use_linear_projection,
758
+ )
759
+
760
+ if paddle_dtype is not None:
761
+ controlnet.to(paddle_dtype=paddle_dtype)
762
+
763
+ return controlnet
764
+
765
+
766
+ FromCkptMixin = FromSingleFileMixin
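
To complement the docstring examples above, here is a hedged sketch of loading a VAE checkpoint while passing `image_size` and `scaling_factor` explicitly, as the tip in `FromOriginalVAEMixin.from_single_file` recommends for SD2.x/SDXL-style VAEs. The URL is the same sd-vae-ft-mse checkpoint used in the docstring; the numeric values are the usual Stable Diffusion defaults and are shown for illustration.

```python
import paddle
from ppdiffusers import AutoencoderKL

url = "https://huggingface.co/stabilityai/sd-vae-ft-mse-original/blob/main/vae-ft-mse-840000-ema-pruned.safetensors"
vae = AutoencoderKL.from_single_file(
    url,
    image_size=512,          # resolution the VAE was trained at
    scaling_factor=0.18215,  # SD1.x/2.x latent scaling factor; SDXL VAEs use 0.13025
    paddle_dtype=paddle.float16,
)
```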
VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/loaders/textual_inversion.py ADDED
@@ -0,0 +1,449 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Dict, List, Optional, Union
15
+
16
+ import paddle
17
+
18
+ from ..utils import (
19
+ DIFFUSERS_CACHE,
20
+ FROM_AISTUDIO,
21
+ FROM_DIFFUSERS,
22
+ FROM_HF_HUB,
23
+ HF_HUB_OFFLINE,
24
+ PPDIFFUSERS_CACHE,
25
+ _get_model_file,
26
+ is_paddlenlp_available,
27
+ logging,
28
+ )
29
+
30
+ if is_paddlenlp_available():
31
+ from paddlenlp.transformers import PretrainedModel, PretrainedTokenizer
32
+
33
+ logger = logging.get_logger(__name__)
34
+ from ..models.modeling_utils import load_state_dict
35
+
36
+ TORCH_TEXT_INVERSION_NAME = "learned_embeds.bin"
37
+ PADDLE_TEXT_INVERSION_NAME = "learned_embeds.pdparams"
38
+ TEXT_INVERSION_NAME_SAFE = "learned_embeds.safetensors"
39
+
40
+
41
+ def load_textual_inversion_state_dicts(pretrained_model_name_or_paths, **kwargs):
42
+ from_hf_hub = kwargs.pop("from_hf_hub", FROM_HF_HUB)
43
+ from_aistudio = kwargs.pop("from_aistudio", FROM_AISTUDIO)
44
+ from_diffusers = kwargs.pop("from_diffusers", FROM_DIFFUSERS)
45
+
46
+ cache_dir = kwargs.pop("cache_dir", None)
47
+ if cache_dir is None:
48
+ if from_aistudio:
49
+ cache_dir = None # TODO, check aistudio cache
50
+ elif from_hf_hub:
51
+ cache_dir = DIFFUSERS_CACHE
52
+ else:
53
+ cache_dir = PPDIFFUSERS_CACHE
54
+ force_download = kwargs.pop("force_download", False)
55
+ resume_download = kwargs.pop("resume_download", False)
56
+ proxies = kwargs.pop("proxies", None)
57
+ local_files_only = kwargs.pop("local_files_only", HF_HUB_OFFLINE)
58
+ use_auth_token = kwargs.pop("use_auth_token", None)
59
+ revision = kwargs.pop("revision", None)
60
+ subfolder = kwargs.pop("subfolder", None)
61
+ weight_name = kwargs.pop("weight_name", None)
62
+ use_safetensors = kwargs.pop("use_safetensors", None)
63
+
64
+ if use_safetensors is None:
65
+ use_safetensors = True
66
+
67
+ user_agent = {
68
+ "file_type": "text_inversion",
69
+ "framework": "pytorch" if from_diffusers else "paddle",
70
+ }
71
+ state_dicts = []
72
+ for pretrained_model_name_or_path in pretrained_model_name_or_paths:
73
+ if not isinstance(pretrained_model_name_or_path, (dict, paddle.Tensor)):
74
+ # 3.1. Load textual inversion file
75
+ model_file = None
76
+
77
+ # Let's first try to load .safetensors weights
78
+ if (use_safetensors and weight_name is None) or (
79
+ weight_name is not None and weight_name.endswith(".safetensors")
80
+ ):
81
+ try:
82
+ model_file = _get_model_file(
83
+ pretrained_model_name_or_path,
84
+ weights_name=weight_name or TEXT_INVERSION_NAME_SAFE,
85
+ cache_dir=cache_dir,
86
+ force_download=force_download,
87
+ resume_download=resume_download,
88
+ proxies=proxies,
89
+ local_files_only=local_files_only,
90
+ use_auth_token=use_auth_token,
91
+ revision=revision,
92
+ subfolder=subfolder,
93
+ user_agent=user_agent,
94
+ from_hf_hub=from_hf_hub,
95
+ from_aistudio=from_aistudio,
96
+ )
97
+ except Exception:
98
+ model_file = None
99
+ pass
100
+
101
+ if model_file is None:
102
+ model_file = _get_model_file(
103
+ pretrained_model_name_or_path,
104
+ weights_name=(weight_name or TORCH_TEXT_INVERSION_NAME)
105
+ if from_diffusers
106
+ else (weight_name or PADDLE_TEXT_INVERSION_NAME),
107
+ cache_dir=cache_dir,
108
+ force_download=force_download,
109
+ resume_download=resume_download,
110
+ proxies=proxies,
111
+ local_files_only=local_files_only,
112
+ use_auth_token=use_auth_token,
113
+ revision=revision,
114
+ subfolder=subfolder,
115
+ user_agent=user_agent,
116
+ from_hf_hub=from_hf_hub,
117
+ from_aistudio=from_aistudio,
118
+ )
119
+ assert model_file is not None, "Could not find the model file!"
120
+ state_dict = {}
121
+ load_state_dict(model_file, state_dict)
122
+ else:
123
+ state_dict = pretrained_model_name_or_path
124
+
125
+ state_dicts.append(state_dict)
126
+
127
+ return state_dicts
128
+
129
+
130
+ class TextualInversionLoaderMixin:
131
+ r"""
132
+ Load Textual Inversion tokens and embeddings to the tokenizer and text encoder.
133
+ """
134
+
135
+ def maybe_convert_prompt(self, prompt: Union[str, List[str]], tokenizer: "PretrainedTokenizer"): # noqa: F821
136
+ r"""
137
+ Processes prompts that include a special token corresponding to a multi-vector textual inversion embedding to
138
+ be replaced with multiple special tokens each corresponding to one of the vectors. If the prompt has no textual
139
+ inversion token or if the textual inversion token is a single vector, the input prompt is returned.
140
+
141
+ Parameters:
142
+ prompt (`str` or list of `str`):
143
+ The prompt or prompts to guide the image generation.
144
+ tokenizer (`PretrainedTokenizer`):
145
+ The tokenizer responsible for encoding the prompt into input tokens.
146
+
147
+ Returns:
148
+ `str` or list of `str`: The converted prompt
149
+ """
150
+ if not isinstance(prompt, List):
151
+ prompts = [prompt]
152
+ else:
153
+ prompts = prompt
154
+
155
+ prompts = [self._maybe_convert_prompt(p, tokenizer) for p in prompts]
156
+
157
+ if not isinstance(prompt, List):
158
+ return prompts[0]
159
+
160
+ return prompts
161
+
162
+ def _maybe_convert_prompt(self, prompt: str, tokenizer: "PretrainedTokenizer"): # noqa: F821
163
+ r"""
164
+ Maybe convert a prompt into a "multi vector"-compatible prompt. If the prompt includes a token that corresponds
165
+ to a multi-vector textual inversion embedding, this function will process the prompt so that the special token
166
+ is replaced with multiple special tokens each corresponding to one of the vectors. If the prompt has no textual
167
+ inversion token or a textual inversion token that is a single vector, the input prompt is simply returned.
168
+
169
+ Parameters:
170
+ prompt (`str`):
171
+ The prompt to guide the image generation.
172
+ tokenizer (`PretrainedTokenizer`):
173
+ The tokenizer responsible for encoding the prompt into input tokens.
174
+
175
+ Returns:
176
+ `str`: The converted prompt
177
+ """
178
+ tokens = tokenizer.tokenize(prompt)
179
+ unique_tokens = set(tokens)
180
+ for token in unique_tokens:
181
+ if token in tokenizer.added_tokens_encoder:
182
+ replacement = token
183
+ i = 1
184
+ while f"{token}_{i}" in tokenizer.added_tokens_encoder:
185
+ replacement += f" {token}_{i}"
186
+ i += 1
187
+
188
+ prompt = prompt.replace(token, replacement)
189
+
190
+ return prompt
191
+
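For reference, a self-contained sketch of the prompt expansion performed by `_maybe_convert_prompt`, using a plain dict as a stand-in for the tokenizer's `added_tokens_encoder` (the token names are hypothetical):

```py
# Stand-in for tokenizer.added_tokens_encoder after loading a 3-vector embedding.
added_tokens = {"<cat-toy>": 49408, "<cat-toy>_1": 49409, "<cat-toy>_2": 49410}

def expand_multi_vector(prompt: str, token: str = "<cat-toy>") -> str:
    # Mirrors _maybe_convert_prompt: append token_1, token_2, ... while they exist.
    replacement, i = token, 1
    while f"{token}_{i}" in added_tokens:
        replacement += f" {token}_{i}"
        i += 1
    return prompt.replace(token, replacement)

print(expand_multi_vector("A <cat-toy> backpack"))
# -> "A <cat-toy> <cat-toy>_1 <cat-toy>_2 backpack"
```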
192
+ def _check_text_inv_inputs(self, tokenizer, text_encoder, pretrained_model_name_or_paths, tokens):
193
+ if tokenizer is None:
194
+ raise ValueError(
195
+ f"{self.__class__.__name__} requires `self.tokenizer` or passing a `tokenizer` of type `PretrainedTokenizer` for calling"
196
+ f" `{self.load_textual_inversion.__name__}`"
197
+ )
198
+
199
+ if text_encoder is None:
200
+ raise ValueError(
201
+ f"{self.__class__.__name__} requires `self.text_encoder` or passing a `text_encoder` of type `PretrainedModel` for calling"
202
+ f" `{self.load_textual_inversion.__name__}`"
203
+ )
204
+
205
+ if len(pretrained_model_name_or_paths) > 1 and len(pretrained_model_name_or_paths) != len(tokens):
206
+ raise ValueError(
207
+ f"You have passed a list of models of length {len(pretrained_model_name_or_paths)}, and list of tokens of length {len(tokens)} "
208
+ f"Make sure both lists have the same length."
209
+ )
210
+
211
+ valid_tokens = [t for t in tokens if t is not None]
212
+ if len(set(valid_tokens)) < len(valid_tokens):
213
+ raise ValueError(f"You have passed a list of tokens that contains duplicates: {tokens}")
214
+
215
+ @staticmethod
216
+ def _retrieve_tokens_and_embeddings(tokens, state_dicts, tokenizer):
217
+ all_tokens = []
218
+ all_embeddings = []
219
+ for state_dict, token in zip(state_dicts, tokens):
220
+ if isinstance(state_dict, paddle.Tensor):
221
+ if token is None:
222
+ raise ValueError(
223
+ "You are trying to load a textual inversion embedding that has been saved as a PyTorch tensor. Make sure to pass the name of the corresponding token in this case: `token=...`."
224
+ )
225
+ loaded_token = token
226
+ embedding = state_dict
227
+ elif len(state_dict) == 1:
228
+ # diffusers
229
+ loaded_token, embedding = next(iter(state_dict.items()))
230
+ elif "string_to_param" in state_dict:
231
+ # A1111
232
+ loaded_token = state_dict["name"]
233
+ embedding = state_dict["string_to_param"]["*"]
234
+ else:
235
+ raise ValueError(
236
+ f"Loaded state dictonary is incorrect: {state_dict}. \n\n"
237
+ "Please verify that the loaded state dictionary of the textual embedding either only has a single key or includes the `string_to_param`"
238
+ " input key."
239
+ )
240
+
241
+ if token is not None and loaded_token != token:
242
+ logger.info(f"The loaded token: {loaded_token} is overwritten by the passed token {token}.")
243
+ else:
244
+ token = loaded_token
245
+
246
+ if token in tokenizer.get_vocab():
247
+ raise ValueError(
248
+ f"Token {token} already in tokenizer vocabulary. Please choose a different token name or remove {token} and embedding from the tokenizer and text encoder."
249
+ )
250
+
251
+ all_tokens.append(token)
252
+ all_embeddings.append(embedding)
253
+
254
+ return all_tokens, all_embeddings
255
+
256
+ @staticmethod
257
+ def _extend_tokens_and_embeddings(tokens, embeddings, tokenizer):
258
+ all_tokens = []
259
+ all_embeddings = []
260
+
261
+ for embedding, token in zip(embeddings, tokens):
262
+ if f"{token}_1" in tokenizer.get_vocab():
263
+ multi_vector_tokens = [token]
264
+ i = 1
265
+ while f"{token}_{i}" in tokenizer.added_tokens_encoder:
266
+ multi_vector_tokens.append(f"{token}_{i}")
267
+ i += 1
268
+
269
+ raise ValueError(
270
+ f"Multi-vector Token {multi_vector_tokens} already in tokenizer vocabulary. Please choose a different token name or remove the {multi_vector_tokens} and embedding from the tokenizer and text encoder."
271
+ )
272
+
273
+ is_multi_vector = len(embedding.shape) > 1 and embedding.shape[0] > 1
274
+ if is_multi_vector:
275
+ all_tokens += [token] + [f"{token}_{i}" for i in range(1, embedding.shape[0])]
276
+ all_embeddings += [e for e in embedding] # noqa: C416
277
+ else:
278
+ all_tokens += [token]
279
+ all_embeddings += [embedding[0]] if len(embedding.shape) > 1 else [embedding]
280
+
281
+ return all_tokens, all_embeddings
282
+
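And a small sketch of the multi-vector split done by `_extend_tokens_and_embeddings`, using a random tensor in place of a real embedding (the `<style>` token is hypothetical):

```py
# Hedged sketch of the multi-vector split performed above, with a random tensor.
import paddle

token = "<style>"                    # hypothetical placeholder token
embedding = paddle.randn([3, 768])   # a 3-vector textual inversion embedding

tokens = [token] + [f"{token}_{i}" for i in range(1, embedding.shape[0])]
vectors = [e for e in embedding]     # one 768-d vector per token

print(tokens)            # ['<style>', '<style>_1', '<style>_2']
print(vectors[0].shape)  # [768]
```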
283
+ def load_textual_inversion(
284
+ self,
285
+ pretrained_model_name_or_path: Union[str, List[str], Dict[str, paddle.Tensor], List[Dict[str, paddle.Tensor]]],
286
+ token: Optional[Union[str, List[str]]] = None,
287
+ tokenizer: Optional["PretrainedTokenizer"] = None, # noqa: F821
288
+ text_encoder: Optional["PretrainedModel"] = None, # noqa: F821
289
+ **kwargs,
290
+ ):
291
+ r"""
292
+ Load Textual Inversion embeddings into the text encoder of [`StableDiffusionPipeline`] (both 🤗 Diffusers and
293
+ Automatic1111 formats are supported).
294
+
295
+ Parameters:
296
+ pretrained_model_name_or_path (`str` or `os.PathLike` or `List[str or os.PathLike]` or `Dict` or `List[Dict]`):
297
+ Can be either one of the following or a list of them:
298
+
299
+ - A string, the *model id* (for example `sd-concepts-library/low-poly-hd-logos-icons`) of a
300
+ pretrained model hosted on the Hub.
301
+ - A path to a *directory* (for example `./my_text_inversion_directory/`) containing the textual
302
+ inversion weights.
303
+ - A path to a *file* (for example `./my_text_inversions.pt`) containing textual inversion weights.
304
+ - A [torch state
305
+ dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
306
+
307
+ token (`str` or `List[str]`, *optional*):
308
+ Override the token to use for the textual inversion weights. If `pretrained_model_name_or_path` is a
309
+ list, then `token` must also be a list of equal length.
310
+ text_encoder ([`~transformers.CLIPTextModel`], *optional*):
311
+ Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
312
+ If not specified, the function uses `self.text_encoder`.
313
+ tokenizer ([`~transformers.CLIPTokenizer`], *optional*):
314
+ A `CLIPTokenizer` to tokenize text. If not specified, the function uses `self.tokenizer`.
315
+ weight_name (`str`, *optional*):
316
+ Name of a custom weight file. This should be used when:
317
+
318
+ - The saved textual inversion file is in 🤗 Diffusers format, but was saved under a specific weight
319
+ name such as `text_inv.bin`.
320
+ - The saved textual inversion file is in the Automatic1111 format.
321
+ cache_dir (`Union[str, os.PathLike]`, *optional*):
322
+ Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
323
+ is not used.
324
+ force_download (`bool`, *optional*, defaults to `False`):
325
+ Whether or not to force the (re-)download of the model weights and configuration files, overriding the
326
+ cached versions if they exist.
327
+ resume_download (`bool`, *optional*, defaults to `False`):
328
+ Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
329
+ incompletely downloaded files are deleted.
330
+ proxies (`Dict[str, str]`, *optional*):
331
+ A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
332
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
333
+ local_files_only (`bool`, *optional*, defaults to `False`):
334
+ Whether to only load local model weights and configuration files or not. If set to `True`, the model
335
+ won't be downloaded from the Hub.
336
+ use_auth_token (`str` or *bool*, *optional*):
337
+ The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
338
+ `diffusers-cli login` (stored in `~/.huggingface`) is used.
339
+ revision (`str`, *optional*, defaults to `"main"`):
340
+ The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
341
+ allowed by Git.
342
+ subfolder (`str`, *optional*, defaults to `""`):
343
+ The subfolder location of a model file within a larger model repository on the Hub or locally.
344
+ mirror (`str`, *optional*):
345
+ Mirror source to resolve accessibility issues if you're downloading a model in China. We do not
346
+ guarantee the timeliness or safety of the source, and you should refer to the mirror site for more
347
+ information.
348
+
349
+ Example:
350
+
351
+ To load a Textual Inversion embedding vector in 🤗 Diffusers format:
352
+
353
+ ```py
354
+ from ppdiffusers import StableDiffusionPipeline
355
+ import paddle
356
+
357
+ model_id = "runwayml/stable-diffusion-v1-5"
358
+ pipe = StableDiffusionPipeline.from_pretrained(model_id, paddle_dtype=paddle.float16)
359
+
360
+ pipe.load_textual_inversion("sd-concepts-library/cat-toy")
361
+
362
+ prompt = "A <cat-toy> backpack"
363
+
364
+ image = pipe(prompt, num_inference_steps=50).images[0]
365
+ image.save("cat-backpack.png")
366
+ ```
367
+
368
+ To load a Textual Inversion embedding vector in Automatic1111 format, make sure to download the vector first
369
+ (for example from [civitAI](https://civitai.com/models/3036?modelVersionId=9857)) and then load the vector
370
+ locally:
371
+
372
+ ```py
373
+ from ppdiffusers import StableDiffusionPipeline
374
+ import paddle
375
+
376
+ model_id = "runwayml/stable-diffusion-v1-5"
377
+ pipe = StableDiffusionPipeline.from_pretrained(model_id, paddle_dtype=paddle.float16)
378
+
379
+ pipe.load_textual_inversion("./charturnerv2.pt", token="charturnerv2")
380
+
381
+ prompt = "charturnerv2, multiple views of the same character in the same outfit, a character turnaround of a woman wearing a black jacket and red shirt, best quality, intricate details."
382
+
383
+ image = pipe(prompt, num_inference_steps=50).images[0]
384
+ image.save("character.png")
385
+ ```
386
+
387
+ """
388
+ # 1. Set correct tokenizer and text encoder
389
+ tokenizer = tokenizer or getattr(self, "tokenizer", None)
390
+ text_encoder = text_encoder or getattr(self, "text_encoder", None)
391
+
392
+ # 2. Normalize inputs
393
+ pretrained_model_name_or_paths = (
394
+ [pretrained_model_name_or_path]
395
+ if not isinstance(pretrained_model_name_or_path, list)
396
+ else pretrained_model_name_or_path
397
+ )
398
+ tokens = [token] if not isinstance(token, list) else token
399
+ if tokens[0] is None:
400
+ tokens = tokens * len(pretrained_model_name_or_paths)
401
+
402
+ # 3. Check inputs
403
+ self._check_text_inv_inputs(tokenizer, text_encoder, pretrained_model_name_or_paths, tokens)
404
+
405
+ # 4. Load state dicts of textual embeddings
406
+ state_dicts = load_textual_inversion_state_dicts(pretrained_model_name_or_paths, **kwargs)
407
+
408
+ # 4.1 Handle the special case when state_dict is a tensor that contains n embeddings for n tokens
409
+ if len(tokens) > 1 and len(state_dicts) == 1:
410
+ if isinstance(state_dicts[0], paddle.Tensor):
411
+ state_dicts = list(state_dicts[0])
412
+ if len(tokens) != len(state_dicts):
413
+ raise ValueError(
414
+ f"You have passed a state_dict contains {len(state_dicts)} embeddings, and list of tokens of length {len(tokens)} "
415
+ f"Make sure both have the same length."
416
+ )
417
+
418
+ # 4. Retrieve tokens and embeddings
419
+ tokens, embeddings = self._retrieve_tokens_and_embeddings(tokens, state_dicts, tokenizer)
420
+
421
+ # 5. Extend tokens and embeddings for multi vector
422
+ tokens, embeddings = self._extend_tokens_and_embeddings(tokens, embeddings, tokenizer)
423
+
424
+ # 6. Make sure all embeddings have the correct size
425
+ expected_emb_dim = text_encoder.get_input_embeddings().weight.shape[-1]
426
+ if any(expected_emb_dim != emb.shape[-1] for emb in embeddings):
427
+ raise ValueError(
428
+ "Loaded embeddings are of incorrect shape. Expected each textual inversion embedding "
429
+ f"to be of shape {expected_emb_dim}, but are {expected_emb_dim} "
430
+ )
431
+
432
+ # 7.2 save expected dtype
433
+ dtype = text_encoder.dtype
434
+
435
+ # 7.3 Increase token embedding matrix
436
+ text_encoder.resize_token_embeddings(len(tokenizer) + len(tokens))
437
+
438
+ # 7.4 Load token and embedding
439
+ with paddle.no_grad():
440
+ for token, embedding in zip(tokens, embeddings):
441
+ # add tokens and get ids
442
+ tokenizer.add_tokens(token)
443
+ token_id = tokenizer.convert_tokens_to_ids(token)
444
+ text_encoder.get_input_embeddings().weight[token_id] = embedding
445
+ logger.info(f"Loaded textual inversion embedding for {token}.")
446
+
447
+ text_encoder.get_input_embeddings().to(
448
+ dtype=dtype,
449
+ )
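Because `load_textual_inversion` accepts lists, several embeddings can be loaded in one call; a hedged sketch (the local file and `<second-concept>` token are hypothetical):

```py
import paddle
from ppdiffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", paddle_dtype=paddle.float16
)
# Load two concepts at once; a None token falls back to the name stored in the file.
pipe.load_textual_inversion(
    ["sd-concepts-library/cat-toy", "./my_second_concept.safetensors"],  # second path is hypothetical
    token=[None, "<second-concept>"],
)
image = pipe("A <cat-toy> next to a <second-concept>", num_inference_steps=50).images[0]
```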
VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/loaders/unet.py ADDED
@@ -0,0 +1,830 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import os
15
+ from collections import defaultdict
16
+ from contextlib import nullcontext
17
+ from functools import partial
18
+ from typing import Callable, Dict, List, Optional, Union
19
+
20
+ import paddle
21
+
22
+ from ..models.embeddings import ImageProjection
23
+ from ..models.modeling_pytorch_paddle_utils import (
24
+ convert_paddle_state_dict_to_pytorch,
25
+ convert_pytorch_state_dict_to_paddle,
26
+ )
27
+ from ..models.modeling_utils import faster_set_state_dict, load_state_dict
28
+ from ..utils import (
29
+ DIFFUSERS_CACHE,
30
+ FROM_AISTUDIO,
31
+ FROM_DIFFUSERS,
32
+ FROM_HF_HUB,
33
+ HF_HUB_OFFLINE,
34
+ LOW_CPU_MEM_USAGE_DEFAULT,
35
+ PPDIFFUSERS_CACHE,
36
+ TO_DIFFUSERS,
37
+ USE_PEFT_BACKEND,
38
+ _get_model_file,
39
+ delete_adapter_layers,
40
+ is_paddle_version,
41
+ is_ppxformers_available,
42
+ is_safetensors_available,
43
+ is_torch_available,
44
+ logging,
45
+ set_adapter_layers,
46
+ set_weights_and_activate_adapters,
47
+ )
48
+ from .utils import AttnProcsLayers
49
+
50
+ if is_safetensors_available():
51
+ from safetensors.numpy import save_file as np_safe_save_file
52
+
53
+ if is_torch_available():
54
+ from safetensors.torch import save_file as torch_safe_save_file
55
+
56
+ logger = logging.get_logger(__name__)
57
+
58
+
59
+ if is_torch_available():
60
+ import torch
61
+ TEXT_ENCODER_NAME = "text_encoder"
62
+ UNET_NAME = "unet"
63
+
64
+ TORCH_LORA_WEIGHT_NAME = "pytorch_lora_weights.bin"
65
+ TORCH_LORA_WEIGHT_NAME_SAFE = "pytorch_lora_weights.safetensors"
66
+
67
+ PADDLE_LORA_WEIGHT_NAME = "paddle_lora_weights.pdparams"
68
+ PADDLE_LORA_WEIGHT_NAME_SAFE = "paddle_lora_weights.safetensors"
69
+
70
+ TORCH_CUSTOM_DIFFUSION_WEIGHT_NAME = "pytorch_custom_diffusion_weights.bin"
71
+ TORCH_CUSTOM_DIFFUSION_WEIGHT_NAME_SAFE = "pytorch_custom_diffusion_weights.safetensors"
72
+
73
+ PADDLE_CUSTOM_DIFFUSION_WEIGHT_NAME = "paddle_custom_diffusion_weights.pdparams"
74
+ PADDLE_CUSTOM_DIFFUSION_WEIGHT_NAME_SAFE = "paddle_custom_diffusion_weights.safetensors"
75
+
76
+
77
+ class UNet2DConditionLoadersMixin:
78
+ """
79
+ Load LoRA layers into a [`UNet2DConditionModel`].
80
+ """
81
+
82
+ text_encoder_name = TEXT_ENCODER_NAME
83
+ unet_name = UNET_NAME
84
+
85
+ def load_attn_procs(self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, paddle.Tensor]], **kwargs):
86
+ r"""
87
+ Load pretrained attention processor layers into [`UNet2DConditionModel`]. Attention processor layers have to be
88
+ defined in
89
+ [`attention_processor.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py)
90
+ and be a `nn.Layer` class.
91
+
92
+ Parameters:
93
+ pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
94
+ Can be either:
95
+
96
+ - A string, the model id (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
97
+ the Hub.
98
+ - A path to a directory (for example `./my_model_directory`) containing the model weights saved
99
+ with [`ModelMixin.save_pretrained`].
100
+ - A [torch state
101
+ dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
102
+
103
+ cache_dir (`Union[str, os.PathLike]`, *optional*):
104
+ Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
105
+ is not used.
106
+ force_download (`bool`, *optional*, defaults to `False`):
107
+ Whether or not to force the (re-)download of the model weights and configuration files, overriding the
108
+ cached versions if they exist.
109
+ resume_download (`bool`, *optional*, defaults to `False`):
110
+ Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
111
+ incompletely downloaded files are deleted.
112
+ proxies (`Dict[str, str]`, *optional*):
113
+ A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
114
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
115
+ local_files_only (`bool`, *optional*, defaults to `False`):
116
+ Whether to only load local model weights and configuration files or not. If set to `True`, the model
117
+ won't be downloaded from the Hub.
118
+ use_auth_token (`str` or *bool*, *optional*):
119
+ The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
120
+ `diffusers-cli login` (stored in `~/.huggingface`) is used.
121
+ low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if PaddlePaddle version >= 2.5.0 else `False`):
122
+ Speed up model loading by only loading the pretrained weights and not initializing the weights. This also
123
+ tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
124
+ Only supported for PaddlePaddle >= 2.5.0. If you are using an older version of PaddlePaddle, setting this
125
+ argument to `True` will raise an error.
126
+ revision (`str`, *optional*, defaults to `"main"`):
127
+ The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
128
+ allowed by Git.
129
+ subfolder (`str`, *optional*, defaults to `""`):
130
+ The subfolder location of a model file within a larger model repository on the Hub or locally.
131
+ mirror (`str`, *optional*):
132
+ Mirror source to resolve accessibility issues if you’re downloading a model in China. We do not
133
+ guarantee the timeliness or safety of the source, and you should refer to the mirror site for more
134
+ information.
135
+
136
+ Example:
137
+
138
+ ```py
139
+ from ppdiffusers import AutoPipelineForText2Image
140
+ import paddle
141
+
142
+ pipeline = AutoPipelineForText2Image.from_pretrained(
143
+ "stabilityai/stable-diffusion-xl-base-1.0", paddle_dtype=paddle.float16
144
+ )
145
+ pipeline.unet.load_attn_procs(
146
+ "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_name="cinematic"
147
+ )
148
+ ```
149
+ """
150
+ from ..models.attention_processor import CustomDiffusionAttnProcessor
151
+ from ..models.lora import (
152
+ LoRACompatibleConv,
153
+ LoRACompatibleLinear,
154
+ LoRAConv2dLayer,
155
+ LoRALinearLayer,
156
+ )
157
+
158
+ from_hf_hub = kwargs.pop("from_hf_hub", FROM_HF_HUB)
159
+ from_aistudio = kwargs.pop("from_aistudio", FROM_AISTUDIO)
160
+ cache_dir = kwargs.pop("cache_dir", None)
161
+ if cache_dir is None:
162
+ if from_aistudio:
163
+ cache_dir = None # TODO, check aistudio cache
164
+ elif from_hf_hub:
165
+ cache_dir = DIFFUSERS_CACHE
166
+ else:
167
+ cache_dir = PPDIFFUSERS_CACHE
168
+ from_diffusers = kwargs.pop("from_diffusers", FROM_DIFFUSERS)
169
+
170
+ force_download = kwargs.pop("force_download", False)
171
+ resume_download = kwargs.pop("resume_download", False)
172
+ proxies = kwargs.pop("proxies", None)
173
+ local_files_only = kwargs.pop("local_files_only", HF_HUB_OFFLINE)
174
+ use_auth_token = kwargs.pop("use_auth_token", None)
175
+ revision = kwargs.pop("revision", None)
176
+ subfolder = kwargs.pop("subfolder", None)
177
+ if subfolder is None:
178
+ subfolder = ""
179
+ weight_name = kwargs.pop("weight_name", None)
180
+ use_safetensors = kwargs.pop("use_safetensors", None)
181
+ low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", LOW_CPU_MEM_USAGE_DEFAULT)
182
+ # This value has the same meaning as the `--network_alpha` option in the kohya-ss trainer script.
183
+ # See https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning
184
+ network_alphas = kwargs.pop("network_alphas", None)
185
+
186
+ _pipeline = kwargs.pop("_pipeline", None) # noqa: F841
187
+
188
+ is_network_alphas_none = network_alphas is None
189
+
190
+ if use_safetensors is None:
191
+ use_safetensors = True
192
+
193
+ if weight_name is not None:
194
+ if "paddle" in weight_name.lower() or ".pdparams" in weight_name.lower():
195
+ if from_diffusers:
196
+ logger.warning(
197
+ "Detect the weight is in ppdiffusers format, but currently, `from_diffusers` is set to `True`. To proceed, we will change the value of `from_diffusers` to `False`!"
198
+ )
199
+ from_diffusers = False
200
+ elif "torch" in weight_name.lower() or ".bin" in weight_name.lower() or ".pt" in weight_name.lower():
201
+ if not from_diffusers:
202
+ logger.warning(
203
+ "Detect the weight is in diffusers format, but currently, `from_diffusers` is set to `False`. To proceed, we will change the value of `from_diffusers` to `True`!"
204
+ )
205
+ from_diffusers = True
206
+
207
+ user_agent = {
208
+ "file_type": "attn_procs_weights",
209
+ "framework": "pytorch" if from_diffusers else "paddle",
210
+ }
211
+
212
+ if low_cpu_mem_usage and (not is_paddle_version(">=", "2.5.0") and not is_paddle_version("==", "0.0.0")):
213
+ raise NotImplementedError(
214
+ "Low memory initialization requires paddlepaddle-gpu >= 2.5.0. Please either update your PaddlePaddle version or set"
215
+ " `low_cpu_mem_usage=False`."
216
+ )
217
+
218
+ model_file = None
219
+ state_dict = {}
220
+ if not isinstance(pretrained_model_name_or_path_or_dict, dict):
221
+ # Let's first try to load .safetensors weights
222
+ if (use_safetensors and weight_name is None) or (
223
+ weight_name is not None and weight_name.endswith(".safetensors")
224
+ ):
225
+ try:
226
+ model_file = _get_model_file(
227
+ pretrained_model_name_or_path_or_dict,
228
+ weights_name=(weight_name or TORCH_LORA_WEIGHT_NAME_SAFE)
229
+ if from_diffusers
230
+ else ((weight_name or PADDLE_LORA_WEIGHT_NAME_SAFE)),
231
+ cache_dir=cache_dir,
232
+ force_download=force_download,
233
+ resume_download=resume_download,
234
+ proxies=proxies,
235
+ local_files_only=local_files_only,
236
+ use_auth_token=use_auth_token,
237
+ revision=revision,
238
+ subfolder=subfolder,
239
+ user_agent=user_agent,
240
+ from_aistudio=from_aistudio,
241
+ from_hf_hub=from_hf_hub,
242
+ )
243
+ except Exception:
244
+ model_file = None
245
+ if model_file is None:
246
+ model_file = _get_model_file(
247
+ pretrained_model_name_or_path_or_dict,
248
+ weights_name=(weight_name or TORCH_LORA_WEIGHT_NAME)
249
+ if from_diffusers
250
+ else ((weight_name or PADDLE_LORA_WEIGHT_NAME)),
251
+ cache_dir=cache_dir,
252
+ force_download=force_download,
253
+ resume_download=resume_download,
254
+ proxies=proxies,
255
+ local_files_only=local_files_only,
256
+ use_auth_token=use_auth_token,
257
+ revision=revision,
258
+ subfolder=subfolder,
259
+ user_agent=user_agent,
260
+ from_aistudio=from_aistudio,
261
+ from_hf_hub=from_hf_hub,
262
+ )
263
+
264
+ assert model_file is not None, "Could not find the model file!"
265
+ data_format = load_state_dict(model_file, state_dict)
266
+ if not from_diffusers and data_format == "pt":
267
+ logger.warning(
268
+ "Detect the weight is in diffusers format, but currently, `from_diffusers` is set to `False`. To proceed, we will change the value of `from_diffusers` to `True`!"
269
+ )
270
+ from_diffusers = True
271
+ if from_diffusers and data_format in ["pd", "np"]:
272
+ logger.warning(
273
+ "Detect the weight is in ppdiffusers format, but currently, `from_diffusers` is set to `True`. To proceed, we will change the value of `from_diffusers` to `False`!"
274
+ )
275
+ from_diffusers = False
276
+ else:
277
+ state_dict = pretrained_model_name_or_path_or_dict
278
+
279
+ # fill attn processors
280
+ lora_layers_list = []
281
+
282
+ is_lora = all(("lora" in k or k.endswith(".alpha")) for k in state_dict.keys()) and not USE_PEFT_BACKEND
283
+ is_custom_diffusion = any("custom_diffusion" in k for k in state_dict.keys())
284
+
285
+ if is_lora:
286
+ # correct keys
287
+ state_dict, network_alphas = self.convert_state_dict_legacy_attn_format(state_dict, network_alphas)
288
+
289
+ if network_alphas is not None:
290
+ network_alphas_keys = list(network_alphas.keys())
291
+ used_network_alphas_keys = set()
292
+
293
+ lora_grouped_dict = defaultdict(dict)
294
+ mapped_network_alphas = {}
295
+
296
+ all_keys = list(state_dict.keys())
297
+ for key in all_keys:
298
+ value = state_dict.pop(key)
299
+ attn_processor_key, sub_key = ".".join(key.split(".")[:-3]), ".".join(key.split(".")[-3:])
300
+ lora_grouped_dict[attn_processor_key][sub_key] = value
301
+
302
+ # Create another `mapped_network_alphas` dictionary so that we can properly map them.
303
+ if network_alphas is not None:
304
+ for k in network_alphas_keys:
305
+ if k.replace(".alpha", "") in key:
306
+ mapped_network_alphas.update({attn_processor_key: network_alphas.get(k)})
307
+ used_network_alphas_keys.add(k)
308
+
309
+ if not is_network_alphas_none:
310
+ if len(set(network_alphas_keys) - used_network_alphas_keys) > 0:
311
+ raise ValueError(
312
+ f"The `network_alphas` has to be empty at this point but has the following keys \n\n {', '.join(network_alphas.keys())}"
313
+ )
314
+
315
+ if len(state_dict) > 0:
316
+ raise ValueError(
317
+ f"The `state_dict` has to be empty at this point but has the following keys \n\n {', '.join(state_dict.keys())}"
318
+ )
319
+
320
+ for key, value_dict in lora_grouped_dict.items():
321
+ attn_processor = self
322
+ for sub_key in key.split("."):
323
+ attn_processor = getattr(attn_processor, sub_key)
324
+
325
+ # Process non-attention layers, which don't have to_{k,v,q,out_proj}_lora layers
326
+ # or add_{k,v,q,out_proj}_proj_lora layers.
327
+ if from_diffusers:
328
+ rank = value_dict["lora.down.weight"].shape[0]
329
+ else:
330
+ rank = value_dict["lora.down.weight"].shape[1]
331
+
332
+ if isinstance(attn_processor, LoRACompatibleConv):
333
+ in_features = attn_processor._in_channels
334
+ out_features = attn_processor._out_channels
335
+ kernel_size = attn_processor._kernel_size
336
+
337
+ ctx = paddle.LazyGuard if low_cpu_mem_usage else nullcontext
338
+ with ctx():
339
+ lora = LoRAConv2dLayer(
340
+ in_features=in_features,
341
+ out_features=out_features,
342
+ rank=rank,
343
+ kernel_size=kernel_size,
344
+ stride=attn_processor._stride,
345
+ padding=attn_processor._padding,
346
+ network_alpha=mapped_network_alphas.get(key),
347
+ )
348
+ elif isinstance(attn_processor, LoRACompatibleLinear):
349
+ ctx = paddle.LazyGuard if low_cpu_mem_usage else nullcontext
350
+ with ctx():
351
+ lora = LoRALinearLayer(
352
+ attn_processor.in_features,
353
+ attn_processor.out_features,
354
+ rank,
355
+ mapped_network_alphas.get(key),
356
+ )
357
+ else:
358
+ raise ValueError(f"Module {key} is not a LoRACompatibleConv or LoRACompatibleLinear module.")
359
+
360
+ value_dict = {k.replace("lora.", ""): v for k, v in value_dict.items()}
361
+ lora_layers_list.append((attn_processor, lora))
362
+ if from_diffusers:
363
+ convert_pytorch_state_dict_to_paddle(lora, value_dict)
364
+ faster_set_state_dict(lora, value_dict)
365
+
366
+ elif is_custom_diffusion:
367
+ attn_processors = {}
368
+ custom_diffusion_grouped_dict = defaultdict(dict)
369
+ for key, value in state_dict.items():
370
+ if len(value) == 0:
371
+ custom_diffusion_grouped_dict[key] = {}
372
+ else:
373
+ if "to_out" in key:
374
+ attn_processor_key, sub_key = ".".join(key.split(".")[:-3]), ".".join(key.split(".")[-3:])
375
+ else:
376
+ attn_processor_key, sub_key = ".".join(key.split(".")[:-2]), ".".join(key.split(".")[-2:])
377
+ custom_diffusion_grouped_dict[attn_processor_key][sub_key] = value
378
+
379
+ for key, value_dict in custom_diffusion_grouped_dict.items():
380
+ if len(value_dict) == 0:
381
+ attn_processors[key] = CustomDiffusionAttnProcessor(
382
+ train_kv=False, train_q_out=False, hidden_size=None, cross_attention_dim=None
383
+ )
384
+ else:
385
+ if from_diffusers:
386
+ cross_attention_dim = value_dict["to_k_custom_diffusion.weight"].shape[1]
387
+ hidden_size = value_dict["to_k_custom_diffusion.weight"].shape[0]
388
+ else:
389
+ cross_attention_dim = value_dict["to_k_custom_diffusion.weight"].shape[0]
390
+ hidden_size = value_dict["to_k_custom_diffusion.weight"].shape[1]
391
+ train_q_out = True if "to_q_custom_diffusion.weight" in value_dict else False
392
+ attn_processors[key] = CustomDiffusionAttnProcessor(
393
+ train_kv=True,
394
+ train_q_out=train_q_out,
395
+ hidden_size=hidden_size,
396
+ cross_attention_dim=cross_attention_dim,
397
+ )
398
+ if from_diffusers:
399
+ convert_pytorch_state_dict_to_paddle(attn_processors[key], value_dict)
400
+ faster_set_state_dict(attn_processors[key], value_dict)
401
+ elif USE_PEFT_BACKEND:
402
+ # In that case we have nothing to do as loading the adapter weights is already handled above by `set_peft_model_state_dict`
403
+ # on the Unet
404
+ pass
405
+ else:
406
+ raise ValueError(
407
+ f"{model_file} does not seem to be in the correct format expected by LoRA or Custom Diffusion training."
408
+ )
409
+
410
+ # For PEFT backend the Unet is already offloaded at this stage as it is handled inside `lora_lora_weights_into_unet`
411
+ if not USE_PEFT_BACKEND:
412
+
413
+ # only custom diffusion needs to set attn processors
414
+ if is_custom_diffusion:
415
+ self.set_attn_processor(attn_processors)
416
+
417
+ # set lora layers
418
+ for target_module, lora_layer in lora_layers_list:
419
+ target_module.set_lora_layer(lora_layer)
420
+
421
+ self.to(dtype=self.dtype)
422
+
423
+ def convert_state_dict_legacy_attn_format(self, state_dict, network_alphas):
424
+ is_new_lora_format = all(
425
+ key.startswith(self.unet_name) or key.startswith(self.text_encoder_name) for key in state_dict.keys()
426
+ )
427
+ if is_new_lora_format:
428
+ # Strip the `"unet"` prefix.
429
+ is_text_encoder_present = any(key.startswith(self.text_encoder_name) for key in state_dict.keys())
430
+ if is_text_encoder_present:
431
+ warn_message = "The state_dict contains LoRA params corresponding to the text encoder which are not being used here. To use both UNet and text encoder related LoRA params, use [`pipe.load_lora_weights()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraLoaderMixin.load_lora_weights)."
432
+ logger.warning(warn_message)
433
+ unet_keys = [k for k in state_dict.keys() if k.startswith(self.unet_name)]
434
+ state_dict = {k.replace(f"{self.unet_name}.", ""): v for k, v in state_dict.items() if k in unet_keys}
435
+
436
+ # change processor format to 'pure' LoRACompatibleLinear format
437
+ if any("processor" in k.split(".") for k in state_dict.keys()):
438
+
439
+ def format_to_lora_compatible(key):
440
+ if "processor" not in key.split("."):
441
+ return key
442
+ return key.replace(".processor", "").replace("to_out_lora", "to_out.0.lora").replace("_lora", ".lora")
443
+
444
+ state_dict = {format_to_lora_compatible(k): v for k, v in state_dict.items()}
445
+
446
+ if network_alphas is not None:
447
+ network_alphas = {format_to_lora_compatible(k): v for k, v in network_alphas.items()}
448
+ return state_dict, network_alphas
449
+
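A self-contained illustration of the key rewrite applied by `format_to_lora_compatible` above (the example key is hypothetical but follows the UNet naming scheme):

```py
def format_to_lora_compatible(key: str) -> str:
    # Same rewrite as in convert_state_dict_legacy_attn_format above.
    if "processor" not in key.split("."):
        return key
    return key.replace(".processor", "").replace("to_out_lora", "to_out.0.lora").replace("_lora", ".lora")

old_key = "down_blocks.0.attentions.0.transformer_blocks.0.attn1.processor.to_q_lora.down.weight"
print(format_to_lora_compatible(old_key))
# -> "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q.lora.down.weight"
```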
450
+ def save_attn_procs(
451
+ self,
452
+ save_directory: Union[str, os.PathLike],
453
+ is_main_process: bool = True,
454
+ weight_name: str = None,
455
+ save_function: Callable = None,
456
+ safe_serialization: bool = True,
457
+ to_diffusers: Optional[bool] = None,
458
+ **kwargs,
459
+ ):
460
+ r"""
461
+ Save attention processor layers to a directory so that it can be reloaded with the
462
+ [`~loaders.UNet2DConditionLoadersMixin.load_attn_procs`] method.
463
+
464
+ Arguments:
465
+ save_directory (`str` or `os.PathLike`):
466
+ Directory to save an attention processor to (will be created if it doesn't exist).
467
+ is_main_process (`bool`, *optional*, defaults to `True`):
468
+ Whether the process calling this is the main process or not. Useful during distributed training and you
469
+ need to call this function on all processes. In this case, set `is_main_process=True` only on the main
470
+ process to avoid race conditions.
471
+ save_function (`Callable`):
472
+ The function to use to save the state dictionary. Useful during distributed training when you need to
473
+ replace `torch.save` with another method. Can be configured with the environment variable
474
+ `DIFFUSERS_SAVE_MODE`.
475
+ safe_serialization (`bool`, *optional*, defaults to `True`):
476
+ Whether to save the model using `safetensors` or with `pickle`.
477
+
478
+ Example:
479
+
480
+ ```py
481
+ import paddle
482
+ from ppdiffusers import DiffusionPipeline
483
+
484
+ pipeline = DiffusionPipeline.from_pretrained(
485
+ "CompVis/stable-diffusion-v1-4",
486
+ paddle_dtype=paddle.float16,
487
+ )
488
+ pipeline.unet.load_attn_procs("path-to-save-model", weight_name="paddle_custom_diffusion_weights.pdparams")
489
+ pipeline.unet.save_attn_procs("path-to-save-model", weight_name="paddle_custom_diffusion_weights.pdparams")
490
+ ```
491
+ """
492
+ from ..models.attention_processor import (
493
+ CustomDiffusionAttnProcessor,
494
+ CustomDiffusionAttnProcessor2_5,
495
+ CustomDiffusionXFormersAttnProcessor,
496
+ )
497
+
498
+ if to_diffusers is None:
499
+ to_diffusers = TO_DIFFUSERS
500
+ if os.path.isfile(save_directory):
501
+ logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
502
+ return
503
+
504
+ os.makedirs(save_directory, exist_ok=True)
505
+
506
+ is_custom_diffusion = any(
507
+ isinstance(
508
+ x,
509
+ (CustomDiffusionAttnProcessor, CustomDiffusionAttnProcessor2_5, CustomDiffusionXFormersAttnProcessor),
510
+ )
511
+ for (_, x) in self.attn_processors.items()
512
+ )
513
+ if is_custom_diffusion:
514
+ model_to_save = AttnProcsLayers(
515
+ {
516
+ y: x
517
+ for (y, x) in self.attn_processors.items()
518
+ if isinstance(
519
+ x,
520
+ (
521
+ CustomDiffusionAttnProcessor,
522
+ CustomDiffusionAttnProcessor2_5,
523
+ CustomDiffusionXFormersAttnProcessor,
524
+ ),
525
+ )
526
+ }
527
+ )
528
+ state_dict = model_to_save.state_dict()
529
+ for name, attn in self.attn_processors.items():
530
+ if len(attn.state_dict()) == 0:
531
+ state_dict[name] = {}
532
+ else:
533
+ model_to_save = AttnProcsLayers(self.attn_processors)
534
+ state_dict = model_to_save.state_dict()
535
+
536
+ if weight_name is None:
537
+ if to_diffusers:
538
+ if safe_serialization:
539
+ weight_name = (
540
+ TORCH_CUSTOM_DIFFUSION_WEIGHT_NAME_SAFE if is_custom_diffusion else TORCH_LORA_WEIGHT_NAME_SAFE
541
+ )
542
+ else:
543
+ weight_name = TORCH_CUSTOM_DIFFUSION_WEIGHT_NAME if is_custom_diffusion else TORCH_LORA_WEIGHT_NAME
544
+ else:
545
+ if safe_serialization:
546
+ weight_name = (
547
+ PADDLE_CUSTOM_DIFFUSION_WEIGHT_NAME_SAFE
548
+ if is_custom_diffusion
549
+ else PADDLE_LORA_WEIGHT_NAME_SAFE
550
+ )
551
+ else:
552
+ weight_name = (
553
+ PADDLE_CUSTOM_DIFFUSION_WEIGHT_NAME if is_custom_diffusion else PADDLE_LORA_WEIGHT_NAME
554
+ )
555
+ else:
556
+ if "paddle" in weight_name.lower() or "pdparams" in weight_name.lower():
557
+ to_diffusers = False
558
+ elif "torch" in weight_name.lower() or "bin" in weight_name.lower():
559
+ to_diffusers = True
560
+
561
+ # choose save_function
562
+ if save_function is None:
563
+ if to_diffusers:
564
+ if not is_torch_available() and not safe_serialization:
565
+ safe_serialization = True
566
+ logger.warning(
567
+ "PyTorch is not installed, and `safe_serialization` is currently set to `False`. "
568
+ "To ensure proper model saving, we will automatically set `safe_serialization=True`. "
569
+ "If you want to keep `safe_serialization=False`, please make sure PyTorch is installed."
570
+ )
571
+ if safe_serialization:
572
+ if is_torch_available():
573
+ save_function = partial(torch_safe_save_file, metadata={"format": "pt"})
574
+ else:
575
+ save_function = partial(np_safe_save_file, metadata={"format": "pt"})
576
+ else:
577
+ save_function = torch.save
578
+
579
+ convert_paddle_state_dict_to_pytorch(self, state_dict)
580
+ else:
581
+ if safe_serialization:
582
+ for k, v in state_dict.items():
583
+ if isinstance(v, paddle.Tensor):
584
+ state_dict[k] = v.cpu().numpy()
585
+
586
+ save_function = partial(np_safe_save_file, metadata={"format": "pd"})
587
+ else:
588
+ save_function = paddle.save
589
+
590
+ # Save the model
591
+ save_function(state_dict, os.path.join(save_directory, weight_name))
592
+ logger.info(f"Model weights saved in {os.path.join(save_directory, weight_name)}")
593
+
594
+ def fuse_lora(self, lora_scale=1.0, safe_fusing=False):
595
+ self.lora_scale = lora_scale
596
+ self._safe_fusing = safe_fusing
597
+ self.apply(self._fuse_lora_apply)
598
+
599
+ def _fuse_lora_apply(self, module):
600
+ if not USE_PEFT_BACKEND:
601
+ if hasattr(module, "_fuse_lora"):
602
+ module._fuse_lora(self.lora_scale, self._safe_fusing)
603
+ else:
604
+ from ppdiffusers.peft.tuners.tuners_utils import BaseTunerLayer
605
+
606
+ if isinstance(module, BaseTunerLayer):
607
+ if self.lora_scale != 1.0:
608
+ module.scale_layer(self.lora_scale)
609
+ module.merge(safe_merge=self._safe_fusing)
610
+
611
+ def unfuse_lora(self):
612
+ self.apply(self._unfuse_lora_apply)
613
+
614
+ def _unfuse_lora_apply(self, module):
615
+ if not USE_PEFT_BACKEND:
616
+ if hasattr(module, "_unfuse_lora"):
617
+ module._unfuse_lora()
618
+ else:
619
+ from ppdiffusers.peft.tuners.tuners_utils import BaseTunerLayer
620
+
621
+ if isinstance(module, BaseTunerLayer):
622
+ module.unmerge()
623
+
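A hedged usage sketch of the `fuse_lora`/`unfuse_lora` pair above (repo and weight names are taken from the surrounding docstrings):

```py
import paddle
from ppdiffusers import AutoPipelineForText2Image

pipeline = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", paddle_dtype=paddle.float16
)
pipeline.load_lora_weights(
    "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors"
)

# Bake the LoRA weights into the UNet for faster inference, then undo it.
pipeline.unet.fuse_lora(lora_scale=0.8)
image = pipeline("a cinematic shot of a lighthouse at dusk").images[0]
pipeline.unet.unfuse_lora()
```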
624
+ def set_adapters(
625
+ self,
626
+ adapter_names: Union[List[str], str],
627
+ weights: Optional[Union[List[float], float]] = None,
628
+ ):
629
+ """
630
+ Set the currently active adapters for use in the UNet.
631
+
632
+ Args:
633
+ adapter_names (`List[str]` or `str`):
634
+ The names of the adapters to use.
635
+ weights (`Union[List[float], float]`, *optional*):
636
+ The adapter(s) weights to use with the UNet. If `None`, the weights are set to `1.0` for all the
637
+ adapters.
638
+
639
+ Example:
640
+
641
+ ```py
642
+ from ppdiffusers import AutoPipelineForText2Image
643
+ import paddle
644
+
645
+ pipeline = AutoPipelineForText2Image.from_pretrained(
646
+ "stabilityai/stable-diffusion-xl-base-1.0", paddle_dtype=paddle.float16
647
+ )
648
+ pipeline.load_lora_weights(
649
+ "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_name="cinematic"
650
+ )
651
+ pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel")
652
+ pipeline.set_adapters(["cinematic", "pixel"], adapter_weights=[0.5, 0.5])
653
+ ```
654
+ """
655
+ if not USE_PEFT_BACKEND:
656
+ raise ValueError("PEFT backend is required for `set_adapters()`.")
657
+
658
+ adapter_names = [adapter_names] if isinstance(adapter_names, str) else adapter_names
659
+
660
+ if weights is None:
661
+ weights = [1.0] * len(adapter_names)
662
+ elif isinstance(weights, float):
663
+ weights = [weights] * len(adapter_names)
664
+
665
+ if len(adapter_names) != len(weights):
666
+ raise ValueError(
667
+ f"Length of adapter names {len(adapter_names)} is not equal to the length of their weights {len(weights)}."
668
+ )
669
+
670
+ set_weights_and_activate_adapters(self, adapter_names, weights)
671
+
672
+ def disable_lora(self):
673
+ """
674
+ Disable the UNet's active LoRA layers.
675
+
676
+ Example:
677
+
678
+ ```py
679
+ from ppdiffusers import AutoPipelineForText2Image
680
+ import paddle
681
+
682
+ pipeline = AutoPipelineForText2Image.from_pretrained(
683
+ "stabilityai/stable-diffusion-xl-base-1.0", paddle_dtype=paddle.float16
684
+ )
685
+ pipeline.load_lora_weights(
686
+ "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_name="cinematic"
687
+ )
688
+ pipeline.disable_lora()
689
+ ```
690
+ """
691
+ if not USE_PEFT_BACKEND:
692
+ raise ValueError("PEFT backend is required for this method.")
693
+ set_adapter_layers(self, enabled=False)
694
+
695
+ def enable_lora(self):
696
+ """
697
+ Enable the UNet's active LoRA layers.
698
+
699
+ Example:
700
+
701
+ ```py
702
+ from ppdiffusers import AutoPipelineForText2Image
703
+ import paddle
704
+
705
+ pipeline = AutoPipelineForText2Image.from_pretrained(
706
+ "stabilityai/stable-diffusion-xl-base-1.0", paddle_dtype=paddle.float16
707
+ )
708
+ pipeline.load_lora_weights(
709
+ "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_name="cinematic"
710
+ )
711
+ pipeline.enable_lora()
712
+ ```
713
+ """
714
+ if not USE_PEFT_BACKEND:
715
+ raise ValueError("PEFT backend is required for this method.")
716
+ set_adapter_layers(self, enabled=True)
717
+
718
+ def delete_adapters(self, adapter_names: Union[List[str], str]):
719
+ """
720
+ Delete an adapter's LoRA layers from the UNet.
721
+
722
+ Args:
723
+ adapter_names (`Union[List[str], str]`):
724
+ The names (single string or list of strings) of the adapter to delete.
725
+
726
+ Example:
727
+
728
+ ```py
729
+ from ppdiffusers import AutoPipelineForText2Image
730
+ import paddle
731
+
732
+ pipeline = AutoPipelineForText2Image.from_pretrained(
733
+ "stabilityai/stable-diffusion-xl-base-1.0", paddle_dtype=paddle.float16
734
+ )
735
+ pipeline.load_lora_weights(
736
+ "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_names="cinematic"
737
+ )
738
+ pipeline.delete_adapters("cinematic")
739
+ ```
740
+ """
741
+ if not USE_PEFT_BACKEND:
742
+ raise ValueError("PEFT backend is required for this method.")
743
+
744
+ if isinstance(adapter_names, str):
745
+ adapter_names = [adapter_names]
746
+
747
+ for adapter_name in adapter_names:
748
+ delete_adapter_layers(self, adapter_name)
749
+
750
+ # Pop also the corresponding adapter from the config
751
+ if hasattr(self, "peft_config"):
752
+ self.peft_config.pop(adapter_name, None)
753
+
754
+ def _load_ip_adapter_weights(self, state_dict, from_diffusers=None):
755
+ if from_diffusers is None:
756
+ from_diffusers = FROM_DIFFUSERS
757
+
758
+ str_dtype = str(self.dtype).replace("paddle.", "")
759
+ from ..models.attention_processor import (
760
+ AttnProcessor,
761
+ AttnProcessor2_5,
762
+ IPAdapterAttnProcessor,
763
+ IPAdapterAttnProcessor2_5,
764
+ )
765
+
766
+ # set ip-adapter cross-attention processors & load state_dict
767
+ attn_procs = {}
768
+ key_id = 1
769
+ for name in self.attn_processors.keys():
770
+ cross_attention_dim = None if name.endswith("attn1.processor") else self.config.cross_attention_dim
771
+ if name.startswith("mid_block"):
772
+ hidden_size = self.config.block_out_channels[-1]
773
+ elif name.startswith("up_blocks"):
774
+ block_id = int(name[len("up_blocks.")])
775
+ hidden_size = list(reversed(self.config.block_out_channels))[block_id]
776
+ elif name.startswith("down_blocks"):
777
+ block_id = int(name[len("down_blocks.")])
778
+ hidden_size = self.config.block_out_channels[block_id]
779
+ if cross_attention_dim is None or "motion_modules" in name:
780
+ attn_processor_class = AttnProcessor2_5 if is_ppxformers_available() else AttnProcessor
781
+ attn_procs[name] = attn_processor_class()
782
+ else:
783
+ attn_processor_class = (
784
+ IPAdapterAttnProcessor2_5 if is_ppxformers_available() else IPAdapterAttnProcessor
785
+ )
786
+ attn_procs[name] = attn_processor_class(
787
+ hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, scale=1.0
788
+ ).to(dtype=self.dtype)
789
+
790
+ value_dict = {}
791
+ for k, w in attn_procs[name].state_dict().items():
792
+ value_dict.update({f"{k}": state_dict["ip_adapter"][f"{key_id}.{k}"].astype(str_dtype)})
793
+
794
+ if from_diffusers:
795
+ convert_pytorch_state_dict_to_paddle(attn_procs[name], value_dict)
796
+ attn_procs[name].load_dict(value_dict)
797
+ key_id += 2
798
+
799
+ self.set_attn_processor(attn_procs)
800
+
801
+ # create image projection layers.
802
+ if from_diffusers:
803
+ clip_embeddings_dim = state_dict["image_proj"]["proj.weight"].shape[-1]
804
+ cross_attention_dim = state_dict["image_proj"]["proj.weight"].shape[0] // 4
805
+ else:
806
+ clip_embeddings_dim = state_dict["image_proj"]["proj.weight"].shape[0]
807
+ cross_attention_dim = state_dict["image_proj"]["proj.weight"].shape[-1] // 4
808
+ image_projection = ImageProjection(
809
+ cross_attention_dim=cross_attention_dim, image_embed_dim=clip_embeddings_dim, num_image_text_embeds=4
810
+ )
811
+ image_projection.to(dtype=self.dtype)
812
+
813
+ # load image projection layer weights
814
+ image_proj_state_dict = {}
815
+ image_proj_state_dict.update(
816
+ {
817
+ "image_embeds.weight": state_dict["image_proj"]["proj.weight"].astype(str_dtype),
818
+ "image_embeds.bias": state_dict["image_proj"]["proj.bias"].astype(str_dtype),
819
+ "norm.weight": state_dict["image_proj"]["norm.weight"].astype(str_dtype),
820
+ "norm.bias": state_dict["image_proj"]["norm.bias"].astype(str_dtype),
821
+ }
822
+ )
823
+ if from_diffusers:
824
+ convert_pytorch_state_dict_to_paddle(image_projection, image_proj_state_dict)
825
+ image_projection.load_dict(image_proj_state_dict)
826
+
827
+ self.encoder_hid_proj = image_projection.to(dtype=self.dtype)
828
+ self.config.encoder_hid_dim_type = "ip_image_proj"
829
+
830
+ delete_adapter_layers
VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/loaders/utils.py ADDED
@@ -0,0 +1,121 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import functools
16
+ import weakref
17
+ from collections import OrderedDict
18
+
19
+ import paddle
20
+ import paddle.nn as nn
21
+
22
+ try:
23
+ from paddle.nn.layer.layers import HookRemoveHelper
24
+ except ImportError:
25
+ from paddle.fluid.dygraph.layers import HookRemoveHelper
26
+
27
+ from typing import Any, Callable, Dict, Optional
28
+
29
+
30
+ class _WrappedHook:
31
+ def __init__(self, hook: Callable, module: Optional["nn.Layer"] = None):
32
+ self.hook: Callable = hook
33
+ functools.update_wrapper(self, hook)
34
+
35
+ self.with_module: bool = False
36
+
37
+ if module is not None:
38
+ self.module: weakref.ReferenceType["nn.Layer"] = weakref.ref(module)
39
+ self.with_module = True
40
+
41
+ def __call__(self, *args: Any, **kwargs: Any) -> Any:
42
+ if self.with_module:
43
+ module = self.module()
44
+ if module is None:
45
+ raise RuntimeError("You are trying to call the hook of a dead Module!")
46
+ return self.hook(module, *args, **kwargs)
47
+ return self.hook(*args, **kwargs)
48
+
49
+ def __getstate__(self) -> Dict:
50
+ result = {"hook": self.hook, "with_module": self.with_module}
51
+ if self.with_module:
52
+ result["module"] = self.module()
53
+
54
+ return result
55
+
56
+ def __setstate__(self, state: Dict):
57
+ self.hook = state["hook"]
58
+ self.with_module = state["with_module"]
59
+
60
+ if self.with_module:
61
+ if state["module"] is None:
62
+ raise RuntimeError("You are trying to revive the hook of a dead Module!")
63
+ self.module = weakref.ref(state["module"])
64
+
65
+
66
+ class AttnProcsLayers(nn.Layer):
67
+ def __init__(self, state_dict: Dict[str, paddle.Tensor]):
68
+ super().__init__()
69
+ self.layers = nn.LayerList(state_dict.values())
70
+ self.mapping = dict(enumerate(state_dict.keys()))
71
+ self.rev_mapping = {v: k for k, v in enumerate(state_dict.keys())}
72
+
73
+ # .processor for unet, .self_attn for text encoder
74
+ self.split_keys = [".processor", ".self_attn"]
75
+
76
+ # we add a hook to state_dict() and load_state_dict() so that the
77
+ # naming fits with `unet.attn_processors`
78
+ def map_to(state_dict, *args, **kwargs):
79
+ new_state_dict = {}
80
+ for key, value in state_dict.items():
81
+ num = int(key.split(".")[1]) # 0 is always "layers"
82
+ new_key = key.replace(f"layers.{num}", self.mapping[num])
83
+ new_state_dict[new_key] = value
84
+
85
+ return new_state_dict
86
+
87
+ def remap_key(key, state_dict):
88
+ for k in self.split_keys:
89
+ if k in key:
90
+ return key.split(k)[0] + k
91
+
92
+ raise ValueError(
93
+ f"There seems to be a problem with the state_dict: {set(state_dict.keys())}. {key} has to have one of {self.split_keys}."
94
+ )
95
+
96
+ def map_from(module, state_dict, *args, **kwargs):
97
+ all_keys = list(state_dict.keys())
98
+ for key in all_keys:
99
+ replace_key = remap_key(key, state_dict)
100
+ new_key = key.replace(replace_key, f"layers.{module.rev_mapping[replace_key]}")
101
+ state_dict[new_key] = state_dict[key]
102
+ del state_dict[key]
103
+
104
+ self.register_state_dict_hook(map_to)
105
+ self.register_load_state_dict_pre_hook(map_from, with_module=True)
106
+
107
+ def register_load_state_dict_pre_hook(self, hook, with_module=False):
108
+ if getattr(self, "load_state_dict_pre_hooks", None) is None:
109
+ self.load_state_dict_pre_hooks = OrderedDict()
110
+ handle = HookRemoveHelper(self.load_state_dict_pre_hooks)
111
+ self.load_state_dict_pre_hooks[handle._hook_id] = _WrappedHook(hook, self if with_module else None)
112
+ return handle
113
+
114
+ def set_state_dict(self, state_dict, use_structured_name: bool = True):
115
+ if getattr(self, "load_state_dict_pre_hooks", None) is not None:
116
+ for hook in self.load_state_dict_pre_hooks.values():
117
+ hook(state_dict)
118
+ return super().set_state_dict(state_dict, use_structured_name=use_structured_name)
119
+
120
+ load_dict = set_state_dict
121
+ set_dict = set_state_dict
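A short usage sketch for `AttnProcsLayers`: it wraps a dict of attention processors so that `state_dict()` exports keys in `unet.attn_processors` naming (via `map_to`), while loading translates those keys back to the internal `layers.N.*` names (via `map_from`). The processor keys below are hypothetical, and the example assumes a Paddle build where `register_state_dict_hook` applies the hook's return value, which the class relies on:

import paddle.nn as nn

from ppdiffusers.loaders.utils import AttnProcsLayers

# Hypothetical processors keyed the way unet.attn_processors keys them.
procs = {
    "down_blocks.0.attentions.0.processor": nn.Linear(4, 4),
    "up_blocks.1.attentions.0.processor": nn.Linear(4, 4),
}
wrapper = AttnProcsLayers(procs)

# map_to renames "layers.0.weight" -> "down_blocks.0.attentions.0.processor.weight".
exported = wrapper.state_dict()
print(sorted(exported.keys()))

# map_from reverses the renaming before the weights are assigned.
wrapper.load_dict(exported)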
VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/models/__init__.py ADDED
@@ -0,0 +1,113 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ..utils import PPDIFFUSERS_SLOW_IMPORT, _LazyModule, is_paddle_available
18
+
19
+ _import_structure = {}
20
+
21
+ if is_paddle_available():
22
+ _import_structure["adapter"] = ["MultiAdapter", "T2IAdapter"]
23
+ _import_structure["autoencoder_asym_kl"] = ["AsymmetricAutoencoderKL"]
24
+ _import_structure["autoencoder_kl"] = ["AutoencoderKL"]
25
+ _import_structure["autoencoder_kl_cogvideox"] = ["AutoencoderKLCogVideoX"]
26
+ _import_structure["autoencoder_kl_temporal_decoder"] = ["AutoencoderKLTemporalDecoder"]
27
+ _import_structure["autoencoder_tiny"] = ["AutoencoderTiny"]
28
+ _import_structure["consistency_decoder_vae"] = ["ConsistencyDecoderVAE"]
29
+ _import_structure["controlnet"] = ["ControlNetModel"]
30
+ _import_structure["dual_transformer_2d"] = ["DualTransformer2DModel"]
31
+ _import_structure["modeling_utils"] = ["ModelMixin"]
32
+ _import_structure["prior_transformer"] = ["PriorTransformer"]
33
+ _import_structure["t5_film_transformer"] = ["T5FilmDecoder"]
34
+ _import_structure["transformer_2d"] = ["Transformer2DModel"]
35
+ _import_structure["transformer_sd3"] = ["SD3Transformer2DModel"]
36
+ _import_structure["cogvideox_transformer_3d"] = ["CogVideoXTransformer3DModel"]
37
+ _import_structure["transformer_temporal"] = ["TransformerTemporalModel"]
38
+ _import_structure["unet_1d"] = ["UNet1DModel"]
39
+ _import_structure["unet_2d"] = ["UNet2DModel"]
40
+ _import_structure["unet_2d_condition"] = ["UNet2DConditionModel"]
41
+ _import_structure["unet_3d_condition"] = ["UNet3DConditionModel"]
42
+ _import_structure["unet_kandi3"] = ["Kandinsky3UNet"]
43
+ _import_structure["unet_motion_model"] = ["MotionAdapter", "UNetMotionModel"]
44
+ _import_structure["unet_spatio_temporal_condition"] = ["UNetSpatioTemporalConditionModel"]
45
+ _import_structure["vq_model"] = ["VQModel"]
46
+ _import_structure["uvit_t2i"] = ["UViTT2IModel"]
47
+ _import_structure["dit_llama"] = ["DiTLLaMA2DModel"]
48
+ _import_structure["dit_llama_t2i"] = ["DiTLLaMAT2IModel"]
49
+ # NOTE, new add
50
+ _import_structure["lvdm_vae"] = ["LVDMAutoencoderKL"]
51
+ _import_structure["lvdm_unet_3d"] = ["LVDMUNet3DModel"]
52
+ _import_structure["ema"] = ["LitEma"]
53
+ _import_structure["paddleinfer_runtime"] = ["PaddleInferRuntimeModel"]
54
+ # NOTE, new add
55
+ _import_structure["modelscope_autoencoder_img2vid"] = ["AutoencoderKL_imgtovideo"]
56
+ _import_structure["modelscope_gaussian_diffusion"] = ["GaussianDiffusion"]
57
+ _import_structure["modelscope_gaussion_sdedit"] = ["GaussianDiffusion_SDEdit"]
58
+ _import_structure["modelscope_st_unet"] = ["STUNetModel"]
59
+ _import_structure["modelscope_st_unet_video2video"] = ["Vid2VidSTUNet"]
60
+ # NOTE, new add
61
+ _import_structure["controlnet_sd3"] = ["SD3ControlNetModel", "SD3MultiControlNetModel"]
62
+ # NOTE, new add
63
+ _import_structure["vctrl"] = ["VCtrlModel"]
64
+ _import_structure["cogvideox_transformer_3d_vctrl"] = ["CogVideoXTransformer3DVCtrlModel"]
65
+
66
+
67
+ if TYPE_CHECKING or PPDIFFUSERS_SLOW_IMPORT:
68
+ if is_paddle_available():
69
+ from .adapter import MultiAdapter, T2IAdapter
70
+ from .autoencoder_asym_kl import AsymmetricAutoencoderKL
71
+ from .autoencoder_kl import AutoencoderKL
72
+ from .autoencoder_kl_cogvideox import AutoencoderKLCogVideoX
73
+ from .autoencoder_kl_temporal_decoder import AutoencoderKLTemporalDecoder
74
+ from .autoencoder_tiny import AutoencoderTiny
75
+ from .cogvideox_transformer_3d import CogVideoXTransformer3DModel
76
+ from .cogvideox_transformer_3d_vctrl import CogVideoXTransformer3DVCtrlModel
77
+ from .consistency_decoder_vae import ConsistencyDecoderVAE
78
+ from .controlnet import ControlNetModel
79
+ from .controlnet_sd3 import SD3ControlNetModel, SD3MultiControlNetModel
80
+ from .dit_llama import DiTLLaMA2DModel
81
+ from .dit_llama_t2i import DiTLLaMAT2IModel
82
+ from .dual_transformer_2d import DualTransformer2DModel
83
+
84
+ # NOTE, new add
85
+ from .ema import LitEma
86
+ from .lvdm_unet_3d import LVDMUNet3DModel
87
+ from .lvdm_vae import LVDMAutoencoderKL
88
+ from .modeling_utils import ModelMixin
89
+ from .modelscope_autoencoder_img2vid import AutoencoderKL_imgtovideo
90
+ from .modelscope_gaussian_diffusion import GaussianDiffusion
91
+ from .modelscope_gaussion_sdedit import GaussianDiffusion_SDEdit
92
+ from .modelscope_st_unet import STUNetModel
93
+ from .modelscope_st_unet_video2video import Vid2VidSTUNet
94
+ from .paddleinfer_runtime import PaddleInferRuntimeModel
95
+ from .prior_transformer import PriorTransformer
96
+ from .t5_film_transformer import T5FilmDecoder
97
+ from .transformer_2d import Transformer2DModel
98
+ from .transformer_sd3 import SD3Transformer2DModel
99
+ from .transformer_temporal import TransformerTemporalModel
100
+ from .unet_1d import UNet1DModel
101
+ from .unet_2d import UNet2DModel
102
+ from .unet_2d_condition import UNet2DConditionModel
103
+ from .unet_3d_condition import UNet3DConditionModel
104
+ from .unet_kandi3 import Kandinsky3UNet
105
+ from .unet_motion_model import MotionAdapter, UNetMotionModel
106
+ from .unet_spatio_temporal_condition import UNetSpatioTemporalConditionModel
107
+ from .uvit_t2i import UViTT2IModel
108
+ from .vctrl import VCtrlModel
109
+ from .vq_model import VQModel
110
+ else:
111
+ import sys
112
+
113
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
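For readers unfamiliar with the pattern: `_LazyModule` defers the actual submodule imports until an attribute is first accessed, while `TYPE_CHECKING` / `PPDIFFUSERS_SLOW_IMPORT` forces eager imports for type checkers. Below is a toy illustration of the same idea using module-level `__getattr__` (PEP 562) rather than the real `_LazyModule` class; the registry entries are hypothetical:

# toy_models.py -- save as a module and use it as `import toy_models; toy_models.sqrt(4.0)`.
import importlib

_import_structure = {"math": ["sqrt"], "json": ["dumps"]}  # hypothetical registry
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    # The owning module is imported only when the attribute is first requested,
    # mirroring how `from ppdiffusers.models import UNet2DConditionModel` loads
    # just the needed submodule.
    module_name = _attr_to_module.get(name)
    if module_name is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    return getattr(importlib.import_module(module_name), name)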
VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/models/activations.py ADDED
@@ -0,0 +1,123 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 HuggingFace Inc.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import paddle
17
+ import paddle.nn.functional as F
18
+ from paddle import nn
19
+
20
+ from ..utils import USE_PEFT_BACKEND
21
+ from .lora import LoRACompatibleLinear
22
+
23
+ ACTIVATION_FUNCTIONS = {
24
+ "swish": nn.Silu(),
25
+ "silu": nn.Silu(),
26
+ "mish": nn.Mish(),
27
+ "gelu": nn.GELU(),
28
+ "relu": nn.ReLU(),
29
+ }
30
+
31
+
32
+ def get_activation(act_fn: str) -> nn.Layer:
33
+ """Helper function to get activation function from string.
34
+
35
+ Args:
36
+ act_fn (str): Name of activation function.
37
+
38
+ Returns:
39
+ nn.Layer: Activation function.
40
+ """
41
+
42
+ act_fn = act_fn.lower()
43
+ if act_fn in ACTIVATION_FUNCTIONS:
44
+ return ACTIVATION_FUNCTIONS[act_fn]
45
+ else:
46
+ raise ValueError(f"Unsupported activation function: {act_fn}")
47
+
48
+ class FP32SiLU(nn.Layer):
49
+ r"""
50
+ SiLU activation function with input upcasted to paddle.float32.
51
+ """
52
+
53
+ def __init__(self):
54
+ super().__init__()
55
+
56
+ def forward(self, inputs: paddle.Tensor) -> paddle.Tensor:
57
+ return F.silu(inputs.astype(paddle.float32)).astype(inputs.dtype)
58
+
59
+ class GELU(nn.Layer):
60
+ r"""
61
+ GELU activation function with tanh approximation support with `approximate="tanh"`.
62
+
63
+ Parameters:
64
+ dim_in (`int`): The number of channels in the input.
65
+ dim_out (`int`): The number of channels in the output.
66
+ approximate (`str`, *optional*, defaults to `"none"`): If `"tanh"`, use tanh approximation.
67
+ """
68
+
69
+ def __init__(self, dim_in: int, dim_out: int, approximate: str = "none", bias: bool=True):
70
+ super().__init__()
71
+ self.proj = nn.Linear(dim_in, dim_out, bias_attr=bias)
72
+ self.approximate = approximate
73
+
74
+ def gelu(self, gate: paddle.Tensor) -> paddle.Tensor:
75
+ return F.gelu(gate, approximate=self.approximate != "none")
76
+
77
+ def forward(self, hidden_states):
78
+ hidden_states = self.proj(hidden_states)
79
+ hidden_states = self.gelu(hidden_states)
80
+ return hidden_states
81
+
82
+
83
+ class GEGLU(nn.Layer):
84
+ r"""
85
+ A [variant](https://arxiv.org/abs/2002.05202) of the gated linear unit activation function.
86
+
87
+ Parameters:
88
+ dim_in (`int`): The number of channels in the input.
89
+ dim_out (`int`): The number of channels in the output.
90
+ """
91
+
92
+ def __init__(self, dim_in: int, dim_out: int, bias: bool=True):
93
+ super().__init__()
94
+ linear_cls = LoRACompatibleLinear if not USE_PEFT_BACKEND else nn.Linear
95
+
96
+ self.proj = linear_cls(dim_in, dim_out * 2, bias_attr=bias)
97
+
98
+ def gelu(self, gate: paddle.Tensor) -> paddle.Tensor:
99
+ return F.gelu(gate)
100
+
101
+ def forward(self, hidden_states, scale: float = 1.0):
102
+ args = () if USE_PEFT_BACKEND else (scale,)
103
+ hidden_states, gate = self.proj(hidden_states, *args).chunk(2, axis=-1)
104
+ return hidden_states * self.gelu(gate)
105
+
106
+
107
+ class ApproximateGELU(nn.Layer):
108
+ r"""
109
+ The approximate form of the Gaussian Error Linear Unit (GELU). For more details, see section 2 of this
110
+ [paper](https://arxiv.org/abs/1606.08415).
111
+
112
+ Parameters:
113
+ dim_in (`int`): The number of channels in the input.
114
+ dim_out (`int`): The number of channels in the output.
115
+ """
116
+
117
+ def __init__(self, dim_in: int, dim_out: int, bias: bool=True):
118
+ super().__init__()
119
+ self.proj = nn.Linear(dim_in, dim_out, bias_attr=bias)
120
+
121
+ def forward(self, x: paddle.Tensor) -> paddle.Tensor:
122
+ x = self.proj(x)
123
+ return x * F.sigmoid(1.702 * x)
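A small usage sketch for the activation helpers above, assuming the module is importable from the path shown in the diff header; the tensor sizes are arbitrary:

import paddle

from ppdiffusers.models.activations import GEGLU, ApproximateGELU, get_activation

x = paddle.randn([2, 8])

# get_activation resolves a config string to a shared activation layer.
act = get_activation("silu")
print(act(x).shape)       # [2, 8]

# GEGLU projects to 2 * dim_out and gates one half with GELU of the other,
# so the output width is dim_out even though the projection is wider.
geglu = GEGLU(dim_in=8, dim_out=16)
print(geglu(x).shape)     # [2, 16]

# ApproximateGELU uses the x * sigmoid(1.702 * x) approximation.
approx = ApproximateGELU(dim_in=8, dim_out=8)
print(approx(x).shape)    # [2, 8]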
VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/models/adapter.py ADDED
@@ -0,0 +1,583 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import os
15
+ from typing import Callable, List, Optional, Union
16
+
17
+ import paddle
18
+ import paddle.nn as nn
19
+
20
+ from ..configuration_utils import ConfigMixin, register_to_config
21
+ from ..utils import logging
22
+ from .modeling_utils import ModelMixin
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+
27
+ class MultiAdapter(ModelMixin):
28
+ r"""
29
+ MultiAdapter is a wrapper model that contains multiple adapter models and merges their outputs according to
30
+ user-assigned weighting.
31
+
32
+ This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library
33
+ implements for all the model (such as downloading or saving, etc.)
34
+
35
+ Parameters:
36
+ adapters (`List[T2IAdapter]`, *optional*, defaults to None):
37
+ A list of `T2IAdapter` model instances.
38
+ """
39
+
40
+ def __init__(self, adapters: List["T2IAdapter"]):
41
+ super(MultiAdapter, self).__init__()
42
+
43
+ self.num_adapter = len(adapters)
44
+ self.adapters = nn.LayerList(adapters)
45
+
46
+ if len(adapters) == 0:
47
+ raise ValueError("Expecting at least one adapter")
48
+
49
+ if len(adapters) == 1:
50
+ raise ValueError("For a single adapter, please use the `T2IAdapter` class instead of `MultiAdapter`")
51
+
52
+ # The outputs from each adapter are added together with a weight.
53
+ # This means that the change in dimensions from downsampling must
54
+ # be the same for all adapters. Inductively, it also means the
55
+ # downscale_factor and total_downscale_factor must be the same for all
56
+ # adapters.
57
+ first_adapter_total_downscale_factor = adapters[0].total_downscale_factor
58
+ first_adapter_downscale_factor = adapters[0].downscale_factor
59
+ for idx in range(1, len(adapters)):
60
+ if (
61
+ adapters[idx].total_downscale_factor != first_adapter_total_downscale_factor
62
+ or adapters[idx].downscale_factor != first_adapter_downscale_factor
63
+ ):
64
+ raise ValueError(
65
+ f"Expecting all adapters to have the same downscaling behavior, but got:\n"
66
+ f"adapters[0].total_downscale_factor={first_adapter_total_downscale_factor}\n"
67
+ f"adapters[0].downscale_factor={first_adapter_downscale_factor}\n"
68
+ f"adapter[`{idx}`].total_downscale_factor={adapters[idx].total_downscale_factor}\n"
69
+ f"adapter[`{idx}`].downscale_factor={adapters[idx].downscale_factor}"
70
+ )
71
+
72
+ self.total_downscale_factor = first_adapter_total_downscale_factor
73
+ self.downscale_factor = first_adapter_downscale_factor
74
+
75
+ def forward(self, xs: paddle.Tensor, adapter_weights: Optional[List[float]] = None) -> List[paddle.Tensor]:
76
+ r"""
77
+ Args:
78
+ xs (`paddle.Tensor`):
79
+ (batch, channel, height, width) input images for multiple adapter models concatenated along dimension 1,
80
+ `channel` should equal to `num_adapter` * "number of channel of image".
81
+ adapter_weights (`List[float]`, *optional*, defaults to None):
82
+ List of floats representing the weights by which each adapter's output will be multiplied before the
83
+ outputs are added together.
84
+ """
85
+ if adapter_weights is None:
86
+ adapter_weights = paddle.to_tensor([1 / self.num_adapter] * self.num_adapter)
87
+ else:
88
+ adapter_weights = paddle.to_tensor(adapter_weights)
89
+
90
+ accume_state = None
91
+ for x, w, adapter in zip(xs, adapter_weights, self.adapters):
92
+ features = adapter(x)
93
+ if accume_state is None:
94
+ accume_state = features
95
+ for i in range(len(accume_state)):
96
+ accume_state[i] = w * accume_state[i]
97
+ else:
98
+ for i in range(len(features)):
99
+ accume_state[i] += w * features[i]
100
+ return accume_state
101
+
102
+ def save_pretrained(
103
+ self,
104
+ save_directory: Union[str, os.PathLike],
105
+ is_main_process: bool = True,
106
+ save_function: Callable = None,
107
+ safe_serialization: bool = True,
108
+ variant: Optional[str] = None,
109
+ ):
110
+ """
111
+ Save a model and its configuration file to a directory, so that it can be re-loaded using the
112
+ `[`~models.adapter.MultiAdapter.from_pretrained`]` class method.
113
+
114
+ Arguments:
115
+ save_directory (`str` or `os.PathLike`):
116
+ Directory to which to save. Will be created if it doesn't exist.
117
+ is_main_process (`bool`, *optional*, defaults to `True`):
118
+ Whether the process calling this is the main process or not. Useful when in distributed training like
119
+ TPUs and need to call this function on all processes. In this case, set `is_main_process=True` only on
120
+ the main process to avoid race conditions.
121
+ save_function (`Callable`):
122
+ The function to use to save the state dictionary. Useful on distributed training like TPUs when one
123
+ need to replace `torch.save` by another method. Can be configured with the environment variable
124
+ `DIFFUSERS_SAVE_MODE`.
125
+ safe_serialization (`bool`, *optional*, defaults to `True`):
126
+ Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
127
+ variant (`str`, *optional*):
128
+ If specified, weights are saved in the format pytorch_model.<variant>.bin.
129
+ """
130
+ idx = 0
131
+ model_path_to_save = save_directory
132
+ for adapter in self.adapters:
133
+ adapter.save_pretrained(
134
+ model_path_to_save,
135
+ is_main_process=is_main_process,
136
+ save_function=save_function,
137
+ safe_serialization=safe_serialization,
138
+ variant=variant,
139
+ )
140
+
141
+ idx += 1
142
+ model_path_to_save = model_path_to_save + f"_{idx}"
143
+
144
+ @classmethod
145
+ def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
146
+ r"""
147
+ Instantiate a pretrained MultiAdapter model from multiple pre-trained adapter models.
148
+
149
+ The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train
150
+ the model, you should first set it back in training mode with `model.train()`.
151
+
152
+ The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come
153
+ pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning
154
+ task.
155
+
156
+ The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those
157
+ weights are discarded.
158
+
159
+ Parameters:
160
+ pretrained_model_path (`os.PathLike`):
161
+ A path to a *directory* containing model weights saved using
162
+ [`~ppdiffusers.models.adapter.MultiAdapter.save_pretrained`], e.g., `./my_model_directory/adapter`.
163
+ torch_dtype (`str` or `torch.dtype`, *optional*):
164
+ Override the default `torch.dtype` and load the model under this dtype. If `"auto"` is passed the dtype
165
+ will be automatically derived from the model's weights.
166
+ output_loading_info(`bool`, *optional*, defaults to `False`):
167
+ Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
168
+ device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*):
169
+ A map that specifies where each submodule should go. It doesn't need to be refined to each
170
+ parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the
171
+ same device.
172
+
173
+ To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For
174
+ more information about each option see [designing a device
175
+ map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
176
+ max_memory (`Dict`, *optional*):
177
+ A dictionary device identifier to maximum memory. Will default to the maximum memory available for each
178
+ GPU and the available CPU RAM if unset.
179
+ low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
180
+ Speed up model loading by not initializing the weights and only loading the pre-trained weights. This
181
+ also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the
182
+ model. This is only supported when torch version >= 1.9.0. If you are using an older version of torch,
183
+ setting this argument to `True` will raise an error.
184
+ variant (`str`, *optional*):
185
+ If specified load weights from `variant` filename, *e.g.* pytorch_model.<variant>.bin. `variant` is
186
+ ignored when using `from_flax`.
187
+ use_safetensors (`bool`, *optional*, defaults to `None`):
188
+ If set to `None`, the `safetensors` weights will be downloaded if they're available **and** if the
189
+ `safetensors` library is installed. If set to `True`, the model will be forcibly loaded from
190
+ `safetensors` weights. If set to `False`, loading will *not* use `safetensors`.
191
+ """
192
+ idx = 0
193
+ adapters = []
194
+
195
+ # load adapter and append to list until no adapter directory exists anymore
196
+ # first adapter has to be saved under `./mydirectory/adapter` to be compliant with `DiffusionPipeline.from_pretrained`
197
+ # second, third, ... adapters have to be saved under `./mydirectory/adapter_1`, `./mydirectory/adapter_2`, ...
198
+ model_path_to_load = pretrained_model_path
199
+ while os.path.isdir(model_path_to_load):
200
+ adapter = T2IAdapter.from_pretrained(model_path_to_load, **kwargs)
201
+ adapters.append(adapter)
202
+
203
+ idx += 1
204
+ model_path_to_load = pretrained_model_path + f"_{idx}"
205
+
206
+ logger.info(f"{len(adapters)} adapters loaded from {pretrained_model_path}.")
207
+
208
+ if len(adapters) == 0:
209
+ raise ValueError(
210
+ f"No T2IAdapters found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
211
+ )
212
+
213
+ return cls(adapters)
214
+
215
+
216
+ class T2IAdapter(ModelMixin, ConfigMixin):
217
+ r"""
218
+ A simple ResNet-like model that accepts images containing control signals such as keyposes and depth. The model
219
+ generates multiple feature maps that are used as additional conditioning in [`UNet2DConditionModel`]. The model's
220
+ architecture follows the original implementation of
221
+ [Adapter](https://github.com/TencentARC/T2I-Adapter/blob/686de4681515662c0ac2ffa07bf5dda83af1038a/ldm/modules/encoders/adapter.py#L97)
222
+ and
223
+ [AdapterLight](https://github.com/TencentARC/T2I-Adapter/blob/686de4681515662c0ac2ffa07bf5dda83af1038a/ldm/modules/encoders/adapter.py#L235).
224
+
225
+ This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library
226
+ implements for all models (such as downloading or saving, etc.)
227
+
228
+ Parameters:
229
+ in_channels (`int`, *optional*, defaults to 3):
230
+ Number of channels of Adapter's input(*control image*). Set this parameter to 1 if you're using gray scale
231
+ image as *control image*.
232
+ channels (`List[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
233
+ The number of channel of each downsample block's output hidden state. The `len(block_out_channels)` will
234
+ also determine the number of downsample blocks in the Adapter.
235
+ num_res_blocks (`int`, *optional*, defaults to 2):
236
+ Number of ResNet blocks in each downsample block.
237
+ downscale_factor (`int`, *optional*, defaults to 8):
238
+ A factor that determines the total downscale factor of the Adapter.
239
+ adapter_type (`str`, *optional*, defaults to `full_adapter`):
240
+ The type of Adapter to use. Choose either `full_adapter` or `full_adapter_xl` or `light_adapter`.
241
+ """
242
+
243
+ @register_to_config
244
+ def __init__(
245
+ self,
246
+ in_channels: int = 3,
247
+ channels: List[int] = [320, 640, 1280, 1280],
248
+ num_res_blocks: int = 2,
249
+ downscale_factor: int = 8,
250
+ adapter_type: str = "full_adapter",
251
+ ):
252
+ super().__init__()
253
+
254
+ if adapter_type == "full_adapter":
255
+ self.adapter = FullAdapter(in_channels, channels, num_res_blocks, downscale_factor)
256
+ elif adapter_type == "full_adapter_xl":
257
+ self.adapter = FullAdapterXL(in_channels, channels, num_res_blocks, downscale_factor)
258
+ elif adapter_type == "light_adapter":
259
+ self.adapter = LightAdapter(in_channels, channels, num_res_blocks, downscale_factor)
260
+ else:
261
+ raise ValueError(
262
+ f"Unsupported adapter_type: '{adapter_type}'. Choose either 'full_adapter' or "
263
+ "'full_adapter_xl' or 'light_adapter'."
264
+ )
265
+
266
+ def forward(self, x: paddle.Tensor) -> List[paddle.Tensor]:
267
+ r"""
268
+ This function processes the input tensor `x` through the adapter model and returns a list of feature tensors,
269
+ each representing information extracted at a different scale from the input. The length of the list is
270
+ determined by the number of downsample blocks in the Adapter, as specified by the `channels` and
271
+ `num_res_blocks` parameters during initialization.
272
+ """
273
+ return self.adapter(x)
274
+
275
+ @property
276
+ def total_downscale_factor(self):
277
+ return self.adapter.total_downscale_factor
278
+
279
+ @property
280
+ def downscale_factor(self):
281
+ """The downscale factor applied in the T2I-Adapter's initial pixel unshuffle operation. If an input image's dimensions are
282
+ not evenly divisible by the downscale_factor then an exception will be raised.
283
+ """
284
+ return self.adapter.unshuffle.downscale_factor
285
+
286
+
287
+ # full adapter
288
+
289
+
290
+ class FullAdapter(nn.Layer):
291
+ r"""
292
+ See [`T2IAdapter`] for more information.
293
+ """
294
+
295
+ def __init__(
296
+ self,
297
+ in_channels: int = 3,
298
+ channels: List[int] = [320, 640, 1280, 1280],
299
+ num_res_blocks: int = 2,
300
+ downscale_factor: int = 8,
301
+ ):
302
+ super().__init__()
303
+
304
+ in_channels = in_channels * downscale_factor**2
305
+
306
+ self.unshuffle = nn.PixelUnshuffle(downscale_factor)
307
+ self.conv_in = nn.Conv2D(in_channels, channels[0], kernel_size=3, padding=1)
308
+
309
+ self.body = nn.LayerList(
310
+ [
311
+ AdapterBlock(channels[0], channels[0], num_res_blocks),
312
+ *[
313
+ AdapterBlock(channels[i - 1], channels[i], num_res_blocks, down=True)
314
+ for i in range(1, len(channels))
315
+ ],
316
+ ]
317
+ )
318
+
319
+ self.total_downscale_factor = downscale_factor * 2 ** (len(channels) - 1)
320
+
321
+ def forward(self, x: paddle.Tensor) -> List[paddle.Tensor]:
322
+ r"""
323
+ This method processes the input tensor `x` through the FullAdapter model and performs operations including
324
+ pixel unshuffling, convolution, and a stack of AdapterBlocks. It returns a list of feature tensors, each
325
+ capturing information at a different stage of processing within the FullAdapter model. The number of feature
326
+ tensors in the list is determined by the number of downsample blocks specified during initialization.
327
+ """
328
+ x = self.unshuffle(x)
329
+ x = self.conv_in(x)
330
+
331
+ features = []
332
+
333
+ for block in self.body:
334
+ x = block(x)
335
+ features.append(x)
336
+
337
+ return features
338
+
339
+
340
+ class FullAdapterXL(nn.Layer):
341
+ r"""
342
+ See [`T2IAdapter`] for more information.
343
+ """
344
+
345
+ def __init__(
346
+ self,
347
+ in_channels: int = 3,
348
+ channels: List[int] = [320, 640, 1280, 1280],
349
+ num_res_blocks: int = 2,
350
+ downscale_factor: int = 16,
351
+ ):
352
+ super().__init__()
353
+
354
+ in_channels = in_channels * downscale_factor**2
355
+
356
+ self.unshuffle = nn.PixelUnshuffle(downscale_factor)
357
+ self.conv_in = nn.Conv2D(in_channels, channels[0], kernel_size=3, padding=1)
358
+
359
+ self.body = []
360
+ # blocks to extract XL features with dimensions of [320, 64, 64], [640, 64, 64], [1280, 32, 32], [1280, 32, 32]
361
+ for i in range(len(channels)):
362
+ if i == 1:
363
+ self.body.append(AdapterBlock(channels[i - 1], channels[i], num_res_blocks))
364
+ elif i == 2:
365
+ self.body.append(AdapterBlock(channels[i - 1], channels[i], num_res_blocks, down=True))
366
+ else:
367
+ self.body.append(AdapterBlock(channels[i], channels[i], num_res_blocks))
368
+
369
+ self.body = nn.LayerList(self.body)
370
+ # XL has only one downsampling AdapterBlock.
371
+ self.total_downscale_factor = downscale_factor * 2
372
+
373
+ def forward(self, x: paddle.Tensor) -> List[paddle.Tensor]:
374
+ r"""
375
+ This method takes the tensor x as input and processes it through the FullAdapterXL model: it unshuffles pixels,
376
+ applies the input convolution layer, and appends each block's output to a list of feature tensors.
377
+ """
378
+ x = self.unshuffle(x)
379
+ x = self.conv_in(x)
380
+
381
+ features = []
382
+
383
+ for block in self.body:
384
+ x = block(x)
385
+ features.append(x)
386
+
387
+ return features
388
+
389
+
390
+ class AdapterBlock(nn.Layer):
391
+ r"""
392
+ An AdapterBlock is a helper model that contains multiple ResNet-like blocks. It is used in the `FullAdapter` and
393
+ `FullAdapterXL` models.
394
+
395
+ Parameters:
396
+ in_channels (`int`):
397
+ Number of channels of AdapterBlock's input.
398
+ out_channels (`int`):
399
+ Number of channels of AdapterBlock's output.
400
+ num_res_blocks (`int`):
401
+ Number of ResNet blocks in the AdapterBlock.
402
+ down (`bool`, *optional*, defaults to `False`):
403
+ Whether to perform downsampling on AdapterBlock's input.
404
+ """
405
+
406
+ def __init__(self, in_channels: int, out_channels: int, num_res_blocks: int, down: bool = False):
407
+ super().__init__()
408
+
409
+ self.downsample = None
410
+ if down:
411
+ self.downsample = nn.AvgPool2D(kernel_size=2, stride=2, ceil_mode=True)
412
+
413
+ self.in_conv = None
414
+ if in_channels != out_channels:
415
+ self.in_conv = nn.Conv2D(in_channels, out_channels, kernel_size=1)
416
+
417
+ self.resnets = nn.Sequential(
418
+ *[AdapterResnetBlock(out_channels) for _ in range(num_res_blocks)],
419
+ )
420
+
421
+ def forward(self, x: paddle.Tensor) -> paddle.Tensor:
422
+ r"""
423
+ This method takes tensor x as input and performs operations downsampling and convolutional layers if the
424
+ self.downsample and self.in_conv properties of AdapterBlock model are specified. Then it applies a series of
425
+ residual blocks to the input tensor.
426
+ """
427
+ if self.downsample is not None:
428
+ x = self.downsample(x)
429
+
430
+ if self.in_conv is not None:
431
+ x = self.in_conv(x)
432
+
433
+ x = self.resnets(x)
434
+
435
+ return x
436
+
437
+
438
+ class AdapterResnetBlock(nn.Layer):
439
+ r"""
440
+ An `AdapterResnetBlock` is a helper model that implements a ResNet-like block.
441
+
442
+ Parameters:
443
+ channels (`int`):
444
+ Number of channels of AdapterResnetBlock's input and output.
445
+ """
446
+
447
+ def __init__(self, channels: int):
448
+ super().__init__()
449
+ self.block1 = nn.Conv2D(channels, channels, kernel_size=3, padding=1)
450
+ self.act = nn.ReLU()
451
+ self.block2 = nn.Conv2D(channels, channels, kernel_size=1)
452
+
453
+ def forward(self, x: paddle.Tensor) -> paddle.Tensor:
454
+ r"""
455
+ This method takes input tensor x and applies a convolutional layer, ReLU activation, and another convolutional
456
+ layer on the input tensor. It returns the result added to the input tensor (a residual connection).
457
+ """
458
+
459
+ h = self.act(self.block1(x))
460
+ h = self.block2(h)
461
+
462
+ return h + x
463
+
464
+
465
+ # light adapter
466
+
467
+
468
+ class LightAdapter(nn.Layer):
469
+ r"""
470
+ See [`T2IAdapter`] for more information.
471
+ """
472
+
473
+ def __init__(
474
+ self,
475
+ in_channels: int = 3,
476
+ channels: List[int] = [320, 640, 1280],
477
+ num_res_blocks: int = 4,
478
+ downscale_factor: int = 8,
479
+ ):
480
+ super().__init__()
481
+
482
+ in_channels = in_channels * downscale_factor**2
483
+
484
+ self.unshuffle = nn.PixelUnshuffle(downscale_factor)
485
+
486
+ self.body = nn.LayerList(
487
+ [
488
+ LightAdapterBlock(in_channels, channels[0], num_res_blocks),
489
+ *[
490
+ LightAdapterBlock(channels[i], channels[i + 1], num_res_blocks, down=True)
491
+ for i in range(len(channels) - 1)
492
+ ],
493
+ LightAdapterBlock(channels[-1], channels[-1], num_res_blocks, down=True),
494
+ ]
495
+ )
496
+
497
+ self.total_downscale_factor = downscale_factor * (2 ** len(channels))
498
+
499
+ def forward(self, x: paddle.Tensor) -> List[paddle.Tensor]:
500
+ r"""
501
+ This method takes the input tensor x, pixel-unshuffles it, and appends each block's output to a list of feature tensors. Each
502
+ feature tensor corresponds to a different level of processing within the LightAdapter.
503
+ """
504
+ x = self.unshuffle(x)
505
+
506
+ features = []
507
+
508
+ for block in self.body:
509
+ x = block(x)
510
+ features.append(x)
511
+
512
+ return features
513
+
514
+
515
+ class LightAdapterBlock(nn.Layer):
516
+ r"""
517
+ A `LightAdapterBlock` is a helper model that contains multiple `LightAdapterResnetBlocks`. It is used in the
518
+ `LightAdapter` model.
519
+
520
+ Parameters:
521
+ in_channels (`int`):
522
+ Number of channels of LightAdapterBlock's input.
523
+ out_channels (`int`):
524
+ Number of channels of LightAdapterBlock's output.
525
+ num_res_blocks (`int`):
526
+ Number of LightAdapterResnetBlocks in the LightAdapterBlock.
527
+ down (`bool`, *optional*, defaults to `False`):
528
+ Whether to perform downsampling on LightAdapterBlock's input.
529
+ """
530
+
531
+ def __init__(self, in_channels: int, out_channels: int, num_res_blocks: int, down: bool = False):
532
+ super().__init__()
533
+ mid_channels = out_channels // 4
534
+
535
+ self.downsample = None
536
+ if down:
537
+ self.downsample = nn.AvgPool2D(kernel_size=2, stride=2, ceil_mode=True)
538
+
539
+ self.in_conv = nn.Conv2D(in_channels, mid_channels, kernel_size=1)
540
+ self.resnets = nn.Sequential(*[LightAdapterResnetBlock(mid_channels) for _ in range(num_res_blocks)])
541
+ self.out_conv = nn.Conv2D(mid_channels, out_channels, kernel_size=1)
542
+
543
+ def forward(self, x: paddle.Tensor) -> paddle.Tensor:
544
+ r"""
545
+ This method takes tensor x as input and performs downsampling if required. Then it applies the input
546
+ convolution layer, a sequence of residual blocks, and the output convolution layer.
547
+ """
548
+ if self.downsample is not None:
549
+ x = self.downsample(x)
550
+
551
+ x = self.in_conv(x)
552
+ x = self.resnets(x)
553
+ x = self.out_conv(x)
554
+
555
+ return x
556
+
557
+
558
+ class LightAdapterResnetBlock(nn.Layer):
559
+ """
560
+ A `LightAdapterResnetBlock` is a helper model that implements a ResNet-like block with a slightly different
561
+ architecture than `AdapterResnetBlock`.
562
+
563
+ Parameters:
564
+ channels (`int`):
565
+ Number of channels of LightAdapterResnetBlock's input and output.
566
+ """
567
+
568
+ def __init__(self, channels: int):
569
+ super().__init__()
570
+ self.block1 = nn.Conv2D(channels, channels, kernel_size=3, padding=1)
571
+ self.act = nn.ReLU()
572
+ self.block2 = nn.Conv2D(channels, channels, kernel_size=3, padding=1)
573
+
574
+ def forward(self, x: paddle.Tensor) -> paddle.Tensor:
575
+ r"""
576
+ This function takes input tensor x and processes it through one convolutional layer, ReLU activation, and
577
+ another convolutional layer and adds it to input tensor.
578
+ """
579
+
580
+ h = self.act(self.block1(x))
581
+ h = self.block2(h)
582
+
583
+ return h + x
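A shape sketch for the default `full_adapter` configuration above, assuming `T2IAdapter` is imported from `ppdiffusers.models` as registered in the `__init__.py` earlier in this diff; the 512x512 control image is an arbitrary example:

import paddle

from ppdiffusers.models import T2IAdapter

adapter = T2IAdapter(
    in_channels=3,
    channels=[320, 640, 1280, 1280],
    num_res_blocks=2,
    downscale_factor=8,
    adapter_type="full_adapter",
)

# Pixel unshuffle divides the resolution by 8, then each subsequent
# AdapterBlock halves it once, so a 512x512 input yields feature maps at
# 64x64, 32x32, 16x16 and 8x8 with 320/640/1280/1280 channels.
control = paddle.randn([1, 3, 512, 512])
features = adapter(control)
for f in features:
    print(f.shape)
print(adapter.total_downscale_factor)  # 8 * 2**3 == 64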
VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/models/attention.py ADDED
@@ -0,0 +1,721 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Any, Dict, Optional
15
+
16
+ import paddle
17
+ from paddle import nn
18
+ import paddle.nn.functional as F
19
+
20
+ from ..utils import USE_PEFT_BACKEND
21
+ from ..utils.paddle_utils import maybe_allow_in_graph
22
+ from .activations import GEGLU, GELU, ApproximateGELU
23
+ from .attention_processor import Attention, JointAttnProcessor2_5
24
+ from .embeddings import SinusoidalPositionalEmbedding
25
+ from .lora import LoRACompatibleLinear
26
+ from .normalization import AdaLayerNorm, AdaLayerNormContinuous, AdaLayerNormZero, SD35AdaLayerNormZeroX
27
+
28
+
29
+ def _chunked_feed_forward(
30
+ ff: nn.Layer, hidden_states: paddle.Tensor, chunk_dim: int, chunk_size: int, lora_scale: Optional[float] = None
31
+ ):
32
+ # "feed_forward_chunk_size" can be used to save memory
33
+ if hidden_states.shape[chunk_dim] % chunk_size != 0:
34
+ raise ValueError(
35
+ f"`hidden_states` dimension to be chunked: {hidden_states.shape[chunk_dim]} has to be divisible by chunk size: {chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
36
+ )
37
+
38
+ num_chunks = hidden_states.shape[chunk_dim] // chunk_size
39
+ if lora_scale is None:
40
+ ff_output = paddle.concat(
41
+ [ff(hid_slice) for hid_slice in hidden_states.chunk(num_chunks, axis=chunk_dim)],
42
+ axis=chunk_dim,
43
+ )
44
+ else:
45
+ # TODO(Patrick): LoRA scale can be removed once PEFT refactor is complete
46
+ ff_output = paddle.concat(
47
+ [ff(hid_slice, scale=lora_scale) for hid_slice in hidden_states.chunk(num_chunks, axis=chunk_dim)],
48
+ axis=chunk_dim,
49
+ )
50
+
51
+ return ff_output
52
+
53
+
54
+ @maybe_allow_in_graph
55
+ class GatedSelfAttentionDense(nn.Layer):
56
+ r"""
57
+ A gated self-attention dense layer that combines visual features and object features.
58
+
59
+ Parameters:
60
+ query_dim (`int`): The number of channels in the query.
61
+ context_dim (`int`): The number of channels in the context.
62
+ n_heads (`int`): The number of heads to use for attention.
63
+ d_head (`int`): The number of channels in each head.
64
+ """
65
+
66
+ def __init__(self, query_dim: int, context_dim: int, n_heads: int, d_head: int):
67
+ super().__init__()
68
+
69
+ # we need a linear projection since we need cat visual feature and obj feature
70
+ self.linear = nn.Linear(context_dim, query_dim)
71
+
72
+ self.attn = Attention(query_dim=query_dim, heads=n_heads, dim_head=d_head)
73
+ self.ff = FeedForward(query_dim, activation_fn="geglu")
74
+
75
+ self.norm1 = nn.LayerNorm(query_dim)
76
+ self.norm2 = nn.LayerNorm(query_dim)
77
+
78
+ self.alpha_attn = nn.Parameter(paddle.to_tensor(0.0))
79
+ self.alpha_dense = nn.Parameter(paddle.to_tensor(0.0))
80
+
81
+ self.enabled = True
82
+
83
+ def forward(self, x: paddle.Tensor, objs: paddle.Tensor) -> paddle.Tensor:
84
+ if not self.enabled:
85
+ return x
86
+
87
+ n_visual = x.shape[1]
88
+ objs = self.linear(objs)
89
+
90
+ x = x + self.alpha_attn.tanh() * self.attn(self.norm1(paddle.concat([x, objs], axis=1)))[:, :n_visual, :]
91
+ x = x + self.alpha_dense.tanh() * self.ff(self.norm2(x))
92
+
93
+ return x
94
+
95
+ @maybe_allow_in_graph
96
+ class JointTransformerBlock(nn.Layer):
97
+ r"""
98
+ A Transformer block following the MMDiT architecture, introduced in Stable Diffusion 3.
99
+ Reference: https://arxiv.org/abs/2403.03206
100
+ Parameters:
101
+ dim (`int`): The number of channels in the input and output.
102
+ num_attention_heads (`int`): The number of heads to use for multi-head attention.
103
+ attention_head_dim (`int`): The number of channels in each head.
104
+ context_pre_only (`bool`): Boolean to determine if we should add some blocks associated with the
105
+ processing of `context` conditions.
106
+ """
107
+
108
+ def __init__(
109
+ self,
110
+ dim: int,
111
+ num_attention_heads: int,
112
+ attention_head_dim: int,
113
+ context_pre_only: bool = False,
114
+ qk_norm: Optional[str] = None,
115
+ use_dual_attention: bool = False,
116
+ ):
117
+ super().__init__()
118
+
119
+ self.use_dual_attention = use_dual_attention
120
+ self.context_pre_only = context_pre_only
121
+ context_norm_type = "ada_norm_continous" if context_pre_only else "ada_norm_zero"
122
+
123
+ if use_dual_attention:
124
+ self.norm1 = SD35AdaLayerNormZeroX(dim)
125
+ else:
126
+ self.norm1 = AdaLayerNormZero(dim)
127
+
128
+ if context_norm_type == "ada_norm_continous":
129
+ self.norm1_context = AdaLayerNormContinuous(
130
+ dim, dim, elementwise_affine=False, eps=1e-6, bias=True, norm_type="layer_norm"
131
+ )
132
+ elif context_norm_type == "ada_norm_zero":
133
+ self.norm1_context = AdaLayerNormZero(dim)
134
+ else:
135
+ raise ValueError(
136
+ f"Unknown context_norm_type: {context_norm_type}, currently only support `ada_norm_continous`, `ada_norm_zero`"
137
+ )
138
+
139
+ if hasattr(F, "scaled_dot_product_attention"):
140
+ processor = JointAttnProcessor2_5()
141
+ else:
142
+ raise ValueError(
143
+ "The current Paddle version does not support the `scaled_dot_product_attention` function."
144
+ )
145
+
146
+ self.attn = Attention(
147
+ query_dim=dim,
148
+ cross_attention_dim=None,
149
+ added_kv_proj_dim=dim,
150
+ dim_head=attention_head_dim // num_attention_heads,
151
+ heads=num_attention_heads,
152
+ out_dim=attention_head_dim,
153
+ context_pre_only=context_pre_only,
154
+ bias=True,
155
+ processor=processor,
156
+ qk_norm=qk_norm,
157
+ eps=1e-6,
158
+ )
159
+
160
+ if use_dual_attention:
161
+ self.attn2 = Attention(
162
+ query_dim=dim,
163
+ cross_attention_dim=None,
164
+ dim_head=attention_head_dim // num_attention_heads,
165
+ heads=num_attention_heads,
166
+ out_dim=attention_head_dim,
167
+ bias=True,
168
+ processor=processor,
169
+ qk_norm=qk_norm,
170
+ eps=1e-6,
171
+ )
172
+ else:
173
+ self.attn2 = None
174
+
175
+ self.norm2 = nn.LayerNorm(dim, weight_attr=False, bias_attr=False, epsilon=1e-6)
176
+ self.ff = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate")
177
+
178
+ if not context_pre_only:
179
+ self.norm2_context = nn.LayerNorm(dim, weight_attr=False, bias_attr=False, epsilon=1e-6)
180
+ self.ff_context = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate")
181
+ else:
182
+ self.norm2_context = None
183
+ self.ff_context = None
184
+
185
+ # let chunk size default to None
186
+ self._chunk_size = None
187
+ self._chunk_dim = 0
188
+
189
+ # Copied from diffusers.models.attention.BasicTransformerBlock.set_chunk_feed_forward
190
+ def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int = 0):
191
+ # Sets chunk feed-forward
192
+ self._chunk_size = chunk_size
193
+ self._chunk_dim = dim
194
+
195
+ def forward(
196
+ self, hidden_states: paddle.Tensor, encoder_hidden_states: paddle.Tensor, temb: paddle.Tensor
197
+ ):
198
+ if self.use_dual_attention:
199
+ norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp, norm_hidden_states2, gate_msa2 = self.norm1(
200
+ hidden_states, emb=temb
201
+ )
202
+ else:
203
+ norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb)
204
+
205
+ if self.context_pre_only:
206
+ norm_encoder_hidden_states = self.norm1_context(encoder_hidden_states, temb)
207
+ else:
208
+ norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context(
209
+ encoder_hidden_states, emb=temb
210
+ )
211
+
212
+ # Attention.
213
+ attn_output, context_attn_output = self.attn(
214
+ hidden_states=norm_hidden_states, encoder_hidden_states=norm_encoder_hidden_states,
215
+ )
216
+
217
+ # Process attention outputs for the `hidden_states`.
218
+ attn_output = gate_msa.unsqueeze(1) * attn_output
219
+ hidden_states = hidden_states + attn_output
220
+
221
+ if self.use_dual_attention:
222
+ attn_output2 = self.attn2(hidden_states=norm_hidden_states2)
223
+ attn_output2 = gate_msa2.unsqueeze(1) * attn_output2
224
+ hidden_states = hidden_states + attn_output2
225
+
226
+ norm_hidden_states = self.norm2(hidden_states)
227
+ norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
228
+ if self._chunk_size is not None:
229
+ # "feed_forward_chunk_size" can be used to save memory
230
+ ff_output = _chunked_feed_forward(self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size)
231
+ else:
232
+ ff_output = self.ff(norm_hidden_states)
233
+ ff_output = gate_mlp.unsqueeze(1) * ff_output
234
+
235
+ hidden_states = hidden_states + ff_output
236
+
237
+ # Process attention outputs for the `encoder_hidden_states`.
238
+ if self.context_pre_only:
239
+ encoder_hidden_states = None
240
+ else:
241
+ context_attn_output = c_gate_msa.unsqueeze(1) * context_attn_output
242
+ encoder_hidden_states = encoder_hidden_states + context_attn_output
243
+
244
+ norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states)
245
+ norm_encoder_hidden_states = norm_encoder_hidden_states * (1 + c_scale_mlp[:, None]) + c_shift_mlp[:, None]
246
+ if self._chunk_size is not None:
247
+ # "feed_forward_chunk_size" can be used to save memory
248
+ context_ff_output = _chunked_feed_forward(
249
+ self.ff_context, norm_encoder_hidden_states, self._chunk_dim, self._chunk_size
250
+ )
251
+ else:
252
+ context_ff_output = self.ff_context(norm_encoder_hidden_states)
253
+ encoder_hidden_states = encoder_hidden_states + c_gate_mlp.unsqueeze(1) * context_ff_output
254
+
255
+ return encoder_hidden_states, hidden_states
256
+
257
+ @maybe_allow_in_graph
258
+ class BasicTransformerBlock(nn.Layer):
259
+ r"""
260
+ A basic Transformer block.
261
+
262
+ Parameters:
263
+ dim (`int`): The number of channels in the input and output.
264
+ num_attention_heads (`int`): The number of heads to use for multi-head attention.
265
+ attention_head_dim (`int`): The number of channels in each head.
266
+ dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
267
+ cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.
268
+ activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
269
+ num_embeds_ada_norm (:
270
+ obj: `int`, *optional*): The number of diffusion steps used during training. See `Transformer2DModel`.
271
+ attention_bias (:
272
+ obj: `bool`, *optional*, defaults to `False`): Configure if the attentions should contain a bias parameter.
273
+ only_cross_attention (`bool`, *optional*):
274
+ Whether to use only cross-attention layers. In this case two cross attention layers are used.
275
+ double_self_attention (`bool`, *optional*):
276
+ Whether to use two self-attention layers. In this case no cross attention layers are used.
277
+ upcast_attention (`bool`, *optional*):
278
+ Whether to upcast the attention computation to float32. This is useful for mixed precision training.
279
+ norm_elementwise_affine (`bool`, *optional*, defaults to `True`):
280
+ Whether to use learnable elementwise affine parameters for normalization.
281
+ norm_type (`str`, *optional*, defaults to `"layer_norm"`):
282
+ The normalization layer to use. Can be `"layer_norm"`, `"ada_norm"` or `"ada_norm_zero"`.
283
+ final_dropout (`bool` *optional*, defaults to False):
284
+ Whether to apply a final dropout after the last feed-forward layer.
285
+ attention_type (`str`, *optional*, defaults to `"default"`):
286
+ The type of attention to use. Can be `"default"` or `"gated"` or `"gated-text-image"`.
287
+ positional_embeddings (`str`, *optional*, defaults to `None`):
288
+ The type of positional embeddings to apply to.
289
+ num_positional_embeddings (`int`, *optional*, defaults to `None`):
290
+ The maximum number of positional embeddings to apply.
291
+ """
292
+
293
+ def __init__(
294
+ self,
295
+ dim: int,
296
+ num_attention_heads: int,
297
+ attention_head_dim: int,
298
+ dropout=0.0,
299
+ cross_attention_dim: Optional[int] = None,
300
+ activation_fn: str = "geglu",
301
+ num_embeds_ada_norm: Optional[int] = None,
302
+ attention_bias: bool = False,
303
+ only_cross_attention: bool = False,
304
+ double_self_attention: bool = False,
305
+ upcast_attention: bool = False,
306
+ norm_elementwise_affine: bool = True,
307
+ norm_type: str = "layer_norm", # 'layer_norm', 'ada_norm', 'ada_norm_zero', 'ada_norm_single'
308
+ norm_eps: float = 1e-5,
309
+ final_dropout: bool = False,
310
+ attention_type: str = "default",
311
+ positional_embeddings: Optional[str] = None,
312
+ num_positional_embeddings: Optional[int] = None,
313
+ ):
314
+ super().__init__()
315
+ self.only_cross_attention = only_cross_attention
316
+
317
+ self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
318
+ self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
319
+ self.use_ada_layer_norm_single = norm_type == "ada_norm_single"
320
+ self.use_layer_norm = norm_type == "layer_norm"
321
+
322
+ if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
323
+ raise ValueError(
324
+ f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
325
+ f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
326
+ )
327
+
328
+ if positional_embeddings and (num_positional_embeddings is None):
329
+ raise ValueError(
330
+ "If `positional_embedding` type is defined, `num_positional_embeddings` must also be defined."
331
+ )
332
+
333
+ if positional_embeddings == "sinusoidal":
334
+ self.pos_embed = SinusoidalPositionalEmbedding(dim, max_seq_length=num_positional_embeddings)
335
+ else:
336
+ self.pos_embed = None
337
+
338
+ # Define 3 blocks. Each block has its own normalization layer.
339
+ # 1. Self-Attn
340
+ if self.use_ada_layer_norm:
341
+ self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
342
+ elif self.use_ada_layer_norm_zero:
343
+ self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
344
+ else:
345
+ norm_elementwise_affine_kwargs = (
346
+ {} if norm_elementwise_affine else dict(weight_attr=False, bias_attr=False)
347
+ )
348
+ self.norm1 = nn.LayerNorm(dim, epsilon=norm_eps, **norm_elementwise_affine_kwargs)
349
+
350
+ self.attn1 = Attention(
351
+ query_dim=dim,
352
+ heads=num_attention_heads,
353
+ dim_head=attention_head_dim,
354
+ dropout=dropout,
355
+ bias=attention_bias,
356
+ cross_attention_dim=cross_attention_dim if only_cross_attention else None,
357
+ upcast_attention=upcast_attention,
358
+ )
359
+
360
+ # 2. Cross-Attn
361
+ if cross_attention_dim is not None or double_self_attention:
362
+ # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
363
+ # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
364
+ # the second cross attention block.
365
+ norm_elementwise_affine_kwargs = (
366
+ {} if norm_elementwise_affine else dict(weight_attr=False, bias_attr=False)
367
+ )
368
+ self.norm2 = (
369
+ AdaLayerNorm(dim, num_embeds_ada_norm)
370
+ if self.use_ada_layer_norm
371
+ else nn.LayerNorm(dim, epsilon=norm_eps, **norm_elementwise_affine_kwargs)
372
+ )
373
+ self.attn2 = Attention(
374
+ query_dim=dim,
375
+ cross_attention_dim=cross_attention_dim if not double_self_attention else None,
376
+ heads=num_attention_heads,
377
+ dim_head=attention_head_dim,
378
+ dropout=dropout,
379
+ bias=attention_bias,
380
+ upcast_attention=upcast_attention,
381
+ ) # is self-attn if encoder_hidden_states is none
382
+ else:
383
+ self.norm2 = None
384
+ self.attn2 = None
385
+
386
+ # 3. Feed-forward
387
+ if not self.use_ada_layer_norm_single:
388
+ norm_elementwise_affine_kwargs = (
389
+ {} if norm_elementwise_affine else dict(weight_attr=False, bias_attr=False)
390
+ )
391
+ self.norm3 = nn.LayerNorm(dim, epsilon=norm_eps, **norm_elementwise_affine_kwargs)
392
+
393
+ self.ff = FeedForward(
394
+ dim,
395
+ dropout=dropout,
396
+ activation_fn=activation_fn,
397
+ final_dropout=final_dropout,
398
+ )
399
+
400
+ # 4. Fuser
401
+ if attention_type == "gated" or attention_type == "gated-text-image":
402
+ self.fuser = GatedSelfAttentionDense(dim, cross_attention_dim, num_attention_heads, attention_head_dim)
403
+
404
+ # 5. Scale-shift for PixArt-Alpha.
405
+ if self.use_ada_layer_norm_single:
406
+ self.scale_shift_table = nn.Parameter(paddle.randn([6, dim]) / dim**0.5)
407
+
408
+ # let chunk size default to None
409
+ self._chunk_size = None
410
+ self._chunk_dim = 0
411
+
412
+ def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int = 0):
413
+ # Sets chunk feed-forward
414
+ self._chunk_size = chunk_size
415
+ self._chunk_dim = dim
416
+
417
+ def forward(
418
+ self,
419
+ hidden_states: paddle.Tensor,
420
+ attention_mask: Optional[paddle.Tensor] = None,
421
+ encoder_hidden_states: Optional[paddle.Tensor] = None,
422
+ encoder_attention_mask: Optional[paddle.Tensor] = None,
423
+ timestep: Optional[paddle.Tensor] = None,
424
+ cross_attention_kwargs: Dict[str, Any] = None,
425
+ class_labels: Optional[paddle.Tensor] = None,
426
+ ) -> paddle.Tensor:
427
+ # Notice that normalization is always applied before the real computation in the following blocks.
428
+ # 0. Self-Attention
429
+ batch_size = hidden_states.shape[0]
430
+
431
+ if self.use_ada_layer_norm:
432
+ norm_hidden_states = self.norm1(hidden_states, timestep)
433
+ elif self.use_ada_layer_norm_zero:
434
+ norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
435
+ hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
436
+ )
437
+ elif self.use_layer_norm:
438
+ norm_hidden_states = self.norm1(hidden_states)
439
+ elif self.use_ada_layer_norm_single:
440
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = (
441
+ self.scale_shift_table[None] + timestep.reshape([batch_size, 6, -1])
442
+ ).chunk(6, axis=1)
443
+ norm_hidden_states = self.norm1(hidden_states)
444
+ norm_hidden_states = norm_hidden_states * (1 + scale_msa) + shift_msa
445
+ norm_hidden_states = norm_hidden_states.squeeze(1)
446
+ else:
447
+ raise ValueError("Incorrect norm used")
448
+
449
+ if self.pos_embed is not None:
450
+ norm_hidden_states = self.pos_embed(norm_hidden_states)
451
+
452
+ # 1. Retrieve lora scale.
453
+ lora_scale = cross_attention_kwargs.get("scale", 1.0) if cross_attention_kwargs is not None else 1.0
454
+
455
+ # 2. Prepare GLIGEN inputs
456
+ cross_attention_kwargs = cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {}
457
+ gligen_kwargs = cross_attention_kwargs.pop("gligen", None)
458
+
459
+ attn_output = self.attn1(
460
+ norm_hidden_states,
461
+ encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
462
+ attention_mask=attention_mask,
463
+ **cross_attention_kwargs,
464
+ )
465
+ if self.use_ada_layer_norm_zero:
466
+ attn_output = gate_msa.unsqueeze(1) * attn_output
467
+ elif self.use_ada_layer_norm_single:
468
+ attn_output = gate_msa * attn_output
469
+
470
+ hidden_states = attn_output + hidden_states
471
+ if hidden_states.ndim == 4:
472
+ hidden_states = hidden_states.squeeze(1)
473
+
474
+ # 2.5 GLIGEN Control
475
+ if gligen_kwargs is not None:
476
+ hidden_states = self.fuser(hidden_states, gligen_kwargs["objs"])
477
+
478
+ # 3. Cross-Attention
479
+ if self.attn2 is not None:
480
+ if self.use_ada_layer_norm:
481
+ norm_hidden_states = self.norm2(hidden_states, timestep)
482
+ elif self.use_ada_layer_norm_zero or self.use_layer_norm:
483
+ norm_hidden_states = self.norm2(hidden_states)
484
+ elif self.use_ada_layer_norm_single:
485
+ # For PixArt norm2 isn't applied here:
486
+ # https://github.com/PixArt-alpha/PixArt-alpha/blob/0f55e922376d8b797edd44d25d0e7464b260dcab/diffusion/model/nets/PixArtMS.py#L70C1-L76C103
487
+ norm_hidden_states = hidden_states
488
+ else:
489
+ raise ValueError("Incorrect norm")
490
+
491
+ if self.pos_embed is not None and self.use_ada_layer_norm_single is False:
492
+ norm_hidden_states = self.pos_embed(norm_hidden_states)
493
+
494
+ attn_output = self.attn2(
495
+ norm_hidden_states,
496
+ encoder_hidden_states=encoder_hidden_states,
497
+ attention_mask=encoder_attention_mask,
498
+ **cross_attention_kwargs,
499
+ )
500
+ hidden_states = attn_output + hidden_states
501
+
502
+ # 4. Feed-forward
503
+ if not self.use_ada_layer_norm_single:
504
+ norm_hidden_states = self.norm3(hidden_states)
505
+
506
+ if self.use_ada_layer_norm_zero:
507
+ norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
508
+
509
+ if self.use_ada_layer_norm_single:
510
+ norm_hidden_states = self.norm2(hidden_states)
511
+ norm_hidden_states = norm_hidden_states * (1 + scale_mlp) + shift_mlp
512
+
513
+ if self._chunk_size is not None:
514
+ # "feed_forward_chunk_size" can be used to save memory
515
+ ff_output = _chunked_feed_forward(
516
+ self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size, lora_scale=lora_scale
517
+ )
518
+ else:
519
+ ff_output = self.ff(norm_hidden_states, scale=lora_scale)
520
+
521
+ if self.use_ada_layer_norm_zero:
522
+ ff_output = gate_mlp.unsqueeze(1) * ff_output
523
+ elif self.use_ada_layer_norm_single:
524
+ ff_output = gate_mlp * ff_output
525
+
526
+ hidden_states = ff_output + hidden_states
527
+ if hidden_states.ndim == 4:
528
+ hidden_states = hidden_states.squeeze(1)
529
+
530
+ return hidden_states
531
+
532
+
533
+ @maybe_allow_in_graph
534
+ class TemporalBasicTransformerBlock(nn.Layer):
535
+ r"""
536
+ A basic Transformer block for video like data.
537
+
538
+ Parameters:
539
+ dim (`int`): The number of channels in the input and output.
540
+ time_mix_inner_dim (`int`): The number of channels for temporal attention.
541
+ num_attention_heads (`int`): The number of heads to use for multi-head attention.
542
+ attention_head_dim (`int`): The number of channels in each head.
543
+ cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.
544
+ """
545
+
546
+ def __init__(
547
+ self,
548
+ dim: int,
549
+ time_mix_inner_dim: int,
550
+ num_attention_heads: int,
551
+ attention_head_dim: int,
552
+ cross_attention_dim: Optional[int] = None,
553
+ ):
554
+ super().__init__()
555
+ self.is_res = dim == time_mix_inner_dim
556
+
557
+ self.norm_in = nn.LayerNorm(dim)
558
+
559
+ # Define 3 blocks. Each block has its own normalization layer.
560
+ # 1. Self-Attn
561
+ self.norm_in = nn.LayerNorm(dim)
562
+ self.ff_in = FeedForward(
563
+ dim,
564
+ dim_out=time_mix_inner_dim,
565
+ activation_fn="geglu",
566
+ )
567
+
568
+ self.norm1 = nn.LayerNorm(time_mix_inner_dim)
569
+ self.attn1 = Attention(
570
+ query_dim=time_mix_inner_dim,
571
+ heads=num_attention_heads,
572
+ dim_head=attention_head_dim,
573
+ cross_attention_dim=None,
574
+ )
575
+
576
+ # 2. Cross-Attn
577
+ if cross_attention_dim is not None:
578
+ # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
579
+ # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
580
+ # the second cross attention block.
581
+ self.norm2 = nn.LayerNorm(time_mix_inner_dim)
582
+ self.attn2 = Attention(
583
+ query_dim=time_mix_inner_dim,
584
+ cross_attention_dim=cross_attention_dim,
585
+ heads=num_attention_heads,
586
+ dim_head=attention_head_dim,
587
+ ) # is self-attn if encoder_hidden_states is none
588
+ else:
589
+ self.norm2 = None
590
+ self.attn2 = None
591
+
592
+ # 3. Feed-forward
593
+ self.norm3 = nn.LayerNorm(time_mix_inner_dim)
594
+ self.ff = FeedForward(time_mix_inner_dim, activation_fn="geglu")
595
+
596
+ # let chunk size default to None
597
+ self._chunk_size = None
598
+ self._chunk_dim = None
599
+
600
+ def set_chunk_feed_forward(self, chunk_size: Optional[int], **kwargs):
601
+ # Sets chunk feed-forward
602
+ self._chunk_size = chunk_size
603
+ # chunk dim should be hardcoded to 1 to have better speed vs. memory trade-off
604
+ self._chunk_dim = 1
605
+
606
+ def forward(
607
+ self,
608
+ hidden_states: paddle.Tensor,
609
+ num_frames: int,
610
+ encoder_hidden_states: Optional[paddle.Tensor] = None,
611
+ ) -> paddle.Tensor:
612
+ # Notice that normalization is always applied before the real computation in the following blocks.
613
+ # 0. Self-Attention
614
+ batch_size = hidden_states.shape[0]
615
+
616
+ batch_frames, seq_length, channels = hidden_states.shape
617
+ batch_size = batch_frames // num_frames
618
+
619
+ hidden_states = hidden_states[None, :].reshape([batch_size, num_frames, seq_length, channels])
620
+ hidden_states = hidden_states.transpose([0, 2, 1, 3])
621
+ hidden_states = hidden_states.reshape([batch_size * seq_length, num_frames, channels])
622
+
623
+ residual = hidden_states
624
+ hidden_states = self.norm_in(hidden_states)
625
+
626
+ if self._chunk_size is not None:
627
+ hidden_states = _chunked_feed_forward(self.ff_in, hidden_states, self._chunk_dim, self._chunk_size)
628
+ else:
629
+ hidden_states = self.ff_in(hidden_states)
630
+
631
+ if self.is_res:
632
+ hidden_states = hidden_states + residual
633
+
634
+ norm_hidden_states = self.norm1(hidden_states)
635
+ attn_output = self.attn1(norm_hidden_states, encoder_hidden_states=None)
636
+ hidden_states = attn_output + hidden_states
637
+
638
+ # 3. Cross-Attention
639
+ if self.attn2 is not None:
640
+ norm_hidden_states = self.norm2(hidden_states)
641
+ attn_output = self.attn2(norm_hidden_states, encoder_hidden_states=encoder_hidden_states)
642
+ hidden_states = attn_output + hidden_states
643
+
644
+ # 4. Feed-forward
645
+ norm_hidden_states = self.norm3(hidden_states)
646
+
647
+ if self._chunk_size is not None:
648
+ ff_output = _chunked_feed_forward(self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size)
649
+ else:
650
+ ff_output = self.ff(norm_hidden_states)
651
+
652
+ if self.is_res:
653
+ hidden_states = ff_output + hidden_states
654
+ else:
655
+ hidden_states = ff_output
656
+
657
+ hidden_states = hidden_states[None, :].reshape([batch_size, seq_length, num_frames, channels])
658
+ hidden_states = hidden_states.transpose([0, 2, 1, 3])
659
+ hidden_states = hidden_states.reshape([batch_size * num_frames, seq_length, channels])
660
+
661
+ return hidden_states
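The temporal block above differs from the spatial one mainly in how it regroups the batch before attention. Below is a minimal standalone sketch of that reshape round trip (illustration only, not part of the diff; the shapes are made up):

```python
# Hedged sketch: the layout change TemporalBasicTransformerBlock.forward performs so that
# attention runs over the frame axis -- (batch*frames, seq, ch) is regrouped into
# (batch*seq, frames, ch) and restored afterwards. Shapes here are arbitrary.
import paddle

batch_size, num_frames, seq_length, channels = 2, 4, 6, 8
x = paddle.arange(batch_size * num_frames * seq_length * channels, dtype="float32")
x = x.reshape([batch_size * num_frames, seq_length, channels])

# to temporal layout
t = x.reshape([batch_size, num_frames, seq_length, channels])
t = t.transpose([0, 2, 1, 3]).reshape([batch_size * seq_length, num_frames, channels])

# ... temporal attention would run here ...

# back to spatial layout
y = t.reshape([batch_size, seq_length, num_frames, channels])
y = y.transpose([0, 2, 1, 3]).reshape([batch_size * num_frames, seq_length, channels])
assert paddle.equal_all(x, y).item()
```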
662
+
663
+
664
+ class FeedForward(nn.Layer):
665
+ r"""
666
+ A feed-forward layer.
667
+
668
+ Parameters:
669
+ dim (`int`): The number of channels in the input.
670
+ dim_out (`int`, *optional*): The number of channels in the output. If not given, defaults to `dim`.
671
+ mult (`int`, *optional*, defaults to 4): The multiplier to use for the hidden dimension.
672
+ dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
673
+ activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
674
+ final_dropout (`bool` *optional*, defaults to False): Apply a final dropout.
675
+ """
676
+
677
+ def __init__(
678
+ self,
679
+ dim: int,
680
+ dim_out: Optional[int] = None,
681
+ mult: int = 4,
682
+ dropout: float = 0.0,
683
+ activation_fn: str = "geglu",
684
+ final_dropout: bool = False,
685
+ inner_dim=None,
686
+ bias: bool = True,
687
+ ):
688
+ super().__init__()
689
+ if inner_dim is None:
690
+ inner_dim = int(dim * mult)
691
+ dim_out = dim_out if dim_out is not None else dim
692
+ linear_cls = LoRACompatibleLinear if not USE_PEFT_BACKEND else nn.Linear
693
+
694
+ if activation_fn == "gelu":
695
+ act_fn = GELU(dim, inner_dim, bias=bias)
696
+ if activation_fn == "gelu-approximate":
697
+ act_fn = GELU(dim, inner_dim, approximate="tanh", bias=bias)
698
+ elif activation_fn == "geglu":
699
+ act_fn = GEGLU(dim, inner_dim, bias=bias)
700
+ elif activation_fn == "geglu-approximate":
701
+ act_fn = ApproximateGELU(dim, inner_dim, bias=bias)
702
+
703
+ self.net = nn.LayerList([])
704
+ # project in
705
+ self.net.append(act_fn)
706
+ # project dropout
707
+ self.net.append(nn.Dropout(dropout))
708
+ # project out
709
+ self.net.append(linear_cls(inner_dim, dim_out, bias_attr=bias))
710
+ # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
711
+ if final_dropout:
712
+ self.net.append(nn.Dropout(dropout))
713
+
714
+ def forward(self, hidden_states: paddle.Tensor, scale: float = 1.0) -> paddle.Tensor:
715
+ compatible_cls = (GEGLU,) if USE_PEFT_BACKEND else (GEGLU, LoRACompatibleLinear)
716
+ for module in self.net:
717
+ if isinstance(module, compatible_cls):
718
+ hidden_states = module(hidden_states, scale)
719
+ else:
720
+ hidden_states = module(hidden_states)
721
+ return hidden_states
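As a quick illustration of the feed-forward chunking that `set_chunk_feed_forward` enables, here is a hedged, self-contained sketch (the `nn.Sequential` stack stands in for the `FeedForward` layer above, and the sizes are arbitrary): splitting the sequence dimension into chunks gives the same result as a single pass while lowering peak activation memory.

```python
# Hedged sketch, not part of the diffed file: chunked feed-forward over the sequence axis.
import paddle
import paddle.nn as nn

ff = nn.Sequential(nn.Linear(64, 256), nn.GELU(), nn.Linear(256, 64))
hidden_states = paddle.randn([2, 10, 64])  # (batch, seq_len, dim)

chunk_dim, chunk_size = 1, 5
num_chunks = hidden_states.shape[chunk_dim] // chunk_size
chunked = paddle.concat(
    [ff(chunk) for chunk in hidden_states.chunk(num_chunks, axis=chunk_dim)], axis=chunk_dim
)
# token-wise layers commute with chunking along the sequence dimension
assert paddle.allclose(chunked, ff(hidden_states), atol=1e-5).item()
```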
VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/models/autoencoder_tiny.py ADDED
@@ -0,0 +1,363 @@
1
+ # Copyright 2023 Ollin Boer Bohan and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ from dataclasses import dataclass
17
+ from typing import Optional, Tuple, Union
18
+
19
+ import paddle
20
+
21
+ from ..configuration_utils import ConfigMixin, register_to_config
22
+ from ..utils import BaseOutput
23
+ from ..utils.accelerate_utils import apply_forward_hook
24
+ from .modeling_utils import ModelMixin
25
+ from .vae import DecoderOutput, DecoderTiny, EncoderTiny
26
+
27
+
28
+ @dataclass
29
+ class AutoencoderTinyOutput(BaseOutput):
30
+ """
31
+ Output of AutoencoderTiny encoding method.
32
+
33
+ Args:
34
+ latents (`paddle.Tensor`): Encoded outputs of the `Encoder`.
35
+
36
+ """
37
+
38
+ latents: paddle.Tensor
39
+
40
+
41
+ class AutoencoderTiny(ModelMixin, ConfigMixin):
42
+ r"""
43
+ A tiny distilled VAE model for encoding images into latents and decoding latent representations into images.
44
+
45
+ [`AutoencoderTiny`] is a wrapper around the original implementation of `TAESD`.
46
+
47
+ This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented for
48
+ all models (such as downloading or saving).
49
+
50
+ Parameters:
51
+ in_channels (`int`, *optional*, defaults to 3): Number of channels in the input image.
52
+ out_channels (`int`, *optional*, defaults to 3): Number of channels in the output.
53
+ encoder_block_out_channels (`Tuple[int]`, *optional*, defaults to `(64, 64, 64, 64)`):
54
+ Tuple of integers representing the number of output channels for each encoder block. The length of the
55
+ tuple should be equal to the number of encoder blocks.
56
+ decoder_block_out_channels (`Tuple[int]`, *optional*, defaults to `(64, 64, 64, 64)`):
57
+ Tuple of integers representing the number of output channels for each decoder block. The length of the
58
+ tuple should be equal to the number of decoder blocks.
59
+ act_fn (`str`, *optional*, defaults to `"relu"`):
60
+ Activation function to be used throughout the model.
61
+ latent_channels (`int`, *optional*, defaults to 4):
62
+ Number of channels in the latent representation. The latent space acts as a compressed representation of
63
+ the input image.
64
+ upsampling_scaling_factor (`int`, *optional*, defaults to 2):
65
+ Scaling factor for upsampling in the decoder. It determines the size of the output image during the
66
+ upsampling process.
67
+ num_encoder_blocks (`Tuple[int]`, *optional*, defaults to `(1, 3, 3, 3)`):
68
+ Tuple of integers representing the number of encoder blocks at each stage of the encoding process. The
69
+ length of the tuple should be equal to the number of stages in the encoder. Each stage has a different
70
+ number of encoder blocks.
71
+ num_decoder_blocks (`Tuple[int]`, *optional*, defaults to `(3, 3, 3, 1)`):
72
+ Tuple of integers representing the number of decoder blocks at each stage of the decoding process. The
73
+ length of the tuple should be equal to the number of stages in the decoder. Each stage has a different
74
+ number of decoder blocks.
75
+ latent_magnitude (`float`, *optional*, defaults to 3.0):
76
+ Magnitude of the latent representation. This parameter scales the latent representation values to control
77
+ the extent of information preservation.
78
+ latent_shift (float, *optional*, defaults to 0.5):
79
+ Shift applied to the latent representation. This parameter controls the center of the latent space.
80
+ scaling_factor (`float`, *optional*, defaults to 1.0):
81
+ The component-wise standard deviation of the trained latent space computed using the first batch of the
82
+ training set. This is used to scale the latent space to have unit variance when training the diffusion
83
+ model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
84
+ diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1
85
+ / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image
86
+ Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper. For this Autoencoder,
87
+ however, no such scaling factor was used, hence the value of 1.0 as the default.
88
+ force_upcast (`bool`, *optional*, default to `False`):
89
+ If enabled it will force the VAE to run in float32 for high image resolution pipelines, such as SD-XL. VAE
90
+ can be fine-tuned / trained to a lower range without losing too much precision, in which case
91
+ `force_upcast` can be set to `False` (see this fp16-friendly
92
+ [AutoEncoder](https://huggingface.co/madebyollin/sdxl-vae-fp16-fix)).
93
+ """
94
+
95
+ _supports_gradient_checkpointing = True
96
+
97
+ @register_to_config
98
+ def __init__(
99
+ self,
100
+ in_channels: int = 3,
101
+ out_channels: int = 3,
102
+ encoder_block_out_channels: Tuple[int, ...] = (64, 64, 64, 64),
103
+ decoder_block_out_channels: Tuple[int, ...] = (64, 64, 64, 64),
104
+ act_fn: str = "relu",
105
+ latent_channels: int = 4,
106
+ upsampling_scaling_factor: int = 2,
107
+ num_encoder_blocks: Tuple[int, ...] = (1, 3, 3, 3),
108
+ num_decoder_blocks: Tuple[int, ...] = (3, 3, 3, 1),
109
+ latent_magnitude: int = 3,
110
+ latent_shift: float = 0.5,
111
+ force_upcast: bool = False,
112
+ scaling_factor: float = 1.0,
113
+ ):
114
+ super().__init__()
115
+
116
+ if len(encoder_block_out_channels) != len(num_encoder_blocks):
117
+ raise ValueError("`encoder_block_out_channels` should have the same length as `num_encoder_blocks`.")
118
+ if len(decoder_block_out_channels) != len(num_decoder_blocks):
119
+ raise ValueError("`decoder_block_out_channels` should have the same length as `num_decoder_blocks`.")
120
+
121
+ self.encoder = EncoderTiny(
122
+ in_channels=in_channels,
123
+ out_channels=latent_channels,
124
+ num_blocks=num_encoder_blocks,
125
+ block_out_channels=encoder_block_out_channels,
126
+ act_fn=act_fn,
127
+ )
128
+
129
+ self.decoder = DecoderTiny(
130
+ in_channels=latent_channels,
131
+ out_channels=out_channels,
132
+ num_blocks=num_decoder_blocks,
133
+ block_out_channels=decoder_block_out_channels,
134
+ upsampling_scaling_factor=upsampling_scaling_factor,
135
+ act_fn=act_fn,
136
+ )
137
+
138
+ self.latent_magnitude = latent_magnitude
139
+ self.latent_shift = latent_shift
140
+ self.scaling_factor = scaling_factor
141
+
142
+ self.use_slicing = False
143
+ self.use_tiling = False
144
+
145
+ # only relevant if vae tiling is enabled
146
+ self.spatial_scale_factor = 2**out_channels
147
+ self.tile_overlap_factor = 0.125
148
+ self.tile_sample_min_size = 512
149
+ self.tile_latent_min_size = self.tile_sample_min_size // self.spatial_scale_factor
150
+
151
+ self.register_to_config(block_out_channels=decoder_block_out_channels)
152
+ self.register_to_config(force_upcast=False)
153
+
154
+ def _set_gradient_checkpointing(self, module, value: bool = False) -> None:
155
+ if isinstance(module, (EncoderTiny, DecoderTiny)):
156
+ module.gradient_checkpointing = value
157
+
158
+ def scale_latents(self, x: paddle.Tensor) -> paddle.Tensor:
159
+ """raw latents -> [0, 1]"""
160
+ return (x / (2 * self.latent_magnitude) + self.latent_shift).clip(0, 1)
161
+
162
+ def unscale_latents(self, x: paddle.Tensor) -> paddle.Tensor:
163
+ """[0, 1] -> raw latents"""
164
+ return (x - self.latent_shift) * (2 * self.latent_magnitude)
165
+
166
+ def enable_slicing(self) -> None:
167
+ r"""
168
+ Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
169
+ compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
170
+ """
171
+ self.use_slicing = True
172
+
173
+ def disable_slicing(self) -> None:
174
+ r"""
175
+ Disable sliced VAE decoding. If `enable_slicing` was previously enabled, this method will go back to computing
176
+ decoding in one step.
177
+ """
178
+ self.use_slicing = False
179
+
180
+ def enable_tiling(self, use_tiling: bool = True) -> None:
181
+ r"""
182
+ Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
183
+ compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
184
+ processing larger images.
185
+ """
186
+ self.use_tiling = use_tiling
187
+
188
+ def disable_tiling(self) -> None:
189
+ r"""
190
+ Disable tiled VAE decoding. If `enable_tiling` was previously enabled, this method will go back to computing
191
+ decoding in one step.
192
+ """
193
+ self.enable_tiling(False)
194
+
195
+ def _tiled_encode(self, x: paddle.Tensor) -> paddle.Tensor:
196
+ r"""Encode a batch of images using a tiled encoder.
197
+
198
+ When this option is enabled, the VAE will split the input tensor into tiles to compute encoding in several
199
+ steps. This is useful to keep memory use constant regardless of image size. To avoid tiling artifacts, the
200
+ tiles overlap and are blended together to form a smooth output.
201
+
202
+ Args:
203
+ x (`paddle.Tensor`): Input batch of images.
204
+
205
+ Returns:
206
+ `paddle.Tensor`: Encoded batch of images.
207
+ """
208
+ # scale of encoder output relative to input
209
+ sf = self.spatial_scale_factor
210
+ tile_size = self.tile_sample_min_size
211
+
212
+ # number of pixels to blend and to traverse between tiles
213
+ blend_size = int(tile_size * self.tile_overlap_factor)
214
+ traverse_size = tile_size - blend_size
215
+
216
+ # tiles index (up/left)
217
+ ti = range(0, x.shape[-2], traverse_size)
218
+ tj = range(0, x.shape[-1], traverse_size)
219
+
220
+ # mask for blending
221
+ blend_masks = paddle.stack(
222
+ paddle.meshgrid([paddle.arange(tile_size / sf) / (blend_size / sf - 1)] * 2, indexing="ij")
223
+ )
224
+ blend_masks = blend_masks.clip(0, 1)
225
+
226
+ # output array
227
+ out = paddle.zeros([x.shape[0], 4, x.shape[-2] // sf, x.shape[-1] // sf])
228
+ for i in ti:
229
+ for j in tj:
230
+ tile_in = x[..., i : i + tile_size, j : j + tile_size]
231
+ # tile result
232
+ tile_out = out[..., i // sf : (i + tile_size) // sf, j // sf : (j + tile_size) // sf]
233
+ tile = self.encoder(tile_in)
234
+ h, w = tile.shape[-2], tile.shape[-1]
235
+ # blend tile result into output
236
+ blend_mask_i = paddle.ones_like(blend_masks[0]) if i == 0 else blend_masks[0]
237
+ blend_mask_j = paddle.ones_like(blend_masks[1]) if j == 0 else blend_masks[1]
238
+ blend_mask = blend_mask_i * blend_mask_j
239
+ tile, blend_mask = tile[..., :h, :w], blend_mask[..., :h, :w]
240
+
241
+ # NOTE: in-place copy_ does not work in PaddlePaddle, so the blended tile is assigned back directly
242
+ # tile_out.copy_(blend_mask * tile + (1 - blend_mask) * tile_out)
243
+ out[..., i // sf : (i + tile_size) // sf, j // sf : (j + tile_size) // sf] = (
244
+ blend_mask * tile + (1 - blend_mask) * tile_out
245
+ )
246
+
247
+ return out
248
+
249
+ def _tiled_decode(self, x: paddle.Tensor) -> paddle.Tensor:
250
+ r"""Encode a batch of images using a tiled encoder.
251
+
252
+ When this option is enabled, the VAE will split the input tensor into tiles to compute decoding in several
253
+ steps. This is useful to keep memory use constant regardless of image size. To avoid tiling artifacts, the
254
+ tiles overlap and are blended together to form a smooth output.
255
+
256
+ Args:
257
+ x (`paddle.Tensor`): Input batch of latent representations.
258
+
259
+ Returns:
260
+ `paddle.Tensor`: Decoded batch of images.
261
+ """
262
+ # scale of decoder output relative to input
263
+ sf = self.spatial_scale_factor
264
+ tile_size = self.tile_latent_min_size
265
+
266
+ # number of pixels to blend and to traverse between tiles
267
+ blend_size = int(tile_size * self.tile_overlap_factor)
268
+ traverse_size = tile_size - blend_size
269
+
270
+ # tiles index (up/left)
271
+ ti = range(0, x.shape[-2], traverse_size)
272
+ tj = range(0, x.shape[-1], traverse_size)
273
+
274
+ # mask for blending
275
+ blend_masks = paddle.stack(
276
+ paddle.meshgrid([paddle.arange(tile_size * sf) / (blend_size * sf - 1)] * 2, indexing="ij")
277
+ )
278
+ blend_masks = blend_masks.clip(0, 1)
279
+
280
+ # output array
281
+ out = paddle.zeros([x.shape[0], 3, x.shape[-2] * sf, x.shape[-1] * sf])
282
+ for i in ti:
283
+ for j in tj:
284
+ tile_in = x[..., i : i + tile_size, j : j + tile_size]
285
+ # tile result
286
+ tile_out = out[..., i * sf : (i + tile_size) * sf, j * sf : (j + tile_size) * sf]
287
+ tile = self.decoder(tile_in)
288
+ h, w = tile.shape[-2], tile.shape[-1]
289
+ # blend tile result into output
290
+ blend_mask_i = paddle.ones_like(blend_masks[0]) if i == 0 else blend_masks[0]
291
+ blend_mask_j = paddle.ones_like(blend_masks[1]) if j == 0 else blend_masks[1]
292
+ blend_mask = (blend_mask_i * blend_mask_j)[..., :h, :w]
293
+
294
+ # NOTE: in-place copy_ does not work in PaddlePaddle, so the blended tile is assigned back directly
295
+ # tile_out.copy_(blend_mask * tile + (1 - blend_mask) * tile_out)
296
+ out[..., i * sf : (i + tile_size) * sf, j * sf : (j + tile_size) * sf] = (
297
+ blend_mask * tile + (1 - blend_mask) * tile_out
298
+ )
299
+ return out
300
+
301
+ @apply_forward_hook
302
+ def encode(self, x: paddle.Tensor, return_dict: bool = True) -> Union[AutoencoderTinyOutput, Tuple[paddle.Tensor]]:
303
+ # TODO junnyu, support float16
304
+ x = x.cast(self.encoder.layers[0].weight.dtype)
305
+ if self.use_slicing and x.shape[0] > 1:
306
+ output = [
307
+ self._tiled_encode(x_slice) if self.use_tiling else self.encoder(x_slice) for x_slice in x.chunk(x.shape[0])
308
+ ]
309
+ output = paddle.concat(output)
310
+ else:
311
+ output = self._tiled_encode(x) if self.use_tiling else self.encoder(x)
312
+
313
+ if not return_dict:
314
+ return (output,)
315
+
316
+ return AutoencoderTinyOutput(latents=output)
317
+
318
+ @apply_forward_hook
319
+ def decode(
320
+ self, x: paddle.Tensor, generator: Optional[paddle.Generator] = None, return_dict: bool = True
321
+ ) -> Union[DecoderOutput, Tuple[paddle.Tensor]]:
322
+ # TODO junnyu, add this to support pure fp16
323
+ x = x.cast(self.decoder.layers[0].weight.dtype)
324
+
325
+ if self.use_slicing and x.shape[0] > 1:
326
+ output = [
327
+ self._tiled_decode(x_slice) if self.use_tiling else self.decoder(x_slice) for x_slice in x.chunk(x.shape[0])
328
+ ]
329
+ output = paddle.concat(output)
330
+ else:
331
+ output = self._tiled_decode(x) if self.use_tiling else self.decoder(x)
332
+
333
+ if not return_dict:
334
+ return (output,)
335
+
336
+ return DecoderOutput(sample=output)
337
+
338
+ def forward(
339
+ self,
340
+ sample: paddle.Tensor,
341
+ return_dict: bool = True,
342
+ ) -> Union[DecoderOutput, Tuple[paddle.Tensor]]:
343
+ r"""
344
+ Args:
345
+ sample (`paddle.Tensor`): Input sample.
346
+ return_dict (`bool`, *optional*, defaults to `True`):
347
+ Whether or not to return a [`DecoderOutput`] instead of a plain tuple.
348
+ """
349
+ enc = self.encode(sample).latents
350
+
351
+ # scale latents to be in [0, 1], then quantize latents to a byte tensor,
352
+ # as if we were storing the latents in an RGBA uint8 image.
353
+ scaled_enc = (self.scale_latents(enc) * 255).round().cast("byte")
354
+
355
+ # unquantize latents back into [0, 1], then unscale latents back to their original range,
356
+ # as if we were loading the latents from an RGBA uint8 image.
357
+ unscaled_enc = self.unscale_latents(scaled_enc / 255.0)
358
+
359
+ dec = self.decode(unscaled_enc)
360
+
361
+ if not return_dict:
362
+ return (dec,)
363
+ return DecoderOutput(sample=dec)
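The `forward` above round-trips the latents through the same scale / quantize / unscale path one would use to store them as a uint8 image. A hedged standalone sketch of that path, with the scaling written as plain functions and arbitrary test values (not part of the diffed file):

```python
# Hedged sketch of the AutoencoderTiny latent (un)scaling round trip.
import paddle

latent_magnitude, latent_shift = 3.0, 0.5

def scale_latents(x):    # raw latents -> [0, 1]
    return (x / (2 * latent_magnitude) + latent_shift).clip(0, 1)

def unscale_latents(x):  # [0, 1] -> raw latents
    return (x - latent_shift) * (2 * latent_magnitude)

latents = paddle.uniform([1, 4, 8, 8], min=-3.0, max=3.0)
# quantize to 8 bits as in forward(), then dequantize
quantized = (scale_latents(latents) * 255).round() / 255.0
roundtrip = unscale_latents(quantized)
print((roundtrip - latents).abs().max().item())  # small quantization error (< ~0.012)
```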
VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/models/controlnet_sd3.py ADDED
@@ -0,0 +1,422 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from dataclasses import dataclass
15
+ from typing import Any, Dict, List, Optional, Tuple, Union
16
+
17
+ import paddle
18
+ import paddle.nn as nn
19
+ import paddle.nn.functional as F
20
+ from paddle.distributed.fleet.utils import recompute
21
+
22
+ from ..configuration_utils import ConfigMixin, register_to_config
23
+ from ..loaders import FromOriginalControlnetMixin
24
+ from ..models.attention import JointTransformerBlock
25
+ from ..models.attention_processor import Attention, AttentionProcessor
26
+ from ..models.modeling_outputs import Transformer2DModelOutput
27
+ from ..models.modeling_utils import ModelMixin
28
+ from ..utils import (
29
+ USE_PEFT_BACKEND,
30
+ logging,
31
+ recompute_use_reentrant,
32
+ scale_lora_layers,
33
+ unscale_lora_layers,
34
+ use_old_recompute,
35
+ )
36
+ from .controlnet import BaseOutput, zero_module
37
+ from .embeddings import CombinedTimestepTextProjEmbeddings, PatchEmbed
38
+
39
+
40
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
41
+
42
+
43
+ @dataclass
44
+ class SD3ControlNetOutput(BaseOutput):
45
+ controlnet_block_samples: Tuple[paddle.Tensor]
46
+
47
+
48
+ class SD3ControlNetModel(ModelMixin, ConfigMixin, FromOriginalControlnetMixin):
49
+ _supports_gradient_checkpointing = True
50
+
51
+ @register_to_config
52
+ def __init__(
53
+ self,
54
+ sample_size: int = 128,
55
+ patch_size: int = 2,
56
+ in_channels: int = 16,
57
+ num_layers: int = 18,
58
+ attention_head_dim: int = 64,
59
+ num_attention_heads: int = 18,
60
+ joint_attention_dim: int = 4096,
61
+ caption_projection_dim: int = 1152,
62
+ pooled_projection_dim: int = 2048,
63
+ out_channels: int = 16,
64
+ pos_embed_max_size: int = 96,
65
+ extra_conditioning_channels: int = 0,
66
+ ):
67
+ super().__init__()
68
+ default_out_channels = in_channels
69
+ self.out_channels = out_channels if out_channels is not None else default_out_channels
70
+ self.inner_dim = num_attention_heads * attention_head_dim
71
+
72
+ self.pos_embed = PatchEmbed(
73
+ height=sample_size,
74
+ width=sample_size,
75
+ patch_size=patch_size,
76
+ in_channels=in_channels,
77
+ embed_dim=self.inner_dim,
78
+ pos_embed_max_size=pos_embed_max_size,
79
+ )
80
+ self.time_text_embed = CombinedTimestepTextProjEmbeddings(
81
+ embedding_dim=self.inner_dim, pooled_projection_dim=pooled_projection_dim
82
+ )
83
+ self.context_embedder = nn.Linear(joint_attention_dim, caption_projection_dim)
84
+
85
+ # `attention_head_dim` is doubled to account for the mixing.
86
+ # It needs to be crafted when we get the actual checkpoints.
87
+ self.transformer_blocks = nn.LayerList(
88
+ [
89
+ JointTransformerBlock(
90
+ dim=self.inner_dim,
91
+ num_attention_heads=num_attention_heads,
92
+ # attention_head_dim=self.config.attention_head_dim,
93
+ attention_head_dim=self.inner_dim,
94
+ context_pre_only=False,
95
+ )
96
+ for i in range(num_layers)
97
+ ]
98
+ )
99
+
100
+ # controlnet_blocks
101
+ self.controlnet_blocks = nn.LayerList([])
102
+ for _ in range(len(self.transformer_blocks)):
103
+ controlnet_block = nn.Linear(self.inner_dim, self.inner_dim)
104
+ controlnet_block = zero_module(controlnet_block)
105
+ self.controlnet_blocks.append(controlnet_block)
106
+ pos_embed_input = PatchEmbed(
107
+ height=sample_size,
108
+ width=sample_size,
109
+ patch_size=patch_size,
110
+ in_channels=in_channels + extra_conditioning_channels,
111
+ embed_dim=self.inner_dim,
112
+ add_pos_embed=False, # to verify
113
+ )
114
+ self.pos_embed_input = zero_module(pos_embed_input)
115
+
116
+ self.gradient_checkpointing = False
117
+
118
+ # Copied from diffusers.models.unets.unet_3d_condition.UNet3DConditionModel.enable_forward_chunking
119
+ def enable_forward_chunking(self, chunk_size: Optional[int] = None, dim: int = 0) -> None:
120
+ """
121
+ Sets the attention processor to use [feed forward
122
+ chunking](https://huggingface.co/blog/reformer#2-chunked-feed-forward-layers).
123
+ Parameters:
124
+ chunk_size (`int`, *optional*):
125
+ The chunk size of the feed-forward layers. If not specified, will run feed-forward layer individually
126
+ over each tensor of dim=`dim`.
127
+ dim (`int`, *optional*, defaults to `0`):
128
+ The dimension over which the feed-forward computation should be chunked. Choose between dim=0 (batch)
129
+ or dim=1 (sequence length).
130
+ """
131
+ if dim not in [0, 1]:
132
+ raise ValueError(f"Make sure to set `dim` to either 0 or 1, not {dim}")
133
+
134
+ # By default chunk size is 1
135
+ chunk_size = chunk_size or 1
136
+
137
+ def fn_recursive_feed_forward(module: paddle.nn.Layer, chunk_size: int, dim: int):
138
+ if hasattr(module, "set_chunk_feed_forward"):
139
+ module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim)
140
+
141
+ for child in module.children():
142
+ fn_recursive_feed_forward(child, chunk_size, dim)
143
+
144
+ for module in self.children():
145
+ fn_recursive_feed_forward(module, chunk_size, dim)
146
+
147
+ @property
148
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
149
+ def attn_processors(self) -> Dict[str, AttentionProcessor]:
150
+ r"""
151
+ Returns:
152
+ `dict` of attention processors: A dictionary containing all attention processors used in the model,
153
+ indexed by their weight names.
154
+ """
155
+ # set recursively
156
+ processors = {}
157
+
158
+ def fn_recursive_add_processors(name: str, module: paddle.nn.Layer, processors: Dict[str, AttentionProcessor]):
159
+ if hasattr(module, "get_processor"):
160
+ processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True)
161
+
162
+ for sub_name, child in module.named_children():
163
+ fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
164
+
165
+ return processors
166
+
167
+ for name, module in self.named_children():
168
+ fn_recursive_add_processors(name, module, processors)
169
+
170
+ return processors
171
+
172
+
173
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
174
+ def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
175
+ r"""
176
+ Sets the attention processor to use to compute attention.
177
+ Parameters:
178
+ processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
179
+ The instantiated processor class or a dictionary of processor classes that will be set as the processor
180
+ for **all** `Attention` layers.
181
+ If `processor` is a dict, the key needs to define the path to the corresponding cross attention
182
+ processor. This is strongly recommended when setting trainable attention processors.
183
+ """
184
+ count = len(self.attn_processors.keys())
185
+
186
+ if isinstance(processor, dict) and len(processor) != count:
187
+ raise ValueError(
188
+ f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
189
+ f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
190
+ )
191
+
192
+ def fn_recursive_attn_processor(name: str, module: paddle.nn.Layer, processor):
193
+ if hasattr(module, "set_processor"):
194
+ if not isinstance(processor, dict):
195
+ module.set_processor(processor)
196
+ else:
197
+ module.set_processor(processor.pop(f"{name}.processor"))
198
+
199
+ for sub_name, child in module.named_children():
200
+ fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
201
+
202
+ for name, module in self.named_children():
203
+ fn_recursive_attn_processor(name, module, processor)
204
+
205
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections
206
+ def fuse_qkv_projections(self):
207
+ """
208
+ Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value)
209
+ are fused. For cross-attention modules, key and value projection matrices are fused.
210
+ <Tip warning={true}>
211
+ This API is 🧪 experimental.
212
+ </Tip>
213
+ """
214
+ self.original_attn_processors = None
215
+
216
+ for _, attn_processor in self.attn_processors.items():
217
+ if "Added" in str(attn_processor.__class__.__name__):
218
+ raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.")
219
+
220
+ self.original_attn_processors = self.attn_processors
221
+
222
+ for module in self.modules():
223
+ if isinstance(module, Attention):
224
+ module.fuse_projections(fuse=True)
225
+
226
+ # TODO: ?
227
+ # self.set_attn_processor(FusedJointAttnProcessor2_0())
228
+
229
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections
230
+ def unfuse_qkv_projections(self):
231
+ """Disables the fused QKV projection if enabled.
232
+ <Tip warning={true}>
233
+ This API is 🧪 experimental.
234
+ </Tip>
235
+ """
236
+ if self.original_attn_processors is not None:
237
+ self.set_attn_processor(self.original_attn_processors)
238
+
239
+ def _set_gradient_checkpointing(self, module, value=False):
240
+ if hasattr(module, "gradient_checkpointing"):
241
+ module.gradient_checkpointing = value
242
+
243
+ # TODO
244
+ @classmethod
245
+ def from_transformer(
246
+ cls, transformer, num_layers=12, num_extra_conditioning_channels=1, load_weights_from_transformer=True
247
+ ):
248
+ config = transformer.config
249
+ config["num_layers"] = num_layers or config.num_layers
250
+ config["extra_conditioning_channels"] = num_extra_conditioning_channels
251
+ controlnet = cls(**config)
252
+
253
+ if load_weights_from_transformer:
254
+ controlnet.pos_embed.load_dict(transformer.pos_embed.state_dict())
255
+ controlnet.time_text_embed.load_dict(transformer.time_text_embed.state_dict())
256
+ controlnet.context_embedder.load_dict(transformer.context_embedder.state_dict())
257
+ controlnet.transformer_blocks.load_dict(transformer.transformer_blocks.state_dict(), strict=False)
258
+
259
+ controlnet.pos_embed_input = zero_module(controlnet.pos_embed_input)
260
+
261
+ return controlnet
262
+
263
+ def forward(
264
+ self,
265
+ hidden_states: paddle.Tensor,
266
+ controlnet_cond: paddle.Tensor,
267
+ conditioning_scale: float = 1.0,
268
+ encoder_hidden_states: paddle.Tensor = None,
269
+ pooled_projections: paddle.Tensor = None,
270
+ timestep: paddle.Tensor = None,
271
+ joint_attention_kwargs: Optional[Dict[str, Any]] = None,
272
+ return_dict: bool = True,
273
+ ) -> Union[paddle.Tensor, Transformer2DModelOutput]:
274
+ """
275
+ The [`SD3Transformer2DModel`] forward method.
276
+
277
+ Args:
278
+ hidden_states (`paddle.Tensor` of shape `(batch size, channel, height, width)`):
279
+ Input `hidden_states`.
280
+ controlnet_cond (`paddle.Tensor`):
281
+ The conditional input tensor of shape `(batch_size, sequence_length, hidden_size)`.
282
+ conditioning_scale (`float`, defaults to `1.0`):
283
+ The scale factor for ControlNet outputs.
284
+ encoder_hidden_states (`paddle.Tensor` of shape `(batch size, sequence_len, embed_dims)`):
285
+ Conditional embeddings (embeddings computed from the input conditions such as prompts) to use.
286
+ pooled_projections (`paddle.Tensor` of shape `(batch_size, projection_dim)`): Embeddings projected
287
+ from the embeddings of input conditions.
288
+ timestep ( `paddle.Tensor`):
289
+ Used to indicate denoising step.
290
+ joint_attention_kwargs (`dict`, *optional*):
291
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
292
+ `self.processor` in
293
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
294
+ return_dict (`bool`, *optional*, defaults to `True`):
295
+ Whether or not to return a [`~models.transformer_2d.Transformer2DModelOutput`] instead of a plain
296
+ tuple.
297
+
298
+ Returns:
299
+ If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a
300
+ `tuple` where the first element is the sample tensor.
301
+ """
302
+ if joint_attention_kwargs is not None:
303
+ joint_attention_kwargs = joint_attention_kwargs.copy()
304
+ lora_scale = joint_attention_kwargs.pop("scale", 1.0)
305
+ else:
306
+ lora_scale = 1.0
307
+
308
+ if USE_PEFT_BACKEND:
309
+ # weight the lora layers by setting `lora_scale` for each PEFT layer
310
+ scale_lora_layers(self, lora_scale)
311
+ else:
312
+ if joint_attention_kwargs is not None and joint_attention_kwargs.get("scale", None) is not None:
313
+ logger.warning(
314
+ "Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective."
315
+ )
316
+
317
+ hidden_states = self.pos_embed(hidden_states) # takes care of adding positional embeddings too.
318
+ temb = self.time_text_embed(timestep, pooled_projections)
319
+ encoder_hidden_states = self.context_embedder(encoder_hidden_states)
320
+
321
+ # add
322
+ hidden_states = hidden_states + self.pos_embed_input(controlnet_cond)
323
+
324
+ block_res_samples = ()
325
+
326
+ for block in self.transformer_blocks:
327
+ if self.training and self.gradient_checkpointing and not use_old_recompute():
328
+
329
+ def create_custom_forward(module, return_dict=None):
330
+ def custom_forward(*inputs):
331
+ if return_dict is not None:
332
+ return module(*inputs, return_dict=return_dict)
333
+ else:
334
+ return module(*inputs)
335
+
336
+ return custom_forward
337
+
338
+ ckpt_kwargs = {} if recompute_use_reentrant() else {"use_reentrant": False}
339
+ hidden_states = recompute(
340
+ create_custom_forward(block),
341
+ hidden_states,
342
+ encoder_hidden_states,
343
+ temb,
344
+ **ckpt_kwargs,
345
+ )
346
+
347
+ else:
348
+ encoder_hidden_states, hidden_states = block(
349
+ hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, temb=temb
350
+ )
351
+
352
+ block_res_samples = block_res_samples + (hidden_states,)
353
+
354
+ controlnet_block_res_samples = ()
355
+ for block_res_sample, controlnet_block in zip(block_res_samples, self.controlnet_blocks):
356
+ block_res_sample = controlnet_block(block_res_sample)
357
+ controlnet_block_res_samples = controlnet_block_res_samples + (block_res_sample,)
358
+
359
+ # 6. scaling
360
+ controlnet_block_res_samples = [sample * conditioning_scale for sample in controlnet_block_res_samples]
361
+
362
+ if USE_PEFT_BACKEND:
363
+ # remove `lora_scale` from each PEFT layer
364
+ unscale_lora_layers(self, lora_scale)
365
+
366
+ if not return_dict:
367
+ return (controlnet_block_res_samples,)
368
+
369
+ return SD3ControlNetOutput(controlnet_block_samples=controlnet_block_res_samples)
370
+
371
+
372
+ class SD3MultiControlNetModel(ModelMixin):
373
+ r"""
374
+ `SD3ControlNetModel` wrapper class for Multi-SD3ControlNet
375
+
376
+ This module is a wrapper for multiple instances of the `SD3ControlNetModel`. The `forward()` API is designed to be
377
+ compatible with `SD3ControlNetModel`.
378
+
379
+ Args:
380
+ controlnets (`List[SD3ControlNetModel]`):
381
+ Provides additional conditioning to the unet during the denoising process. You must set multiple
382
+ `SD3ControlNetModel` as a list.
383
+ """
384
+
385
+ def __init__(self, controlnets):
386
+ super().__init__()
387
+ self.nets = nn.LayerList(controlnets)
388
+
389
+ def forward(
390
+ self,
391
+ hidden_states: paddle.Tensor,
392
+ controlnet_cond: List[paddle.Tensor],
393
+ conditioning_scale: List[float],
394
+ pooled_projections: paddle.Tensor,
395
+ encoder_hidden_states: paddle.Tensor = None,
396
+ timestep: paddle.Tensor = None,
397
+ joint_attention_kwargs: Optional[Dict[str, Any]] = None,
398
+ return_dict: bool = True,
399
+ ) -> Union[SD3ControlNetOutput, Tuple]:
400
+ for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
401
+ block_samples = controlnet(
402
+ hidden_states=hidden_states,
403
+ timestep=timestep,
404
+ encoder_hidden_states=encoder_hidden_states,
405
+ pooled_projections=pooled_projections,
406
+ controlnet_cond=image,
407
+ conditioning_scale=scale,
408
+ joint_attention_kwargs=joint_attention_kwargs,
409
+ return_dict=return_dict,
410
+ )
411
+
412
+ # merge samples
413
+ if i == 0:
414
+ control_block_samples = block_samples
415
+ else:
416
+ control_block_samples = [
417
+ control_block_sample + block_sample
418
+ for control_block_sample, block_sample in zip(control_block_samples[0], block_samples[0])
419
+ ]
420
+ control_block_samples = (tuple(control_block_samples),)
421
+
422
+ return control_block_samples
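The multi-controlnet wrapper above merges the per-block residuals from each controlnet by elementwise addition, with every net already weighted by its own `conditioning_scale`. A hedged, tensor-only sketch of that merge (illustration only, not part of the diff; shapes and values are made up):

```python
# Hedged sketch of how SD3MultiControlNetModel combines per-block residuals from several nets.
import paddle

def merge_controlnet_residuals(per_net_samples):
    # per_net_samples: list over controlnets, each a list/tuple of per-block tensors
    merged = list(per_net_samples[0])          # first net initializes the running sum
    for samples in per_net_samples[1:]:
        merged = [m + s for m, s in zip(merged, samples)]
    return tuple(merged)

net_a = [paddle.ones([1, 4, 8]) * 0.5 for _ in range(3)]   # e.g. conditioning_scale 0.5 applied
net_b = [paddle.ones([1, 4, 8]) * 0.25 for _ in range(3)]  # e.g. conditioning_scale 0.25 applied
merged = merge_controlnet_residuals([net_a, net_b])
print(len(merged), merged[0].mean().item())  # 3 blocks, each block summing to 0.75
```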
VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/models/dit_llama.py ADDED
@@ -0,0 +1,576 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import math
16
+ from typing import Optional
17
+
18
+ import paddle
19
+ import paddle.nn as nn
20
+ import paddle.nn.functional as F
21
+ from paddle.nn.functional.flash_attention import flash_attention
22
+
23
+ from ..configuration_utils import ConfigMixin, register_to_config
24
+ from .embeddings import LabelEmbedding
25
+ from .modeling_utils import ModelMixin
26
+ from .transformer_2d import Transformer2DModelOutput
27
+
28
+
29
+ def TypePromote(x, y):
30
+ TYPE_PROMOTE_DICT = {
31
+ "INT16FP16": "float16",
32
+ "INT16FP32": "float32",
33
+ "INT16FP64": "float64",
34
+ "INT32FP16": "float32",
35
+ "INT32FP32": "float32",
36
+ "INT32FP64": "float64",
37
+ "INT64FP16": "float64",
38
+ "INT64FP32": "float64",
39
+ "INT64FP64": "float64",
40
+ }
41
+ if x.dtype.name + y.dtype.name in TYPE_PROMOTE_DICT:
42
+ promote_type = TYPE_PROMOTE_DICT[x.dtype.name + y.dtype.name]
43
+ elif y.dtype.name + x.dtype.name in TYPE_PROMOTE_DICT:
44
+ promote_type = TYPE_PROMOTE_DICT[y.dtype.name + x.dtype.name]
45
+ else:
46
+ return x, y
47
+ return x.cast(promote_type), y.cast(promote_type)
48
+
49
+
50
+ def modulate(x, shift, scale):
51
+ return x * (1 + scale.unsqueeze(axis=1)) + shift.unsqueeze(axis=1)
52
+
53
+
54
+ class TimestepEmbedder(nn.Layer):
55
+ """
56
+ Embeds scalar timesteps into vector representations.
57
+ """
58
+
59
+ def __init__(self, hidden_size, frequency_embedding_size=256):
60
+ super().__init__()
61
+ self.mlp = nn.Sequential(
62
+ nn.Linear(frequency_embedding_size, hidden_size),
63
+ nn.Silu(),
64
+ nn.Linear(hidden_size, hidden_size),
65
+ )
66
+ self.frequency_embedding_size = frequency_embedding_size
67
+
68
+ @staticmethod
69
+ def timestep_embedding(t, dim, max_period=10000):
70
+ """
71
+ Create sinusoidal timestep embeddings.
72
+ :param t: a 1-D Tensor of N indices, one per batch element.
73
+ These may be fractional.
74
+ :param dim: the dimension of the output.
75
+ :param max_period: controls the minimum frequency of the embeddings.
76
+ :return: an (N, D) Tensor of positional embeddings.
77
+ """
78
+ half = dim // 2
79
+ freqs = paddle.exp(x=-math.log(max_period) * paddle.arange(start=0, end=half, dtype="float32") / half)
80
+ args = t[:, (None)].astype(dtype="float32") * freqs[None]
81
+ embedding = paddle.concat(x=[paddle.cos(x=args), paddle.sin(x=args)], axis=-1)
82
+ if dim % 2:
83
+ embedding = paddle.concat(x=[embedding, paddle.zeros_like(x=embedding[:, :1])], axis=-1)
84
+ return embedding
85
+
86
+ def forward(self, t):
87
+ t_freq = self.timestep_embedding(t, self.frequency_embedding_size)
88
+ t_emb = self.mlp(t_freq.cast(self.mlp[0].weight.dtype))
89
+ return t_emb
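For reference, the sinusoidal embedding computed by `timestep_embedding` can be reproduced standalone to check its output shape and range; the sketch below (illustration only, not part of the diffed file, with an arbitrary `dim`) mirrors the formula used above.

```python
# Hedged sketch: sinusoidal timestep embedding with geometrically decaying frequencies.
import math
import paddle

def timestep_embedding(t, dim, max_period=10000):
    half = dim // 2
    freqs = paddle.exp(-math.log(max_period) * paddle.arange(0, half, dtype="float32") / half)
    args = t[:, None].astype("float32") * freqs[None]
    return paddle.concat([paddle.cos(args), paddle.sin(args)], axis=-1)

emb = timestep_embedding(paddle.to_tensor([0.0, 250.0, 999.0]), dim=8)
print(emb.shape)  # [3, 8]; all values lie in [-1, 1]
```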
90
+
91
+
92
+ class Attention(nn.Layer):
93
+ def __init__(self, dim, n_heads, n_kv_heads, qk_norm=True, fused_attn=True):
94
+ """
95
+ Initialize the Attention module.
96
+
97
+ Args:
98
+ dim (int): Number of input dimensions.
99
+ n_heads (int): Number of heads.
100
+ n_kv_heads (Optional[int]): Number of kv heads, if using GQA.
101
+
102
+ Attributes:
103
+ n_kv_heads (int): Number of key and value heads.
104
+ n_local_heads (int): Number of local query heads.
105
+ n_local_kv_heads (int): Number of local key and value heads.
106
+ n_rep (int): Number of repetitions for local heads.
107
+ head_dim (int): Dimension size of each attention head.
108
+ wq (nn.Linear): Linear transformation for queries.
109
+ wk (nn.Linear): Linear transformation for keys.
110
+ wv (nn.Linear): Linear transformation for values.
111
+ wo (nn.Linear): Linear transformation for output.
112
+ cache_k (paddle.Tensor): Cached keys for attention.
113
+ cache_v (paddle.Tensor): Cached values for attention.
114
+
115
+ """
116
+ super().__init__()
117
+ self.n_kv_heads = n_heads if n_kv_heads is None else n_kv_heads
118
+ self.n_local_heads = n_heads
119
+ self.n_local_kv_heads = self.n_kv_heads
120
+ self.n_rep = self.n_local_heads // self.n_local_kv_heads
121
+ self.head_dim = dim // n_heads
122
+
123
+ self.wq = nn.Linear(dim, n_heads * self.head_dim, bias_attr=False)
124
+ self.wk = nn.Linear(dim, self.n_kv_heads * self.head_dim, bias_attr=False)
125
+ self.wv = nn.Linear(dim, self.n_kv_heads * self.head_dim, bias_attr=False)
126
+ self.wo = nn.Linear(n_heads * self.head_dim, dim, bias_attr=False)
127
+
128
+ if qk_norm:
129
+ self.q_norm = nn.LayerNorm(self.n_local_heads * self.head_dim)
130
+ self.k_norm = nn.LayerNorm(self.n_local_kv_heads * self.head_dim)
131
+ else:
132
+ self.q_norm = self.k_norm = nn.Identity()
133
+
134
+ self.fused_attn = fused_attn
135
+ self.scale = self.head_dim**-0.5
136
+
137
+ @staticmethod
138
+ def reshape_for_broadcast(freqs_cis, x):
139
+ """
140
+ Reshape frequency tensor for broadcasting it with another tensor.
141
+
142
+ This function reshapes the frequency tensor to have the same shape as
143
+ the target tensor 'x' for the purpose of broadcasting the frequency
144
+ tensor during element-wise operations.
145
+
146
+ Args:
147
+ freqs_cis (paddle.Tensor): Frequency tensor to be reshaped.
148
+ x (paddle.Tensor): Target tensor for broadcasting compatibility.
149
+
150
+ Returns:
151
+ paddle.Tensor: Reshaped frequency tensor.
152
+
153
+ Raises:
154
+ AssertionError: If the frequency tensor doesn't match the expected
155
+ shape.
156
+ AssertionError: If the target tensor 'x' doesn't have the expected
157
+ number of dimensions.
158
+ """
159
+ ndim = x.ndim
160
+ assert 0 <= 1 < ndim
161
+ assert tuple(freqs_cis.shape) == (tuple(x.shape)[1], tuple(x.shape)[-1])
162
+ shape = [(d if i == 1 or i == ndim - 1 else 1) for i, d in enumerate(tuple(x.shape))]
163
+ return freqs_cis.reshape([*shape])
164
+
165
+ @staticmethod
166
+ def apply_rotary_emb(xq, xk, freqs_cis):
167
+ """
168
+ Apply rotary embeddings to input tensors using the given frequency
169
+ tensor.
170
+
171
+ This function applies rotary embeddings to the given query 'xq' and
172
+ key 'xk' tensors using the provided frequency tensor 'freqs_cis'. The
173
+ input tensors are reshaped as complex numbers, and the frequency tensor
174
+ is reshaped for broadcasting compatibility. The resulting tensors
175
+ contain rotary embeddings and are returned as real tensors.
176
+
177
+ Args:
178
+ xq (paddle.Tensor): Query tensor to apply rotary embeddings.
179
+ xk (paddle.Tensor): Key tensor to apply rotary embeddings.
180
+ freqs_cis (paddle.Tensor): Precomputed frequency tensor for complex
181
+ exponentials.
182
+
183
+ Returns:
184
+ Tuple[paddle.Tensor, paddle.Tensor]: Tuple of modified query tensor
185
+ and key tensor with rotary embeddings.
186
+ """
187
+ with paddle.amp.auto_cast(enable=False):
188
+ xq_ = paddle.as_complex(xq.cast("float32").reshape([*tuple(xq.shape)[:-1], -1, 2]))
189
+ xk_ = paddle.as_complex(xk.cast("float32").reshape([*tuple(xk.shape)[:-1], -1, 2]))
190
+ freqs_cis = Attention.reshape_for_broadcast(freqs_cis, xq_)
191
+ xq_out = paddle.as_real(xq_ * freqs_cis).flatten(start_axis=3)
192
+ xk_out = paddle.as_real(xk_ * freqs_cis).flatten(start_axis=3)
193
+ return xq_out.cast(xq.dtype), xk_out.cast(xk.dtype)
194
+
195
+ def forward(self, x, freqs_cis):
196
+ """
197
+ Forward pass of the attention module.
198
+
199
+ Args:
200
+ x (paddle.Tensor): Input tensor.
201
+ freqs_cis (paddle.Tensor): Precomputed frequency tensor.
202
+
203
+ Returns:
204
+ paddle.Tensor: Output tensor after attention.
205
+
206
+ """
207
+ bsz, seqlen, _ = tuple(x.shape)
208
+ xq, xk, xv = self.wq(x), self.wk(x), self.wv(x)
209
+ dtype = xq.dtype
210
+
211
+ xq = self.q_norm(xq)
212
+ xk = self.k_norm(xk)
213
+
214
+ xq = xq.reshape([bsz, seqlen, self.n_local_heads, self.head_dim])
215
+ xk = xk.reshape([bsz, seqlen, self.n_local_kv_heads, self.head_dim])
216
+ xv = xv.reshape([bsz, seqlen, self.n_local_kv_heads, self.head_dim])
217
+
218
+ xq, xk = Attention.apply_rotary_emb(xq, xk, freqs_cis=freqs_cis)
219
+ xq, xk = xq.cast(dtype), xk.cast(dtype)
220
+
221
+ if dtype in [paddle.float16, paddle.bfloat16]:
222
+ output, _ = flash_attention(
223
+ xq,
224
+ xk,
225
+ xv,
226
+ dropout=0.0,
227
+ causal=False,
228
+ return_softmax=False,
229
+ )
230
+ else:
231
+ n_rep = self.n_local_heads // self.n_local_kv_heads
232
+ if n_rep > 1:
233
+ xk = xk.unsqueeze(axis=3).tile([1, 1, 1, n_rep, 1]).flatten(start_axis=2, stop_axis=3)
234
+ xv = xv.unsqueeze(axis=3).tile([1, 1, 1, n_rep, 1]).flatten(start_axis=2, stop_axis=3)
235
+
236
+ if self.fused_attn:
237
+ output = F.scaled_dot_product_attention_(
238
+ xq,
239
+ xk,
240
+ xv,
241
+ dropout_p=0.0,
242
+ is_causal=False,
243
+ )
244
+ else:
245
+ q = xq.transpose([0, 2, 1, 3]) * self.scale
246
+ attn = q @ xk.transpose([0, 2, 1, 3]).transpose([0, 1, 3, 2])
247
+ attn = F.softmax(attn, axis=-1)
248
+ output = attn @ xv.transpose([0, 2, 1, 3])
249
+ output = output.transpose([0, 2, 1, 3])
250
+
251
+ output = output.flatten(start_axis=-2)
252
+ return self.wo(output)
253
+
254
+
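A minimal standalone sketch of the rotary-embedding path used by Attention above (the same steps as precompute_freqs_cis, reshape_for_broadcast and apply_rotary_emb, written against plain paddle); the batch, sequence and head sizes are illustrative, not values taken from the model:

import paddle

bsz, seqlen, n_heads, head_dim = 2, 16, 8, 64

# complex frequencies, shape [seqlen, head_dim // 2] (what precompute_freqs_cis returns)
inv_freq = 1.0 / 10000.0 ** (paddle.arange(0, head_dim, 2)[: head_dim // 2].cast("float32") / head_dim)
angles = paddle.outer(paddle.arange(seqlen).cast("float32"), inv_freq)
freqs_cis = paddle.complex(paddle.cos(angles), paddle.sin(angles))

# view the query as complex pairs: [bsz, seqlen, n_heads, head_dim // 2]
xq = paddle.randn([bsz, seqlen, n_heads, head_dim])
xq_ = paddle.as_complex(xq.reshape([bsz, seqlen, n_heads, -1, 2]))

# broadcast over batch and heads (what reshape_for_broadcast produces), rotate, flatten back
freqs_cis = freqs_cis.reshape([1, seqlen, 1, head_dim // 2])
xq_rot = paddle.as_real(xq_ * freqs_cis).flatten(start_axis=3)
print(xq_rot.shape)  # [2, 16, 8, 64]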
255
+ class FeedForward(nn.Layer):
256
+ def __init__(self, dim, hidden_dim, multiple_of=256, ffn_dim_multiplier=None):
257
+ """
258
+ Initialize the FeedForward module.
259
+
260
+ Args:
261
+ dim (int): Input dimension.
262
+ hidden_dim (int): Hidden dimension of the feedforward layer.
263
+ multiple_of (int): Value to ensure hidden dimension is a multiple
264
+ of this value.
265
+ ffn_dim_multiplier (float, optional): Custom multiplier for hidden
266
+ dimension. Defaults to None.
267
+
268
+ Attributes:
269
+ w1 (nn.Linear): Linear transformation for the first
270
+ layer.
271
+ w2 (nn.Linear): Linear transformation for the second layer.
272
+ w3 (nn.Linear): Linear transformation for the third
273
+ layer.
274
+
275
+ """
276
+ super().__init__()
277
+ hidden_dim = int(2 * hidden_dim / 3)
278
+ if ffn_dim_multiplier is not None:
279
+ hidden_dim = int(ffn_dim_multiplier * hidden_dim)
280
+ hidden_dim = int(multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of))
281
+
282
+ self.w1 = nn.Linear(dim, hidden_dim, bias_attr=False)
283
+ self.w2 = nn.Linear(hidden_dim, dim, bias_attr=False)
284
+ self.w3 = nn.Linear(dim, hidden_dim, bias_attr=False)
285
+
286
+ def forward(self, x):
287
+ xw1 = F.silu(self.w1(x))
288
+ xw3 = self.w3(x)
289
+ output = self.w2(xw1 * xw3)
290
+ return output
291
+
292
+
293
+ class TransformerBlock(nn.Layer):
294
+ def __init__(
295
+ self,
296
+ layer_id: int,
297
+ dim: int,
298
+ n_heads: int,
299
+ n_kv_heads: int,
300
+ multiple_of: int,
301
+ mlp_ratio: float,
302
+ ffn_dim_multiplier: float,
303
+ norm_eps: float,
304
+ qk_norm: bool,
305
+ fused_attn: bool,
306
+ ) -> None:
307
+ """
308
+ Initialize a TransformerBlock.
309
+
310
+ Args:
311
+ layer_id (int): Identifier for the layer.
312
+ dim (int): Embedding dimension of the input features.
313
+ n_heads (int): Number of attention heads.
314
+ n_kv_heads (Optional[int]): Number of attention heads in key and
315
+ value features (if using GQA), or set to None for the same as
316
+ query.
317
+ multiple_of (int): Value to ensure hidden dimension is a multiple
318
+ of this value in the FeedForward block.
319
+ ffn_dim_multiplier (float, optional): Custom multiplier for hidden
320
+ dimension in the FeedForward block. Defaults to None.
321
+ norm_eps (float): A small value added to the norm layer
322
+ denominators to avoid division-by-zero.
323
+
324
+ Attributes:
325
+ n_heads (int): Number of attention heads.
326
+ dim (int): Dimension size of the model.
327
+ head_dim (int): Dimension size of each attention head.
328
+ attention (Attention): Attention module.
329
+ feed_forward (FeedForward): FeedForward module.
330
+ layer_id (int): Identifier for the layer.
331
+            attention_norm (nn.LayerNorm): Layer normalization applied before the attention block.
332
+            ffn_norm (nn.LayerNorm): Layer normalization applied before the feed-forward block.
333
+ adaLN_modulation (nn.Sequential): A small network to generate
334
+ feature modulation factors.
335
+
336
+ """
337
+ super().__init__()
338
+ self.dim = dim
339
+ self.head_dim = dim // n_heads
340
+ self.attention = Attention(dim, n_heads, n_kv_heads, qk_norm, fused_attn)
341
+ mlp_hidden_dim = int(dim * mlp_ratio)
342
+ self.feed_forward = FeedForward(
343
+ dim=dim, hidden_dim=mlp_hidden_dim, multiple_of=multiple_of, ffn_dim_multiplier=ffn_dim_multiplier
344
+ )
345
+ self.layer_id = layer_id
346
+ self.attention_norm = nn.LayerNorm(dim, epsilon=norm_eps, bias_attr=False)
347
+ self.ffn_norm = nn.LayerNorm(dim, epsilon=norm_eps, bias_attr=False)
348
+
349
+ self.adaLN_modulation = nn.Sequential(
350
+ nn.Silu(),
351
+ nn.Linear(min(dim, 1024), 6 * dim),
352
+ )
353
+
354
+ def forward(self, x, freqs_cis, adaln_input=None):
355
+ """
356
+ Perform a forward pass through the TransformerBlock.
357
+
358
+ Args:
359
+ x (paddle.Tensor): Input tensor.
360
+ freqs_cis (paddle.Tensor): Precomputed cosine and sine frequencies.
361
+            adaln_input (paddle.Tensor, optional): Conditioning embedding used for
362
+                adaLN modulation of the attention and feed-forward branches. Defaults to None.
363
+
364
+ Returns:
365
+ paddle.Tensor: Output tensor after applying attention and
366
+ feedforward layers.
367
+
368
+ """
369
+ if adaln_input is not None:
370
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.adaLN_modulation(adaln_input).chunk(
371
+ 6, axis=1
372
+ )
373
+ h = x + gate_msa.unsqueeze(1) * self.attention(
374
+ modulate(self.attention_norm(x), shift_msa, scale_msa), freqs_cis
375
+ )
376
+ out = h + gate_mlp.unsqueeze(1) * self.feed_forward(modulate(self.ffn_norm(h), shift_mlp, scale_mlp))
377
+ else:
378
+ h = x + self.attention(self.attention_norm(x), freqs_cis)
379
+ out = h + self.feed_forward(self.ffn_norm(h))
380
+ return out
381
+
382
+
383
+ class FinalLayer(paddle.nn.Layer):
384
+ def __init__(self, hidden_size, patch_size, out_channels):
385
+ super().__init__()
386
+ self.norm_final = paddle.nn.LayerNorm(hidden_size, weight_attr=False, bias_attr=False, epsilon=1e-06)
387
+ self.linear = nn.Linear(hidden_size, patch_size * patch_size * out_channels)
388
+ self.adaLN_modulation = nn.Sequential(nn.Silu(), nn.Linear(min(hidden_size, 1024), 2 * hidden_size))
389
+
390
+ def forward(self, x, c):
391
+ shift, scale = self.adaLN_modulation(c).chunk(2, axis=1)
392
+ x = modulate(self.norm_final(x), shift, scale)
393
+ x = self.linear(x)
394
+ return x
395
+
396
+
397
+ class DiTLLaMA2DModel(ModelMixin, ConfigMixin):
398
+ _supports_gradient_checkpointing = True
399
+ _use_memory_efficient_attention_xformers = True
400
+
401
+ @register_to_config
402
+ def __init__(
403
+ self,
404
+ sample_size: int = 32, # image_size // 8
405
+ patch_size: int = 2,
406
+ in_channels: int = 4,
407
+ out_channels: int = 8,
408
+ num_layers: int = 32,
409
+ num_attention_heads: int = 16,
410
+ attention_head_dim: int = 96,
411
+ mlp_ratio: float = 4.0,
412
+ n_kv_heads=None,
413
+ multiple_of: int = 256,
414
+ ffn_dim_multiplier=None,
415
+ norm_eps: float = 1e-05,
416
+ class_dropout_prob: float = 0.1,
417
+ num_classes: int = 1000,
418
+ learn_sigma: bool = True,
419
+ qk_norm: bool = True,
420
+ ):
421
+ super().__init__()
422
+ self.sample_size = sample_size
423
+ self.patch_size = patch_size
424
+ self.in_channels = in_channels
425
+ self.out_channels = in_channels * 2 if learn_sigma else in_channels
426
+ dim = attention_head_dim * num_attention_heads
427
+
428
+ self.num_layers = num_layers
429
+ self.num_attention_heads = num_attention_heads
430
+ self.mlp_ratio = mlp_ratio
431
+ self.multiple_of = multiple_of
432
+ self.ffn_dim_multiplier = ffn_dim_multiplier
433
+ self.norm_eps = norm_eps
434
+ self.class_dropout_prob = class_dropout_prob
435
+ self.num_classes = num_classes
436
+ self.learn_sigma = learn_sigma
437
+ self.qk_norm = qk_norm
438
+
439
+ self.gradient_checkpointing = True
440
+ self.fused_attn = True
441
+
442
+ self.x_embedder = nn.Linear(in_channels * patch_size**2, dim)
443
+ self.t_embedder = TimestepEmbedder(min(dim, 1024))
444
+ self.y_embedder = LabelEmbedding(num_classes, min(dim, 1024), class_dropout_prob)
445
+
446
+ # 2. Define transformers blocks
447
+ self.layers = nn.LayerList(
448
+ [
449
+ TransformerBlock(
450
+ layer_id=idx,
451
+ dim=dim,
452
+ n_heads=num_attention_heads,
453
+ n_kv_heads=n_kv_heads,
454
+ multiple_of=multiple_of,
455
+ mlp_ratio=mlp_ratio,
456
+ ffn_dim_multiplier=ffn_dim_multiplier,
457
+ norm_eps=norm_eps,
458
+ qk_norm=qk_norm,
459
+ fused_attn=self.fused_attn,
460
+ )
461
+ for idx in range(num_layers)
462
+ ]
463
+ )
464
+
465
+ # 3. Define output layers
466
+ self.final_layer = FinalLayer(dim, patch_size, self.out_channels)
467
+ self.freqs_cis = self.precompute_freqs_cis(dim // num_attention_heads, 4096)
468
+
469
+ def _set_gradient_checkpointing(self, module, value=False):
470
+ if hasattr(module, "gradient_checkpointing"):
471
+ module.gradient_checkpointing = value
472
+
473
+ def enable_gradient_checkpointing(self, value=True):
474
+ self.gradient_checkpointing = value
475
+
476
+ def enable_xformers_memory_efficient_attention(self, attention_op: Optional[str] = None):
477
+ self._use_memory_efficient_attention_xformers = True
478
+ self.fused_attn = True
479
+
480
+ def unpatchify(self, x):
481
+ """
482
+ Args:
483
+ x: (N, T, patch_size**2 * C)
484
+ imgs: (N, H, W, C)
485
+ """
486
+ c = self.out_channels
487
+ p = self.patch_size
488
+ h = w = int(tuple(x.shape)[1] ** 0.5)
489
+ assert h * w == tuple(x.shape)[1]
490
+
491
+ x = x.reshape(shape=([tuple(x.shape)[0], h, w, p, p, c]))
492
+ x = paddle.einsum("nhwpqc->nchpwq", x)
493
+ imgs = x.reshape(shape=([tuple(x.shape)[0], c, h * p, h * p]))
494
+ return imgs
495
+
496
+ def patchify(self, x):
497
+ B, C, H, W = tuple(x.shape)
498
+ assert (H, W) == (self.sample_size, self.sample_size)
499
+ pH = pW = self.patch_size
500
+ x = x.reshape([B, C, H // pH, pH, W // pW, pW])
501
+ x = x.transpose([0, 2, 4, 1, 3, 5]).flatten(start_axis=-3).flatten(start_axis=1, stop_axis=2)
502
+ return x
503
+
504
+ @staticmethod
505
+ def precompute_freqs_cis(dim: int, end: int, theta: float = 10000.0):
506
+ """
507
+ Precompute the frequency tensor for complex exponentials (cis) with
508
+ given dimensions.
509
+
510
+ This function calculates a frequency tensor with complex exponentials
511
+ using the given dimension 'dim' and the end index 'end'. The 'theta'
512
+ parameter scales the frequencies. The returned tensor contains complex
513
+ values in complex64 data type.
514
+
515
+ Args:
516
+ dim (int): Dimension of the frequency tensor.
517
+ end (int): End index for precomputing frequencies.
518
+ theta (float, optional): Scaling factor for frequency computation.
519
+ Defaults to 10000.0.
520
+
521
+ Returns:
522
+ paddle.Tensor: Precomputed frequency tensor with complex
523
+ exponentials.
524
+ """
525
+ freqs = 1.0 / theta ** (paddle.arange(start=0, end=dim, step=2)[: dim // 2].cast("float32") / dim)
526
+ t = paddle.arange(end=end)
527
+ input_0, vec2_0 = TypePromote(t, freqs)
528
+ freqs = paddle.outer(input_0, vec2_0).cast("float32")
529
+ freqs_cis = paddle.complex(
530
+ paddle.ones_like(freqs) * paddle.cos(freqs), paddle.ones_like(freqs) * paddle.sin(freqs)
531
+ )
532
+ return freqs_cis
533
+
534
+ def forward(
535
+ self,
536
+ hidden_states: paddle.Tensor,
537
+ timestep: paddle.Tensor,
538
+ class_labels: paddle.Tensor,
539
+ return_dict: bool = True,
540
+ ):
541
+ """
542
+ Args:
543
+ hidden_states: (N, C, H, W) tensor of spatial inputs (images or latent
544
+ representations of images)
545
+ timestep: (N,) tensor of diffusion timesteps
546
+ class_labels: (N,) tensor of class labels
547
+ """
548
+ hidden_states = hidden_states.cast(self.dtype)
549
+ timestep = timestep.cast(self.dtype)
550
+
551
+ # 1. Input
552
+ hidden_states = self.patchify(hidden_states)
553
+ x = self.x_embedder(hidden_states)
554
+ t = self.t_embedder(timestep)
555
+ y = self.y_embedder(class_labels)
556
+ adaln_input = t + y
557
+
558
+ # 2. Blocks
559
+ for i, layer in enumerate(self.layers):
560
+ if self.gradient_checkpointing:
561
+ x = paddle.distributed.fleet.utils.recompute(layer, x, self.freqs_cis[: x.shape[1]], adaln_input)
562
+ else:
563
+ x = layer(
564
+ x,
565
+ self.freqs_cis[: x.shape[1]],
566
+ adaln_input,
567
+ )
568
+
569
+ # 3. Output
570
+ hidden_states = self.final_layer(x, adaln_input)
571
+ output = self.unpatchify(hidden_states)
572
+
573
+ if not return_dict:
574
+ return (output,)
575
+
576
+ return Transformer2DModelOutput(sample=output)
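The patchify/unpatchify pair in DiTLLaMA2DModel is easy to sanity-check with plain paddle ops. The following is a hypothetical shape walk-through with illustrative sizes, not part of the model code (in the model, unpatchify is applied to the final layer's p*p*c-packed output, so this is a shape check rather than a value round trip):

import paddle

B, C, H, W, p = 2, 4, 32, 32, 2                       # latent batch and patch size
x = paddle.randn([B, C, H, W])

# patchify: (B, C, H, W) -> (B, H/p * W/p, C * p * p)
patches = x.reshape([B, C, H // p, p, W // p, p])
patches = patches.transpose([0, 2, 4, 1, 3, 5]).reshape([B, (H // p) * (W // p), C * p * p])
print(patches.shape)                                   # [2, 256, 16]

# unpatchify: token grid back to an image-shaped tensor via the same einsum as above
h = w = H // p
y = patches.reshape([B, h, w, p, p, C])
y = paddle.einsum("nhwpqc->nchpwq", y).reshape([B, C, h * p, w * p])
print(y.shape)                                         # [2, 4, 32, 32]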
VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/models/dual_transformer_2d.py ADDED
@@ -0,0 +1,158 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Optional
15
+
16
+ import paddle.nn as nn
17
+
18
+ from .transformer_2d import Transformer2DModel, Transformer2DModelOutput
19
+
20
+
21
+ class DualTransformer2DModel(nn.Layer):
22
+ """
23
+ Dual transformer wrapper that combines two `Transformer2DModel`s for mixed inference.
24
+
25
+ Parameters:
26
+ num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention.
27
+ attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head.
28
+ in_channels (`int`, *optional*):
29
+ Pass if the input is continuous. The number of channels in the input and output.
30
+ num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.
31
+ dropout (`float`, *optional*, defaults to 0.1): The dropout probability to use.
32
+ cross_attention_dim (`int`, *optional*): The number of encoder_hidden_states dimensions to use.
33
+ sample_size (`int`, *optional*): Pass if the input is discrete. The width of the latent images.
34
+ Note that this is fixed at training time as it is used for learning a number of position embeddings. See
35
+ `ImagePositionalEmbeddings`.
36
+ num_vector_embeds (`int`, *optional*):
37
+ Pass if the input is discrete. The number of classes of the vector embeddings of the latent pixels.
38
+ Includes the class for the masked latent pixel.
39
+ activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
40
+ num_embeds_ada_norm ( `int`, *optional*): Pass if at least one of the norm_layers is `AdaLayerNorm`.
41
+ The number of diffusion steps used during training. Note that this is fixed at training time as it is used
42
+ to learn a number of embeddings that are added to the hidden states. During inference, you can denoise for
43
+            up to but not more steps than `num_embeds_ada_norm`.
44
+ attention_bias (`bool`, *optional*):
45
+ Configure if the TransformerBlocks' attention should contain a bias parameter.
46
+ """
47
+
48
+ def __init__(
49
+ self,
50
+ num_attention_heads: int = 16,
51
+ attention_head_dim: int = 88,
52
+ in_channels: Optional[int] = None,
53
+ num_layers: int = 1,
54
+ dropout: float = 0.0,
55
+ norm_num_groups: int = 32,
56
+ cross_attention_dim: Optional[int] = None,
57
+ attention_bias: bool = False,
58
+ sample_size: Optional[int] = None,
59
+ num_vector_embeds: Optional[int] = None,
60
+ activation_fn: str = "geglu",
61
+ num_embeds_ada_norm: Optional[int] = None,
62
+ ):
63
+ super().__init__()
64
+ self.transformers = nn.LayerList(
65
+ [
66
+ Transformer2DModel(
67
+ num_attention_heads=num_attention_heads,
68
+ attention_head_dim=attention_head_dim,
69
+ in_channels=in_channels,
70
+ num_layers=num_layers,
71
+ dropout=dropout,
72
+ norm_num_groups=norm_num_groups,
73
+ cross_attention_dim=cross_attention_dim,
74
+ attention_bias=attention_bias,
75
+ sample_size=sample_size,
76
+ num_vector_embeds=num_vector_embeds,
77
+ activation_fn=activation_fn,
78
+ num_embeds_ada_norm=num_embeds_ada_norm,
79
+ )
80
+ for _ in range(2)
81
+ ]
82
+ )
83
+
84
+ # Variables that can be set by a pipeline:
85
+
86
+ # The ratio of transformer1 to transformer2's output states to be combined during inference
87
+ self.mix_ratio = 0.5
88
+
89
+ # The shape of `encoder_hidden_states` is expected to be
90
+ # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
91
+ self.condition_lengths = [77, 257]
92
+
93
+ # Which transformer to use to encode which condition.
94
+ # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
95
+ self.transformer_index_for_condition = [1, 0]
96
+
97
+ def forward(
98
+ self,
99
+ hidden_states,
100
+ encoder_hidden_states,
101
+ timestep=None,
102
+ added_cond_kwargs=None,
103
+ class_labels=None,
104
+ cross_attention_kwargs=None,
105
+ attention_mask=None,
106
+ encoder_attention_mask=None,
107
+ return_dict: bool = True,
108
+ ):
109
+ """
110
+ Args:
111
+ hidden_states ( When discrete, `paddle.Tensor` of shape `(batch size, num latent pixels)`.
112
+ When continuous, `paddle.Tensor` of shape `(batch size, channel, height, width)`): Input
113
+ hidden_states.
114
+ encoder_hidden_states ( `paddle.Tensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*):
115
+ Conditional embeddings for cross attention layer. If not given, cross-attention defaults to
116
+ self-attention.
117
+ timestep ( `paddle.Tensor`, *optional*):
118
+ Optional timestep to be applied as an embedding in AdaLayerNorm's. Used to indicate denoising step.
119
+ attention_mask (`paddle.Tensor`, *optional*):
120
+ Optional attention mask to be applied in Attention.
121
+ cross_attention_kwargs (`dict`, *optional*):
122
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
123
+ `self.processor` in
124
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
125
+ return_dict (`bool`, *optional*, defaults to `True`):
126
+ Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.
127
+
128
+ Returns:
129
+ [`~models.transformer_2d.Transformer2DModelOutput`] or `tuple`:
130
+ [`~models.transformer_2d.Transformer2DModelOutput`] if `return_dict` is True, otherwise a `tuple`. When
131
+ returning a tuple, the first element is the sample tensor.
132
+ """
133
+ input_states = hidden_states
134
+
135
+ encoded_states = []
136
+ tokens_start = 0
137
+ # attention_mask is not used yet
138
+ for i in range(2):
139
+ # for each of the two transformers, pass the corresponding condition tokens
140
+ condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
141
+ transformer_index = self.transformer_index_for_condition[i]
142
+ encoded_state = self.transformers[transformer_index](
143
+ input_states,
144
+ encoder_hidden_states=condition_state,
145
+ timestep=timestep,
146
+ cross_attention_kwargs=cross_attention_kwargs,
147
+ return_dict=False,
148
+ )[0]
149
+ encoded_states.append(encoded_state - input_states)
150
+ tokens_start += self.condition_lengths[i]
151
+
152
+ output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
153
+ output_states = output_states + input_states
154
+
155
+ if not return_dict:
156
+ return (output_states,)
157
+
158
+ return Transformer2DModelOutput(sample=output_states)
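The mixing rule in DualTransformer2DModel.forward reduces to a weighted sum of the two transformers' residuals added back onto the input. A hypothetical sketch with random tensors standing in for the transformer outputs (shapes are illustrative):

import paddle

hidden = paddle.randn([2, 320, 64, 64])        # input_states
enc = paddle.randn([2, 77 + 257, 768])         # text tokens (77) followed by image tokens (257)
cond_a, cond_b = enc[:, :77], enc[:, 77:]      # split by condition_lengths

# stand-ins for transformers[i](hidden, cond) - hidden, i.e. the per-branch residuals
res_a = paddle.randn(hidden.shape)
res_b = paddle.randn(hidden.shape)

mix_ratio = 0.5
out = res_a * mix_ratio + res_b * (1 - mix_ratio) + hidden
print(out.shape)                                # [2, 320, 64, 64]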
VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/models/ema.py ADDED
@@ -0,0 +1,109 @@
1
+ # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
2
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import paddle
17
+ from paddle import nn
18
+
19
+
20
+ class LitEma(nn.Layer):
21
+ """
22
+ Exponential Moving Average (EMA) of model updates
23
+
24
+ Parameters:
25
+        model: The model whose parameters the EMA shadows.
26
+        decay: The exponential decay. Default 0.9999.
27
+        use_num_updates: Whether to use the number of updates when computing
28
+ averages.
29
+ """
30
+
31
+ def __init__(self, model, decay=0.9999, use_num_upates=True):
32
+ super().__init__()
33
+ if decay < 0.0 or decay > 1.0:
34
+ raise ValueError("Decay must be between 0 and 1")
35
+
36
+ self.m_name2s_name = {}
37
+ # 0-d tensor broadcasting is not supported during the Sharding initialization phase
38
+ # self.register_buffer("decay", paddle.to_tensor(decay, dtype=paddle.float32))
39
+ # self.register_buffer(
40
+ # "num_updates",
41
+ # paddle.to_tensor(0, dtype=paddle.int64) if use_num_upates else paddle.to_tensor(-1, dtype=paddle.int64),
42
+ # )
43
+ self.decay = decay
44
+ self.num_updates = 0 if use_num_upates else -1
45
+
46
+ for name, p in model.named_parameters():
47
+ if not p.stop_gradient:
48
+ # remove as '.'-character is not allowed in buffers
49
+ s_name = name.replace(".", "")
50
+ self.m_name2s_name.update({name: s_name})
51
+ self.register_buffer(s_name, p.clone().detach().astype('float32'))
52
+
53
+ self.collected_params = []
54
+
55
+ def forward(self, model):
56
+ decay = self.decay
57
+
58
+ if self.num_updates >= 0:
59
+ self.num_updates += 1
60
+ decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))
61
+
62
+ one_minus_decay = 1.0 - decay
63
+
64
+ with paddle.no_grad():
65
+ m_param = dict(model.named_parameters())
66
+ shadow_params = dict(self.named_buffers())
67
+
68
+ for key in m_param:
69
+ if not m_param[key].stop_gradient:
70
+ sname = self.m_name2s_name[key]
71
+ shadow_params[sname].scale_(decay)
72
+ shadow_params[sname].add_(m_param[key].astype('float32') * one_minus_decay)
73
+ else:
74
+ assert key not in self.m_name2s_name
75
+
76
+ def copy_to(self, model):
77
+ m_param = dict(model.named_parameters())
78
+ shadow_params = dict(self.named_buffers())
79
+ for key in m_param:
80
+ if not m_param[key].stop_gradient:
81
+ # allow dtype cast
82
+ m_param[key].copy_(shadow_params[self.m_name2s_name[key]].cast(m_param[key].dtype), False)
83
+ else:
84
+ assert key not in self.m_name2s_name
85
+
86
+ def store(self, parameters):
87
+ """
88
+ Save the current parameters for restoring later.
89
+ Args:
90
+ parameters: Iterable of `EagerParamBase`; the parameters to be
91
+ temporarily stored.
92
+ """
93
+ self.collected_params = [param.detach().cpu().clone() for param in parameters]
94
+
95
+ def restore(self, parameters):
96
+ """
97
+ Restore the parameters stored with the `store` method.
98
+ Useful to validate the model with EMA parameters without affecting the
99
+ original optimization process. Store the parameters before the
100
+ `copy_to` method. After validation (or model saving), use this to
101
+ restore the former parameters.
102
+ Args:
103
+ parameters: Iterable of `EagerParamBase`; the parameters to be
104
+ updated with the stored parameters.
105
+ """
106
+ for c_param, param in zip(self.collected_params, parameters):
107
+ # allow dtype cast
108
+ param.copy_(c_param.cast(param.dtype), False)
109
+ self.collected_params = None
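A hypothetical training/evaluation pattern for LitEma; the toy nn.Linear model is a placeholder and the import path is assumed from where this file is added in the commit:

from paddle import nn

from ppdiffusers.models.ema import LitEma  # assumed import path

model = nn.Linear(8, 8)
ema = LitEma(model, decay=0.999)

# after every optimizer step, fold the live weights into the shadow (EMA) copy
ema(model)

# evaluate with EMA weights, then put the live weights back
live_params = list(model.parameters())
ema.store(live_params)
ema.copy_to(model)
# ... run validation or save the EMA checkpoint here ...
ema.restore(live_params)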
VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/models/lvdm_aemodules3d.py ADDED
@@ -0,0 +1,219 @@
1
+ # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import math
16
+
17
+ import numpy as np
18
+ import paddle
19
+
20
+
21
+ def silu(x):
22
+ return x * paddle.nn.functional.sigmoid(x=x)
23
+
24
+
25
+ class SiLU(paddle.nn.Layer):
26
+ def __init__(self):
27
+ super(SiLU, self).__init__()
28
+
29
+ def forward(self, x):
30
+ return silu(x)
31
+
32
+
33
+ def hinge_d_loss(logits_real, logits_fake):
34
+ loss_real = paddle.mean(x=paddle.nn.functional.relu(x=1.0 - logits_real))
35
+ loss_fake = paddle.mean(x=paddle.nn.functional.relu(x=1.0 + logits_fake))
36
+ d_loss = 0.5 * (loss_real + loss_fake)
37
+ return d_loss
38
+
39
+
40
+ def vanilla_d_loss(logits_real, logits_fake):
41
+ d_loss = 0.5 * (
42
+ paddle.mean(x=paddle.nn.functional.softplus(x=-logits_real))
43
+ + paddle.mean(x=paddle.nn.functional.softplus(x=logits_fake))
44
+ )
45
+ return d_loss
46
+
47
+
48
+ def Normalize(in_channels, norm_type="group"):
49
+ assert norm_type in ["group", "batch"]
50
+ if norm_type == "group":
51
+ return paddle.nn.GroupNorm(
52
+ num_groups=32, num_channels=in_channels, epsilon=1e-06, weight_attr=None, bias_attr=None
53
+ )
54
+ elif norm_type == "batch":
55
+ return paddle.nn.SyncBatchNorm(in_channels)
56
+
57
+
58
+ class ResBlock(paddle.nn.Layer):
59
+ def __init__(
60
+ self,
61
+ in_channels,
62
+ out_channels=None,
63
+ conv_shortcut=False,
64
+ dropout=0.0,
65
+ norm_type="group",
66
+ padding_type="replicate",
67
+ ):
68
+ super().__init__()
69
+ self.in_channels = in_channels
70
+ out_channels = in_channels if out_channels is None else out_channels
71
+ self.out_channels = out_channels
72
+ self.use_conv_shortcut = conv_shortcut
73
+ self.norm1 = Normalize(in_channels, norm_type)
74
+ self.conv1 = SamePadConv3d(in_channels, out_channels, kernel_size=3, padding_type=padding_type)
75
+ self.dropout = paddle.nn.Dropout(p=dropout)
76
+        self.norm2 = Normalize(out_channels, norm_type)  # normalizes conv1's output, which has out_channels
77
+ self.conv2 = SamePadConv3d(out_channels, out_channels, kernel_size=3, padding_type=padding_type)
78
+ if self.in_channels != self.out_channels:
79
+ self.conv_shortcut = SamePadConv3d(in_channels, out_channels, kernel_size=3, padding_type=padding_type)
80
+
81
+ def forward(self, x):
82
+ h = x
83
+ h = self.norm1(h)
84
+ h = silu(h)
85
+ h = self.conv1(h)
86
+ h = self.norm2(h)
87
+ h = silu(h)
88
+ h = self.conv2(h)
89
+ if self.in_channels != self.out_channels:
90
+ x = self.conv_shortcut(x)
91
+ return x + h
92
+
93
+
94
+ class SamePadConv3d(paddle.nn.Layer):
95
+ def __init__(self, in_channels, out_channels, kernel_size, stride=1, bias=True, padding_type="replicate"):
96
+ super().__init__()
97
+ if isinstance(kernel_size, int):
98
+ kernel_size = (kernel_size,) * 3
99
+ if isinstance(stride, int):
100
+ stride = (stride,) * 3
101
+ total_pad = tuple([(k - s) for k, s in zip(kernel_size, stride)])
102
+ pad_input = []
103
+ for p in total_pad[::-1]:
104
+ pad_input.append((p // 2 + p % 2, p // 2))
105
+ pad_input = sum(pad_input, tuple())
106
+ self.pad_input = pad_input
107
+ self.padding_type = padding_type
108
+ self.conv = paddle.nn.Conv3D(
109
+ in_channels=in_channels,
110
+ out_channels=out_channels,
111
+ kernel_size=kernel_size,
112
+ stride=stride,
113
+ padding=0,
114
+ bias_attr=bias,
115
+ )
116
+ self.weight = self.conv.weight
117
+
118
+ def forward(self, x):
119
+ return self.conv(
120
+ paddle.nn.functional.pad(x=x, pad=self.pad_input, mode=self.padding_type, data_format="NCDHW")
121
+ )
122
+
123
+
124
+ class SamePadConvTranspose3d(paddle.nn.Layer):
125
+ def __init__(self, in_channels, out_channels, kernel_size, stride=1, bias=True, padding_type="replicate"):
126
+ super().__init__()
127
+ if isinstance(kernel_size, int):
128
+ kernel_size = (kernel_size,) * 3
129
+ if isinstance(stride, int):
130
+ stride = (stride,) * 3
131
+ total_pad = tuple([(k - s) for k, s in zip(kernel_size, stride)])
132
+ pad_input = []
133
+ for p in total_pad[::-1]:
134
+ pad_input.append((p // 2 + p % 2, p // 2))
135
+ pad_input = sum(pad_input, tuple())
136
+ self.pad_input = pad_input
137
+ self.padding_type = padding_type
138
+ self.convt = paddle.nn.Conv3DTranspose(
139
+ in_channels=in_channels,
140
+ out_channels=out_channels,
141
+ kernel_size=kernel_size,
142
+ stride=stride,
143
+ padding=tuple([(k - 1) for k in kernel_size]),
144
+ bias_attr=bias,
145
+ )
146
+
147
+ def forward(self, x):
148
+ return self.convt(
149
+ paddle.nn.functional.pad(x=x, pad=self.pad_input, mode=self.padding_type, data_format="NCDHW")
150
+ )
151
+
152
+
153
+ class Encoder(paddle.nn.Layer):
154
+ def __init__(
155
+ self, n_hiddens, downsample, z_channels, double_z, image_channel=3, norm_type="group", padding_type="replicate"
156
+ ):
157
+ super().__init__()
158
+ n_times_downsample = np.array([int(math.log2(d)) for d in downsample])
159
+ self.conv_blocks = paddle.nn.LayerList()
160
+ max_ds = n_times_downsample.max()
161
+ self.conv_first = SamePadConv3d(image_channel, n_hiddens, kernel_size=3, padding_type=padding_type)
162
+ for i in range(max_ds):
163
+ block = paddle.nn.Layer()
164
+ in_channels = n_hiddens * 2**i
165
+ out_channels = n_hiddens * 2 ** (i + 1)
166
+ stride = tuple([(2 if d > 0 else 1) for d in n_times_downsample])
167
+ block.down = SamePadConv3d(in_channels, out_channels, 4, stride=stride, padding_type=padding_type)
168
+ block.res = ResBlock(out_channels, out_channels, norm_type=norm_type)
169
+ self.conv_blocks.append(block)
170
+ n_times_downsample -= 1
171
+ self.final_block = paddle.nn.Sequential(
172
+ Normalize(out_channels, norm_type),
173
+ SiLU(),
174
+ SamePadConv3d(
175
+ out_channels,
176
+ 2 * z_channels if double_z else z_channels,
177
+ kernel_size=3,
178
+ stride=1,
179
+ padding_type=padding_type,
180
+ ),
181
+ )
182
+ self.out_channels = out_channels
183
+
184
+ def forward(self, x):
185
+ h = self.conv_first(x)
186
+ for block in self.conv_blocks:
187
+ h = block.down(h)
188
+ h = block.res(h)
189
+ h = self.final_block(h)
190
+ return h
191
+
192
+
193
+ class Decoder(paddle.nn.Layer):
194
+ def __init__(self, n_hiddens, upsample, z_channels, image_channel, norm_type="group"):
195
+ super().__init__()
196
+ n_times_upsample = np.array([int(math.log2(d)) for d in upsample])
197
+ max_us = n_times_upsample.max()
198
+ in_channels = z_channels
199
+ self.conv_blocks = paddle.nn.LayerList()
200
+ for i in range(max_us):
201
+ block = paddle.nn.Layer()
202
+ in_channels = in_channels if i == 0 else n_hiddens * 2 ** (max_us - i + 1)
203
+ out_channels = n_hiddens * 2 ** (max_us - i)
204
+ us = tuple([(2 if d > 0 else 1) for d in n_times_upsample])
205
+ block.up = SamePadConvTranspose3d(in_channels, out_channels, 4, stride=us)
206
+ block.res1 = ResBlock(out_channels, out_channels, norm_type=norm_type)
207
+ block.res2 = ResBlock(out_channels, out_channels, norm_type=norm_type)
208
+ self.conv_blocks.append(block)
209
+ n_times_upsample -= 1
210
+ self.conv_out = SamePadConv3d(out_channels, image_channel, kernel_size=3)
211
+
212
+ def forward(self, x):
213
+ h = x
214
+ for i, block in enumerate(self.conv_blocks):
215
+ h = block.up(h)
216
+ h = block.res1(h)
217
+ h = block.res2(h)
218
+ h = self.conv_out(h)
219
+ return h
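SamePadConv3d pads each dimension by kernel_size - stride (split front/back, replicate mode) before an unpadded Conv3D, so the output size is exactly input / stride. A hypothetical shape check, assuming the import path where this file is added:

import paddle

from ppdiffusers.models.lvdm_aemodules3d import SamePadConv3d  # assumed import path

conv = SamePadConv3d(in_channels=3, out_channels=16, kernel_size=4, stride=2)
x = paddle.randn([1, 3, 8, 32, 32])    # NCDHW: 8 frames of 32x32
print(conv(x).shape)                    # [1, 16, 4, 16, 16] -> every dimension halved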
VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/models/lvdm_distributions.py ADDED
@@ -0,0 +1,100 @@
1
+ # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import numpy as np
16
+ import paddle
17
+
18
+
19
+ class AbstractDistribution:
20
+ def sample(self):
21
+ raise NotImplementedError()
22
+
23
+ def mode(self):
24
+ raise NotImplementedError()
25
+
26
+
27
+ class DiracDistribution(AbstractDistribution):
28
+ def __init__(self, value):
29
+ self.value = value
30
+
31
+ def sample(self):
32
+ return self.value
33
+
34
+ def mode(self):
35
+ return self.value
36
+
37
+
38
+ class DiagonalGaussianDistribution(object):
39
+ def __init__(self, parameters, deterministic=False):
40
+ self.parameters = parameters
41
+ self.mean, self.logvar = paddle.chunk(x=parameters, chunks=2, axis=1)
42
+ self.logvar = paddle.clip(x=self.logvar, min=-30.0, max=20.0)
43
+ self.deterministic = deterministic
44
+ self.std = paddle.exp(x=(0.5 * self.logvar).astype("float32"))
45
+ self.var = paddle.exp(x=self.logvar.astype("float32"))
46
+ if self.deterministic:
47
+ self.var = self.std = paddle.zeros_like(x=self.mean)
48
+
49
+ def sample(self, noise=None):
50
+ if noise is None:
51
+ noise = paddle.randn(shape=self.mean.shape)
52
+ x = self.mean + self.std * noise
53
+ return x
54
+
55
+ def kl(self, other=None):
56
+ if self.deterministic:
57
+ return paddle.to_tensor(data=[0.0], dtype="float32")
58
+ elif other is None:
59
+ return 0.5 * paddle.sum(x=paddle.pow(x=self.mean, y=2) + self.var - 1.0 - self.logvar, axis=[1, 2, 3])
60
+ else:
61
+ return 0.5 * paddle.sum(
62
+ x=paddle.pow(x=self.mean - other.mean, y=2) / other.var
63
+ + self.var / other.var
64
+ - 1.0
65
+ - self.logvar
66
+ + other.logvar,
67
+ axis=[1, 2, 3],
68
+ )
69
+
70
+ def nll(self, sample, dims=[1, 2, 3]):
71
+ if self.deterministic:
72
+ return paddle.to_tensor(data=[0.0], dtype="float32")
73
+ logtwopi = np.log(2.0 * np.pi)
74
+ return 0.5 * paddle.sum(x=logtwopi + self.logvar + paddle.pow(x=sample - self.mean, y=2) / self.var, axis=dims)
75
+
76
+ def mode(self):
77
+ return self.mean
78
+
79
+
80
+ def normal_kl(mean1, logvar1, mean2, logvar2):
81
+ """
82
+ source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12
83
+ Compute the KL divergence between two gaussians.
84
+ Shapes are automatically broadcasted, so batches can be compared to
85
+ scalars, among other use cases.
86
+ """
87
+ tensor = None
88
+ for obj in (mean1, logvar1, mean2, logvar2):
89
+ if isinstance(obj, paddle.Tensor):
90
+ tensor = obj
91
+ break
92
+ assert tensor is not None, "at least one argument must be a Tensor"
93
+ logvar1, logvar2 = [(x if isinstance(x, paddle.Tensor) else paddle.to_tensor(data=x)) for x in (logvar1, logvar2)]
94
+ return 0.5 * (
95
+ -1.0
96
+ + logvar2
97
+ - logvar1
98
+ + paddle.exp(x=(logvar1 - logvar2).astype("float32"))
99
+ + (mean1 - mean2) ** 2 * paddle.exp(x=(-logvar2).astype("float32"))
100
+ )
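A hypothetical sketch of how DiagonalGaussianDistribution is typically used: the encoder output stacks mean and log-variance along the channel axis, sample() draws a reparameterised latent, and kl() gives the per-sample divergence from N(0, I). The import path is assumed from where this file is added:

import paddle

from ppdiffusers.models.lvdm_distributions import DiagonalGaussianDistribution  # assumed path

params = paddle.randn([2, 8, 16, 16])   # 2 * latent channels along axis 1: mean ++ logvar
dist = DiagonalGaussianDistribution(params)

z = dist.sample()                        # mean + std * noise -> shape [2, 4, 16, 16]
kl = dist.kl()                           # 0.5 * sum(mean^2 + var - 1 - logvar) over C, H, W -> shape [2]
print(z.shape, kl.shape)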
VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/models/lvdm_unet_3d.py ADDED
@@ -0,0 +1,713 @@
1
+ # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from abc import abstractmethod
16
+ from dataclasses import dataclass
17
+
18
+ import paddle
19
+ from einops import rearrange
20
+ from paddle.distributed.fleet.utils import recompute
21
+
22
+ from ..configuration_utils import ConfigMixin, register_to_config
23
+ from ..utils import BaseOutput
24
+ from .lvdm_attention_temporal import SpatialTemporalTransformer, STAttentionBlock
25
+ from .lvdm_util import (
26
+ avg_pool_nd,
27
+ conv_nd,
28
+ linear,
29
+ nonlinearity,
30
+ normalization,
31
+ timestep_embedding,
32
+ zero_module,
33
+ )
34
+ from .modeling_utils import ModelMixin
35
+
36
+
37
+ @dataclass
38
+ class LVDMUNet3DModelOutput(BaseOutput):
39
+ """
40
+ Args:
41
+ sample (`paddle.Tensor` of shape `(batch_size, num_frames, num_channels, height, width)`):
42
+ Hidden states conditioned on `encoder_hidden_states` input. Output of last layer of model.
43
+ """
44
+
45
+ sample: paddle.Tensor
46
+
47
+
48
+ def convert_module_to_f16(x):
49
+ pass
50
+
51
+
52
+ def convert_module_to_f32(x):
53
+ pass
54
+
55
+
56
+ class TimestepBlock(paddle.nn.Layer):
57
+ """
58
+ Any module where forward() takes timestep embeddings as a second argument.
59
+ """
60
+
61
+ @abstractmethod
62
+ def forward(self, x, emb):
63
+ """
64
+ Apply the module to `x` given `emb` timestep embeddings.
65
+ """
66
+
67
+
68
+ class TimestepEmbedSequential(paddle.nn.Sequential, TimestepBlock):
69
+ """
70
+ A sequential module that passes timestep embeddings to the children that
71
+ support it as an extra input.
72
+ """
73
+
74
+ def forward(self, x, emb, context=None, **kwargs):
75
+ for layer in self:
76
+ if isinstance(layer, TimestepBlock):
77
+ x = layer(x, emb, **kwargs)
78
+ # elif isinstance(layer, STTransformerClass):
79
+ elif isinstance(layer, SpatialTemporalTransformer):
80
+ x = layer(x, context, **kwargs)
81
+ else:
82
+ x = layer(x)
83
+ return x
84
+
85
+
86
+ class Upsample(paddle.nn.Layer):
87
+ """
88
+ An upsampling layer with an optional convolution.
89
+ :param channels: channels in the inputs and outputs.
90
+ :param use_conv: a bool determining if a convolution is applied.
91
+ :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
92
+ upsampling occurs in the inner-two dimensions.
93
+ """
94
+
95
+ def __init__(self, channels, use_conv, dims=2, out_channels=None, kernel_size_t=3, padding_t=1):
96
+ super().__init__()
97
+ self.channels = channels
98
+ self.out_channels = out_channels or channels
99
+ self.use_conv = use_conv
100
+ self.dims = dims
101
+ if use_conv:
102
+ self.conv = conv_nd(
103
+ dims, self.channels, self.out_channels, (kernel_size_t, 3, 3), padding=(padding_t, 1, 1)
104
+ )
105
+
106
+ def forward(self, x):
107
+ assert x.shape[1] == self.channels
108
+ if self.dims == 3:
109
+ x = paddle.nn.functional.interpolate(
110
+ x=x, size=(x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest", data_format="NCDHW"
111
+ )
112
+ else:
113
+ x = paddle.nn.functional.interpolate(x=x, scale_factor=2, mode="nearest")
114
+ if self.use_conv:
115
+ x = self.conv(x)
116
+ return x
117
+
118
+
119
+ class Downsample(paddle.nn.Layer):
120
+ """
121
+ A downsampling layer with an optional convolution.
122
+ :param channels: channels in the inputs and outputs.
123
+ :param use_conv: a bool determining if a convolution is applied.
124
+ :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
125
+ downsampling occurs in the inner-two dimensions.
126
+ """
127
+
128
+ def __init__(self, channels, use_conv, dims=2, out_channels=None, kernel_size_t=3, padding_t=1):
129
+ super().__init__()
130
+ self.channels = channels
131
+ self.out_channels = out_channels or channels
132
+ self.use_conv = use_conv
133
+ self.dims = dims
134
+ stride = 2 if dims != 3 else (1, 2, 2)
135
+ if use_conv:
136
+ self.op = conv_nd(
137
+ dims, self.channels, self.out_channels, (kernel_size_t, 3, 3), stride=stride, padding=(padding_t, 1, 1)
138
+ )
139
+ else:
140
+ assert self.channels == self.out_channels
141
+ self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)
142
+
143
+ def forward(self, x):
144
+ assert x.shape[1] == self.channels
145
+ return self.op(x)
146
+
147
+
148
+ class ResBlock(TimestepBlock):
149
+ """
150
+ A residual block that can optionally change the number of channels.
151
+ :param channels: the number of input channels.
152
+ :param emb_channels: the number of timestep embedding channels.
153
+ :param dropout: the rate of dropout.
154
+ :param out_channels: if specified, the number of out channels.
155
+ :param use_conv: if True and out_channels is specified, use a spatial
156
+ convolution instead of a smaller 1x1 convolution to change the
157
+ channels in the skip connection.
158
+ :param dims: determines if the signal is 1D, 2D, or 3D.
159
+ :param use_checkpoint: if True, use gradient checkpointing on this module.
160
+ :param up: if True, use this block for upsampling.
161
+ :param down: if True, use this block for downsampling.
162
+ """
163
+
164
+ def __init__(
165
+ self,
166
+ channels,
167
+ emb_channels,
168
+ dropout,
169
+ out_channels=None,
170
+ use_conv=False,
171
+ use_scale_shift_norm=False,
172
+ dims=2,
173
+ use_checkpoint=False,
174
+ up=False,
175
+ down=False,
176
+ kernel_size_t=3,
177
+ padding_t=1,
178
+ nonlinearity_type="silu",
179
+ **kwargs
180
+ ):
181
+ super().__init__()
182
+ self.channels = channels
183
+ self.emb_channels = emb_channels
184
+ self.dropout = dropout
185
+ self.out_channels = out_channels or channels
186
+ self.use_conv = use_conv
187
+ self.use_checkpoint = use_checkpoint
188
+ self.use_scale_shift_norm = use_scale_shift_norm
189
+ self.nonlinearity_type = nonlinearity_type
190
+ self.in_layers = paddle.nn.Sequential(
191
+ normalization(channels),
192
+ nonlinearity(nonlinearity_type),
193
+ conv_nd(dims, channels, self.out_channels, (kernel_size_t, 3, 3), padding=(padding_t, 1, 1)),
194
+ )
195
+ self.updown = up or down
196
+ if up:
197
+ self.h_upd = Upsample(channels, False, dims, kernel_size_t=kernel_size_t, padding_t=padding_t)
198
+ self.x_upd = Upsample(channels, False, dims, kernel_size_t=kernel_size_t, padding_t=padding_t)
199
+ elif down:
200
+ self.h_upd = Downsample(channels, False, dims, kernel_size_t=kernel_size_t, padding_t=padding_t)
201
+ self.x_upd = Downsample(channels, False, dims, kernel_size_t=kernel_size_t, padding_t=padding_t)
202
+ else:
203
+ self.h_upd = self.x_upd = paddle.nn.Identity()
204
+ self.emb_layers = paddle.nn.Sequential(
205
+ nonlinearity(nonlinearity_type),
206
+ linear(emb_channels, 2 * self.out_channels if use_scale_shift_norm else self.out_channels),
207
+ )
208
+ self.out_layers = paddle.nn.Sequential(
209
+ normalization(self.out_channels),
210
+ nonlinearity(nonlinearity_type),
211
+ paddle.nn.Dropout(p=dropout),
212
+ zero_module(
213
+ conv_nd(dims, self.out_channels, self.out_channels, (kernel_size_t, 3, 3), padding=(padding_t, 1, 1))
214
+ ),
215
+ )
216
+ if self.out_channels == channels:
217
+ self.skip_connection = paddle.nn.Identity()
218
+ elif use_conv:
219
+ self.skip_connection = conv_nd(
220
+ dims, channels, self.out_channels, (kernel_size_t, 3, 3), padding=(padding_t, 1, 1)
221
+ )
222
+ else:
223
+ self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)
224
+
225
+ def forward(self, x, emb, **kwargs):
226
+ """
227
+ Apply the block to a Tensor, conditioned on a timestep embedding.
228
+ :param x: an [N x C x ...] Tensor of features.
229
+ :param emb: an [N x emb_channels] Tensor of timestep embeddings.
230
+ :return: an [N x C x ...] Tensor of outputs.
231
+ """
232
+ if self.use_checkpoint:
233
+ return recompute(self._forward, x, emb)
234
+ else:
235
+ return self._forward(x, emb)
236
+
237
+ def _forward(self, x, emb):
238
+ if self.updown:
239
+ in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
240
+ h = in_rest(x)
241
+ h = self.h_upd(h)
242
+ x = self.x_upd(x)
243
+ h = in_conv(h)
244
+ else:
245
+ h = self.in_layers(x)
246
+ emb_out = self.emb_layers(emb).astype(h.dtype)
247
+ if emb_out.dim() == 3:
248
+ emb_out = rearrange(emb_out, "b t c -> b c t")
249
+ while len(emb_out.shape) < h.dim():
250
+ emb_out = emb_out[..., None]
251
+ if self.use_scale_shift_norm:
252
+ out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
253
+ scale, shift = paddle.chunk(x=emb_out, chunks=2, axis=1)
254
+ h = out_norm(h) * (1 + scale) + shift
255
+ h = out_rest(h)
256
+ else:
257
+ h = h + emb_out
258
+ h = self.out_layers(h)
259
+ out = self.skip_connection(x) + h
260
+ return out
261
+
262
+
263
+ # def make_spatialtemporal_transformer(module_name='attention_temporal',
264
+ # class_name='SpatialTemporalTransformer'):
265
+ # module = __import__(f'.lvdm_attention_temporal', fromlist=[
266
+ # class_name])
267
+ # global STTransformerClass
268
+ # STTransformerClass = getattr(module, class_name)
269
+ # return STTransformerClass
270
+
271
+
272
+ def make_spatialtemporal_transformer(module_name="attention_temporal", class_name="SpatialTemporalTransformer"):
273
+ # Todo: Support loading more types of transformers
274
+ assert module_name == "attention_temporal" and class_name == "SpatialTemporalTransformer"
275
+ return SpatialTemporalTransformer
276
+
277
+
278
+ class LVDMUNet3DModel(ModelMixin, ConfigMixin):
279
+ """
280
+ The full UNet model with attention and timestep embedding.
281
+ :param in_channels: channels in the input Tensor.
282
+ :param model_channels: base channel count for the model.
283
+ :param out_channels: channels in the output Tensor.
284
+ :param num_res_blocks: number of residual blocks per downsample.
285
+ :param attention_resolutions: a collection of downsample rates at which
286
+ attention will take place. May be a set, list, or tuple.
287
+ For example, if this contains 4, then at 4x downsampling, attention
288
+ will be used.
289
+ :param dropout: the dropout probability.
290
+ :param channel_mult: channel multiplier for each level of the UNet.
291
+ :param conv_resample: if True, use learned convolutions for upsampling and
292
+ downsampling.
293
+ :param dims: determines if the signal is 1D, 2D, or 3D.
294
+ :param num_classes: if specified (as an int), then this model will be
295
+ class-conditional with `num_classes` classes.
296
+ :param use_checkpoint: use gradient checkpointing to reduce memory usage.
297
+ :param num_heads: the number of attention heads in each attention layer.
298
+ :param num_heads_channels: if specified, ignore num_heads and instead use
299
+ a fixed channel width per attention head.
300
+ :param num_heads_upsample: works with num_heads to set a different number
301
+ of heads for upsampling. Deprecated.
302
+ :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
303
+ :param resblock_updown: use residual blocks for up/downsampling.
304
+ """
305
+
306
+ @register_to_config
307
+ def __init__(
308
+ self,
309
+ image_size,
310
+ in_channels,
311
+ model_channels,
312
+ out_channels,
313
+ num_res_blocks,
314
+ attention_resolutions,
315
+ dropout=0,
316
+ channel_mult=(1, 2, 4, 8),
317
+ conv_resample=True,
318
+ dims=3,
319
+ num_classes=None,
320
+ use_checkpoint=False,
321
+ use_fp16=False,
322
+ num_heads=-1,
323
+ num_head_channels=-1,
324
+ num_heads_upsample=-1,
325
+ use_scale_shift_norm=False,
326
+ resblock_updown=False,
327
+ transformer_depth=1,
328
+ context_dim=None,
329
+ legacy=True,
330
+ kernel_size_t=1,
331
+ padding_t=1,
332
+ use_temporal_transformer=False,
333
+ temporal_length=None,
334
+ use_relative_position=False,
335
+ nonlinearity_type="silu",
336
+ ST_transformer_module="attention_temporal",
337
+ ST_transformer_class="SpatialTemporalTransformer",
338
+ **kwargs
339
+ ):
340
+ super().__init__()
341
+ if use_temporal_transformer:
342
+ assert (
343
+ context_dim is not None
344
+ ), "Fool!! You forgot to include the dimension of your cross-attention conditioning..."
345
+ if context_dim is not None:
346
+ assert (
347
+ use_temporal_transformer
348
+ ), "Fool!! You forgot to use the temporal transformer for your cross-attention conditioning..."
349
+ from omegaconf.listconfig import ListConfig
350
+
351
+ if type(context_dim) == ListConfig:
352
+ context_dim = list(context_dim)
353
+ if num_heads_upsample == -1:
354
+ num_heads_upsample = num_heads
355
+ if num_heads == -1:
356
+ assert num_head_channels != -1, "Either num_heads or num_head_channels has to be set"
357
+ if num_head_channels == -1:
358
+ assert num_heads != -1, "Either num_heads or num_head_channels has to be set"
359
+ self.image_size = image_size
360
+ self.in_channels = in_channels
361
+ self.model_channels = model_channels
362
+ self.out_channels = out_channels
363
+ self.num_res_blocks = num_res_blocks
364
+ self.attention_resolutions = attention_resolutions
365
+ self.dropout = dropout
366
+ self.channel_mult = channel_mult
367
+ self.conv_resample = conv_resample
368
+ self.num_classes = num_classes
369
+ self.use_checkpoint = use_checkpoint
370
+ # Todo: support customted self.dtype
371
+ # self.dtype = 'float16' if use_fp16 else 'float32'
372
+ self.num_heads = num_heads
373
+ self.num_head_channels = num_head_channels
374
+ self.num_heads_upsample = num_heads_upsample
375
+ self.use_relative_position = use_relative_position
376
+ self.temporal_length = temporal_length
377
+ self.nonlinearity_type = nonlinearity_type
378
+ time_embed_dim = model_channels * 4
379
+ self.time_embed_dim = time_embed_dim
380
+ self.time_embed = paddle.nn.Sequential(
381
+ linear(model_channels, time_embed_dim),
382
+ nonlinearity(nonlinearity_type),
383
+ linear(time_embed_dim, time_embed_dim),
384
+ )
385
+ if self.num_classes is not None:
386
+ self.label_emb = paddle.nn.Embedding(num_classes, time_embed_dim)
387
+ STTransformerClass = make_spatialtemporal_transformer(
388
+ module_name=ST_transformer_module, class_name=ST_transformer_class
389
+ )
390
+ self.input_blocks = paddle.nn.LayerList(
391
+ sublayers=[
392
+ TimestepEmbedSequential(
393
+ conv_nd(dims, in_channels, model_channels, (kernel_size_t, 3, 3), padding=(padding_t, 1, 1))
394
+ )
395
+ ]
396
+ )
397
+ self._feature_size = model_channels
398
+ input_block_chans = [model_channels]
399
+ ch = model_channels
400
+ ds = 1
401
+ for level, mult in enumerate(channel_mult):
402
+ for _ in range(num_res_blocks):
403
+ layers = [
404
+ ResBlock(
405
+ ch,
406
+ time_embed_dim,
407
+ dropout,
408
+ out_channels=mult * model_channels,
409
+ dims=dims,
410
+ use_checkpoint=use_checkpoint,
411
+ use_scale_shift_norm=use_scale_shift_norm,
412
+ kernel_size_t=kernel_size_t,
413
+ padding_t=padding_t,
414
+ nonlinearity_type=nonlinearity_type,
415
+ **kwargs,
416
+ )
417
+ ]
418
+ ch = mult * model_channels
419
+ if ds in attention_resolutions:
420
+ if num_head_channels == -1:
421
+ dim_head = ch // num_heads
422
+ else:
423
+ num_heads = ch // num_head_channels
424
+ dim_head = num_head_channels
425
+ if legacy:
426
+ dim_head = ch // num_heads if use_temporal_transformer else num_head_channels
427
+ layers.append(
428
+ STAttentionBlock(
429
+ ch,
430
+ use_checkpoint=use_checkpoint,
431
+ num_heads=num_heads,
432
+ num_head_channels=dim_head,
433
+ temporal_length=temporal_length,
434
+ use_relative_position=use_relative_position,
435
+ )
436
+ if not use_temporal_transformer
437
+ else STTransformerClass(
438
+ ch,
439
+ num_heads,
440
+ dim_head,
441
+ depth=transformer_depth,
442
+ context_dim=context_dim,
443
+ temporal_length=temporal_length,
444
+ use_relative_position=use_relative_position,
445
+ **kwargs,
446
+ )
447
+ )
448
+ self.input_blocks.append(TimestepEmbedSequential(*layers))
449
+ self._feature_size += ch
450
+ input_block_chans.append(ch)
451
+ if level != len(channel_mult) - 1:
452
+ out_ch = ch
453
+ self.input_blocks.append(
454
+ TimestepEmbedSequential(
455
+ ResBlock(
456
+ ch,
457
+ time_embed_dim,
458
+ dropout,
459
+ out_channels=out_ch,
460
+ dims=dims,
461
+ use_checkpoint=use_checkpoint,
462
+ use_scale_shift_norm=use_scale_shift_norm,
463
+ down=True,
464
+ kernel_size_t=kernel_size_t,
465
+ padding_t=padding_t,
466
+ nonlinearity_type=nonlinearity_type,
467
+ **kwargs,
468
+ )
469
+ if resblock_updown
470
+ else Downsample(
471
+ ch,
472
+ conv_resample,
473
+ dims=dims,
474
+ out_channels=out_ch,
475
+ kernel_size_t=kernel_size_t,
476
+ padding_t=padding_t,
477
+ )
478
+ )
479
+ )
480
+ ch = out_ch
481
+ input_block_chans.append(ch)
482
+ ds *= 2
483
+ self._feature_size += ch
484
+ if num_head_channels == -1:
485
+ dim_head = ch // num_heads
486
+ else:
487
+ num_heads = ch // num_head_channels
488
+ dim_head = num_head_channels
489
+ if legacy:
490
+ dim_head = ch // num_heads if use_temporal_transformer else num_head_channels
491
+ self.middle_block = TimestepEmbedSequential(
492
+ ResBlock(
493
+ ch,
494
+ time_embed_dim,
495
+ dropout,
496
+ dims=dims,
497
+ use_checkpoint=use_checkpoint,
498
+ use_scale_shift_norm=use_scale_shift_norm,
499
+ kernel_size_t=kernel_size_t,
500
+ padding_t=padding_t,
501
+ nonlinearity_type=nonlinearity_type,
502
+ **kwargs,
503
+ ),
504
+ STAttentionBlock(
505
+ ch,
506
+ use_checkpoint=use_checkpoint,
507
+ num_heads=num_heads,
508
+ num_head_channels=dim_head,
509
+ temporal_length=temporal_length,
510
+ use_relative_position=use_relative_position,
511
+ )
512
+ if not use_temporal_transformer
513
+ else STTransformerClass(
514
+ ch,
515
+ num_heads,
516
+ dim_head,
517
+ depth=transformer_depth,
518
+ context_dim=context_dim,
519
+ temporal_length=temporal_length,
520
+ use_relative_position=use_relative_position,
521
+ **kwargs,
522
+ ),
523
+ ResBlock(
524
+ ch,
525
+ time_embed_dim,
526
+ dropout,
527
+ dims=dims,
528
+ use_checkpoint=use_checkpoint,
529
+ use_scale_shift_norm=use_scale_shift_norm,
530
+ kernel_size_t=kernel_size_t,
531
+ padding_t=padding_t,
532
+ nonlinearity_type=nonlinearity_type,
533
+ **kwargs,
534
+ ),
535
+ )
536
+ self._feature_size += ch
537
+ self.output_blocks = paddle.nn.LayerList(sublayers=[])
538
+ for level, mult in list(enumerate(channel_mult))[::-1]:
539
+ for i in range(num_res_blocks + 1):
540
+ ich = input_block_chans.pop()
541
+ layers = [
542
+ ResBlock(
543
+ ch + ich,
544
+ time_embed_dim,
545
+ dropout,
546
+ out_channels=model_channels * mult,
547
+ dims=dims,
548
+ use_checkpoint=use_checkpoint,
549
+ use_scale_shift_norm=use_scale_shift_norm,
550
+ kernel_size_t=kernel_size_t,
551
+ padding_t=padding_t,
552
+ nonlinearity_type=nonlinearity_type,
553
+ **kwargs,
554
+ )
555
+ ]
556
+ ch = model_channels * mult
557
+ if ds in attention_resolutions:
558
+ if num_head_channels == -1:
559
+ dim_head = ch // num_heads
560
+ else:
561
+ num_heads = ch // num_head_channels
562
+ dim_head = num_head_channels
563
+ if legacy:
564
+ dim_head = ch // num_heads if use_temporal_transformer else num_head_channels
565
+ layers.append(
566
+ STAttentionBlock(
567
+ ch,
568
+ use_checkpoint=use_checkpoint,
569
+ num_heads=num_heads,
570
+ num_head_channels=dim_head,
571
+ temporal_length=temporal_length,
572
+ use_relative_position=use_relative_position,
573
+ )
574
+ if not use_temporal_transformer
575
+ else STTransformerClass(
576
+ ch,
577
+ num_heads,
578
+ dim_head,
579
+ depth=transformer_depth,
580
+ context_dim=context_dim,
581
+ temporal_length=temporal_length,
582
+ use_relative_position=use_relative_position,
583
+ **kwargs,
584
+ )
585
+ )
586
+ if level and i == num_res_blocks:
587
+ out_ch = ch
588
+ layers.append(
589
+ ResBlock(
590
+ ch,
591
+ time_embed_dim,
592
+ dropout,
593
+ out_channels=out_ch,
594
+ dims=dims,
595
+ use_checkpoint=use_checkpoint,
596
+ use_scale_shift_norm=use_scale_shift_norm,
597
+ up=True,
598
+ kernel_size_t=kernel_size_t,
599
+ padding_t=padding_t,
600
+ nonlinearity_type=nonlinearity_type,
601
+ **kwargs,
602
+ )
603
+ if resblock_updown
604
+ else Upsample(
605
+ ch,
606
+ conv_resample,
607
+ dims=dims,
608
+ out_channels=out_ch,
609
+ kernel_size_t=kernel_size_t,
610
+ padding_t=padding_t,
611
+ )
612
+ )
613
+ ds //= 2
614
+ self.output_blocks.append(TimestepEmbedSequential(*layers))
615
+ self._feature_size += ch
616
+ self.out = paddle.nn.Sequential(
617
+ normalization(ch),
618
+ nonlinearity(nonlinearity_type),
619
+ zero_module(conv_nd(dims, model_channels, out_channels, (kernel_size_t, 3, 3), padding=(padding_t, 1, 1))),
620
+ )
621
+
622
+ def convert_to_fp16(self):
623
+ """
624
+ Convert the torso of the model to float16.
625
+ """
626
+ self.input_blocks.apply(fn=convert_module_to_f16)
627
+ self.middle_block.apply(fn=convert_module_to_f16)
628
+ self.output_blocks.apply(fn=convert_module_to_f16)
629
+
630
+ def convert_to_fp32(self):
631
+ """
632
+ Convert the torso of the model to float32.
633
+ """
634
+ self.input_blocks.apply(fn=convert_module_to_f32)
635
+ self.middle_block.apply(fn=convert_module_to_f32)
636
+ self.output_blocks.apply(fn=convert_module_to_f32)
637
+
638
+ def forward(self, x, timesteps=None, time_emb_replace=None, context=None, y=None, **kwargs):
639
+ """
640
+ Apply the model to an input batch.
641
+ :param x: an [N x C x ...] Tensor of inputs.
642
+ :param timesteps: a 1-D batch of timesteps.
643
+ :param context: conditioning plugged in via crossattn
644
+ :param y: an [N] Tensor of labels, if class-conditional.
645
+ :return: an [N x C x ...] Tensor of outputs.
646
+ """
647
+ # Fix 0D tensor bug
648
+ if timesteps.ndim == 0:
649
+ timesteps = timesteps.unsqueeze(0)
650
+ hs = []
651
+ if time_emb_replace is None:
652
+ t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
653
+ emb = self.time_embed(t_emb)
654
+ else:
655
+ emb = time_emb_replace
656
+ if y is not None:
657
+             assert y.shape == [x.shape[0]]
658
+ emb = emb + self.label_emb(y)
659
+ h = x.astype(self.dtype)
660
+ for module in self.input_blocks:
661
+ h = module(h, emb, context, **kwargs)
662
+ hs.append(h)
663
+ h = self.middle_block(h, emb, context, **kwargs)
664
+ for module in self.output_blocks:
665
+ h = paddle.concat(x=[h, hs.pop()], axis=1)
666
+ h = module(h, emb, context, **kwargs)
667
+ h = h.astype(x.dtype)
668
+ h = self.out(h)
669
+
670
+ return LVDMUNet3DModelOutput(sample=h)
671
+
672
+
673
+ class FrameInterpPredUNet(LVDMUNet3DModel):
674
+ """
675
+     A UNet for unconditional generation, frame prediction and interpolation.
676
+     It may take a `mask` input marking the conditioning frames, as well as a noise level `s` for condition augmentation.
677
+ """
678
+
679
+ def __init__(self, image_size, in_channels, cond_aug_mode=None, *args, **kwargs):
680
+ super().__init__(image_size, in_channels, *args, **kwargs)
681
+ if cond_aug_mode == "time_embed":
682
+ self.time_embed_cond = paddle.nn.Sequential(
683
+ linear(self.model_channels, self.time_embed_dim),
684
+ nonlinearity(self.nonlinearity_type),
685
+ linear(self.time_embed_dim, self.time_embed_dim),
686
+ )
687
+ elif cond_aug_mode == "learned_embed":
688
+ pass
689
+
690
+ def forward(self, x, timesteps, context=None, y=None, s=None, mask=None, **kwargs):
691
+ # Fix 0D tensor bug
692
+ if timesteps.ndim == 0:
693
+ timesteps = timesteps.unsqueeze(0)
694
+ if s is not None:
695
+ s_emb = timestep_embedding(s, self.model_channels, repeat_only=False)
696
+ s_emb = self.time_embed_cond(s_emb)
697
+ t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
698
+ emb = self.time_embed(t_emb)
699
+ assert emb.dim() == 2
700
+             mask_ = mask[:, :, :, 0, 0]
701
+ t = mask.shape[2]
702
+ emb_mix = (
703
+ emb.unsqueeze(axis=2).tile(repeat_times=[1, 1, t]) * (1 - mask_)
704
+ + s_emb.unsqueeze(axis=2).tile(repeat_times=[1, 1, t]) * mask_
705
+ )
706
+ assert emb_mix.dim() == 3
707
+ emb_mix = rearrange(emb_mix, "b c t -> b t c")
708
+ time_emb_replace = emb_mix
709
+ timesteps = None
710
+ else:
711
+ time_emb_replace = None
712
+ timesteps = timesteps
713
+ return super().forward(x, timesteps, time_emb_replace=time_emb_replace, context=context, y=y, **kwargs)
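A minimal sketch (not part of the diff) of the embedding-mixing rule that `FrameInterpPredUNet.forward` implements: frames marked by `mask` receive the noise-level embedding `s_emb`, while all other frames keep the denoising timestep embedding. The function name, tensor shapes and channel width below are illustrative assumptions.

```python
# Standalone sketch of the mask-driven timestep/noise-level embedding mixing.
import paddle
from einops import rearrange


def mix_time_embeddings(emb, s_emb, mask):
    """emb, s_emb: [B, C]; mask: [B, 1, T, H, W] with 1 marking conditioning frames."""
    mask_ = mask[:, :, :, 0, 0]                                   # [B, 1, T]
    t = mask.shape[2]
    emb_mix = (
        emb.unsqueeze(axis=2).tile(repeat_times=[1, 1, t]) * (1 - mask_)
        + s_emb.unsqueeze(axis=2).tile(repeat_times=[1, 1, t]) * mask_
    )                                                             # [B, C, T]
    return rearrange(emb_mix, "b c t -> b t c")                   # fed as time_emb_replace


# hypothetical shapes, for illustration only
emb = paddle.randn([2, 512])
s_emb = paddle.randn([2, 512])
mask = paddle.zeros([2, 1, 16, 32, 32])
mask[:, :, :2] = 1.0                       # first two frames are conditioning frames
print(mix_time_embeddings(emb, s_emb, mask).shape)  # [2, 16, 512]
```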
VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/models/modeling_outputs.py ADDED
@@ -0,0 +1,44 @@
1
+ # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from dataclasses import dataclass
16
+
17
+ from ..utils import BaseOutput
18
+
19
+
20
+ @dataclass
21
+ class AutoencoderKLOutput(BaseOutput):
22
+ """
23
+ Output of AutoencoderKL encoding method.
24
+
25
+ Args:
26
+ latent_dist (`DiagonalGaussianDistribution`):
27
+ Encoded outputs of `Encoder` represented as the mean and logvar of `DiagonalGaussianDistribution`.
28
+ `DiagonalGaussianDistribution` allows for sampling latents from the distribution.
29
+ """
30
+
31
+ latent_dist: "DiagonalGaussianDistribution" # noqa: F821
32
+
33
+ @dataclass
34
+ class Transformer2DModelOutput(BaseOutput):
35
+ """
36
+ The output of [`Transformer2DModel`].
37
+
38
+ Args:
39
+ sample (`paddle.Tensor` of shape `(batch_size, num_channels, height, width)` or `(batch size, num_vector_embeds - 1, num_latent_pixels)` if [`Transformer2DModel`] is discrete):
40
+ The hidden states output conditioned on the `encoder_hidden_states` input. If discrete, returns probability
41
+ distributions for the unnoised latent pixels.
42
+ """
43
+
44
+ sample: "paddle.Tensor" # noqa: F821
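A hedged usage sketch for the output classes above: `BaseOutput` subclasses behave both as dataclasses and as ordered dicts, so callers can use attribute access, key access, or `to_tuple()` for `return_dict=False` code paths. The tensor shape is arbitrary and the import path assumes the module lives where this diff places it.

```python
import paddle
from ppdiffusers.models.modeling_outputs import Transformer2DModelOutput

out = Transformer2DModelOutput(sample=paddle.randn([1, 4, 8, 8]))
print(out.sample.shape)       # attribute access
print(out["sample"].shape)    # dict-style access to the same field
(sample,) = out.to_tuple()    # plain tuple, as returned when return_dict=False
```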
VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/models/modelscope_autoencoder_img2vid.py ADDED
@@ -0,0 +1,487 @@
1
+ # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from dataclasses import dataclass
16
+ from typing import Union
17
+
18
+ import paddle
19
+ import paddle.nn as nn
20
+ import paddle.nn.functional as F
21
+
22
+ from ..configuration_utils import ConfigMixin, register_to_config
23
+ from ..loaders import FromOriginalVAEMixin
24
+ from ..utils import BaseOutput, apply_forward_hook
25
+ from .modeling_utils import ModelMixin
26
+ from .vae import DecoderOutput, DiagonalGaussianDistribution
27
+
28
+
29
+ @paddle.no_grad()
30
+ def get_first_stage_encoding(encoder_posterior):
31
+ scale_factor = 0.18215
32
+ if isinstance(encoder_posterior, DiagonalGaussianDistribution):
33
+ z = encoder_posterior.sample()
34
+ elif isinstance(encoder_posterior, paddle.Tensor):
35
+ z = encoder_posterior
36
+ else:
37
+ raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented")
38
+ return scale_factor * z
39
+
40
+
41
+ @dataclass
42
+ class AutoencoderKLOutput(BaseOutput):
43
+ """
44
+ Output of AutoencoderKL encoding method.
45
+
46
+ Args:
47
+ latent_dist (`DiagonalGaussianDistribution`):
48
+ Encoded outputs of `Encoder` represented as the mean and logvar of `DiagonalGaussianDistribution`.
49
+ `DiagonalGaussianDistribution` allows for sampling latents from the distribution.
50
+ """
51
+
52
+ latent_dist: DiagonalGaussianDistribution
53
+
54
+
55
+ def Normalize(in_channels, num_groups=32):
56
+ return paddle.nn.GroupNorm(num_groups=num_groups, num_channels=in_channels, epsilon=1e-6)
57
+
58
+
59
+ def nonlinearity(x):
60
+ # swish
61
+ return x * F.sigmoid(x)
62
+
63
+
64
+ class ResnetBlock(nn.Layer):
65
+ def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False, dropout, temb_channels=512):
66
+ super().__init__()
67
+ self.in_channels = in_channels
68
+ out_channels = in_channels if out_channels is None else out_channels
69
+ self.out_channels = out_channels
70
+ self.use_conv_shortcut = conv_shortcut
71
+
72
+ self.norm1 = Normalize(in_channels)
73
+ self.conv1 = paddle.nn.Conv2D(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
74
+ if temb_channels > 0:
75
+ self.temb_proj = paddle.nn.Linear(temb_channels, out_channels)
76
+ self.norm2 = Normalize(out_channels)
77
+ self.dropout = paddle.nn.Dropout(dropout)
78
+ self.conv2 = paddle.nn.Conv2D(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
79
+ if self.in_channels != self.out_channels:
80
+ if self.use_conv_shortcut:
81
+ self.conv_shortcut = paddle.nn.Conv2D(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
82
+ else:
83
+ self.nin_shortcut = paddle.nn.Conv2D(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
84
+
85
+ def forward(self, x, temb):
86
+ h = x
87
+ h = self.norm1(h)
88
+ h = nonlinearity(h)
89
+ h = self.conv1(h)
90
+
91
+ if temb is not None:
92
+ h = h + self.temb_proj(nonlinearity(temb))[:, :, None, None]
93
+
94
+ h = self.norm2(h)
95
+ h = nonlinearity(h)
96
+ h = self.dropout(h)
97
+ h = self.conv2(h)
98
+
99
+ if self.in_channels != self.out_channels:
100
+ if self.use_conv_shortcut:
101
+ x = self.conv_shortcut(x)
102
+ else:
103
+ x = self.nin_shortcut(x)
104
+
105
+ return x + h
106
+
107
+
108
+ class AttnBlock(nn.Layer):
109
+ def __init__(self, in_channels):
110
+ super().__init__()
111
+ self.in_channels = in_channels
112
+
113
+ self.norm = Normalize(in_channels)
114
+ self.q = paddle.nn.Conv2D(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
115
+ self.k = paddle.nn.Conv2D(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
116
+ self.v = paddle.nn.Conv2D(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
117
+ self.proj_out = paddle.nn.Conv2D(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
118
+
119
+ def forward(self, x):
120
+ h_ = x
121
+ h_ = self.norm(h_)
122
+ q = self.q(h_)
123
+ k = self.k(h_)
124
+ v = self.v(h_)
125
+
126
+ # compute attention
127
+ b, c, h, w = q.shape
128
+ q = q.reshape([b, c, h * w])
129
+ q = q.transpose([0, 2, 1])
130
+ k = k.reshape([b, c, h * w])
131
+ w_ = paddle.bmm(q, k)
132
+ w_ = w_ * (int(c) ** (-0.5))
133
+ w_ = F.softmax(w_, axis=2)
134
+
135
+ # attend to values
136
+ v = v.reshape([b, c, h * w])
137
+ w_ = w_.transpose([0, 2, 1])
138
+ h_ = paddle.bmm(v, w_)
139
+ h_ = h_.reshape([b, c, h, w])
140
+
141
+ h_ = self.proj_out(h_)
142
+
143
+ return x + h_
144
+
145
+
146
+ class Upsample(nn.Layer):
147
+ def __init__(self, in_channels, with_conv):
148
+ super().__init__()
149
+ self.with_conv = with_conv
150
+ if self.with_conv:
151
+ self.conv = paddle.nn.Conv2D(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
152
+
153
+ def forward(self, x):
154
+ x = F.interpolate(x, scale_factor=2.0, mode="nearest")
155
+ if self.with_conv:
156
+ x = self.conv(x)
157
+ return x
158
+
159
+
160
+ class Downsample(nn.Layer):
161
+ def __init__(self, in_channels, with_conv):
162
+ super().__init__()
163
+ self.with_conv = with_conv
164
+ if self.with_conv:
165
+ # no asymmetric padding in torch conv, must do it ourselves
166
+ self.conv = paddle.nn.Conv2D(in_channels, in_channels, kernel_size=3, stride=2, padding=0)
167
+
168
+ def forward(self, x):
169
+ if self.with_conv:
170
+ pad = (0, 1, 0, 1)
171
+ x = F.pad(x, pad, mode="constant", value=0)
172
+ x = self.conv(x)
173
+ else:
174
+ x = F.avg_pool2d(x, kernel_size=2, stride=2)
175
+ return x
176
+
177
+
178
+ class Encoder(nn.Layer):
179
+ def __init__(
180
+ self,
181
+ ch,
182
+ out_ch,
183
+ ch_mult,
184
+ num_res_blocks,
185
+ attn_resolutions,
186
+ in_channels,
187
+ resolution,
188
+ z_channels,
189
+ dropout=0.0,
190
+ resamp_with_conv=True,
191
+ double_z=True,
192
+ use_linear_attn=False,
193
+ attn_type="vanilla",
194
+ **ignore_kwargs
195
+ ):
196
+ super().__init__()
197
+ self.ch = ch
198
+ self.temb_ch = 0
199
+ self.num_resolutions = len(ch_mult)
200
+ self.num_res_blocks = num_res_blocks
201
+ self.resolution = resolution
202
+ self.in_channels = in_channels
203
+
204
+ # downsampling
205
+ self.conv_in = paddle.nn.Conv2D(in_channels, self.ch, kernel_size=3, stride=1, padding=1)
206
+
207
+ curr_res = resolution
208
+ in_ch_mult = (1,) + tuple(ch_mult)
209
+ self.in_ch_mult = in_ch_mult
210
+ self.down = nn.LayerList()
211
+ for i_level in range(self.num_resolutions):
212
+ block = nn.LayerList()
213
+ attn = nn.LayerList()
214
+ block_in = ch * in_ch_mult[i_level]
215
+ block_out = ch * ch_mult[i_level]
216
+ for i_block in range(self.num_res_blocks):
217
+ block.append(
218
+ ResnetBlock(
219
+ in_channels=block_in, out_channels=block_out, temb_channels=self.temb_ch, dropout=dropout
220
+ )
221
+ )
222
+ block_in = block_out
223
+ if curr_res in attn_resolutions:
224
+ attn.append(AttnBlock(block_in))
225
+ down = nn.Layer()
226
+ down.block = block
227
+ down.attn = attn
228
+ if i_level != self.num_resolutions - 1:
229
+ down.downsample = Downsample(block_in, resamp_with_conv)
230
+ curr_res = curr_res // 2
231
+ self.down.append(down)
232
+
233
+ # middle
234
+ self.mid = nn.Layer()
235
+ self.mid.block_1 = ResnetBlock(
236
+ in_channels=block_in, out_channels=block_in, temb_channels=self.temb_ch, dropout=dropout
237
+ )
238
+ self.mid.attn_1 = AttnBlock(block_in)
239
+ self.mid.block_2 = ResnetBlock(
240
+ in_channels=block_in, out_channels=block_in, temb_channels=self.temb_ch, dropout=dropout
241
+ )
242
+
243
+ # end
244
+ self.norm_out = Normalize(block_in)
245
+ self.conv_out = paddle.nn.Conv2D(
246
+ block_in, 2 * z_channels if double_z else z_channels, kernel_size=3, stride=1, padding=1
247
+ )
248
+
249
+ def forward(self, x):
250
+ # timestep embedding
251
+ temb = None
252
+
253
+ # downsampling
254
+ hs = [self.conv_in(x)]
255
+ for i_level in range(self.num_resolutions):
256
+ for i_block in range(self.num_res_blocks):
257
+ h = self.down[i_level].block[i_block](hs[-1], temb)
258
+ if len(self.down[i_level].attn) > 0:
259
+ h = self.down[i_level].attn[i_block](h)
260
+ hs.append(h)
261
+ if i_level != self.num_resolutions - 1:
262
+ hs.append(self.down[i_level].downsample(hs[-1]))
263
+
264
+ # middle
265
+ h = hs[-1]
266
+ h = self.mid.block_1(h, temb)
267
+ h = self.mid.attn_1(h)
268
+ h = self.mid.block_2(h, temb)
269
+
270
+ # end
271
+ h = self.norm_out(h)
272
+ h = nonlinearity(h)
273
+ h = self.conv_out(h)
274
+ return h
275
+
276
+
277
+ class Decoder(nn.Layer):
278
+ def __init__(
279
+ self,
280
+ ch,
281
+ out_ch,
282
+ ch_mult,
283
+ num_res_blocks,
284
+ attn_resolutions,
285
+ in_channels,
286
+ resolution,
287
+ z_channels,
288
+ resamp_with_conv=True,
289
+ dropout=0.0,
290
+ give_pre_end=False,
291
+ tanh_out=False,
292
+ use_linear_attn=False,
293
+ attn_type="vanilla",
294
+ **ignorekwargs
295
+ ):
296
+ super().__init__()
297
+ self.ch = ch
298
+ self.temb_ch = 0
299
+ self.num_resolutions = len(ch_mult)
300
+ self.num_res_blocks = num_res_blocks
301
+ self.resolution = resolution
302
+ self.in_channels = in_channels
303
+ self.give_pre_end = give_pre_end
304
+ self.tanh_out = tanh_out
305
+
306
+ # compute block_in and curr_res at lowest res
307
+ block_in = ch * ch_mult[self.num_resolutions - 1]
308
+ curr_res = resolution // 2 ** (self.num_resolutions - 1)
309
+ self.z_shape = (1, z_channels, curr_res, curr_res)
310
+
311
+ # z to block_in
312
+ self.conv_in = paddle.nn.Conv2D(z_channels, block_in, kernel_size=3, stride=1, padding=1)
313
+
314
+ # middle
315
+ self.mid = nn.Layer()
316
+ self.mid.block_1 = ResnetBlock(
317
+ in_channels=block_in, out_channels=block_in, temb_channels=self.temb_ch, dropout=dropout
318
+ )
319
+ self.mid.attn_1 = AttnBlock(block_in)
320
+ self.mid.block_2 = ResnetBlock(
321
+ in_channels=block_in, out_channels=block_in, temb_channels=self.temb_ch, dropout=dropout
322
+ )
323
+
324
+ # upsampling
325
+ self.up = nn.LayerList()
326
+ for i_level in reversed(range(self.num_resolutions)):
327
+ block = nn.LayerList()
328
+ attn = nn.LayerList()
329
+ block_out = ch * ch_mult[i_level]
330
+ for i_block in range(self.num_res_blocks + 1):
331
+ block.append(
332
+ ResnetBlock(
333
+ in_channels=block_in, out_channels=block_out, temb_channels=self.temb_ch, dropout=dropout
334
+ )
335
+ )
336
+ block_in = block_out
337
+ if curr_res in attn_resolutions:
338
+ attn.append(AttnBlock(block_in))
339
+ up = nn.Layer()
340
+ up.block = block
341
+ up.attn = attn
342
+ if i_level != 0:
343
+ up.upsample = Upsample(block_in, resamp_with_conv)
344
+ curr_res = curr_res * 2
345
+ if len(self.up) == 0:
346
+ self.up.append(up)
347
+ else:
348
+ self.up.insert(0, up)
349
+
350
+ # end
351
+ self.norm_out = Normalize(block_in)
352
+ self.conv_out = paddle.nn.Conv2D(block_in, out_ch, kernel_size=3, stride=1, padding=1)
353
+
354
+ def forward(self, z):
355
+ self.last_z_shape = z.shape
356
+
357
+ # timestep embedding
358
+ temb = None
359
+
360
+ # z to block_in
361
+ h = self.conv_in(z)
362
+
363
+ # middle
364
+ h = self.mid.block_1(h, temb)
365
+ h = self.mid.attn_1(h)
366
+ h = self.mid.block_2(h, temb)
367
+
368
+ # upsampling
369
+ for i_level in reversed(range(self.num_resolutions)):
370
+ for i_block in range(self.num_res_blocks + 1):
371
+ h = self.up[i_level].block[i_block](h, temb)
372
+ if len(self.up[i_level].attn) > 0:
373
+ h = self.up[i_level].attn[i_block](h)
374
+ if i_level != 0:
375
+ h = self.up[i_level].upsample(h)
376
+
377
+ # end
378
+ if self.give_pre_end:
379
+ return h
380
+
381
+ h = self.norm_out(h)
382
+ h = nonlinearity(h)
383
+ h = self.conv_out(h)
384
+ if self.tanh_out:
385
+ h = paddle.tanh(h)
386
+ return h
387
+
388
+
389
+ class AutoencoderKL_imgtovideo(ModelMixin, ConfigMixin, FromOriginalVAEMixin):
390
+ """img to video AutoencoderKL"""
391
+
392
+ _supports_gradient_checkpointing = True
393
+
394
+ @register_to_config
395
+ def __init__(
396
+ self,
397
+ ch,
398
+ out_ch,
399
+ in_channels,
400
+ resolution,
401
+ z_channels,
402
+ embed_dim,
403
+ attn_resolutions,
404
+ ch_mult=(1, 2, 4, 8),
405
+ num_res_blocks=2,
406
+ dropout=0.0,
407
+ resamp_with_conv=True,
408
+ double_z=True,
409
+ use_linear_attn=False,
410
+ attn_type="vanilla",
411
+ pretrained=None,
412
+ ignore_keys=[],
413
+ image_key="image",
414
+ colorize_nlabels=None,
415
+ monitor=None,
416
+ ema_decay=None,
417
+ learn_logvar=False,
418
+ **kwargs
419
+ ):
420
+ super().__init__()
421
+ self.learn_logvar = learn_logvar
422
+ self.image_key = image_key
423
+ self.encoder = Encoder(
424
+ ch=ch,
425
+ out_ch=out_ch,
426
+ ch_mult=ch_mult,
427
+ num_res_blocks=num_res_blocks,
428
+ attn_resolutions=attn_resolutions,
429
+ in_channels=in_channels,
430
+ resolution=resolution,
431
+ z_channels=z_channels,
432
+ double_z=double_z,
433
+ )
434
+ self.decoder = Decoder(
435
+ ch=ch,
436
+ out_ch=out_ch,
437
+ ch_mult=ch_mult,
438
+ num_res_blocks=num_res_blocks,
439
+ attn_resolutions=attn_resolutions,
440
+ in_channels=in_channels,
441
+ resolution=resolution,
442
+ z_channels=z_channels,
443
+ )
444
+
445
+ self.quant_conv = paddle.nn.Conv2D(2 * z_channels, 2 * embed_dim, 1)
446
+ self.post_quant_conv = paddle.nn.Conv2D(embed_dim, z_channels, 1)
447
+ self.embed_dim = embed_dim
448
+ if colorize_nlabels is not None:
449
+ assert type(colorize_nlabels) == int
450
+ self.register_buffer("colorize", paddle.randn([3, colorize_nlabels, 1, 1]))
451
+ if monitor is not None:
452
+ self.monitor = monitor
453
+
454
+ self.use_ema = ema_decay is not None
455
+
456
+ @apply_forward_hook
457
+ def encode(self, x: paddle.Tensor, return_dict: bool = True) -> AutoencoderKLOutput:
458
+ h = self.encoder(x)
459
+ moments = self.quant_conv(h)
460
+ posterior = DiagonalGaussianDistribution(moments)
461
+
462
+ if not return_dict:
463
+ return (posterior,)
464
+
465
+ return AutoencoderKLOutput(latent_dist=posterior)
466
+
467
+ @apply_forward_hook
468
+ def decode(self, z: paddle.Tensor, return_dict: bool = True) -> Union[DecoderOutput, paddle.Tensor]:
469
+ z = self.post_quant_conv(z)
470
+ decoded = self.decoder(z)
471
+
472
+ if not return_dict:
473
+ return (decoded,)
474
+
475
+ return DecoderOutput(sample=decoded)
476
+
477
+ def forward(self, input, sample_posterior=True, return_dict: bool = True) -> Union[DecoderOutput, paddle.Tensor]:
478
+ posterior = self.encode(input).latent_dist
479
+ if sample_posterior:
480
+ z = posterior.sample()
481
+ else:
482
+ z = posterior.mode()
483
+ dec = self.decode(z).sample
484
+ if not return_dict:
485
+ return (dec,)
486
+
487
+ return DecoderOutput(sample=dec)
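A hedged round-trip sketch for `AutoencoderKL_imgtovideo` above: encode a frame, scale the sampled latent with `get_first_stage_encoding`, then undo the scale before decoding. The constructor arguments are small illustrative values, not a released configuration, and the import path assumes the module lives where this diff places it.

```python
import paddle
from ppdiffusers.models.modelscope_autoencoder_img2vid import (
    AutoencoderKL_imgtovideo,
    get_first_stage_encoding,
)

# Tiny illustrative config: 2 resolutions, no attention blocks.
vae = AutoencoderKL_imgtovideo(
    ch=32, out_ch=3, in_channels=3, resolution=64, z_channels=4,
    embed_dim=4, attn_resolutions=[], ch_mult=(1, 2), num_res_blocks=1,
)

x = paddle.randn([1, 3, 64, 64])               # a single RGB frame
posterior = vae.encode(x).latent_dist          # DiagonalGaussianDistribution
z = get_first_stage_encoding(posterior)        # sampled latent scaled by 0.18215
recon = vae.decode(z / 0.18215).sample         # undo the scale before decoding
print(recon.shape)                             # [1, 3, 64, 64]
```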
VLMEvalKit_old/PaddleMIX/ppdiffusers/ppdiffusers/models/paddleinfer_runtime.py ADDED
@@ -0,0 +1,399 @@
1
+ # Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import json
16
+ import os
17
+ import shutil
18
+ from pathlib import Path
19
+ from typing import Dict, Optional, Union
20
+
21
+ import numpy as np
22
+
23
+ from ..configuration_utils import FrozenDict
24
+ from ..utils import (
25
+ DIFFUSERS_CACHE,
26
+ FROM_HF_HUB,
27
+ HF_HUB_OFFLINE,
28
+ PADDLE_INFER_MODEL_NAME,
29
+ PADDLE_INFER_WEIGHTS_NAME,
30
+ PPDIFFUSERS_CACHE,
31
+ _add_variant,
32
+ _get_model_file,
33
+ is_paddle_available,
34
+ logging,
35
+ )
36
+ from ..version import VERSION as __version__
37
+
38
+ __all__ = ["PaddleInferRuntimeModel"]
39
+
40
+ if is_paddle_available():
41
+ import paddle
42
+ import paddle.inference as paddle_infer
43
+
44
+ logger = logging.get_logger(__name__)
45
+
46
+
47
+ class PaddleInferRuntimeModel:
48
+ def __init__(self, model=None, config=None, **kwargs):
49
+ logger.info("ppdiffusers.PaddleInferRuntimeModel")
50
+ self.model = model
51
+ self.config = config
52
+ self.model_save_dir = kwargs.get("model_save_dir", None)
53
+ self.latest_model_name = kwargs.get("latest_model_name", None)
54
+ self.latest_params_name = kwargs.get("latest_params_name", None)
55
+ if self.latest_model_name is None:
56
+ self.latest_model_name = PADDLE_INFER_MODEL_NAME
57
+ if self.latest_params_name is None:
58
+ self.latest_params_name = PADDLE_INFER_WEIGHTS_NAME
59
+
60
+ def __call__(self, **kwargs):
61
+ kwargs.pop("output_shape", None)
62
+ kwargs.pop("infer_op", None)
63
+ inputs = {}
64
+ # for k, v in kwargs.items():
65
+ # if k == "timestep":
66
+ # if v.ndim == 0:
67
+ # # fix 0D tensor error
68
+ # v = v.reshape((1,))
69
+ # # fix dtype error
70
+ # v = v.astype("float32")
71
+ # inputs[k] = v
72
+ # input_names = self.model.get_input_names()
73
+ # for i, name in enumerate(input_names):
74
+ # input_tensor = self.model.get_input_handle(name)
75
+ # if name not in inputs:
76
+ # raise ValueError(f"Input {name} is not in the model.")
77
+ # if isinstance(inputs[name], int):
78
+ # inputs[name] = paddle.to_tensor(inputs[name])
79
+ # if inputs[name].ndim == 0: # fix 0D tensor error
80
+ # inputs[name] = inputs[name].reshape((1,))
81
+ # logger.warning(f"Input {name} is 0D tensor, reshape to (1,)")
82
+ # # if not isinstance(input_tensor, paddle.Tensor):
83
+ # # input_tensor = paddle.to_tensor(input_tensor)
84
+ # # logger.warning(f"Input {name} is not paddle tensor, convert to paddle tensor")
85
+ # # if input_tensor.ndim == 0: # fix 0D tensor error
86
+ # # input_tensor = input_tensor.reshape((1,))
87
+ # input_tensor.reshape(inputs[name].shape)
88
+ # input_tensor.copy_from_cpu(inputs[name].numpy())
89
+ # # do the inference
90
+ # self.model.run()
91
+ # results = []
92
+ # # get out data from output tensor
93
+ # output_names = self.model.get_output_names()
94
+ # for i, name in enumerate(output_names):
95
+ # output_tensor = self.model.get_output_handle(name)
96
+ # output_data = output_tensor.copy_to_cpu()
97
+ # results.append(paddle.to_tensor(output_data))
98
+ # return results
99
+ for k, v in kwargs.items():
100
+ if isinstance(v, int):
101
+ v = paddle.to_tensor(v)
102
+ if k == "timestep" or k == "num_frames":
103
+ if v.ndim == 0:
104
+ # fix 0D tensor error
105
+ v = v.reshape((1,))
106
+ # fix dtype error
107
+ v = v.astype("float32")
108
+ if isinstance(v, np.ndarray):
109
+ v = paddle.to_tensor(v)
110
+ inputs[k] = v
111
+ input_list = []
112
+ input_names = self.model.get_input_names()
113
+ for i, name in enumerate(input_names):
114
+ if name not in inputs:
115
+ raise ValueError(f"Input {name} is not in the model.")
116
+ input_list.append(inputs[name])
117
+ # do the inference (zero copy)
118
+ self.model.run(input_list)
119
+ results = []
120
+ # get out data from output tensor
121
+ output_names = self.model.get_output_names()
122
+ for i, name in enumerate(output_names):
123
+ output_tensor = self.model.get_output_handle(name)
124
+ output_data = output_tensor.copy_to_cpu()
125
+ results.append(paddle.to_tensor(output_data))
126
+ return results
127
+
128
+ @staticmethod
129
+ def load_model(
130
+ model_path: Union[str, Path],
131
+ params_path: Union[str, Path] = None,
132
+ use_optim_cache: bool = False,
133
+ infer_config: Optional["paddle_infer.Congig"] = None,
134
+ ):
135
+ """
136
+ Loads an FastDeploy Inference Model with fastdeploy.RuntimeOption
137
+ Arguments:
138
+ model_path (`str` or `Path`):
139
+ Model path from which to load
140
+ params_path (`str` or `Path`):
141
+ Params path from which to load
142
+ use_optim_cache (`bool`, *optional*, defaults to `False`):
143
+ Whether to automatically load the optimized parameters from cache.(If the cache does not exist, it will be automatically generated)
144
+ runtime_options (fd.RuntimeOption, *optional*):
145
+ The RuntimeOption of fastdeploy to initialize the fastdeploy runtime. Default setting
146
+ the device to cpu and the backend to paddle inference
147
+ """
148
+ if infer_config is None:
149
+ infer_config = paddle_infer.Config()
150
+
151
+ if use_optim_cache:
152
+ # 首次运行,自动生成优化模型
153
+ params_dir = os.path.dirname(params_path)
154
+ optim_cache_dir = os.path.join(params_dir, "_optim_cache")
155
+ if not os.path.exists(optim_cache_dir):
156
+ os.makedirs(optim_cache_dir)
157
+ infer_config.switch_ir_optim(True)
158
+ infer_config.set_optim_cache_dir(optim_cache_dir)
159
+ infer_config.enable_save_optim_model(True)
160
+ else:
161
+ # 第二次运行,加载缓存的optim模型
162
+ infer_config.switch_ir_optim(False)
163
+ optimized_params_path = os.path.join(optim_cache_dir, "_optimized.pdiparams")
164
+ optimized_model_path = os.path.join(optim_cache_dir, "_optimized.pdmodel")
165
+ model_path = optimized_model_path
166
+ params_path = optimized_params_path
167
+
168
+ infer_config.set_prog_file(model_path)
169
+ infer_config.set_params_file(params_path)
170
+ return paddle_infer.create_predictor(infer_config)
171
+
172
+ def _save_pretrained(
173
+ self,
174
+ save_directory: Union[str, Path],
175
+ model_file_name: Optional[str] = None,
176
+ params_file_name: Optional[str] = None,
177
+ **kwargs
178
+ ):
179
+ """
180
+ Save a model and its configuration file to a directory, so that it can be re-loaded using the
181
+ [`~FastDeployRuntimeModel.from_pretrained`] class method. It will always save the
182
+ latest_model_name.
183
+ Arguments:
184
+ save_directory (`str` or `Path`):
185
+ Directory where to save the model file.
186
+ model_file_name(`str`, *optional*):
187
+ Overwrites the default model file name from `"inference.pdmodel"` to `model_file_name`. This allows you to save the
188
+ model with a different name.
189
+ params_file_name(`str`, *optional*):
190
+ Overwrites the default model file name from `"inference.pdiparams"` to `params_file_name`. This allows you to save the
191
+ model with a different name.
192
+ """
193
+ model_file_name = model_file_name if model_file_name is not None else PADDLE_INFER_MODEL_NAME
194
+ params_file_name = params_file_name if params_file_name is not None else PADDLE_INFER_WEIGHTS_NAME
195
+
196
+ src_model_path = self.model_save_dir.joinpath(self.latest_model_name)
197
+ dst_model_path = Path(save_directory).joinpath(model_file_name)
198
+
199
+ try:
200
+ shutil.copyfile(src_model_path, dst_model_path)
201
+ except shutil.SameFileError:
202
+ pass
203
+
204
+ def save_pretrained(
205
+ self,
206
+ save_directory: Union[str, os.PathLike],
207
+ **kwargs,
208
+ ):
209
+ """
210
+ Save a model to a directory, so that it can be re-loaded using the [`~FastDeployRuntimeModel.from_pretrained`] class
211
+ method.:
212
+ Arguments:
213
+ save_directory (`str` or `os.PathLike`):
214
+ Directory to which to save. Will be created if it doesn't exist.
215
+ """
216
+ if os.path.isfile(save_directory):
217
+ logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
218
+ return
219
+
220
+ os.makedirs(save_directory, exist_ok=True)
221
+
222
+ # saving model weights/files
223
+ self._save_pretrained(save_directory, **kwargs)
224
+
225
+ @classmethod
226
+ def _from_pretrained(
227
+ cls,
228
+ pretrained_model_name_or_path: Union[str, Path],
229
+ model_file_name: Optional[str] = None,
230
+ params_file_name: Optional[str] = None,
231
+ use_auth_token: Optional[Union[bool, str, None]] = None,
232
+ revision: Optional[str] = None,
233
+ subfolder: Optional[str] = None,
234
+ force_download: bool = False,
235
+ cache_dir: Optional[str] = None,
236
+ infer_config: Optional["paddle_infer.Config"] = None,
237
+ use_optim_cache: bool = False,
238
+ from_hf_hub: Optional[bool] = False,
239
+ proxies: Optional[Dict] = None,
240
+ resume_download: bool = False,
241
+ local_files_only: bool = False,
242
+ user_agent: Union[Dict, str, None] = None,
243
+ is_onnx_model: bool = False,
244
+ **kwargs,
245
+ ):
246
+ """
247
+ Load a model from a directory or the HF Hub.
248
+ Arguments:
249
+ pretrained_model_name_or_path (`str` or `Path`):
250
+ Directory from which to load
251
+ model_file_name (`str`):
252
+ Overwrites the default model file name from `"inference.pdmodel"` to `file_name`. This allows you to load
253
+ different model files from the same repository or directory.
254
+ params_file_name (`str`):
255
+ Overwrites the default params file name from `"inference.pdiparams"` to `file_name`. This allows you to load
256
+ different model files from the same repository or directory.
257
+ use_auth_token (`str` or `bool`):
258
+ Is needed to load models from a private or gated repository
259
+ revision (`str`):
260
+ Revision is the specific model version to use. It can be a branch name, a tag name, or a commit id
261
+ cache_dir (`Union[str, Path]`, *optional*):
262
+ Path to a directory in which a downloaded pretrained model configuration should be cached if the
263
+ standard cache should not be used.
264
+ force_download (`bool`, *optional*, defaults to `False`):
265
+ Whether or not to force the (re-)download of the model weights and configuration files, overriding the
266
+ cached versions if they exist.
267
+ runtime_options (`fastdeploy.RuntimeOption`, *optional*):
268
+ The RuntimeOption of fastdeploy.
269
+ subfolder (`str`, *optional*, defaults to `""`):
270
+ In case the relevant files are located inside a subfolder of the model repo (either remote in
271
+ huggingface.co or downloaded locally), you can specify the folder name here.
272
+ kwargs (`Dict`, *optional*):
273
+ kwargs will be passed to the model during initialization
274
+ """
275
+
276
+ model_file_name = model_file_name if model_file_name is not None else PADDLE_INFER_MODEL_NAME
277
+ params_file_name = params_file_name if params_file_name is not None else PADDLE_INFER_WEIGHTS_NAME
278
+ config = None
279
+
280
+ # load model from local directory
281
+ if os.path.isdir(pretrained_model_name_or_path):
282
+ model_path = os.path.join(pretrained_model_name_or_path, model_file_name)
283
+ params_path = os.path.join(pretrained_model_name_or_path, params_file_name)
284
+
285
+ model = PaddleInferRuntimeModel.load_model(
286
+ model_path,
287
+ params_path,
288
+ infer_config=infer_config,
289
+ use_optim_cache=use_optim_cache,
290
+ )
291
+ # 加载模型配置文件
292
+ config_path = os.path.join(pretrained_model_name_or_path, "config.json")
293
+ if os.path.exists(config_path):
294
+ with open(config_path, "r", encoding="utf-8") as f:
295
+ config = json.load(f)
296
+ config = FrozenDict(config)
297
+ kwargs["model_save_dir"] = Path(pretrained_model_name_or_path)
298
+
299
+ # load model from hub or paddle bos
300
+ else:
301
+ model_cache_path = _get_model_file(
302
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
303
+ weights_name=model_file_name,
304
+ subfolder=subfolder,
305
+ cache_dir=cache_dir,
306
+ force_download=force_download,
307
+ revision=revision,
308
+ from_hf_hub=from_hf_hub,
309
+ proxies=proxies,
310
+ resume_download=resume_download,
311
+ local_files_only=local_files_only,
312
+ use_auth_token=use_auth_token,
313
+ user_agent=user_agent,
314
+ )
315
+
316
+ params_cache_path = _get_model_file(
317
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
318
+ weights_name=params_file_name,
319
+ subfolder=subfolder,
320
+ cache_dir=cache_dir,
321
+ force_download=force_download,
322
+ revision=revision,
323
+ from_hf_hub=from_hf_hub,
324
+ proxies=proxies,
325
+ resume_download=resume_download,
326
+ local_files_only=local_files_only,
327
+ use_auth_token=use_auth_token,
328
+ user_agent=user_agent,
329
+ )
330
+ kwargs["latest_params_name"] = Path(params_cache_path).name
331
+ kwargs["model_save_dir"] = Path(model_cache_path).parent
332
+ kwargs["latest_model_name"] = Path(model_cache_path).name
333
+
334
+ model = PaddleInferRuntimeModel.load_model(
335
+ model_cache_path,
336
+ params_cache_path,
337
+ infer_config=infer_config,
338
+ use_optim_cache=use_optim_cache,
339
+ )
340
+ # 加载模型配置文件
341
+ config_path = os.path.join(kwargs["model_save_dir"], "config.json")
342
+ if os.path.exists(config_path):
343
+ with open(config_path, "r", encoding="utf-8") as f:
344
+ config = json.load(f)
345
+ config = FrozenDict(config)
346
+
347
+ return cls(model=model, config=config, **kwargs)
348
+
349
+ @classmethod
350
+ def from_pretrained(
351
+ cls,
352
+ pretrained_model_name_or_path: Union[str, Path],
353
+ model_file_name: Optional[str] = None,
354
+ params_file_name: Optional[str] = None,
355
+ infer_configs: Optional["paddle_infer.Config"] = None,
356
+ use_optim_cache: bool = False,
357
+ **kwargs,
358
+ ):
359
+ from_hf_hub = kwargs.pop("from_hf_hub", FROM_HF_HUB)
360
+ cache_dir = (
361
+ kwargs.pop("cache_dir", DIFFUSERS_CACHE) if from_hf_hub else kwargs.pop("cache_dir", PPDIFFUSERS_CACHE)
362
+ )
363
+ force_download = kwargs.pop("force_download", False)
364
+ resume_download = kwargs.pop("resume_download", False)
365
+ proxies = kwargs.pop("proxies", None)
366
+ local_files_only = kwargs.pop("local_files_only", HF_HUB_OFFLINE)
367
+ use_auth_token = kwargs.pop("use_auth_token", None)
368
+ revision = kwargs.pop("revision", None)
369
+ subfolder = kwargs.pop("subfolder", None)
370
+ variant = kwargs.pop("variant", None)
371
+
372
+ user_agent = {
373
+ "ppdiffusers": __version__,
374
+ "file_type": "model",
375
+ "framework": "paddleinfer",
376
+ }
377
+
378
+ return cls._from_pretrained(
379
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
380
+ model_file_name=_add_variant(model_file_name, variant),
381
+ params_file_name=_add_variant(params_file_name, variant),
382
+ use_auth_token=use_auth_token,
383
+ revision=revision,
384
+ subfolder=subfolder,
385
+ force_download=force_download,
386
+ cache_dir=cache_dir,
387
+ infer_config=infer_configs,
388
+ use_optim_cache=use_optim_cache,
389
+ from_hf_hub=from_hf_hub,
390
+ proxies=proxies,
391
+ resume_download=resume_download,
392
+ local_files_only=local_files_only,
393
+ user_agent=user_agent,
394
+ **kwargs,
395
+ )
396
+
397
+ @property
398
+ def dtype(self) -> Union[str, paddle.dtype]:
399
+ return "float32"