Commit e2ef18f (verified) by zhili-liu
1 Parent(s): 2894af6

Upload 10 files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
conversation.py ADDED
@@ -0,0 +1,288 @@
1
+ """
2
+ From https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py
3
+ """
4
+
5
+ import dataclasses
6
+ from enum import IntEnum, auto
7
+ from typing import Any, Dict, List
8
+
9
+
10
+ class SeparatorStyle(IntEnum):
11
+ """Separator styles."""
12
+
13
+ DeepSeek = auto()
14
+ DeepSeekV2 = auto()
15
+ PLAIN = auto()
16
+ ALIGNMENT = auto()
17
+
18
+
19
+ @dataclasses.dataclass
20
+ class Conversation:
21
+ """A class that manages prompt templates and keeps all conversation history."""
22
+
23
+ # The name of this template
24
+ name: str
25
+ # The template of the system prompt
26
+ system_template: str = "{system_message}"
27
+ # The system message
28
+ system_message: str = ""
29
+ # The names of two roles
30
+ roles: List[str] = (("USER", "ASSISTANT"),)
31
+ # All messages. Each item is (role, message).
32
+ messages: List[List[str]] = ()
33
+ # The number of few shot examples
34
+ offset: int = 0
35
+ # The separator style and configurations
36
+ sep_style: SeparatorStyle = SeparatorStyle.DeepSeek
37
+ sep: str = "\n"
38
+ sep2: str = None
39
+ # Stop criteria (the default one is EOS token)
40
+ stop_str: str = None
41
+ # Stops generation if meeting any token in this list
42
+ stop_token_ids: List[int] = None
43
+
44
+ def get_prompt(self) -> str:
45
+ """Get the prompt for generation."""
46
+ system_prompt = self.system_template.format(system_message=self.system_message)
47
+ if self.sep_style == SeparatorStyle.DeepSeek:
48
+ seps = [self.sep, self.sep2]
49
+ if system_prompt == "" or system_prompt is None:
50
+ ret = ""
51
+ else:
52
+ ret = system_prompt + seps[0]
53
+ for i, (role, message) in enumerate(self.messages):
54
+ if message:
55
+ ret += role + ": " + message + seps[i % 2]
56
+ else:
57
+ ret += role + ":"
58
+ return ret
59
+ elif self.sep_style == SeparatorStyle.DeepSeekV2:
60
+ seps = [self.sep, self.sep2]
61
+ if system_prompt == "" or system_prompt is None:
62
+ ret = ""
63
+ else:
64
+ ret = system_prompt + seps[0]
65
+ for i, (role, message) in enumerate(self.messages):
66
+ if message:
67
+ if role == "User":
68
+ ret += "<|sft▁begin|>\n" + message + self.sep #<|sft▁begin|>User Input<|sft▁end|>\nResponse<|end▁of▁sentence|>
69
+ else:
70
+ ret += message + self.sep2
71
+ else:
72
+ ret = ret
73
+ return ret
74
+
75
+ elif self.sep_style == SeparatorStyle.PLAIN:
76
+ seps = [self.sep, self.sep2]
77
+ ret = ""
78
+ for i, (role, message) in enumerate(self.messages):
79
+ if message:
80
+ if type(message) is tuple:
81
+ message, _, _ = message
82
+ if i % 2 == 0:
83
+ ret += message + seps[i % 2]
84
+ else:
85
+ ret += message + seps[i % 2]
86
+ else:
87
+ ret += ""
88
+ return ret
89
+ elif self.sep_style == SeparatorStyle.ALIGNMENT:
90
+ seps = [self.sep, self.sep2]
91
+ ret = ""
92
+ for i, (role, message) in enumerate(self.messages):
93
+ if message:
94
+ if type(message) is tuple:
95
+ message, _, _ = message
96
+ if i % 2 == 0:
97
+ ret += '<image>\n' + seps[i % 2]
98
+ else:
99
+ ret += message + seps[i % 2]
100
+ else:
101
+ ret += ""
102
+ return ret
103
+ else:
104
+ raise ValueError(f"Invalid style: {self.sep_style}")
105
+
106
+ def set_system_message(self, system_message: str):
107
+ """Set the system message."""
108
+ self.system_message = system_message
109
+
110
+ def append_message(self, role: str, message: str):
111
+ """Append a new message."""
112
+ self.messages.append([role, message])
113
+
114
+ def update_last_message(self, message: str):
115
+ """Update the last output.
116
+
117
+ The last message is typically set to be None when constructing the prompt,
118
+ so we need to update it in-place after getting the response from a model.
119
+ """
120
+ self.messages[-1][1] = message
121
+
122
+ def reset_message(self):
123
+ """Reset a new message."""
124
+ self.messages = []
125
+
126
+ def to_gradio_chatbot(self):
127
+ """Convert the conversation to gradio chatbot format."""
128
+ ret = []
129
+ for i, (role, msg) in enumerate(self.messages[self.offset :]):
130
+ if i % 2 == 0:
131
+ ret.append([msg, None])
132
+ else:
133
+ ret[-1][-1] = msg
134
+ return ret
135
+
136
+ def to_openai_api_messages(self):
137
+ """Convert the conversation to OpenAI chat completion format."""
138
+ system_prompt = self.system_template.format(system_message=self.system_message)
139
+ ret = [{"role": "system", "content": system_prompt}]
140
+
141
+ for i, (_, msg) in enumerate(self.messages[self.offset :]):
142
+ if i % 2 == 0:
143
+ ret.append({"role": "user", "content": msg})
144
+ else:
145
+ if msg is not None:
146
+ ret.append({"role": "assistant", "content": msg})
147
+ return ret
148
+
149
+ def copy(self):
150
+ return Conversation(
151
+ name=self.name,
152
+ system_template=self.system_template,
153
+ system_message=self.system_message,
154
+ roles=self.roles,
155
+ messages=[[x, y] for x, y in self.messages],
156
+ offset=self.offset,
157
+ sep_style=self.sep_style,
158
+ sep=self.sep,
159
+ sep2=self.sep2,
160
+ stop_str=self.stop_str,
161
+ stop_token_ids=self.stop_token_ids,
162
+ )
163
+
164
+ def dict(self):
165
+ return {
166
+ "template_name": self.name,
167
+ "system_message": self.system_message,
168
+ "roles": self.roles,
169
+ "messages": self.messages,
170
+ "offset": self.offset,
171
+ }
172
+
173
+
174
+ # A global registry for all conversation templates
175
+ conv_templates: Dict[str, Conversation] = {}
176
+
177
+
178
+ def register_conv_template(template: Conversation, override: bool = False):
179
+ """Register a new conversation template."""
180
+ if not override:
181
+ assert template.name not in conv_templates, f"{template.name} has been registered."
182
+
183
+ conv_templates[template.name] = template
184
+
185
+
186
+ def get_conv_template(name: str) -> Conversation:
187
+ """Get a conversation template."""
188
+ return conv_templates[name].copy()
189
+
190
+
191
+ # register_conv_template(
192
+ # Conversation(
193
+ # name="deepseek",
194
+ # system_template="{system_message}",
195
+ # # system_message="You are a helpful assistant. Please answer truthfully and write out your "
196
+ # # "thinking step by step to be sure you get the right answer.",
197
+ # system_message="",
198
+ # roles=("User", "Assistant"),
199
+ # messages=(),
200
+ # offset=0,
201
+ # sep_style=SeparatorStyle.DeepSeek,
202
+ # sep="\n\n",
203
+ # sep2="<|end▁of▁sentence|>",
204
+ # stop_token_ids=[100001],
205
+ # stop_str=["User:", "<|end▁of▁sentence|>"]
206
+ # )
207
+ # )
208
+ register_conv_template(
209
+ Conversation(
210
+ name="deepseek",
211
+ system_template="{system_message}",
212
+ # system_message="You are a helpful assistant. Please answer truthfully and write out your "
213
+ # "thinking step by step to be sure you get the right answer.",
214
+ system_message="",
215
+ roles=("<|User|>", "<|Assistant|>"),
216
+ messages=(),
217
+ offset=0,
218
+ sep_style=SeparatorStyle.DeepSeek,
219
+ sep="\n\n",
220
+ sep2="<|end▁of▁sentence|>",
221
+ stop_token_ids=[100001],
222
+ stop_str=["User:", "<|end▁of▁sentence|>"]
223
+ )
224
+ )
225
+ # register_conv_template(
226
+ # Conversation(
227
+ # name="deepseekv2",
228
+ # system_template="{system_message}",
229
+ # system_message="",
230
+ # roles=("User", "Assistant"),
231
+ # messages=(),
232
+ # offset=0,
233
+ # sep_style=SeparatorStyle.DeepSeekV2,
234
+ # sep="\n<|sft▁end|>",
235
+ # sep2="<|end▁of▁sentence|>",
236
+ # stop_token_ids=[100001],
237
+ # stop_str=["User:", "<|end▁of▁sentence|>"]
238
+ # )
239
+ # )
240
+ register_conv_template(
241
+ Conversation(
242
+ name="deepseekv2",
243
+ system_template="{system_message}",
244
+ system_message="",
245
+ roles=("|<User>|", "|<Assistant>|"),
246
+ messages=(),
247
+ offset=0,
248
+ sep_style=SeparatorStyle.DeepSeekV2,
249
+ sep="\n<|sft▁end|>",
250
+ sep2="<|end▁of▁sentence|>",
251
+ stop_token_ids=[100001],
252
+ stop_str=["User:", "<|end▁of▁sentence|>"]
253
+ )
254
+ )
255
+
256
+
257
+ register_conv_template(
258
+ Conversation(
259
+ name="plain",
260
+ system_template="",
261
+ system_message="",
262
+ roles=("", ""),
263
+ messages=(),
264
+ offset=0,
265
+ sep_style=SeparatorStyle.PLAIN,
266
+ sep="",
267
+ sep2="",
268
+ stop_token_ids=[100001],
269
+ stop_str=['</s>'],
270
+ )
271
+ )
272
+
273
+
274
+ register_conv_template(
275
+ Conversation(
276
+ name="alignment",
277
+ system_template="",
278
+ system_message="",
279
+ roles=("", ""),
280
+ messages=(),
281
+ offset=0,
282
+ sep_style=SeparatorStyle.ALIGNMENT,
283
+ sep="",
284
+ sep2="",
285
+ stop_token_ids=[100001],
286
+ stop_str=['</s>'],
287
+ )
288
+ )
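For reference, a minimal usage sketch of the templates registered above (illustrative only; it assumes conversation.py is importable and uses only the names defined in that file):

    from conversation import get_conv_template

    conv = get_conv_template("deepseek")
    conv.append_message(conv.roles[0], "Describe this image.")
    conv.append_message(conv.roles[1], None)
    prompt = conv.get_prompt()  # "<|User|>: Describe this image.\n\n<|Assistant|>:"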
modeling_deepseek.py ADDED
@@ -0,0 +1,1977 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 DeepSeek-AI and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+ """ PyTorch DeepSeek model and compatible with both DeepSeekV2 and DeepSeekV3"""
21
+ import math
22
+ import warnings
23
+ from typing import List, Optional, Tuple, Union
24
+ import numpy as np
25
+
26
+ import torch
27
+ import torch.nn.functional as F
28
+ import torch.utils.checkpoint
29
+ import torch.distributed as dist
30
+ from einops import repeat
31
+ from torch import nn
32
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
33
+ from transformers import AutoConfig
34
+
35
+ from transformers.activations import ACT2FN
36
+ from transformers.cache_utils import Cache, DynamicCache
37
+ from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask
38
+ from transformers.models.llama.modeling_llama import (
39
+ LlamaAttention,
40
+ LlamaFlashAttention2
41
+ )
42
+ from transformers.modeling_outputs import (
43
+ BaseModelOutputWithPast,
44
+ CausalLMOutputWithPast,
45
+ SequenceClassifierOutputWithPast,
46
+ )
47
+ from transformers.modeling_utils import PreTrainedModel
48
+ from transformers.pytorch_utils import (
49
+ ALL_LAYERNORM_LAYERS,
50
+ is_torch_greater_or_equal_than_1_13,
51
+ )
52
+ from transformers.utils import (
53
+ add_start_docstrings,
54
+ add_start_docstrings_to_model_forward,
55
+ is_flash_attn_2_available,
56
+ is_flash_attn_greater_or_equal_2_10,
57
+ logging,
58
+ replace_return_docstrings,
59
+ )
60
+ from transformers.utils.import_utils import is_torch_fx_available
61
+
62
+ from .configuration_deepseek import DeepseekV2Config
63
+ from transformers import AutoModelForCausalLM
64
+
65
+ if is_flash_attn_2_available():
66
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
67
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
68
+
69
+ # This makes `_prepare_4d_causal_attention_mask` a leaf function in the FX graph.
70
+ # It means that the function will not be traced through and simply appear as a node in the graph.
71
+ if is_torch_fx_available():
72
+ if not is_torch_greater_or_equal_than_1_13:
73
+ import torch.fx
74
+
75
+ _prepare_4d_causal_attention_mask = torch.fx.wrap(_prepare_4d_causal_attention_mask)
76
+
77
+ logger = logging.get_logger(__name__)
78
+
79
+ _CONFIG_FOR_DOC = "DeepseekV2Config"
80
+
81
+
82
+ def _get_unpad_data(attention_mask):
83
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
84
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
85
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
86
+ cu_seqlens = F.pad(
87
+ torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0)
88
+ )
89
+ return (
90
+ indices,
91
+ cu_seqlens,
92
+ max_seqlen_in_batch,
93
+ )
94
+
95
+
96
+ class DeepseekV2RMSNorm(nn.Module):
97
+ def __init__(self, hidden_size, eps=1e-6):
98
+ """
99
+ DeepseekV2RMSNorm is equivalent to T5LayerNorm
100
+ """
101
+ super().__init__()
102
+ self.weight = nn.Parameter(torch.ones(hidden_size))
103
+ self.variance_epsilon = eps
104
+
105
+ def forward(self, hidden_states):
106
+ input_dtype = hidden_states.dtype
107
+ hidden_states = hidden_states.to(torch.float32)
108
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
109
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
110
+ return self.weight * hidden_states.to(input_dtype)
111
+
112
+
113
+ ALL_LAYERNORM_LAYERS.append(DeepseekV2RMSNorm)
114
+
115
+
116
+ class DeepseekV2RotaryEmbedding(nn.Module):
117
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
118
+ super().__init__()
119
+
120
+ self.dim = dim
121
+ self.max_position_embeddings = max_position_embeddings
122
+ self.base = base
123
+ inv_freq = 1.0 / (
124
+ self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim)
125
+ )
126
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
127
+
128
+ # Build here to make `torch.jit.trace` work.
129
+ self._set_cos_sin_cache(
130
+ seq_len=max_position_embeddings,
131
+ device=self.inv_freq.device,
132
+ dtype=torch.get_default_dtype(),
133
+ )
134
+ self.max_seq_len_cached = None
135
+
136
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
137
+ self.max_seq_len_cached = seq_len
138
+ t = torch.arange(
139
+ self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype
140
+ )
141
+
142
+ freqs = torch.outer(t, self.inv_freq.to(t.device))
143
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
144
+ emb = torch.cat((freqs, freqs), dim=-1)
145
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
146
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
147
+
148
+ def forward(self, x, seq_len=None):
149
+ # x: [bs, num_attention_heads, seq_len, head_size]
150
+ if self.max_seq_len_cached is None or seq_len > self.max_seq_len_cached:
151
+ self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
152
+
153
+ return (
154
+ self.cos_cached[:seq_len].to(dtype=x.dtype),
155
+ self.sin_cached[:seq_len].to(dtype=x.dtype),
156
+ )
157
+
158
+
159
+ # Copied from transformers.models.llama.modeling_llama.LlamaLinearScalingRotaryEmbedding with Llama->DeepseekV2
160
+ class DeepseekV2LinearScalingRotaryEmbedding(DeepseekV2RotaryEmbedding):
161
+ """DeepseekV2RotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""
162
+
163
+ def __init__(
164
+ self,
165
+ dim,
166
+ max_position_embeddings=2048,
167
+ base=10000,
168
+ device=None,
169
+ scaling_factor=1.0,
170
+ ):
171
+ self.scaling_factor = scaling_factor
172
+ super().__init__(dim, max_position_embeddings, base, device)
173
+
174
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
175
+ self.max_seq_len_cached = seq_len
176
+ t = torch.arange(
177
+ self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype
178
+ )
179
+ t = t / self.scaling_factor
180
+
181
+ freqs = torch.outer(t, self.inv_freq)
182
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
183
+ emb = torch.cat((freqs, freqs), dim=-1)
184
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
185
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
186
+
187
+
188
+ # Copied from transformers.models.llama.modeling_llama.LlamaDynamicNTKScalingRotaryEmbedding with Llama->DeepseekV2
189
+ class DeepseekV2DynamicNTKScalingRotaryEmbedding(DeepseekV2RotaryEmbedding):
190
+ """DeepseekV2RotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla"""
191
+
192
+ def __init__(
193
+ self,
194
+ dim,
195
+ max_position_embeddings=2048,
196
+ base=10000,
197
+ device=None,
198
+ scaling_factor=1.0,
199
+ ):
200
+ self.scaling_factor = scaling_factor
201
+ super().__init__(dim, max_position_embeddings, base, device)
202
+
203
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
204
+ self.max_seq_len_cached = seq_len
205
+
206
+ if seq_len > self.max_position_embeddings:
207
+ base = self.base * (
208
+ (self.scaling_factor * seq_len / self.max_position_embeddings)
209
+ - (self.scaling_factor - 1)
210
+ ) ** (self.dim / (self.dim - 2))
211
+ inv_freq = 1.0 / (
212
+ base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim)
213
+ )
214
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
215
+
216
+ t = torch.arange(
217
+ self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype
218
+ )
219
+
220
+ freqs = torch.outer(t, self.inv_freq)
221
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
222
+ emb = torch.cat((freqs, freqs), dim=-1)
223
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
224
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
225
+
226
+
227
+ # Inverse dim formula to find dim based on number of rotations
228
+ def yarn_find_correction_dim(
229
+ num_rotations, dim, base=10000, max_position_embeddings=2048
230
+ ):
231
+ return (dim * math.log(max_position_embeddings / (num_rotations * 2 * math.pi))) / (
232
+ 2 * math.log(base)
233
+ )
234
+
235
+
236
+ # Find dim range bounds based on rotations
237
+ def yarn_find_correction_range(
238
+ low_rot, high_rot, dim, base=10000, max_position_embeddings=2048
239
+ ):
240
+ low = math.floor(
241
+ yarn_find_correction_dim(low_rot, dim, base, max_position_embeddings)
242
+ )
243
+ high = math.ceil(
244
+ yarn_find_correction_dim(high_rot, dim, base, max_position_embeddings)
245
+ )
246
+ return max(low, 0), min(high, dim - 1) # Clamp values just in case
247
+
248
+
249
+ def yarn_get_mscale(scale=1, mscale=1):
250
+ if scale <= 1:
251
+ return 1.0
252
+ return 0.1 * mscale * math.log(scale) + 1.0
253
+
254
+
255
+ def yarn_linear_ramp_mask(min, max, dim):
256
+ if min == max:
257
+ max += 0.001 # Prevent singularity
258
+
259
+ linear_func = (torch.arange(dim, dtype=torch.float32) - min) / (max - min)
260
+ ramp_func = torch.clamp(linear_func, 0, 1)
261
+ return ramp_func
262
+
263
+
264
+ class DeepseekV2YarnRotaryEmbedding(DeepseekV2RotaryEmbedding):
265
+
266
+ def __init__(
267
+ self,
268
+ dim,
269
+ max_position_embeddings=2048,
270
+ base=10000,
271
+ device=None,
272
+ scaling_factor=1.0,
273
+ original_max_position_embeddings=4096,
274
+ beta_fast=32,
275
+ beta_slow=1,
276
+ mscale=1,
277
+ mscale_all_dim=0,
278
+ ):
279
+ self.scaling_factor = scaling_factor
280
+ self.original_max_position_embeddings = original_max_position_embeddings
281
+ self.beta_fast = beta_fast
282
+ self.beta_slow = beta_slow
283
+ self.mscale = mscale
284
+ self.mscale_all_dim = mscale_all_dim
285
+ super().__init__(dim, max_position_embeddings, base, device)
286
+
287
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
288
+ self.max_seq_len_cached = seq_len
289
+ dim = self.dim
290
+
291
+ freq_extra = 1.0 / (
292
+ self.base
293
+ ** (torch.arange(0, dim, 2, dtype=torch.float32, device=device) / dim)
294
+ )
295
+ freq_inter = 1.0 / (
296
+ self.scaling_factor
297
+ * self.base
298
+ ** (torch.arange(0, dim, 2, dtype=torch.float32, device=device) / dim)
299
+ )
300
+
301
+ low, high = yarn_find_correction_range(
302
+ self.beta_fast,
303
+ self.beta_slow,
304
+ dim,
305
+ self.base,
306
+ self.original_max_position_embeddings,
307
+ )
308
+ inv_freq_mask = 1.0 - yarn_linear_ramp_mask(low, high, dim // 2).to(
309
+ device=device, dtype=torch.float32
310
+ )
311
+ inv_freq = freq_inter * (1 - inv_freq_mask) + freq_extra * inv_freq_mask
312
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
313
+
314
+ t = torch.arange(seq_len, device=device, dtype=torch.float32)
315
+
316
+ freqs = torch.outer(t, inv_freq)
317
+
318
+ _mscale = float(
319
+ yarn_get_mscale(self.scaling_factor, self.mscale)
320
+ / yarn_get_mscale(self.scaling_factor, self.mscale_all_dim)
321
+ )
322
+
323
+ emb = torch.cat((freqs, freqs), dim=-1)
324
+ self.register_buffer(
325
+ "cos_cached", (emb.cos() * _mscale).to(dtype), persistent=False
326
+ )
327
+ self.register_buffer(
328
+ "sin_cached", (emb.sin() * _mscale).to(dtype), persistent=False
329
+ )
330
+
331
+
332
+ # Copied from transformers.models.llama.modeling_llama.rotate_half
333
+ def rotate_half(x):
334
+ """Rotates half the hidden dims of the input."""
335
+ x1 = x[..., : x.shape[-1] // 2]
336
+ x2 = x[..., x.shape[-1] // 2 :]
337
+ return torch.cat((-x2, x1), dim=-1)
338
+
339
+
340
+ # Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb
341
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
342
+ """Applies Rotary Position Embedding to the query and key tensors.
343
+
344
+ Args:
345
+ q (`torch.Tensor`): The query tensor.
346
+ k (`torch.Tensor`): The key tensor.
347
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
348
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
349
+ position_ids (`torch.Tensor`):
350
+ The position indices of the tokens corresponding to the query and key tensors. For example, this can be
351
+ used to pass offsetted position ids when working with a KV-cache.
352
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
353
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
354
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
355
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
356
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
357
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
358
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
359
+ Returns:
360
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
361
+ """
362
+ cos = cos[position_ids].unsqueeze(unsqueeze_dim)
363
+ sin = sin[position_ids].unsqueeze(unsqueeze_dim)
364
+
365
+ b, h, s, d = q.shape
366
+ q = q.view(b, h, s, d // 2, 2).transpose(4, 3).reshape(b, h, s, d)
367
+
368
+ b, h, s, d = k.shape
369
+ k = k.view(b, h, s, d // 2, 2).transpose(4, 3).reshape(b, h, s, d)
370
+
371
+ q_embed = (q * cos) + (rotate_half(q) * sin)
372
+ k_embed = (k * cos) + (rotate_half(k) * sin)
373
+ return q_embed, k_embed
374
+
375
+
376
+ class DeepseekV2MLP(nn.Module):
377
+ def __init__(self, config, hidden_size=None, intermediate_size=None):
378
+ super().__init__()
379
+ self.config = config
380
+ self.hidden_size = config.hidden_size if hidden_size is None else hidden_size
381
+ self.intermediate_size = (
382
+ config.intermediate_size if intermediate_size is None else intermediate_size
383
+ )
384
+
385
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
386
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
387
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
388
+ self.act_fn = ACT2FN[config.hidden_act]
389
+
390
+ def forward(self, x):
391
+ down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
392
+ return down_proj
393
+
394
+
395
+ class MoEGate(nn.Module):
396
+ def __init__(self, config):
397
+ super().__init__()
398
+ self.config = config
399
+ self.top_k = config.num_experts_per_tok
400
+ self.n_routed_experts = config.n_routed_experts
401
+ self.routed_scaling_factor = config.routed_scaling_factor
402
+ self.scoring_func = config.scoring_func
403
+ self.alpha = config.aux_loss_alpha
404
+ self.seq_aux = config.seq_aux
405
+ self.topk_method = config.topk_method
406
+ self.n_group = config.n_group
407
+ self.topk_group = config.topk_group
408
+
409
+ # topk selection algorithm
410
+ self.norm_topk_prob = config.norm_topk_prob
411
+ self.gating_dim = config.hidden_size
412
+ self.weight = nn.Parameter(
413
+ torch.empty((self.n_routed_experts, self.gating_dim))
414
+ )
415
+ if self.topk_method == "noaux_tc":
416
+ self.e_score_correction_bias = nn.Parameter(
417
+ torch.empty((self.n_routed_experts))
418
+ )
419
+ self.reset_parameters()
420
+
421
+ def reset_parameters(self) -> None:
422
+ import torch.nn.init as init
423
+
424
+ init.kaiming_uniform_(self.weight, a=math.sqrt(5))
425
+
426
+ def forward(self, hidden_states):
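+ # (added annotation, not in the uploaded file) hidden_states: [bsz, seq_len, hidden_size];
+ # returns topk_idx and topk_weight of shape [bsz * seq_len, top_k], plus an optional scalar
+ # aux_loss (None outside training or when aux_loss_alpha == 0).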
427
+ bsz, seq_len, h = hidden_states.shape
428
+ ### compute gating score
429
+ hidden_states = hidden_states.view(-1, h)
430
+ logits = F.linear(
431
+ hidden_states.type(torch.float32), self.weight.type(torch.float32), None
432
+ )
433
+ if self.scoring_func == "softmax":
434
+ scores = logits.softmax(dim=-1, dtype=torch.float32)
435
+ elif self.scoring_func == "sigmoid":
436
+ scores = logits.sigmoid()
437
+ else:
438
+ raise NotImplementedError(
439
+ f"insupportable scoring function for MoE gating: {self.scoring_func}"
440
+ )
441
+
442
+ ### select top-k experts
443
+ if self.topk_method == "greedy":
444
+ topk_weight, topk_idx = torch.topk(
445
+ scores, k=self.top_k, dim=-1, sorted=False
446
+ )
447
+ elif self.topk_method == "group_limited_greedy":
448
+ group_scores = (
449
+ scores.view(bsz * seq_len, self.n_group, -1).max(dim=-1).values
450
+ ) # [n, n_group]
451
+ group_idx = torch.topk(
452
+ group_scores, k=self.topk_group, dim=-1, sorted=False
453
+ )[
454
+ 1
455
+ ] # [n, top_k_group]
456
+ group_mask = torch.zeros_like(group_scores) # [n, n_group]
457
+ group_mask.scatter_(1, group_idx, 1) # [n, n_group]
458
+ score_mask = (
459
+ group_mask.unsqueeze(-1)
460
+ .expand(
461
+ bsz * seq_len, self.n_group, self.n_routed_experts // self.n_group
462
+ )
463
+ .reshape(bsz * seq_len, -1)
464
+ ) # [n, e]
465
+ tmp_scores = scores.masked_fill(~score_mask.bool(), 0.0) # [n, e]
466
+ topk_weight, topk_idx = torch.topk(
467
+ tmp_scores, k=self.top_k, dim=-1, sorted=False
468
+ )
469
+ elif self.topk_method == "noaux_tc":
470
+ assert not self.training
471
+ scores_for_choice = scores.view(bsz * seq_len, -1) + self.e_score_correction_bias.unsqueeze(0)
472
+ group_scores = (
473
+ scores_for_choice.view(bsz * seq_len, self.n_group, -1).topk(2, dim=-1)[0].sum(dim = -1)
474
+ ) # [n, n_group]
475
+ group_idx = torch.topk(
476
+ group_scores, k=self.topk_group, dim=-1, sorted=False
477
+ )[
478
+ 1
479
+ ] # [n, top_k_group]
480
+ group_mask = torch.zeros_like(group_scores) # [n, n_group]
481
+ group_mask.scatter_(1, group_idx, 1) # [n, n_group]
482
+ score_mask = (
483
+ group_mask.unsqueeze(-1)
484
+ .expand(
485
+ bsz * seq_len, self.n_group, self.n_routed_experts // self.n_group
486
+ )
487
+ .reshape(bsz * seq_len, -1)
488
+ ) # [n, e]
489
+ tmp_scores = scores_for_choice.masked_fill(~score_mask.bool(), 0.0) # [n, e]
490
+ _, topk_idx = torch.topk(
491
+ tmp_scores, k=self.top_k, dim=-1, sorted=False
492
+ )
493
+ topk_weight = scores.gather(1, topk_idx)
494
+
495
+ ### norm gate to sum 1
496
+ if self.top_k > 1 and self.norm_topk_prob:
497
+ denominator = topk_weight.sum(dim=-1, keepdim=True) + 1e-20
498
+ topk_weight = topk_weight / denominator * self.routed_scaling_factor
499
+ else:
500
+ topk_weight = topk_weight * self.routed_scaling_factor
501
+ ### expert-level computation auxiliary loss
502
+ if self.training and self.alpha > 0.0:
503
+ scores_for_aux = scores
504
+ aux_topk = self.top_k
505
+ # always compute aux loss based on the naive greedy topk method
506
+ topk_idx_for_aux_loss = topk_idx.view(bsz, -1)
507
+ if self.seq_aux:
508
+ scores_for_seq_aux = scores_for_aux.view(bsz, seq_len, -1)
509
+ ce = torch.zeros(
510
+ bsz, self.n_routed_experts, device=hidden_states.device
511
+ )
512
+ ce.scatter_add_(
513
+ 1,
514
+ topk_idx_for_aux_loss,
515
+ torch.ones(bsz, seq_len * aux_topk, device=hidden_states.device),
516
+ ).div_(seq_len * aux_topk / self.n_routed_experts)
517
+ aux_loss = (ce * scores_for_seq_aux.mean(dim=1)).sum(
518
+ dim=1
519
+ ).mean() * self.alpha
520
+ else:
521
+ mask_ce = F.one_hot(
522
+ topk_idx_for_aux_loss.view(-1), num_classes=self.n_routed_experts
523
+ )
524
+ ce = mask_ce.float().mean(0)
525
+ Pi = scores_for_aux.mean(0)
526
+ fi = ce * self.n_routed_experts
527
+ aux_loss = (Pi * fi).sum() * self.alpha
528
+ else:
529
+ aux_loss = None
530
+ return topk_idx, topk_weight, aux_loss
531
+
532
+
533
+ class AddAuxiliaryLoss(torch.autograd.Function):
534
+ """
535
+ A helper autograd Function that adds the auxiliary (aux) loss,
536
+ injecting the gradient of the aux loss during backpropagation.
537
+ """
538
+
539
+ @staticmethod
540
+ def forward(ctx, x, loss):
541
+ assert loss.numel() == 1
542
+ ctx.dtype = loss.dtype
543
+ ctx.required_aux_loss = loss.requires_grad
544
+ return x
545
+
546
+ @staticmethod
547
+ def backward(ctx, grad_output):
548
+ grad_loss = None
549
+ if ctx.required_aux_loss:
550
+ grad_loss = torch.ones(1, dtype=ctx.dtype, device=grad_output.device)
551
+ return grad_output, grad_loss
552
+
553
+
554
+ class DeepseekV2MoE(nn.Module):
555
+ """
556
+ A mixed expert module containing shared experts.
557
+ """
558
+
559
+ def __init__(self, config):
560
+ super().__init__()
561
+ self.config = config
562
+ self.num_experts_per_tok = config.num_experts_per_tok
563
+
564
+ if hasattr(config, "ep_size") and config.ep_size > 1:
565
+ assert config.ep_size == dist.get_world_size()
566
+ self.ep_size = config.ep_size
567
+ self.experts_per_rank = config.n_routed_experts // config.ep_size
568
+ self.ep_rank = dist.get_rank()
569
+ self.experts = nn.ModuleList(
570
+ [
571
+ (
572
+ DeepseekV2MLP(
573
+ config, intermediate_size=config.moe_intermediate_size
574
+ )
575
+ if i >= self.ep_rank * self.experts_per_rank
576
+ and i < (self.ep_rank + 1) * self.experts_per_rank
577
+ else None
578
+ )
579
+ for i in range(config.n_routed_experts)
580
+ ]
581
+ )
582
+ else:
583
+ self.ep_size = 1
584
+ self.experts_per_rank = config.n_routed_experts
585
+ self.ep_rank = 0
586
+ self.experts = nn.ModuleList(
587
+ [
588
+ DeepseekV2MLP(
589
+ config, intermediate_size=config.moe_intermediate_size
590
+ )
591
+ for i in range(config.n_routed_experts)
592
+ ]
593
+ )
594
+ self.gate = MoEGate(config)
595
+ if config.n_shared_experts is not None:
596
+ intermediate_size = config.moe_intermediate_size * config.n_shared_experts
597
+ self.shared_experts = DeepseekV2MLP(
598
+ config=config, intermediate_size=intermediate_size
599
+ )
600
+
601
+ def forward(self, hidden_states):
602
+ identity = hidden_states
603
+ orig_shape = hidden_states.shape
604
+ topk_idx, topk_weight, aux_loss = self.gate(hidden_states)
605
+ hidden_states = hidden_states.view(-1, hidden_states.shape[-1])
606
+ flat_topk_idx = topk_idx.view(-1)
607
+ if self.training:
608
+ hidden_states = hidden_states.repeat_interleave(
609
+ self.num_experts_per_tok, dim=0
610
+ )
611
+ y = torch.empty_like(hidden_states)
612
+ for i, expert in enumerate(self.experts):
613
+ y[flat_topk_idx == i] = expert(hidden_states[flat_topk_idx == i])
614
+ y = (y.view(*topk_weight.shape, -1) * topk_weight.unsqueeze(-1)).sum(dim=1)
615
+ y = y.to(hidden_states.dtype).view(*orig_shape)
616
+ y = AddAuxiliaryLoss.apply(y, aux_loss)
617
+ else:
618
+ y = self.moe_infer(hidden_states, topk_idx, topk_weight).view(*orig_shape)
619
+ if self.config.n_shared_experts is not None:
620
+ y = y + self.shared_experts(identity)
621
+ return y
622
+
623
+ @torch.no_grad()
624
+ def moe_infer(self, x, topk_ids, topk_weight):
625
+ cnts = topk_ids.new_zeros((topk_ids.shape[0], len(self.experts)))
626
+ cnts.scatter_(1, topk_ids, 1)
627
+ tokens_per_expert = cnts.sum(dim=0)
628
+ idxs = topk_ids.view(-1).argsort()
629
+ sorted_tokens = x[idxs // topk_ids.shape[1]]
630
+ sorted_tokens_shape = sorted_tokens.shape
631
+ if self.ep_size > 1:
632
+ tokens_per_ep_rank = tokens_per_expert.view(self.ep_size, -1).sum(dim=1)
633
+ tokens_per_expert_group = tokens_per_expert.new_empty(
634
+ tokens_per_expert.shape[0]
635
+ )
636
+ dist.all_to_all_single(tokens_per_expert_group, tokens_per_expert)
637
+ output_splits = (
638
+ tokens_per_expert_group.view(self.ep_size, -1)
639
+ .sum(1)
640
+ .cpu()
641
+ .numpy()
642
+ .tolist()
643
+ )
644
+ gathered_tokens = sorted_tokens.new_empty(
645
+ tokens_per_expert_group.sum(dim=0).cpu().item(), sorted_tokens.shape[1]
646
+ )
647
+ input_split_sizes = tokens_per_ep_rank.cpu().numpy().tolist()
648
+ dist.all_to_all(
649
+ list(gathered_tokens.split(output_splits)),
650
+ list(sorted_tokens.split(input_split_sizes)),
651
+ )
652
+ tokens_per_expert_post_gather = tokens_per_expert_group.view(
653
+ self.ep_size, self.experts_per_rank
654
+ ).sum(dim=0)
655
+ gatherd_idxs = np.zeros(shape=(gathered_tokens.shape[0],), dtype=np.int32)
656
+ s = 0
657
+ for i, k in enumerate(tokens_per_expert_group.cpu().numpy()):
658
+ gatherd_idxs[s : s + k] = i % self.experts_per_rank
659
+ s += k
660
+ gatherd_idxs = gatherd_idxs.argsort()
661
+ sorted_tokens = gathered_tokens[gatherd_idxs]
662
+ tokens_per_expert = tokens_per_expert_post_gather
663
+ tokens_per_expert = tokens_per_expert.cpu().numpy()
664
+
665
+ outputs = []
666
+ start_idx = 0
667
+ for i, num_tokens in enumerate(tokens_per_expert):
668
+ end_idx = start_idx + num_tokens
669
+ if num_tokens == 0:
670
+ continue
671
+ expert = self.experts[i + self.ep_rank * self.experts_per_rank]
672
+ tokens_for_this_expert = sorted_tokens[start_idx:end_idx]
673
+ expert_out = expert(tokens_for_this_expert)
674
+ outputs.append(expert_out)
675
+ start_idx = end_idx
676
+
677
+ outs = torch.cat(outputs, dim=0) if len(outputs) else sorted_tokens.new_empty(0)
678
+ if self.ep_size > 1:
679
+ new_x = torch.empty_like(outs)
680
+ new_x[gatherd_idxs] = outs
681
+ gathered_tokens = new_x.new_empty(*sorted_tokens_shape)
682
+ dist.all_to_all(
683
+ list(gathered_tokens.split(input_split_sizes)),
684
+ list(new_x.split(output_splits)),
685
+ )
686
+ outs = gathered_tokens
687
+
688
+ new_x = torch.empty_like(outs)
689
+ new_x[idxs] = outs
690
+ final_out = (
691
+ new_x.view(*topk_ids.shape, -1)
692
+ .type(topk_weight.dtype)
693
+ .mul_(topk_weight.unsqueeze(dim=-1))
694
+ .sum(dim=1)
695
+ .type(new_x.dtype)
696
+ )
697
+ return final_out
698
+
699
+
700
+ # Copied from transformers.models.llama.modeling_llama.repeat_kv
701
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
702
+ """
703
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
704
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
705
+ """
706
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
707
+ if n_rep == 1:
708
+ return hidden_states
709
+ hidden_states = hidden_states[:, :, None, :, :].expand(
710
+ batch, num_key_value_heads, n_rep, slen, head_dim
711
+ )
712
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
713
+
714
+
715
+ # Copied from transformers.models.llama.modeling_llama.LlamaAttention with Llama->DeepseekV2
716
+ class DeepseekV2Attention(nn.Module):
717
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
718
+
719
+ def __init__(self, config: DeepseekV2Config, layer_idx: Optional[int] = None):
720
+ super().__init__()
721
+ self.config = config
722
+ self.layer_idx = layer_idx
723
+ if layer_idx is None:
724
+ logger.warning_once(
725
+ f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will "
726
+ "to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
727
+ "when creating this class."
728
+ )
729
+
730
+ self.attention_dropout = config.attention_dropout
731
+ self.hidden_size = config.hidden_size
732
+ self.num_heads = config.num_attention_heads
733
+
734
+ self.max_position_embeddings = config.max_position_embeddings
735
+ self.rope_theta = config.rope_theta
736
+ self.q_lora_rank = config.q_lora_rank
737
+ self.qk_rope_head_dim = config.qk_rope_head_dim
738
+ self.kv_lora_rank = config.kv_lora_rank
739
+ self.v_head_dim = config.v_head_dim
740
+ self.qk_nope_head_dim = config.qk_nope_head_dim
741
+ self.q_head_dim = config.qk_nope_head_dim + config.qk_rope_head_dim
742
+
743
+ self.is_causal = True
744
+
745
+ if self.q_lora_rank is None:
746
+ self.q_proj = nn.Linear(
747
+ self.hidden_size, self.num_heads * self.q_head_dim, bias=False
748
+ )
749
+ else:
750
+ self.q_a_proj = nn.Linear(
751
+ self.hidden_size, config.q_lora_rank, bias=config.attention_bias
752
+ )
753
+ self.q_a_layernorm = DeepseekV2RMSNorm(config.q_lora_rank)
754
+ self.q_b_proj = nn.Linear(
755
+ config.q_lora_rank, self.num_heads * self.q_head_dim, bias=False
756
+ )
757
+
758
+ self.kv_a_proj_with_mqa = nn.Linear(
759
+ self.hidden_size,
760
+ config.kv_lora_rank + config.qk_rope_head_dim,
761
+ bias=config.attention_bias,
762
+ )
763
+ self.kv_a_layernorm = DeepseekV2RMSNorm(config.kv_lora_rank)
764
+ self.kv_b_proj = nn.Linear(
765
+ config.kv_lora_rank,
766
+ self.num_heads
767
+ * (self.q_head_dim - self.qk_rope_head_dim + self.v_head_dim),
768
+ bias=False,
769
+ )
770
+
771
+ self.o_proj = nn.Linear(
772
+ self.num_heads * self.v_head_dim,
773
+ self.hidden_size,
774
+ bias=config.attention_bias,
775
+ )
776
+ self._init_rope()
777
+
778
+ self.softmax_scale = self.q_head_dim ** (-0.5)
779
+ if self.config.rope_scaling is not None:
780
+ mscale_all_dim = self.config.rope_scaling.get("mscale_all_dim", 0)
781
+ scaling_factor = self.config.rope_scaling["factor"]
782
+ if mscale_all_dim:
783
+ mscale = yarn_get_mscale(scaling_factor, mscale_all_dim)
784
+ self.softmax_scale = self.softmax_scale * mscale * mscale
785
+
786
+ def _init_rope(self):
787
+ if self.config.rope_scaling is None:
788
+ self.rotary_emb = DeepseekV2RotaryEmbedding(
789
+ self.qk_rope_head_dim,
790
+ max_position_embeddings=self.max_position_embeddings,
791
+ base=self.rope_theta,
792
+ )
793
+ else:
794
+ scaling_type = self.config.rope_scaling["type"]
795
+ scaling_factor = self.config.rope_scaling["factor"]
796
+ if scaling_type == "linear":
797
+ self.rotary_emb = DeepseekV2LinearScalingRotaryEmbedding(
798
+ self.qk_rope_head_dim,
799
+ max_position_embeddings=self.max_position_embeddings,
800
+ scaling_factor=scaling_factor,
801
+ base=self.rope_theta,
802
+ )
803
+ elif scaling_type == "dynamic":
804
+ self.rotary_emb = DeepseekV2DynamicNTKScalingRotaryEmbedding(
805
+ self.qk_rope_head_dim,
806
+ max_position_embeddings=self.max_position_embeddings,
807
+ scaling_factor=scaling_factor,
808
+ base=self.rope_theta,
809
+ )
810
+ elif scaling_type == "yarn":
811
+ kwargs = {
812
+ key: self.config.rope_scaling[key]
813
+ for key in [
814
+ "original_max_position_embeddings",
815
+ "beta_fast",
816
+ "beta_slow",
817
+ "mscale",
818
+ "mscale_all_dim",
819
+ ]
820
+ if key in self.config.rope_scaling
821
+ }
822
+ self.rotary_emb = DeepseekV2YarnRotaryEmbedding(
823
+ self.qk_rope_head_dim,
824
+ max_position_embeddings=self.max_position_embeddings,
825
+ scaling_factor=scaling_factor,
826
+ base=self.rope_theta,
827
+ **kwargs,
828
+ )
829
+ else:
830
+ raise ValueError(f"Unknown RoPE scaling type {scaling_type}")
831
+
832
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
833
+ return (
834
+ tensor.view(bsz, seq_len, self.num_heads, self.v_head_dim)
835
+ .transpose(1, 2)
836
+ .contiguous()
837
+ )
838
+
839
+ def forward(
840
+ self,
841
+ hidden_states: torch.Tensor,
842
+ attention_mask: Optional[torch.Tensor] = None,
843
+ position_ids: Optional[torch.LongTensor] = None,
844
+ past_key_value: Optional[Cache] = None,
845
+ output_attentions: bool = False,
846
+ use_cache: bool = False,
847
+ **kwargs,
848
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
849
+ if "padding_mask" in kwargs:
850
+ warnings.warn(
851
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
852
+ )
853
+ bsz, q_len, _ = hidden_states.size()
854
+
855
+ if self.q_lora_rank is None:
856
+ q = self.q_proj(hidden_states)
857
+ else:
858
+ q = self.q_b_proj(self.q_a_layernorm(self.q_a_proj(hidden_states)))
859
+ q = q.view(bsz, q_len, self.num_heads, self.q_head_dim).transpose(1, 2)
860
+ q_nope, q_pe = torch.split(
861
+ q, [self.qk_nope_head_dim, self.qk_rope_head_dim], dim=-1
862
+ )
863
+
864
+ compressed_kv = self.kv_a_proj_with_mqa(hidden_states)
865
+ compressed_kv, k_pe = torch.split(
866
+ compressed_kv, [self.kv_lora_rank, self.qk_rope_head_dim], dim=-1
867
+ )
868
+ compressed_kv = self.kv_a_layernorm(compressed_kv)
869
+ k_pe = k_pe.view(bsz, q_len, 1, self.qk_rope_head_dim).transpose(1, 2)
870
+
871
+ kv_seq_len = k_pe.shape[-2]
872
+ if past_key_value is not None:
873
+ if self.layer_idx is None:
874
+ raise ValueError(
875
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
876
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
877
+ "with a layer index."
878
+ )
879
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
880
+
881
+ cos, sin = self.rotary_emb(q_pe, seq_len=kv_seq_len)
882
+ q_pe, k_pe = apply_rotary_pos_emb(q_pe, k_pe, cos, sin, position_ids)
883
+
884
+ if past_key_value is not None:
885
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
886
+ compressed_kv = compressed_kv.unsqueeze(1)
887
+ k_pe, compressed_kv = past_key_value.update(k_pe, compressed_kv, self.layer_idx, cache_kwargs)
888
+ compressed_kv = compressed_kv.squeeze(1)
889
+
890
+ kv_b_proj = self.kv_b_proj.weight.view(self.num_heads, -1, self.kv_lora_rank)
891
+ q_absorb = kv_b_proj[:, :self.qk_nope_head_dim, :]
892
+ out_absorb = kv_b_proj[:, self.qk_nope_head_dim:, :]
893
+
894
+ q_nope = torch.matmul(q_nope, q_absorb)
895
+ attn_weights = (torch.matmul(q_pe, k_pe.mT) +
896
+ torch.matmul(q_nope, compressed_kv.unsqueeze(-3).mT)) * self.softmax_scale
897
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
898
+ raise ValueError(
899
+ f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
900
+ f" {attn_weights.size()}"
901
+ )
902
+ assert attention_mask is not None
903
+ if attention_mask is not None:
904
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
905
+ raise ValueError(
906
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
907
+ )
908
+ attn_weights = attn_weights + attention_mask
909
+
910
+ # upcast attention to fp32
911
+ attn_weights = nn.functional.softmax(
912
+ attn_weights, dim=-1, dtype=torch.float32
913
+ ).to(q_pe.dtype)
914
+ attn_weights = nn.functional.dropout(
915
+ attn_weights, p=self.attention_dropout, training=self.training
916
+ )
917
+ attn_output = torch.einsum('bhql,blc->bhqc', attn_weights, compressed_kv)
918
+
919
+ attn_output = torch.matmul(attn_output, out_absorb.mT)
920
+
921
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.v_head_dim):
922
+ raise ValueError(
923
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.v_head_dim)}, but is"
924
+ f" {attn_output.size()}"
925
+ )
926
+
927
+ attn_output = attn_output.transpose(1, 2).contiguous()
928
+
929
+ attn_output = attn_output.reshape(bsz, q_len, self.num_heads * self.v_head_dim)
930
+
931
+ attn_output = self.o_proj(attn_output)
932
+
933
+ if not output_attentions:
934
+ attn_weights = None
935
+
936
+ return attn_output, attn_weights, past_key_value
937
+
938
+
939
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2 with Llama->DeepseekV2
940
+ class DeepseekV2FlashAttention2(DeepseekV2Attention):
941
+ """
942
+ DeepseekV2 flash attention module. This module inherits from `DeepseekV2Attention`, as the weights of the module stay
943
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
944
+ flash attention and deal with padding tokens in case the input contains any of them.
945
+ """
946
+
947
+ def __init__(self, *args, **kwargs):
948
+ super().__init__(*args, **kwargs)
949
+
950
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
951
+ # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
952
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
953
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
954
+
955
+ def forward(
956
+ self,
957
+ hidden_states: torch.Tensor,
958
+ attention_mask: Optional[torch.LongTensor] = None,
959
+ position_ids: Optional[torch.LongTensor] = None,
960
+ past_key_value: Optional[Cache] = None,
961
+ output_attentions: bool = False,
962
+ use_cache: bool = False,
963
+ **kwargs,
964
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
965
+ # DeepseekV2FlashAttention2 attention does not support output_attentions
966
+ if "padding_mask" in kwargs:
967
+ warnings.warn(
968
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
969
+ )
970
+
971
+ # overwrite attention_mask with padding_mask
972
+ attention_mask = kwargs.pop("padding_mask")
973
+
974
+ output_attentions = False
975
+
976
+ bsz, q_len, _ = hidden_states.size()
977
+
978
+ if self.q_lora_rank is None:
979
+ q = self.q_proj(hidden_states)
980
+ else:
981
+ q = self.q_b_proj(self.q_a_layernorm(self.q_a_proj(hidden_states)))
982
+ q = q.view(bsz, q_len, self.num_heads, self.q_head_dim).transpose(1, 2)
983
+ q_nope, q_pe = torch.split(
984
+ q, [self.qk_nope_head_dim, self.qk_rope_head_dim], dim=-1
985
+ )
986
+
987
+ # Flash attention requires the input to have the shape
988
+ # batch_size x seq_length x head_dim x hidden_dim
989
+ # therefore we just need to keep the original shape
990
+ compressed_kv = self.kv_a_proj_with_mqa(hidden_states)
991
+ compressed_kv, k_pe = torch.split(
992
+ compressed_kv, [self.kv_lora_rank, self.qk_rope_head_dim], dim=-1
993
+ )
994
+ k_pe = k_pe.view(bsz, q_len, 1, self.qk_rope_head_dim).transpose(1, 2)
995
+ kv = (
996
+ self.kv_b_proj(self.kv_a_layernorm(compressed_kv))
997
+ .view(bsz, q_len, self.num_heads, self.qk_nope_head_dim + self.v_head_dim)
998
+ .transpose(1, 2)
999
+ )
1000
+
1001
+ k_nope, value_states = torch.split(
1002
+ kv, [self.qk_nope_head_dim, self.v_head_dim], dim=-1
1003
+ )
1004
+ kv_seq_len = value_states.shape[-2]
1005
+
1006
+ kv_seq_len = value_states.shape[-2]
1007
+ if past_key_value is not None:
1008
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
1009
+
1010
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
1011
+ q_pe, k_pe = apply_rotary_pos_emb(q_pe, k_pe, cos, sin, position_ids)
1012
+
1013
+ query_states = k_pe.new_empty(bsz, self.num_heads, q_len, self.q_head_dim)
1014
+ query_states[:, :, :, : self.qk_nope_head_dim] = q_nope
1015
+ query_states[:, :, :, self.qk_nope_head_dim :] = q_pe
1016
+
1017
+ key_states = k_pe.new_empty(bsz, self.num_heads, q_len, self.q_head_dim)
1018
+ key_states[:, :, :, : self.qk_nope_head_dim] = k_nope
1019
+ key_states[:, :, :, self.qk_nope_head_dim :] = k_pe
1020
+
1021
+ if self.q_head_dim != self.v_head_dim:
1022
+ value_states = F.pad(value_states, [0, self.q_head_dim - self.v_head_dim])
1023
+
1024
+ # TODO: support compressed_kv for kv_cache (instead of key_states, value_states) in flash_attention version
1025
+ if past_key_value is not None:
1026
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
1027
+ key_states, value_states = past_key_value.update(
1028
+ key_states, value_states, self.layer_idx, cache_kwargs
1029
+ )
1030
+
1031
+ # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
1032
+ # to be able to avoid many of these transpose/reshape/view.
1033
+ query_states = query_states.transpose(1, 2)
1034
+ key_states = key_states.transpose(1, 2)
1035
+ value_states = value_states.transpose(1, 2)
1036
+
1037
+ dropout_rate = self.attention_dropout if self.training else 0.0
1038
+
1039
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
1040
+ # therefore the input hidden states gets silently casted in float32. Hence, we need
1041
+ # cast them back in the correct dtype just to be sure everything works as expected.
1042
+ # This might slowdown training & inference so it is recommended to not cast the LayerNorms
1043
+ # in fp32. (DeepseekV2RMSNorm handles it correctly)
1044
+
1045
+ input_dtype = query_states.dtype
1046
+ if input_dtype == torch.float32:
1047
+ # Handle the case where the model is quantized
1048
+ if hasattr(self.config, "_pre_quantization_dtype"):
1049
+ target_dtype = self.config._pre_quantization_dtype
1050
+ elif torch.is_autocast_enabled():
1051
+ target_dtype = torch.get_autocast_gpu_dtype()
1052
+ else:
1053
+ target_dtype = (
1054
+ self.q_proj.weight.dtype
1055
+ if self.q_lora_rank is None
1056
+ else self.q_a_proj.weight.dtype
1057
+ )
1058
+
1059
+ logger.warning_once(
1060
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
1061
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
1062
+ f" {target_dtype}."
1063
+ )
1064
+
1065
+ query_states = query_states.to(target_dtype)
1066
+ key_states = key_states.to(target_dtype)
1067
+ value_states = value_states.to(target_dtype)
1068
+
1069
+ attn_output = self._flash_attention_forward(
1070
+ query_states,
1071
+ key_states,
1072
+ value_states,
1073
+ attention_mask,
1074
+ q_len,
1075
+ dropout=dropout_rate,
1076
+ softmax_scale=self.softmax_scale,
1077
+ )
1078
+ if self.q_head_dim != self.v_head_dim:
1079
+ attn_output = attn_output[:, :, :, : self.v_head_dim]
1080
+
1081
+ attn_output = attn_output.reshape(
1082
+ bsz, q_len, self.num_heads * self.v_head_dim
1083
+ ).contiguous()
1084
+ attn_output = self.o_proj(attn_output)
1085
+
1086
+ if not output_attentions:
1087
+ attn_weights = None
1088
+
1089
+ return attn_output, attn_weights, past_key_value
1090
+
1091
+ def _flash_attention_forward(
1092
+ self,
1093
+ query_states,
1094
+ key_states,
1095
+ value_states,
1096
+ attention_mask,
1097
+ query_length,
1098
+ dropout=0.0,
1099
+ softmax_scale=None,
1100
+ ):
1101
+ """
1102
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
1103
+ first unpad the input, then computes the attention scores and pad the final attention scores.
1104
+
1105
+ Args:
1106
+ query_states (`torch.Tensor`):
1107
+ Input query states to be passed to Flash Attention API
1108
+ key_states (`torch.Tensor`):
1109
+ Input key states to be passed to Flash Attention API
1110
+ value_states (`torch.Tensor`):
1111
+ Input value states to be passed to Flash Attention API
1112
+ attention_mask (`torch.Tensor`):
1113
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
1114
+ position of padding tokens and 1 for the position of non-padding tokens.
1115
+ dropout (`float`, *optional*):
1116
+ Attention dropout
1117
+ softmax_scale (`float`, *optional*):
1118
+ The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim).
1119
+ """
1120
+ if not self._flash_attn_uses_top_left_mask:
1121
+ causal = self.is_causal
1122
+ else:
1123
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in DeepseekV2FlashAttention2 __init__.
1124
+ causal = self.is_causal and query_length != 1
1125
+
1126
+ # Contains at least one padding token in the sequence
1127
+ if attention_mask is not None:
1128
+ batch_size = query_states.shape[0]
1129
+ (
1130
+ query_states,
1131
+ key_states,
1132
+ value_states,
1133
+ indices_q,
1134
+ cu_seq_lens,
1135
+ max_seq_lens,
1136
+ ) = self._upad_input(
1137
+ query_states, key_states, value_states, attention_mask, query_length
1138
+ )
1139
+
1140
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
1141
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
1142
+
1143
+ attn_output_unpad = flash_attn_varlen_func(
1144
+ query_states,
1145
+ key_states,
1146
+ value_states,
1147
+ cu_seqlens_q=cu_seqlens_q,
1148
+ cu_seqlens_k=cu_seqlens_k,
1149
+ max_seqlen_q=max_seqlen_in_batch_q,
1150
+ max_seqlen_k=max_seqlen_in_batch_k,
1151
+ dropout_p=dropout,
1152
+ softmax_scale=softmax_scale,
1153
+ causal=causal,
1154
+ )
1155
+
1156
+ attn_output = pad_input(
1157
+ attn_output_unpad, indices_q, batch_size, query_length
1158
+ )
1159
+ else:
1160
+ attn_output = flash_attn_func(
1161
+ query_states,
1162
+ key_states,
1163
+ value_states,
1164
+ dropout,
1165
+ softmax_scale=softmax_scale,
1166
+ causal=causal,
1167
+ )
1168
+
1169
+ return attn_output
1170
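+ # Illustrative sketch (not part of the model code; values are assumptions): how a
+ # (batch, seq_len) padding mask is turned into the cumulative sequence lengths that
+ # flash_attn_varlen_func consumes after unpadding, mirroring what _upad_input relies on.
+ # >>> mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]], dtype=torch.int32)
+ # >>> seqlens = mask.sum(dim=-1, dtype=torch.int32)                   # tensor([3, 2])
+ # >>> cu_seqlens = F.pad(torch.cumsum(seqlens, dim=0, dtype=torch.int32), (1, 0))
+ # >>> cu_seqlens                                                      # tensor([0, 3, 5])
+ # >>> max_seqlen = int(seqlens.max())                                 # 3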
+
1171
+ def _upad_input(
1172
+ self, query_layer, key_layer, value_layer, attention_mask, query_length
1173
+ ):
1174
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
1175
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
1176
+
1177
+ key_layer = index_first_axis(
1178
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim),
1179
+ indices_k,
1180
+ )
1181
+ value_layer = index_first_axis(
1182
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim),
1183
+ indices_k,
1184
+ )
1185
+ if query_length == kv_seq_len:
1186
+ query_layer = index_first_axis(
1187
+ query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim),
1188
+ indices_k,
1189
+ )
1190
+ cu_seqlens_q = cu_seqlens_k
1191
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
1192
+ indices_q = indices_k
1193
+ elif query_length == 1:
1194
+ max_seqlen_in_batch_q = 1
1195
+ cu_seqlens_q = torch.arange(
1196
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
1197
+ ) # There is a memcpy here, that is very bad.
1198
+ indices_q = cu_seqlens_q[:-1]
1199
+ query_layer = query_layer.squeeze(1)
1200
+ else:
1201
+ # The -q_len: slice assumes left padding.
1202
+ attention_mask = attention_mask[:, -query_length:]
1203
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(
1204
+ query_layer, attention_mask
1205
+ )
1206
+
1207
+ return (
1208
+ query_layer,
1209
+ key_layer,
1210
+ value_layer,
1211
+ indices_q,
1212
+ (cu_seqlens_q, cu_seqlens_k),
1213
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
1214
+ )
1215
+
1216
+
1217
+ ATTENTION_CLASSES = {
1218
+ "eager": DeepseekV2Attention,
1219
+ "flash_attention_2": DeepseekV2FlashAttention2,
1220
+
1221
+ "mla_eager": DeepseekV2Attention,
1222
+ "mla_flash_attention_2": DeepseekV2FlashAttention2,
1223
+
1224
+ "mha_eager": LlamaAttention,
1225
+ "mha_flash_attention_2": LlamaFlashAttention2
1226
+ }
1227
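+ # Illustrative sketch (names are assumptions): how DeepseekV2DecoderLayer below picks its
+ # attention class. `use_mla` selects DeepSeek's multi-head latent attention (compressed KV)
+ # versus plain multi-head attention, combined with the configured attention backend.
+ # >>> prefix = "mla_" if config.use_mla else "mha_"
+ # >>> attn_cls = ATTENTION_CLASSES[prefix + config._attn_implementation]
+ # >>> attn_cls.__name__   # e.g. "DeepseekV2FlashAttention2" for "mla_flash_attention_2"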
+
1228
+
1229
+ class DeepseekV2DecoderLayer(nn.Module):
1230
+ def __init__(self, config: DeepseekV2Config, layer_idx: int):
1231
+ super().__init__()
1232
+ self.hidden_size = config.hidden_size
1233
+
1234
+ if config.use_mla:
1235
+ attn_implementation = "mla_" + config._attn_implementation
1236
+ else:
1237
+ attn_implementation = "mha_" + config._attn_implementation
1238
+
1239
+ self.self_attn = ATTENTION_CLASSES[attn_implementation](
1240
+ config=config, layer_idx=layer_idx
1241
+ )
1242
+
1243
+ self.mlp = (
1244
+ DeepseekV2MoE(config)
1245
+ if (
1246
+ config.n_routed_experts is not None
1247
+ and layer_idx >= config.first_k_dense_replace
1248
+ and layer_idx % config.moe_layer_freq == 0
1249
+ )
1250
+ else DeepseekV2MLP(config)
1251
+ )
1252
+ self.input_layernorm = DeepseekV2RMSNorm(
1253
+ config.hidden_size, eps=config.rms_norm_eps
1254
+ )
1255
+ self.post_attention_layernorm = DeepseekV2RMSNorm(
1256
+ config.hidden_size, eps=config.rms_norm_eps
1257
+ )
1258
+
1259
+ def forward(
1260
+ self,
1261
+ hidden_states: torch.Tensor,
1262
+ attention_mask: Optional[torch.Tensor] = None,
1263
+ position_ids: Optional[torch.LongTensor] = None,
1264
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
1265
+ output_attentions: Optional[bool] = False,
1266
+ use_cache: Optional[bool] = False,
1267
+ **kwargs,
1268
+ ) -> Tuple[
1269
+ torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]
1270
+ ]:
1271
+ """
1272
+ Args:
1273
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
1274
+ attention_mask (`torch.FloatTensor`, *optional*):
1275
+ attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
1276
+ query_sequence_length, key_sequence_length)` if default attention is used.
1277
+ output_attentions (`bool`, *optional*):
1278
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
1279
+ returned tensors for more detail.
1280
+ use_cache (`bool`, *optional*):
1281
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
1282
+ (see `past_key_values`).
1283
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
1284
+ """
1285
+ if "padding_mask" in kwargs:
1286
+ warnings.warn(
1287
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
1288
+ )
1289
+ residual = hidden_states
1290
+
1291
+ hidden_states = self.input_layernorm(hidden_states)
1292
+
1293
+ # Self Attention
1294
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
1295
+ hidden_states=hidden_states,
1296
+ attention_mask=attention_mask,
1297
+ position_ids=position_ids,
1298
+ past_key_value=past_key_value,
1299
+ output_attentions=output_attentions,
1300
+ use_cache=use_cache,
1301
+ **kwargs,
1302
+ )
1303
+ hidden_states = residual + hidden_states
1304
+
1305
+ # Fully Connected
1306
+ residual = hidden_states
1307
+ hidden_states = self.post_attention_layernorm(hidden_states)
1308
+ hidden_states = self.mlp(hidden_states)
1309
+ hidden_states = residual + hidden_states
1310
+
1311
+ outputs = (hidden_states,)
1312
+
1313
+ if output_attentions:
1314
+ outputs += (self_attn_weights,)
1315
+
1316
+ if use_cache:
1317
+ outputs += (present_key_value,)
1318
+
1319
+ return outputs
1320
+
1321
+
1322
+ DeepseekV2_START_DOCSTRING = r"""
1323
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
1324
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
1325
+ etc.)
1326
+
1327
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
1328
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
1329
+ and behavior.
1330
+
1331
+ Parameters:
1332
+ config ([`DeepseekV2Config`]):
1333
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
1334
+ load the weights associated with the model, only the configuration. Check out the
1335
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
1336
+ """
1337
+
1338
+
1339
+ @add_start_docstrings(
1340
+ "The bare DeepseekV2 Model outputting raw hidden-states without any specific head on top.",
1341
+ DeepseekV2_START_DOCSTRING,
1342
+ )
1343
+ class DeepseekV2PreTrainedModel(PreTrainedModel):
1344
+ config_class = DeepseekV2Config
1345
+ base_model_prefix = "model"
1346
+ supports_gradient_checkpointing = True
1347
+ _no_split_modules = ["DeepseekV2DecoderLayer"]
1348
+ _skip_keys_device_placement = "past_key_values"
1349
+ _supports_flash_attn_2 = True
1350
+ _supports_cache_class = True
1351
+
1352
+ def _init_weights(self, module):
1353
+ std = self.config.initializer_range
1354
+ if isinstance(module, nn.Linear):
1355
+ module.weight.data.normal_(mean=0.0, std=std)
1356
+ if module.bias is not None:
1357
+ module.bias.data.zero_()
1358
+ elif isinstance(module, nn.Embedding):
1359
+ module.weight.data.normal_(mean=0.0, std=std)
1360
+ if module.padding_idx is not None:
1361
+ module.weight.data[module.padding_idx].zero_()
1362
+
1363
+
1364
+ DeepseekV2_INPUTS_DOCSTRING = r"""
1365
+ Args:
1366
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
1367
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
1368
+ it.
1369
+
1370
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1371
+ [`PreTrainedTokenizer.__call__`] for details.
1372
+
1373
+ [What are input IDs?](../glossary#input-ids)
1374
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1375
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1376
+
1377
+ - 1 for tokens that are **not masked**,
1378
+ - 0 for tokens that are **masked**.
1379
+
1380
+ [What are attention masks?](../glossary#attention-mask)
1381
+
1382
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1383
+ [`PreTrainedTokenizer.__call__`] for details.
1384
+
1385
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
1386
+ `past_key_values`).
1387
+
1388
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
1389
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
1390
+ information on the default strategy.
1391
+
1392
+ - 1 indicates the head is **not masked**,
1393
+ - 0 indicates the head is **masked**.
1394
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1395
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
1396
+ config.n_positions - 1]`.
1397
+
1398
+ [What are position IDs?](../glossary#position-ids)
1399
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
1400
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
1401
+ blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`
1402
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
1403
+
1404
+ Two formats are allowed:
1405
+ - a [`~cache_utils.Cache`] instance;
1406
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
1407
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
1408
+ cache format.
1409
+
1410
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
1411
+ legacy cache format will be returned.
1412
+
1413
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
1414
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
1415
+ of shape `(batch_size, sequence_length)`.
1416
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1417
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1418
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
1419
+ model's internal embedding lookup matrix.
1420
+ use_cache (`bool`, *optional*):
1421
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1422
+ `past_key_values`).
1423
+ output_attentions (`bool`, *optional*):
1424
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1425
+ tensors for more detail.
1426
+ output_hidden_states (`bool`, *optional*):
1427
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1428
+ more detail.
1429
+ return_dict (`bool`, *optional*):
1430
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1431
+ """
1432
+
1433
+
1434
+ @add_start_docstrings(
1435
+ "The bare DeepseekV2 Model outputting raw hidden-states without any specific head on top.",
1436
+ DeepseekV2_START_DOCSTRING,
1437
+ )
1438
+ class DeepseekV2Model(DeepseekV2PreTrainedModel):
1439
+ """
1440
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`DeepseekV2DecoderLayer`]
1441
+
1442
+ Args:
1443
+ config: DeepseekV2Config
1444
+ """
1445
+
1446
+ def __init__(self, config: DeepseekV2Config):
1447
+ super().__init__(config)
1448
+ self.padding_idx = config.pad_token_id
1449
+ self.vocab_size = config.vocab_size
1450
+
1451
+ self.embed_tokens = nn.Embedding(
1452
+ config.vocab_size, config.hidden_size, self.padding_idx
1453
+ )
1454
+ self.layers = nn.ModuleList(
1455
+ [
1456
+ DeepseekV2DecoderLayer(config, layer_idx)
1457
+ for layer_idx in range(config.num_hidden_layers)
1458
+ ]
1459
+ )
1460
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
1461
+ self.norm = DeepseekV2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
1462
+
1463
+ self.gradient_checkpointing = False
1464
+ # Initialize weights and apply final processing
1465
+ self.post_init()
1466
+
1467
+ def get_input_embeddings(self):
1468
+ return self.embed_tokens
1469
+
1470
+ def set_input_embeddings(self, value):
1471
+ self.embed_tokens = value
1472
+
1473
+ @add_start_docstrings_to_model_forward(DeepseekV2_INPUTS_DOCSTRING)
1474
+ def forward(
1475
+ self,
1476
+ input_ids: torch.LongTensor = None,
1477
+ attention_mask: Optional[torch.Tensor] = None,
1478
+ position_ids: Optional[torch.LongTensor] = None,
1479
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1480
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1481
+ use_cache: Optional[bool] = None,
1482
+ output_attentions: Optional[bool] = None,
1483
+ output_hidden_states: Optional[bool] = None,
1484
+ return_dict: Optional[bool] = None,
1485
+ cache_position: Optional[torch.LongTensor] = None
1486
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
1487
+ output_attentions = (
1488
+ output_attentions
1489
+ if output_attentions is not None
1490
+ else self.config.output_attentions
1491
+ )
1492
+ output_hidden_states = (
1493
+ output_hidden_states
1494
+ if output_hidden_states is not None
1495
+ else self.config.output_hidden_states
1496
+ )
1497
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1498
+
1499
+ return_dict = (
1500
+ return_dict if return_dict is not None else self.config.use_return_dict
1501
+ )
1502
+
1503
+ # retrieve input_ids and inputs_embeds
1504
+ if input_ids is not None and inputs_embeds is not None:
1505
+ raise ValueError(
1506
+ "You cannot specify both input_ids and inputs_embeds at the same time"
1507
+ )
1508
+ elif input_ids is not None:
1509
+ batch_size, seq_length = input_ids.shape[:2]
1510
+ elif inputs_embeds is not None:
1511
+ batch_size, seq_length = inputs_embeds.shape[:2]
1512
+ else:
1513
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
1514
+
1515
+ if self.gradient_checkpointing and self.training:
1516
+ if use_cache:
1517
+ logger.warning_once(
1518
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`transformers."
1519
+ )
1520
+ use_cache = False
1521
+
1522
+ past_key_values_length = 0
1523
+ if use_cache:
1524
+ use_legacy_cache = not isinstance(past_key_values, Cache)
1525
+ if use_legacy_cache:
1526
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
1527
+ past_key_values_length = past_key_values.get_usable_length(seq_length)
1528
+
1529
+ if position_ids is None:
1530
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
1531
+ position_ids = torch.arange(
1532
+ past_key_values_length,
1533
+ seq_length + past_key_values_length,
1534
+ dtype=torch.long,
1535
+ device=device,
1536
+ )
1537
+ position_ids = position_ids.unsqueeze(0)
1538
+
1539
+ if inputs_embeds is None:
1540
+ inputs_embeds = self.embed_tokens(input_ids)
1541
+
1542
+ if self._use_flash_attention_2:
1543
+ # 2d mask is passed through the layers
1544
+ attention_mask = (
1545
+ attention_mask
1546
+ if (attention_mask is not None and 0 in attention_mask)
1547
+ else None
1548
+ )
1549
+ else:
1550
+ # 4d mask is passed through the layers
1551
+ attention_mask = _prepare_4d_causal_attention_mask(
1552
+ attention_mask,
1553
+ (batch_size, seq_length),
1554
+ inputs_embeds,
1555
+ past_key_values_length,
1556
+ )
1557
+
1558
+ # embed positions
1559
+ hidden_states = inputs_embeds
1560
+
1561
+ # decoder layers
1562
+ all_hidden_states = () if output_hidden_states else None
1563
+ all_self_attns = () if output_attentions else None
1564
+ next_decoder_cache = None
1565
+
1566
+ for decoder_layer in self.layers:
1567
+ if output_hidden_states:
1568
+ all_hidden_states += (hidden_states,)
1569
+
1570
+ if self.gradient_checkpointing and self.training:
1571
+ layer_outputs = self._gradient_checkpointing_func(
1572
+ decoder_layer.__call__,
1573
+ hidden_states,
1574
+ attention_mask,
1575
+ position_ids,
1576
+ past_key_values,
1577
+ output_attentions,
1578
+ use_cache,
1579
+ )
1580
+ else:
1581
+ layer_outputs = decoder_layer(
1582
+ hidden_states,
1583
+ attention_mask=attention_mask,
1584
+ position_ids=position_ids,
1585
+ past_key_value=past_key_values,
1586
+ output_attentions=output_attentions,
1587
+ use_cache=use_cache,
1588
+ )
1589
+
1590
+ hidden_states = layer_outputs[0]
1591
+
1592
+ if use_cache:
1593
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
1594
+
1595
+ if output_attentions:
1596
+ all_self_attns += (layer_outputs[1],)
1597
+
1598
+ hidden_states = self.norm(hidden_states)
1599
+
1600
+ # add hidden states from the last decoder layer
1601
+ if output_hidden_states:
1602
+ all_hidden_states += (hidden_states,)
1603
+
1604
+ next_cache = None
1605
+ if use_cache:
1606
+ next_cache = (
1607
+ next_decoder_cache.to_legacy_cache()
1608
+ if use_legacy_cache
1609
+ else next_decoder_cache
1610
+ )
1611
+ if not return_dict:
1612
+ return tuple(
1613
+ v
1614
+ for v in [hidden_states, next_cache, all_hidden_states, all_self_attns]
1615
+ if v is not None
1616
+ )
1617
+ return BaseModelOutputWithPast(
1618
+ last_hidden_state=hidden_states,
1619
+ past_key_values=next_cache,
1620
+ hidden_states=all_hidden_states,
1621
+ attentions=all_self_attns,
1622
+ )
1623
+
1624
+
1625
+ class DeepseekV2ForCausalLM(DeepseekV2PreTrainedModel):
1626
+ _tied_weights_keys = ["lm_head.weight"]
1627
+
1628
+ def __init__(self, config):
1629
+ super().__init__(config)
1630
+ self.model = DeepseekV2Model(config)
1631
+ self.vocab_size = config.vocab_size
1632
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1633
+
1634
+ # Initialize weights and apply final processing
1635
+ self.post_init()
1636
+
1637
+ def get_input_embeddings(self):
1638
+ return self.model.embed_tokens
1639
+
1640
+ def set_input_embeddings(self, value):
1641
+ self.model.embed_tokens = value
1642
+
1643
+ def get_output_embeddings(self):
1644
+ return self.lm_head
1645
+
1646
+ def set_output_embeddings(self, new_embeddings):
1647
+ self.lm_head = new_embeddings
1648
+
1649
+ def set_decoder(self, decoder):
1650
+ self.model = decoder
1651
+
1652
+ def get_decoder(self):
1653
+ return self.model
1654
+
1655
+ @add_start_docstrings_to_model_forward(DeepseekV2_INPUTS_DOCSTRING)
1656
+ @replace_return_docstrings(
1657
+ output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC
1658
+ )
1659
+ def forward(
1660
+ self,
1661
+ input_ids: torch.LongTensor = None,
1662
+ attention_mask: Optional[torch.Tensor] = None,
1663
+ position_ids: Optional[torch.LongTensor] = None,
1664
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1665
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1666
+ labels: Optional[torch.LongTensor] = None,
1667
+ use_cache: Optional[bool] = None,
1668
+ output_attentions: Optional[bool] = None,
1669
+ output_hidden_states: Optional[bool] = None,
1670
+ return_dict: Optional[bool] = None,
1671
+ cache_position: Optional[torch.LongTensor] = None
1672
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1673
+ r"""
1674
+ Args:
1675
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1676
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1677
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1678
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1679
+
1680
+ Returns:
1681
+
1682
+ Example:
1683
+
1684
+ ```python
1685
+ >>> from transformers import AutoTokenizer, DeepseekV2ForCausalLM
1686
+
1687
+ >>> model = DeepseekV2ForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
1688
+ >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
1689
+
1690
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
1691
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
1692
+
1693
+ >>> # Generate
1694
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
1695
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1696
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
1697
+ ```"""
1698
+ output_attentions = (
1699
+ output_attentions
1700
+ if output_attentions is not None
1701
+ else self.config.output_attentions
1702
+ )
1703
+ output_hidden_states = (
1704
+ output_hidden_states
1705
+ if output_hidden_states is not None
1706
+ else self.config.output_hidden_states
1707
+ )
1708
+ return_dict = (
1709
+ return_dict if return_dict is not None else self.config.use_return_dict
1710
+ )
1711
+
1712
+ # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
1713
+ outputs = self.model(
1714
+ input_ids=input_ids,
1715
+ attention_mask=attention_mask,
1716
+ position_ids=position_ids,
1717
+ past_key_values=past_key_values,
1718
+ inputs_embeds=inputs_embeds,
1719
+ use_cache=use_cache,
1720
+ output_attentions=output_attentions,
1721
+ output_hidden_states=output_hidden_states,
1722
+ return_dict=return_dict,
1723
+ cache_position=cache_position
1724
+ )
1725
+
1726
+ hidden_states = outputs[0]
1727
+ logits = self.lm_head(hidden_states)
1728
+ logits = logits.float()
1729
+
1730
+ loss = None
1731
+ if labels is not None:
1732
+ # Shift so that tokens < n predict n
1733
+ shift_logits = logits[..., :-1, :].contiguous()
1734
+ shift_labels = labels[..., 1:].contiguous()
1735
+ # Flatten the tokens
1736
+ loss_fct = CrossEntropyLoss()
1737
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
1738
+ shift_labels = shift_labels.view(-1)
1739
+ # Enable model parallelism
1740
+ shift_labels = shift_labels.to(shift_logits.device)
1741
+ loss = loss_fct(shift_logits, shift_labels)
1742
+
1743
+ if not return_dict:
1744
+ output = (logits,) + outputs[1:]
1745
+ return (loss,) + output if loss is not None else output
1746
+
1747
+ return CausalLMOutputWithPast(
1748
+ loss=loss,
1749
+ logits=logits,
1750
+ past_key_values=outputs.past_key_values,
1751
+ hidden_states=outputs.hidden_states,
1752
+ attentions=outputs.attentions,
1753
+ )
1754
+
1755
+ def prepare_inputs_for_generation(
1756
+ self,
1757
+ input_ids,
1758
+ past_key_values=None,
1759
+ attention_mask=None,
1760
+ inputs_embeds=None,
1761
+ **kwargs,
1762
+ ):
1763
+ past_length = 0
1764
+ if past_key_values is not None:
1765
+ if isinstance(past_key_values, Cache):
1766
+ cache_length = past_key_values.get_seq_length()
1767
+ past_length = past_key_values.seen_tokens
1768
+ max_cache_length = past_key_values.get_max_length()
1769
+ else:
1770
+ cache_length = past_length = past_key_values[0][0].shape[2]
1771
+ max_cache_length = None
1772
+
1773
+ # Keep only the unprocessed tokens:
1774
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
1775
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
1776
+ # input)
1777
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
1778
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length):]
1779
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
1780
+ # input_ids based on the past_length.
1781
+ elif past_length < input_ids.shape[1]:
1782
+ input_ids = input_ids[:, past_length:]
1783
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
1784
+
1785
+ # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
1786
+ if (
1787
+ max_cache_length is not None
1788
+ and attention_mask is not None
1789
+ and cache_length + input_ids.shape[1] > max_cache_length
1790
+ ):
1791
+ attention_mask = attention_mask[:, -max_cache_length:]
1792
+
1793
+ position_ids = kwargs.get("position_ids", None)
1794
+ if attention_mask is not None and position_ids is None:
1795
+ # create position_ids on the fly for batch generation
1796
+ position_ids = attention_mask.long().cumsum(-1) - 1
1797
+ position_ids.masked_fill_(attention_mask == 0, 1)
1798
+ if past_key_values:
1799
+ position_ids = position_ids[:, -input_ids.shape[1]:]
1800
+
1801
+ if self.generation_config.cache_implementation == "static":
1802
+ # generation with static cache
1803
+ cache_position = kwargs.get("cache_position", None)
1804
+ if cache_position is None:
1805
+ past_length = 0
1806
+ else:
1807
+ past_length = cache_position[-1] + 1
1808
+ input_ids = input_ids[:, past_length:]
1809
+ position_ids = position_ids[:, past_length:]
1810
+
1811
+ # TODO @gante we should only keep a `cache_position` in generate, and do +=1.
1812
+ # same goes for position ids. Could also help with continued generation.
1813
+ cache_position = torch.arange(past_length, past_length + position_ids.shape[-1], device=position_ids.device)
1814
+
1815
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1816
+ if inputs_embeds is not None and (past_key_values is None or len(past_key_values)==0): # support newer transformer version
1817
+ model_inputs = {"inputs_embeds": inputs_embeds}
1818
+ else:
1819
+ # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
1820
+ # recompiles graphs as the stride of the inputs is a guard. Ref: https://github.com/huggingface/transformers/pull/29114
1821
+ # TODO: use `next_tokens` directly instead.
1822
+ model_inputs = {"input_ids": input_ids.contiguous()}
1823
+
1824
+ model_inputs.update(
1825
+ {
1826
+ "position_ids": position_ids.contiguous(),
1827
+ "cache_position": cache_position,
1828
+ "past_key_values": past_key_values,
1829
+ "use_cache": kwargs.get("use_cache"),
1830
+ "attention_mask": attention_mask,
1831
+ }
1832
+ )
1833
+ return model_inputs
1834
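+ # Illustrative sketch (toy values): position ids derived on the fly from a left-padded
+ # attention mask, as done in the method above.
+ # >>> mask = torch.tensor([[0, 0, 1, 1], [1, 1, 1, 1]])
+ # >>> pos = mask.long().cumsum(-1) - 1
+ # >>> pos.masked_fill_(mask == 0, 1)
+ # tensor([[1, 1, 0, 1],      # padded slots get a dummy value of 1
+ #         [0, 1, 2, 3]])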
+
1835
+ @staticmethod
1836
+ def _reorder_cache(past_key_values, beam_idx):
1837
+ reordered_past = ()
1838
+ for layer_past in past_key_values:
1839
+ reordered_past += (
1840
+ tuple(
1841
+ past_state.index_select(0, beam_idx.to(past_state.device))
1842
+ for past_state in layer_past
1843
+ ),
1844
+ )
1845
+ return reordered_past
1846
+
1847
+
1848
+ @add_start_docstrings(
1849
+ """
1850
+ The DeepseekV2 Model transformer with a sequence classification head on top (linear layer).
1851
+
1852
+ [`DeepseekV2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
1853
+ (e.g. GPT-2) do.
1854
+
1855
+ Since it does classification on the last token, it requires to know the position of the last token. If a
1856
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1857
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1858
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1859
+ each row of the batch).
1860
+ """,
1861
+ DeepseekV2_START_DOCSTRING,
1862
+ )
1863
+ class DeepseekV2ForSequenceClassification(DeepseekV2PreTrainedModel):
1864
+ def __init__(self, config):
1865
+ super().__init__(config)
1866
+ self.num_labels = config.num_labels
1867
+ self.model = DeepseekV2Model(config)
1868
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
1869
+
1870
+ # Initialize weights and apply final processing
1871
+ self.post_init()
1872
+
1873
+ def get_input_embeddings(self):
1874
+ return self.model.embed_tokens
1875
+
1876
+ def set_input_embeddings(self, value):
1877
+ self.model.embed_tokens = value
1878
+
1879
+ @add_start_docstrings_to_model_forward(DeepseekV2_INPUTS_DOCSTRING)
1880
+ def forward(
1881
+ self,
1882
+ input_ids: torch.LongTensor = None,
1883
+ attention_mask: Optional[torch.Tensor] = None,
1884
+ position_ids: Optional[torch.LongTensor] = None,
1885
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1886
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1887
+ labels: Optional[torch.LongTensor] = None,
1888
+ use_cache: Optional[bool] = None,
1889
+ output_attentions: Optional[bool] = None,
1890
+ output_hidden_states: Optional[bool] = None,
1891
+ return_dict: Optional[bool] = None,
1892
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1893
+ r"""
1894
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1895
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1896
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
1897
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1898
+ """
1899
+ return_dict = (
1900
+ return_dict if return_dict is not None else self.config.use_return_dict
1901
+ )
1902
+
1903
+ transformer_outputs = self.model(
1904
+ input_ids,
1905
+ attention_mask=attention_mask,
1906
+ position_ids=position_ids,
1907
+ past_key_values=past_key_values,
1908
+ inputs_embeds=inputs_embeds,
1909
+ use_cache=use_cache,
1910
+ output_attentions=output_attentions,
1911
+ output_hidden_states=output_hidden_states,
1912
+ return_dict=return_dict,
1913
+ )
1914
+ hidden_states = transformer_outputs[0]
1915
+ logits = self.score(hidden_states)
1916
+
1917
+ if input_ids is not None:
1918
+ batch_size = input_ids.shape[0]
1919
+ else:
1920
+ batch_size = inputs_embeds.shape[0]
1921
+
1922
+ if self.config.pad_token_id is None and batch_size != 1:
1923
+ raise ValueError(
1924
+ "Cannot handle batch sizes > 1 if no padding token is defined."
1925
+ )
1926
+ if self.config.pad_token_id is None:
1927
+ sequence_lengths = -1
1928
+ else:
1929
+ if input_ids is not None:
1930
+ sequence_lengths = (
1931
+ torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
1932
+ ).to(logits.device)
1933
+ else:
1934
+ sequence_lengths = -1
1935
+
1936
+ pooled_logits = logits[
1937
+ torch.arange(batch_size, device=logits.device), sequence_lengths
1938
+ ]
1939
+
1940
+ loss = None
1941
+ if labels is not None:
1942
+ labels = labels.to(logits.device)
1943
+ if self.config.problem_type is None:
1944
+ if self.num_labels == 1:
1945
+ self.config.problem_type = "regression"
1946
+ elif self.num_labels > 1 and (
1947
+ labels.dtype == torch.long or labels.dtype == torch.int
1948
+ ):
1949
+ self.config.problem_type = "single_label_classification"
1950
+ else:
1951
+ self.config.problem_type = "multi_label_classification"
1952
+
1953
+ if self.config.problem_type == "regression":
1954
+ loss_fct = MSELoss()
1955
+ if self.num_labels == 1:
1956
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1957
+ else:
1958
+ loss = loss_fct(pooled_logits, labels)
1959
+ elif self.config.problem_type == "single_label_classification":
1960
+ loss_fct = CrossEntropyLoss()
1961
+ loss = loss_fct(
1962
+ pooled_logits.view(-1, self.num_labels), labels.view(-1)
1963
+ )
1964
+ elif self.config.problem_type == "multi_label_classification":
1965
+ loss_fct = BCEWithLogitsLoss()
1966
+ loss = loss_fct(pooled_logits, labels)
1967
+ if not return_dict:
1968
+ output = (pooled_logits,) + transformer_outputs[1:]
1969
+ return ((loss,) + output) if loss is not None else output
1970
+
1971
+ return SequenceClassifierOutputWithPast(
1972
+ loss=loss,
1973
+ logits=pooled_logits,
1974
+ past_key_values=transformer_outputs.past_key_values,
1975
+ hidden_states=transformer_outputs.hidden_states,
1976
+ attentions=transformer_outputs.attentions,
1977
+ )
modeling_deepseek_vl_v2.py ADDED
@@ -0,0 +1,691 @@
1
+ from dataclasses import dataclass
2
+ import logging
3
+ import gc
4
+
5
+ from einops import rearrange, repeat
6
+ from typing import Optional, List, Tuple, Callable, Union
7
+
8
+ import torch
9
+ import torch.nn as nn
10
+ import torch.nn.functional as F
11
+
12
+ from transformers.utils import (
13
+ add_start_docstrings,
14
+ add_start_docstrings_to_model_forward,
15
+ )
16
+ from transformers.modeling_outputs import ModelOutput
17
+ from transformers.configuration_utils import PretrainedConfig
18
+ from transformers import (
19
+ AutoConfig,
20
+ AutoModelForCausalLM,
21
+ PreTrainedModel
22
+ )
23
+ from transformers.utils import logging
24
+
25
+ # from .siglip_vit import VisionTransformer # we do not use vit in this file
26
+ from .configuration_deepseek import DeepseekV2Config
27
+ from .modeling_deepseek import DeepseekV2ForCausalLM
28
+
29
+
30
+ logger = logging.get_logger(__name__)
31
+
32
+
33
+ class MlpProjector(nn.Module):
34
+
35
+ def __init__(self, cfg):
36
+
37
+ super().__init__()
38
+
39
+ self.cfg = cfg
40
+
41
+ if cfg.projector_type == "identity":
42
+ modules = nn.Identity()
43
+
44
+ elif cfg.projector_type == "linear":
45
+ modules = nn.Linear(cfg.input_dim, cfg.n_embed)
46
+
47
+ elif cfg.projector_type == "mlp_gelu":
48
+ mlp_depth = cfg.depth
49
+ modules = [nn.Linear(cfg.input_dim, cfg.n_embed)]
50
+ for _ in range(1, mlp_depth):
51
+ modules.append(nn.GELU())
52
+ modules.append(nn.Linear(cfg.n_embed, cfg.n_embed))
53
+ modules = nn.Sequential(*modules)
54
+
55
+ elif cfg.projector_type == "downsample_mlp_gelu":
56
+ mlp_depth = cfg.depth
57
+ mlp_ratio = cfg.mlp_ratio
58
+ modules = [nn.Linear(cfg.input_dim * cfg.downsample_ratio * cfg.downsample_ratio, cfg.n_embed * mlp_ratio)]
59
+ for _ in range(1, mlp_depth - 1):
60
+ modules.append(nn.GELU())
61
+ modules.append(nn.Linear(cfg.n_embed * mlp_ratio, cfg.n_embed * mlp_ratio))
62
+ modules.append(nn.GELU())
63
+ modules.append(nn.Linear(cfg.n_embed * mlp_ratio, cfg.n_embed))
64
+ modules = nn.Sequential(*modules)
65
+
66
+ else:
67
+ raise ValueError(f"Unknown projector type: {cfg.projector_type}")
68
+
69
+ if cfg.token_pooling:
70
+ self.token_pooling_layer = nn.Linear(cfg.input_dim * 4, cfg.input_dim)
71
+
72
+ self.layers = modules
73
+
74
+ def forward(self, x):
75
+ if self.cfg.token_pooling:
76
+ batch_size, wxh, channels = x.shape
77
+ w = h = int(wxh ** 0.5)
78
+ x = x.view(batch_size, w, h, channels)
79
+ x = x.permute(0, 3, 1, 2)
80
+ # import ipdb; ipdb.set_trace()
81
+ patches = x.unfold(2, 2, 2).unfold(3, 2, 2)
82
+ batch_size, channels, h_patches, w_patches, _, _ = patches.size()
83
+ # concatenate along the channel dimension
84
+ patches = patches.contiguous().view(batch_size, channels, h_patches * w_patches, -1)
85
+
86
+ # pass through the linear layer
87
+ patches = patches.permute(0, 2, 1, 3).contiguous()
88
+ patches = patches.view(batch_size, h_patches * w_patches, channels * 4)
89
+
90
+ x = self.token_pooling_layer(patches)
91
+
92
+ elif self.cfg.projector_type == 'downsample_mlp_gelu':
93
+ bs, hw, input_dim = x.shape
94
+ h = w = int((hw) ** 0.5)
95
+
96
+ """compute padding"""
97
+ if h % self.cfg.downsample_ratio:
98
+ pad = self.cfg.downsample_ratio - h % self.cfg.downsample_ratio
99
+ else:
100
+ pad = 0
101
+ x = x.reshape(bs, h, w, input_dim)
102
+ if pad > 0:
103
+ x = F.pad(x, (0, 0, 0, pad, 0, pad), "constant", 0)
104
+
105
+ """4 to 1 concat"""
106
+ x = x.permute(0, 3, 1, 2) # B, C, H, W
107
+ x = F.unfold(x, kernel_size=self.cfg.downsample_ratio, stride=self.cfg.downsample_ratio,
108
+ padding=0) # B, C*4, HW // 4
109
+ x = x.permute(0, 2, 1)
110
+
111
+ return self.layers(x)
112
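+ # Illustrative sketch (shapes are assumptions, e.g. a 27x27 grid of 1152-d vision tokens):
+ # the "downsample_mlp_gelu" path pads the grid to a multiple of downsample_ratio, then
+ # concatenates each 2x2 patch so the MLP sees input_dim * 4 features per kept token.
+ # >>> x = torch.randn(1, 729, 1152)                               # (bs, 27*27, input_dim)
+ # >>> x = F.pad(x.reshape(1, 27, 27, 1152), (0, 0, 0, 1, 0, 1))   # pad H, W to 28
+ # >>> x = F.unfold(x.permute(0, 3, 1, 2), kernel_size=2, stride=2)
+ # >>> x.permute(0, 2, 1).shape                                    # torch.Size([1, 196, 4608])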
+
113
+
114
+ class VisionEncoderConfig(PretrainedConfig):
115
+ model_type: str = "vision"
116
+
117
+ model_name: str = "siglip_large_patch16_384"
118
+ image_size: int = 384
119
+ patch_size: int = 16
120
+ width: int = 1024
121
+ layers: int = 24
122
+ heads: int = 16
123
+ mlp_ratio: int = 4
124
+ global_pool: str = "map"
125
+ ignore_head: bool = True
126
+ class_token: bool = False
127
+ num_classes: int = 0
128
+ use_checkpoint: bool = False
129
+ weight_init: str = "skip"
130
+ deterministic: bool = False
131
+ num_recomputing_layers: int = 0
132
+
133
+ def __init__(
134
+ self,
135
+ model_name: str = "siglip_large_patch16_384",
136
+ image_size: int = 384,
137
+ patch_size: int = 16,
138
+ width: int = 1024,
139
+ layers: int = 24,
140
+ heads: int = 16,
141
+ mlp_ratio: int = 4,
142
+ global_pool: str = "map",
143
+ ignore_head: bool = True,
144
+ class_token: bool = False,
145
+ num_classes: int = 0,
146
+ use_checkpoint: bool = False,
147
+ **kwargs
148
+ ):
149
+ self.model_name = model_name
150
+ self.image_size = image_size
151
+ self.patch_size = patch_size
152
+ self.width = width
153
+ self.layers = layers
154
+ self.heads = heads
155
+ self.mlp_ratio = mlp_ratio
156
+ self.global_pool = global_pool
157
+ self.ignore_head = ignore_head
158
+ self.class_token = class_token
159
+ self.num_classes = num_classes
160
+ self.use_checkpoint = use_checkpoint
161
+
162
+ super().__init__(**kwargs)
163
+
164
+
165
+ class MlpProjectorConfig(PretrainedConfig):
166
+ model_type = "mlp_projector"
167
+ projector_type: str = "downsample_mlp_gelu"
168
+ input_dim: int = 1152
169
+ n_embed: int = 2048
170
+ depth: int = 2
171
+ mlp_ratio: int = 1
172
+ downsample_ratio: int = 2
173
+ token_pooling: bool = False
174
+
175
+ def __init__(
176
+ self,
177
+ projector_type: str = "downsample_mlp_gelu",
178
+ input_dim: int = 1152,
179
+ n_embed: int = 2048,
180
+ depth: int = 2,
181
+ mlp_ratio: int = 1,
182
+ downsample_ratio: int = 2,
183
+ **kwargs
184
+ ):
185
+ self.projector_type = projector_type
186
+ self.input_dim = input_dim
187
+ self.n_embed = n_embed
188
+ self.depth = depth
189
+ self.mlp_ratio = mlp_ratio
190
+ self.downsample_ratio = downsample_ratio
191
+
192
+ super().__init__(**kwargs)
193
+
194
+
195
+ @dataclass
196
+ class DeepSeekVLV2CausalLMOutputWithPast(ModelOutput):
197
+ """
198
+ Base class for DeepSeek-VL2 causal language model (or autoregressive) outputs.
199
+
200
+ Args:
201
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
202
+ Language modeling loss (for next-token prediction).
203
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
204
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
205
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
206
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
207
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`)
208
+
209
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
210
+ `past_key_values` input) to speed up sequential decoding.
211
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
212
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
213
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
214
+
215
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
216
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
217
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
218
+ sequence_length)`.
219
+
220
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
221
+ heads.
222
+ rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
223
+ The rope index difference between sequence length and multimodal rope.
224
+ """
225
+
226
+ loss: Optional[torch.FloatTensor] = None
227
+ logits: torch.FloatTensor = None
228
+ past_key_values: Optional[List[torch.FloatTensor]] = None
229
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
230
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
231
+ rope_deltas: Optional[torch.LongTensor] = None
232
+
233
+
234
+ class DeepseekVLV2Config(PretrainedConfig):
235
+ model_type = "deepseek_vl_v2"
236
+ vision_config: VisionEncoderConfig
237
+ projector_config: MlpProjectorConfig
238
+ language_config: DeepseekV2Config
239
+
240
+ tile_tag: str = "2D"
241
+ global_view_pos: str = "head"
242
+ candidate_resolutions: Tuple[Tuple[int, int]] = ((384, 384),)
243
+
244
+ def __init__(
245
+ self,
246
+ tile_tag: str = "tile_tag",
247
+ global_view_pos: str = "head",
248
+ candidate_resolutions: Tuple[Tuple[int, int]] = ((384, 384),),
249
+ **kwargs
250
+ ):
251
+ super().__init__(**kwargs)
252
+
253
+ vision_config = kwargs.get("vision_config", {})
254
+ self.vision_config = VisionEncoderConfig(**vision_config)
255
+
256
+ projector_config = kwargs.get("projector_config", {})
257
+ self.projector_config = MlpProjectorConfig(**projector_config)
258
+
259
+ language_config = kwargs.get("language_config", {})
260
+ if isinstance(language_config, DeepseekV2Config):
261
+ self.language_config = language_config
262
+ else:
263
+ self.language_config = DeepseekV2Config(**language_config)
264
+
265
+ self.tile_tag = tile_tag
266
+ self.global_view_pos = global_view_pos
267
+ self.candidate_resolutions = candidate_resolutions
268
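+ # Illustrative sketch (field values are assumptions): the nested sub-configs arrive as plain
+ # dicts (e.g. from config.json) and are re-hydrated into their config classes above.
+ # >>> cfg = DeepseekVLV2Config(
+ # ...     tile_tag="2D",
+ # ...     global_view_pos="head",
+ # ...     projector_config={"projector_type": "downsample_mlp_gelu", "n_embed": 2048},
+ # ... )
+ # >>> isinstance(cfg.projector_config, MlpProjectorConfig)   # True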
+
269
+
270
+ class DeepseekVLV2PreTrainedModel(PreTrainedModel):
271
+ config_class = DeepseekVLV2Config
272
+ base_model_prefix = "deepseek_vl_v2"
273
+ _no_split_modules = []
274
+ _skip_keys_device_placement = "past_key_values"
275
+
276
+
277
+ class DeepseekVLV2ForCausalLM(DeepseekVLV2PreTrainedModel):
278
+
279
+ def __init__(self, config: DeepseekVLV2Config):
280
+ super().__init__(config)
281
+
282
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
283
+
284
+ # ----------- vision encoder ------------
285
+ vision_config = config.vision_config
286
+ self.vision = None
287
+ # self.vision = VisionTransformer(
288
+ # img_size=vision_config.image_size,
289
+ # patch_size=vision_config.patch_size,
290
+ # embed_dim=vision_config.width,
291
+ # depth=vision_config.layers,
292
+ # num_heads=vision_config.heads,
293
+ # mlp_ratio=vision_config.mlp_ratio,
294
+ # class_token=vision_config.class_token,
295
+ # global_pool=vision_config.global_pool,
296
+ # ignore_head=vision_config.ignore_head,
297
+ # weight_init=vision_config.weight_init,
298
+ # num_classes=0,
299
+ # deterministic=vision_config.deterministic,
300
+ # num_recomputing_layers=vision_config.num_recomputing_layers
301
+ # )
302
+
303
+ # ----------- vl projector ------------
304
+ projector_config = config.projector_config
305
+ self.projector = MlpProjector(projector_config)
306
+
307
+ # image token format
308
+ # FIXME: the current defaults for tile_tag & global_view_pos follow earlier experiments; the defaults should eventually be removed and a missing value should raise an error
309
+ self.tile_tag = config.tile_tag
310
+ self.global_view_pos = config.global_view_pos
311
+
312
+ # special tokens used to format the image token sequence
313
+ embed_std = 1 / torch.sqrt(torch.tensor(projector_config.n_embed, dtype=torch.float32))
314
+ if self.tile_tag == "2D":
315
+ # <|view_separator|>, <|\n|>
316
+ self.image_newline = nn.Parameter(torch.randn(projector_config.n_embed) * embed_std)
317
+ # fix the typo: view_seperater
318
+ self.view_seperator = nn.Parameter(torch.randn(projector_config.n_embed) * embed_std)
319
+ elif self.tile_tag == "1D":
320
+ # <|tile_x|>, <|tile_global|>
321
+ candidate_resolutions = config.candidate_resolutions
322
+ if len(candidate_resolutions) == 0:
323
+ raise ValueError(
324
+ f"len(candidate_resolutions) should be larger than 0, but got {len(candidate_resolutions)}")
325
+ tile_variants_num = len(candidate_resolutions)
326
+ self.tile_indicators = nn.Parameter(
327
+ torch.randn(size=(tile_variants_num + 1, projector_config.n_embed)) * embed_std
328
+ )
329
+ else:
330
+ raise ValueError(f"tile tag should be either 1D or 2D, but got {self.tile_tag}")
331
+
332
+ # ----------- language model ------------
333
+ language_config = config.language_config
334
+ self.language = DeepseekV2ForCausalLM(language_config)
335
+
336
+ def prepare_inputs_embeds(
337
+ self,
338
+ input_ids: torch.LongTensor,
339
+ images: Optional[torch.FloatTensor] = None,
340
+ images_seq_mask: Optional[torch.LongTensor] = None,
341
+ images_spatial_crop: Optional[torch.LongTensor] = None,
342
+ **ignore_kwargs
343
+ ):
344
+ """
345
+
346
+ Args:
347
+ input_ids (torch.LongTensor): [b, T]
348
+ images (torch.FloatTensor): [b, max_n_images, 3, height, width]
349
+ images_seq_mask (torch.BoolTensor): [b, T]
350
+ images_spatial_crop (torch.LongTensor): [b, max_n_images, 2]
351
+
352
+ Returns:
353
+ input_embeds (torch.Tensor): [b, T, D]
354
+ """
355
+
356
+ if images is None or images_spatial_crop.sum() == 0:
357
+ return self.language.get_input_embeddings()(input_ids)
358
+
359
+ bs, max_n_images, _ = images_spatial_crop.shape
360
+ batch_num_tiles = [0 for _ in range(bs)]
361
+ total_tiles = []
362
+ for idx in range(bs):
363
+ for jdx in range(max_n_images):
364
+ num_width_tiles, num_height_tiles = images_spatial_crop[idx, jdx]
365
+ if num_width_tiles == 0 or num_height_tiles == 0:
366
+ break
367
+ batch_num_tiles[idx] += (1 + num_width_tiles * num_height_tiles)
368
+
369
+ total_tiles.append(images[idx, :batch_num_tiles[idx]])
370
+
371
+ # [batch_all_tiles, 3, height, width]
372
+ total_tiles = torch.cat(total_tiles, dim=0)
373
+ assert total_tiles.shape[0] == sum(batch_num_tiles)
374
+ if total_tiles.shape[0] == 0:
375
+ return self.language.get_input_embeddings()(input_ids)
376
+
377
+ # [batch_all_tiles, vit_seq_len, c]
378
+ images_feature = self.vision(total_tiles)
379
+
380
+ # [batch_all_tiles, hw, D]
381
+ images_embeds = self.projector(images_feature)
382
+ _, hw, n_dim = images_embeds.shape
383
+ h = w = int(hw ** 0.5)
384
+
385
+ # put image tokens into the input_embeds, [b, T, D]
386
+ input_embeds = self.language.get_input_embeddings()(input_ids)
387
+
388
+ # fill the image token sequence according to self.tile_tag & self.global_view_pos
389
+ tile_index = 0
390
+ for idx in range(images_spatial_crop.shape[0]):
391
+ images_in_this_batch = []
392
+ for jdx in range(images_spatial_crop.shape[1]):
393
+
394
+ # extract global & local features
395
+ num_width_tiles, num_height_tiles = images_spatial_crop[idx, jdx]
396
+ if num_width_tiles == 0 or num_height_tiles == 0:
397
+ break
398
+
399
+ num_tiles_in_image = num_width_tiles * num_height_tiles
400
+
401
+ # [hw, D]
402
+ global_features = images_embeds[tile_index]
403
+
404
+ # [num_height_tiles * num_width_tiles, hw, D]
405
+ local_features = images_embeds[tile_index + 1: tile_index + 1 + num_tiles_in_image]
406
+
407
+ tile_index += num_tiles_in_image + 1
408
+
409
+ # format global and local features
410
+ if self.tile_tag == "2D":
411
+
412
+ # ----------------- global view add newline -----------------
413
+ # [hw, D] -> [h, w, D]
414
+ global_features = global_features.view(h, w, n_dim)
415
+ # [D] -> [h, 1, D]
416
+ new_lines_in_global = repeat(self.image_newline, "d -> h 1 d", h=h)
417
+ # cat([h, w, D], [h, 1, D], dim=1) -> [h, w + 1, D]
418
+ global_features = torch.cat([global_features, new_lines_in_global], dim=1)
419
+ # [h, w + 1, D] -> [h * (w + 1), D]
420
+ global_features = global_features.view(-1, n_dim)
421
+
422
+ # ----------------- local view add newline -----------------
423
+ # [num_height_tiles * num_width_tiles, h * w, D] -> [num_height_tiles * h, num_width_tiles * w, D]
424
+ local_features = rearrange(
425
+ local_features,
426
+ "(th tw) (h w) d -> (th h) (tw w) d",
427
+ th=num_height_tiles,
428
+ tw=num_width_tiles,
429
+ h=h,
430
+ w=w
431
+ )
432
+
433
+ # [D] -> [num_height_tiles * h, 1, D]
434
+ new_lines_in_local = repeat(
435
+ self.image_newline,
436
+ "d -> (th h) 1 d",
437
+ th=num_height_tiles,
438
+ h=h
439
+ )
440
+
441
+ # [num_height_tiles * h, num_width_tiles * w + 1, D]
442
+ local_features = torch.cat([local_features, new_lines_in_local], dim=1)
443
+
444
+ # [num_height_tiles * h, num_width_tiles * w + 1, D]
445
+ # --> [(num_height_tiles * h) * (num_width_tiles * w + 1), D]
446
+ local_features = local_features.view(-1, n_dim)
447
+
448
+ # ----------------- merge global and local tiles -----------------
449
+ if self.global_view_pos == "head":
450
+ global_local_features = torch.cat(
451
+ [global_features, self.view_seperator[None, :], local_features], dim=0)
452
+ else:
453
+ global_local_features = torch.cat(
454
+ [local_features, self.view_seperator[None, :], global_features], dim=0)
455
+
456
+ else:
457
+ # abandoned; in practice this branch is never taken
458
+ global_features = torch.cat(
459
+ [self.tile_indicators[0:1], global_features], dim=0
460
+ )
461
+ local_features = torch.cat(
462
+ [self.tile_indicators[1:num_tiles_in_image + 1].unsqueeze(1), local_features], dim=1
463
+ )
464
+ local_features = rearrange(local_features, 'crop_num hw d -> (crop_num hw) d')
465
+
466
+ if self.global_view_pos == "head":
467
+ global_local_features = torch.cat([global_features, local_features], dim=0)
468
+ else:
469
+ global_local_features = torch.cat([local_features, global_features], dim=0)
470
+
471
+ images_in_this_batch.append(global_local_features)
472
+
473
+ if len(images_in_this_batch) > 0:
474
+ images_in_this_batch = torch.cat(images_in_this_batch, dim=0)
475
+ input_embeds[idx].masked_scatter_(images_seq_mask[idx].unsqueeze(-1), images_in_this_batch)
476
+
477
+ return input_embeds
478
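+ # Illustrative sketch (toy shapes): for each sample, the flattened image-tile embeddings are
+ # written into the text embedding sequence wherever images_seq_mask is True.
+ # >>> input_embeds = torch.zeros(4, 8)                        # (T, D) for one sample
+ # >>> images_seq_mask = torch.tensor([False, True, True, False])
+ # >>> image_embeds = torch.ones(2, 8)                         # 2 image tokens of dim D
+ # >>> input_embeds.masked_scatter_(images_seq_mask.unsqueeze(-1), image_embeds)
+ # rows 1 and 2 of input_embeds now hold the image embeddings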
+
479
+ @torch.no_grad()
480
+ def incremental_prefilling(
481
+ self,
482
+ input_ids: Optional[torch.LongTensor] = None,
483
+ attention_mask: Optional[torch.Tensor] = None,
484
+ inputs_embeds: Optional[torch.FloatTensor] = None,
485
+
486
+ images: Optional[torch.FloatTensor] = None,
487
+ images_seq_mask: Optional[torch.LongTensor] = None,
488
+ images_spatial_crop: Optional[torch.LongTensor] = None,
489
+ chunk_size: int = 1024
490
+ ):
491
+ if inputs_embeds is None:
492
+ inputs_embeds = self.prepare_inputs_embeds(
493
+ input_ids=input_ids,
494
+ images=images,
495
+ images_seq_mask=images_seq_mask,
496
+ images_spatial_crop=images_spatial_crop,
497
+ )
498
+
499
+ del images
500
+ del images_seq_mask
501
+ del images_spatial_crop
502
+
503
+ if attention_mask is not None:
504
+ attention_mask = attention_mask.to(inputs_embeds.device)
505
+
506
+ self._clear_cuda_cache()
507
+
508
+ bzs, seq_len, _ = inputs_embeds.shape
509
+ past_key_values = None
510
+
511
+ # keep the last token for the next forward pass
512
+ prefilling_len = seq_len - 1
513
+ for i in range(0, prefilling_len, chunk_size):
514
+ chunk_start = i
515
+ chunk_end = min(i + chunk_size, prefilling_len)
516
+ chunk_inputs_embeds = inputs_embeds[:, chunk_start: chunk_end]
517
+ chunk_attention_mask = attention_mask[:, 0: chunk_end]
518
+ # print(f"start = {chunk_start}, end = {chunk_end}, prefilling_len = {prefilling_len}, seq_len = {seq_len}")
519
+
520
+ # compute position_ids
521
+ if past_key_values is not None:
522
+ position_ids = torch.arange(
523
+ chunk_start,
524
+ chunk_end,
525
+ dtype=torch.long,
526
+ device=inputs_embeds.device
527
+ ).unsqueeze(0)
528
+ past_key_values = self._move_past_key_values_to_gpu(past_key_values, inputs_embeds.device)
529
+ else:
530
+ position_ids = None
531
+
532
+ # chunk-forward
533
+ with torch.no_grad():
534
+ outputs = self.forward(
535
+ inputs_embeds=chunk_inputs_embeds,
536
+ attention_mask=chunk_attention_mask,
537
+ past_key_values=past_key_values,
538
+ position_ids=position_ids,
539
+ use_cache=True,
540
+ )
541
+ # update past_key_values
542
+ past_key_values = outputs.past_key_values
543
+ past_key_values = self._move_past_key_values_to_cpu(past_key_values)
544
+
545
+ del outputs, position_ids
546
+ self._clear_cuda_cache()
547
+
548
+ prefilling_key_values = []
549
+ for layer_past in past_key_values:
550
+ prefilling_key_values.append(
551
+ (
552
+ layer_past[0][:, :, 0: prefilling_len, ...].to(inputs_embeds.device),
553
+ layer_past[1][:, :, 0: prefilling_len, ...].to(inputs_embeds.device),
554
+ )
555
+ )
556
+
557
+ return inputs_embeds, prefilling_key_values
558
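+ # Illustrative usage sketch (variable names are assumptions): prefill a long multimodal
+ # prompt in fixed-size chunks, keeping the KV cache on CPU between chunks, then feed the
+ # returned embeddings and truncated cache into the subsequent forward/generation calls.
+ # >>> inputs_embeds, past_key_values = model.incremental_prefilling(
+ # ...     input_ids=input_ids, attention_mask=attention_mask,
+ # ...     images=images, images_seq_mask=images_seq_mask,
+ # ...     images_spatial_crop=images_spatial_crop, chunk_size=512)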
+
559
+ def forward(
560
+ self,
561
+ input_ids: Optional[torch.LongTensor] = None,
562
+
563
+ attention_mask: Optional[torch.Tensor] = None,
564
+ position_ids: Optional[torch.LongTensor] = None,
565
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
566
+ inputs_embeds: Optional[torch.FloatTensor] = None,
567
+
568
+ images: Optional[torch.FloatTensor] = None,
569
+ images_seq_mask: Optional[torch.LongTensor] = None,
570
+ images_spatial_crop: Optional[torch.LongTensor] = None,
571
+
572
+ labels: Optional[torch.LongTensor] = None,
573
+ use_cache: Optional[bool] = None,
574
+ output_attentions: Optional[bool] = None,
575
+ output_hidden_states: Optional[bool] = None,
576
+ return_dict: Optional[bool] = None,
577
+ cache_position: Optional[torch.LongTensor] = None,
578
+ ):
579
+
580
+ output_attentions = (
581
+ output_attentions
582
+ if output_attentions is not None
583
+ else self.config.output_attentions
584
+ )
585
+ output_hidden_states = (
586
+ output_hidden_states
587
+ if output_hidden_states is not None
588
+ else self.config.output_hidden_states
589
+ )
590
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
591
+
592
+ return_dict = (
593
+ return_dict if return_dict is not None else self.config.use_return_dict
594
+ )
595
+ if inputs_embeds is None:
596
+ inputs_embeds = self.prepare_inputs_embeds(
597
+ input_ids=input_ids,
598
+ images=images,
599
+ images_seq_mask=images_seq_mask,
600
+ images_spatial_crop=images_spatial_crop,
601
+ )
602
+
603
+ if attention_mask is not None:
604
+ attention_mask = attention_mask.to(inputs_embeds.device)
605
+
606
+ # print(inputs_embeds.shape)
607
+ outputs = self.language.forward(
608
+ input_ids=None,
609
+ attention_mask=attention_mask,
610
+ position_ids=position_ids,
611
+ past_key_values=past_key_values,
612
+ inputs_embeds=inputs_embeds,
613
+ labels=labels,
614
+ use_cache=use_cache,
615
+ output_attentions=output_attentions,
616
+ output_hidden_states=output_hidden_states,
617
+ return_dict=return_dict,
618
+ cache_position=cache_position
619
+ )
620
+
621
+ return outputs
622
+
623
+ def _clear_cuda_cache(self):
624
+ """clear CUDA memory cache"""
625
+ gc.collect()
626
+ if torch.cuda.is_available():
627
+ torch.cuda.empty_cache()
628
+ torch.cuda.synchronize()
629
+
630
+ def _move_past_key_values_to_cpu(self, past_key_values):
631
+ # print(f"past_key_values -> cpu")
632
+ if past_key_values is None:
633
+ return None
634
+ return tuple(tuple(t.cpu() for t in layer) for layer in past_key_values)
635
+
636
+ def _move_past_key_values_to_gpu(self, past_key_values, device="cuda:0"):
637
+ # print(f"past_key_values -> gpu")
638
+ if past_key_values is None:
639
+ return None
640
+ return tuple(tuple(t.to(device) for t in layer) for layer in past_key_values)
641
+
642
+ def prepare_inputs_for_generation(
643
+ self,
644
+ input_ids,
645
+ past_key_values=None,
646
+ inputs_embeds=None,
647
+
648
+ images: Optional[torch.FloatTensor] = None,
649
+ images_seq_mask: Optional[torch.LongTensor] = None,
650
+ images_spatial_crop: Optional[torch.LongTensor] = None,
651
+
652
+ attention_mask=None,
653
+ cache_position=None,
654
+
655
+ pixel_values=None,
656
+ image_sizes=None,
657
+ num_logits_to_keep=None,
658
+ **kwargs,
659
+ ):
660
+ # Overwritten -- in specific circumstances we don't want to forward image inputs to the model
661
+ model_inputs = self.language.prepare_inputs_for_generation(
662
+ input_ids,
663
+ past_key_values=past_key_values,
664
+ inputs_embeds=inputs_embeds,
665
+ attention_mask=attention_mask,
666
+ cache_position=cache_position,
667
+ num_logits_to_keep=num_logits_to_keep,
668
+ **kwargs,
669
+ )
670
+
671
+ # If we're in the cached decoding stage, the image inputs should be None because the input ids no longer contain the special image token
672
+ # Otherwise the image tensors need to be passed to the model
673
+ cache_position = model_inputs["cache_position"]
674
+ if cache_position[0] == 0:
675
+ model_inputs["images"] = images
676
+ model_inputs["images_seq_mask"] = images_seq_mask
677
+ model_inputs["images_spatial_crop"] = images_spatial_crop
678
+
679
+ return model_inputs
680
+
681
+ @staticmethod
682
+ def _reorder_cache(past_key_values, beam_idx):
683
+ reordered_past = ()
684
+ for layer_past in past_key_values:
685
+ reordered_past += (
686
+ tuple(
687
+ past_state.index_select(0, beam_idx.to(past_state.device))
688
+ for past_state in layer_past
689
+ ),
690
+ )
691
+ return reordered_past
processing_deepseek_vl_v2.py ADDED
@@ -0,0 +1,675 @@
1
+ # Copyright (c) 2023-2024 DeepSeek.
2
+ #
3
+ # Permission is hereby granted, free of charge, to any person obtaining a copy of
4
+ # this software and associated documentation files (the "Software"), to deal in
5
+ # the Software without restriction, including without limitation the rights to
6
+ # use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
7
+ # the Software, and to permit persons to whom the Software is furnished to do so,
8
+ # subject to the following conditions:
9
+ #
10
+ # The above copyright notice and this permission notice shall be included in all
11
+ # copies or substantial portions of the Software.
12
+ #
13
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
15
+ # FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
16
+ # COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
17
+ # IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
18
+ # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
19
+
20
+ from dataclasses import dataclass
21
+ from typing import Dict, Tuple, List, Literal, Optional
22
+ import math
23
+
24
+ import torch
25
+ from torch.nn.utils.rnn import pad_sequence
26
+ import torchvision.transforms as T
27
+ from transformers import LlamaTokenizerFast
28
+ from transformers.processing_utils import ProcessorMixin
29
+ from PIL import Image, ImageOps
30
+
31
+ from .conversation import get_conv_template
32
+
33
+
34
+ def select_best_resolution(image_size, candidate_resolutions):
35
+ # used for cropping
36
+ original_width, original_height = image_size
37
+ best_fit = None
38
+ max_effective_resolution = 0
39
+ min_wasted_resolution = float("inf")
40
+
41
+ for width, height in candidate_resolutions:
42
+ scale = min(width / original_width, height / original_height)
43
+ downscaled_width, downscaled_height = int(original_width * scale), int(original_height * scale)
44
+ effective_resolution = min(downscaled_width * downscaled_height, original_width * original_height)
45
+ wasted_resolution = (width * height) - effective_resolution
46
+
47
+ if effective_resolution > max_effective_resolution or (effective_resolution == max_effective_resolution and wasted_resolution < min_wasted_resolution):
48
+ max_effective_resolution = effective_resolution
49
+ min_wasted_resolution = wasted_resolution
50
+ best_fit = (width, height)
51
+
52
+ return best_fit
53
+
54
+
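A quick worked example of select_best_resolution (values are illustrative, not from the uploaded file): for an 800x600 image with candidates [(384, 384), (768, 384), (768, 768)], the scales are 0.48, 0.64 and 0.96, the effective resolutions are 110592, 196608 and 442368 pixels, so (768, 768) wins.

# illustrative check of the scoring described above
print(select_best_resolution((800, 600), [(384, 384), (768, 384), (768, 768)]))  # -> (768, 768)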
55
+ class DictOutput(object):
56
+ def keys(self):
57
+ return self.__dict__.keys()
58
+
59
+ def __getitem__(self, item):
60
+ return self.__dict__[item]
61
+
62
+ def __setitem__(self, key, value):
63
+ self.__dict__[key] = value
64
+
65
+
66
+ # for inference samples we can still keep input_ids; they are not used in the end anyway
67
+ @dataclass
68
+ class VLChatProcessorOutput(DictOutput):
69
+ sft_format: str
70
+ input_ids: torch.LongTensor
71
+ target_ids: torch.LongTensor
72
+ images: torch.Tensor
73
+ images_seq_mask: torch.BoolTensor
74
+ images_spatial_crop: torch.LongTensor
75
+ num_image_tokens: List[int]
76
+
77
+ def __len__(self):
78
+ return len(self.input_ids)
79
+
80
+
81
+ @dataclass
82
+ class BatchCollateOutput(DictOutput):
83
+ sft_format: List[str]
84
+ input_ids: torch.LongTensor
85
+ labels: torch.LongTensor
86
+ images: torch.Tensor
87
+ attention_mask: torch.Tensor
88
+ images_seq_mask: torch.BoolTensor
89
+ images_spatial_crop: torch.LongTensor
90
+ seq_lens: List[int]
91
+
92
+ def to(self, device, dtype=torch.bfloat16):
93
+ self.input_ids = self.input_ids.to(device)
94
+ self.labels = self.labels.to(device)
95
+ self.attention_mask = self.attention_mask.to(device)
96
+ self.images_seq_mask = self.images_seq_mask.to(device)
97
+ self.images_spatial_crop = self.images_spatial_crop.to(device)
98
+ self.images = self.images.to(device=device, dtype=dtype)
99
+ return self
100
+
101
+
102
+ class ImageTransform(object):
103
+ def __init__(
104
+ self,
105
+ mean: Optional[Tuple[float, float, float]] = (0.5, 0.5, 0.5),
106
+ std: Optional[Tuple[float, float, float]] = (0.5, 0.5, 0.5),
107
+ normalize: bool = True
108
+ ):
109
+ self.mean = mean
110
+ self.std = std
111
+ self.normalize = normalize
112
+
113
+ transform_pipelines = [
114
+ T.ToTensor()
115
+ ]
116
+
117
+ if normalize:
118
+ transform_pipelines.append(T.Normalize(mean, std))
119
+
120
+ self.transform = T.Compose(transform_pipelines)
121
+
122
+ def __call__(self, pil_img: Image.Image):
123
+ x = self.transform(pil_img)
124
+ return x
125
+
126
+
127
+
128
+ class DeepseekVLV2Processor(ProcessorMixin):
129
+ tokenizer_class = ("LlamaTokenizer", "LlamaTokenizerFast")
130
+ attributes = ["tokenizer"]
131
+
132
+ def __init__(
133
+ self,
134
+ tokenizer: LlamaTokenizerFast,
135
+ candidate_resolutions: Tuple[Tuple[int, int]],
136
+ patch_size: int,
137
+ downsample_ratio: int,
138
+ image_mean: Tuple[float, float, float] = (0.5, 0.5, 0.5),
139
+ image_std: Tuple[float, float, float] = (0.5, 0.5, 0.5),
140
+ normalize: bool = True,
141
+ image_token: str = "<image>",
142
+ pad_token: str = "<|▁pad▁|>",
143
+ add_special_token: bool = False,
144
+ sft_format: str = "deepseek",
145
+ mask_prompt: bool = True,
146
+ ignore_id: int = -100,
147
+ **kwargs,
148
+ ):
149
+
150
+ self.candidate_resolutions = candidate_resolutions
151
+ self.image_size = candidate_resolutions[0][0]
152
+ self.patch_size = patch_size
153
+ self.image_mean = image_mean
154
+ self.image_std = image_std
155
+ self.normalize = normalize
156
+ self.downsample_ratio = downsample_ratio
157
+
158
+ self.image_transform = ImageTransform(mean=image_mean, std=image_std, normalize=normalize)
159
+ self.tokenizer = tokenizer
160
+ self.tokenizer.padding_side = 'left'  # must set this; the padding side makes a difference in batch inference
161
+
162
+ # add the pad_token as special token to use 'tokenizer.pad_token' and 'tokenizer.pad_token_id'
163
+ if tokenizer.pad_token is None:
164
+ self.tokenizer.add_special_tokens({'pad_token': pad_token})
165
+ print(f"Add pad token = ['{pad_token}'] to the tokenizer\n"
166
+ f"{pad_token}:{tokenizer.encode(pad_token, add_special_tokens=False)[0]}")
167
+
168
+ # add image token
169
+ image_token_id = self.tokenizer.vocab.get(image_token)
170
+ if image_token_id is None:
171
+ special_tokens = [image_token]
172
+ special_tokens_dict = {"additional_special_tokens": special_tokens}
173
+ self.tokenizer.add_special_tokens(special_tokens_dict)
174
+ self.image_token_id = self.tokenizer.vocab.get(image_token)
175
+ print(f"Add image token = ['{image_token}'] to the tokenizer\n"
176
+ f"{image_token}:{tokenizer.encode(image_token, add_special_tokens=False)[0]}")
177
+
178
+ # add five special tokens for grounding-related tasks
179
+ # <|ref|>, <|/ref|>, <|det|>, <|/det|>, <|grounding|>
180
+ special_tokens = ['<|ref|>', '<|/ref|>', '<|det|>', '<|/det|>', '<|grounding|>']
181
+ special_tokens_dict = {"additional_special_tokens": special_tokens}
182
+ self.tokenizer.add_special_tokens(special_tokens_dict)
183
+ print(f"Add grounding-related tokens = {special_tokens} to the tokenizer with input_ids\n"
184
+ f"<|ref|>:{tokenizer.encode('<|ref|>', add_special_tokens=False)[0]}\n"
185
+ f"<|/ref|>:{tokenizer.encode('<|/ref|>', add_special_tokens=False)[0]}\n"
186
+ f"<|det|>:{tokenizer.encode('<|det|>', add_special_tokens=False)[0]}\n"
187
+ f"<|/det|>:{tokenizer.encode('<|/det|>', add_special_tokens=False)[0]}\n"
188
+ f"<|grounding|>:{tokenizer.encode('<|grounding|>', add_special_tokens=False)[0]}")
189
+
190
+ # add special tokens for SFT data
191
+ special_tokens = ["<|User|>", "<|Assistant|>"]
192
+ special_tokens_dict = {"additional_special_tokens": special_tokens}
193
+ self.tokenizer.add_special_tokens(special_tokens_dict)
194
+ print(f"Add chat tokens = {special_tokens} to the tokenizer with input_ids\n"
195
+ f"<|User|>:{tokenizer.encode('<|User|>', add_special_tokens=False)[0]}\n"
196
+ f"<|Assistant|>:{tokenizer.encode('<|Assistant|>', add_special_tokens=False)[0]}\n")
197
+
198
+ self.image_token = image_token
199
+ self.pad_token = pad_token
200
+ self.add_special_token = add_special_token
201
+ self.sft_format = sft_format
202
+ self.mask_prompt = mask_prompt
203
+ self.ignore_id = ignore_id
204
+
205
+ super().__init__(
206
+ tokenizer,
207
+ **kwargs,
208
+ )
209
+
210
+ def new_chat_template(self):
211
+ conv = get_conv_template(self.sft_format)
212
+ return conv
213
+
214
+ def format_messages(
215
+ self,
216
+ conversations: List[Dict[str, str]],
217
+ sft_format: str = "deepseek",
218
+ system_prompt: str = "",
219
+ ):
220
+ """
221
+ Applies the SFT template to conversation.
222
+
223
+ Args:
224
+ conversations (List[Dict]): A List of messages.
225
+ sft_format (str, optional): The format of the SFT template to use. Defaults to "deepseek".
226
+ system_prompt (str, optional): The system prompt to use in the SFT template. Defaults to "".
227
+
228
+ Returns:
229
+ sft_prompt (str): The formatted text.
230
+ """
231
+
232
+ conv = get_conv_template(sft_format)
233
+ conv.set_system_message(system_prompt)
234
+ for message in conversations:
235
+ conv.append_message(message["role"], message["content"].strip())
236
+ sft_prompt = conv.get_prompt().strip()
237
+
238
+ return sft_prompt
239
+
240
+ def format_messages_v2(self, messages, pil_images, systems=None):
241
+ """play the role of format_messages_v2 and get_images_info in the last version"""
242
+ tokenized_data = []
243
+ masked_tokenized_data = [] # labels
244
+ images_list = []
245
+ images_seq_mask = []
246
+ images_spatial_crop = []
247
+ num_image_tokens = []
248
+
249
+ image_index = 0
250
+
251
+ conv = get_conv_template(self.sft_format)
252
+ conv_system_message = conv.system_message
253
+
254
+ for idx, message in enumerate(messages):
255
+ if idx == 0:
256
+ tokenized_data += [self.bos_id]
257
+ masked_tokenized_data += [self.bos_id]
258
+ images_seq_mask += [False]
259
+ conv.system_message = conv_system_message
260
+ else:
261
+ conv.system_message = ''
262
+
263
+ if message['role'] == conv.roles[0] or message['role'] == "user":
264
+ conv.reset_message()
265
+ conv.append_message(conv.roles[0], str(message['content']).strip())
266
+ conv.append_message(conv.roles[1], '')
267
+ formatted_question = conv.get_prompt()
268
+ tokenized_str, images, seq_mask, spatial_crop, n_image_tokens = self.tokenize_with_images(
269
+ formatted_question,
270
+ pil_images[image_index: image_index + formatted_question.count(self.image_token)],
271
+ bos=False,
272
+ eos=False,
273
+ cropping=len(pil_images) <= 2
274
+ )
275
+ image_index += formatted_question.count(self.image_token)
276
+
277
+ tokenized_data += tokenized_str
278
+ if self.mask_prompt:
279
+ masked_tokenized_data += [self.ignore_id] * len(tokenized_str)
280
+ else:
281
+ masked_tokenized_data += tokenized_str
282
+ images_list += images
283
+ images_seq_mask += seq_mask
284
+ images_spatial_crop += spatial_crop
285
+ num_image_tokens += n_image_tokens
286
+
287
+ elif message['role'] == conv.roles[1] or message['role'] == "assistant":
288
+ formatted_answer = message['content'].strip()
289
+ assert formatted_answer.count(
290
+ self.image_token) == 0, f"there should be no {self.image_token} in the assistant's reply, but got {messages}"
291
+ tokenized_str, images, seq_mask, spatial_crop, n_image_tokens = self.tokenize_with_images(
292
+ formatted_answer,
293
+ [],
294
+ bos=False,
295
+ eos=True,
296
+ cropping=len(pil_images) <= 2)
297
+
298
+ tokenized_data += tokenized_str
299
+ masked_tokenized_data += tokenized_str
300
+ images_seq_mask += seq_mask
301
+
302
+ elif message['role'] == 'system' or message['role'] == 'deepseekapi-sys':
303
+ # if a system message is present, it is only allowed as the first message of the conversation, and it replaces the template's default system message
304
+ assert idx == 0, 'system information should only exist at the beginning of the conversation'
305
+ formatted_system = message['content'].strip()
306
+ tokenized_str = self.encode(formatted_system, bos=False, eos=False)
307
+ tokenized_data += tokenized_str
308
+ if self.mask_prompt:
309
+ masked_tokenized_data += [self.ignore_id] * len(tokenized_str)
310
+ else:
311
+ masked_tokenized_data += tokenized_str
312
+ seq_mask = [False] * len(tokenized_str)
313
+ images_seq_mask += seq_mask
314
+
315
+ else:
316
+ assert False, f"Unknown role: {message['role']}"
317
+
318
+ assert len(tokenized_data) == len(
319
+ images_seq_mask), f"format_messages_v2: tokenized_str's length {len(tokenized_str)} is not equal to imags_seq_mask's length {len(images_seq_mask)}"
320
+ assert len(images_spatial_crop) == len(num_image_tokens), "the number of cropped images should match the number of image token counts"
321
+
322
+ return tokenized_data, masked_tokenized_data, images_list, images_seq_mask, images_spatial_crop, num_image_tokens
323
+
324
+ def format_prompts(
325
+ self,
326
+ prompts: str,
327
+ sft_format: str = "deepseek",
328
+ system_prompt: str = "",
329
+ ):
330
+ """
331
+ Applies the SFT template to prompts.
332
+
333
+ Args:
334
+ prompts (str): the non-sft formatted prompt;
335
+ sft_format (str, optional): The format of the SFT template to use. Defaults to "deepseek".
336
+ system_prompt (str, optional): The system prompt to use in the SFT template. Defaults to "".
337
+
338
+ Returns:
339
+ sft_prompt (str): The formatted text.
340
+ """
341
+
342
+ conv = get_conv_template(sft_format)
343
+ conv.set_system_message(system_prompt)
344
+ conv.append_message(conv.roles[0], prompts.strip())
345
+ conv.append_message(conv.roles[1], "")
346
+
347
+ sft_prompt = conv.get_prompt().strip()
348
+
349
+ return sft_prompt
350
+
351
+ @property
352
+ def bos_id(self):
353
+ return self.tokenizer.bos_token_id
354
+
355
+ @property
356
+ def eos_id(self):
357
+ return self.tokenizer.eos_token_id
358
+
359
+ @property
360
+ def pad_id(self):
361
+ return self.tokenizer.pad_token_id
362
+
363
+ def encode(self, text: str, bos: bool = True, eos: bool = False):
364
+ t = self.tokenizer.encode(text, add_special_tokens=False)
365
+
366
+ if bos:
367
+ t = [self.bos_id] + t
368
+ if eos:
369
+ t = t + [self.eos_id]
370
+
371
+ return t
372
+
373
+ def decode(self, t: List[int], **kwargs) -> str:
374
+ return self.tokenizer.decode(t, **kwargs)
375
+
376
+ def process_one(
377
+ self,
378
+ prompt: str = None,
379
+ conversations: List[Dict[str, str]] = None,
380
+ images: List[Image.Image] = None,
381
+ apply_sft_format: bool = False,
382
+ inference_mode: bool = True,
383
+ system_prompt: str = "",
384
+ **kwargs,
385
+ ):
386
+ """
387
+
388
+ Args:
389
+ prompt (str): the formatted prompt;
390
+ conversations (List[Dict]): conversations with a list of messages;
391
+ images (List[ImageType]): the list of images;
392
+ apply_sft_format (bool): if prompt is not None, then apply the SFT format to prompt;
393
+ if conversations is not None, then it will always apply the SFT format to conversations;
394
+ inference_mode (bool): if True, then remove the last eos token;
395
+ system_prompt (str): the system prompt;
396
+ **kwargs:
397
+
398
+ Returns:
399
+ outputs (BaseProcessorOutput): the output of the processor,
400
+ - input_ids (torch.LongTensor): [N + image tokens]
401
+ - target_ids (torch.LongTensor): [N + image tokens]
402
+ - images (torch.FloatTensor): [n_images, 3, H, W]
403
+ - image_id (int): the id of the image token
404
+ - num_image_tokens (List[int]): the number of image tokens
405
+ """
406
+
407
+ assert (
408
+ prompt is None or conversations is None
409
+ ), "prompt and conversations cannot be used at the same time."
410
+
411
+ if prompt is None:
412
+ # apply sft format
413
+ sft_format = self.format_messages(
414
+ conversations=conversations,
415
+ sft_format=self.sft_format,
416
+ system_prompt=system_prompt,
417
+ )
418
+ tokenized_str, masked_tokenized_str, images_list, images_seq_mask, images_spatial_crop, num_image_tokens = self.format_messages_v2(
419
+ conversations, images)
420
+ else:
421
+ if apply_sft_format:
422
+ sft_format = self.format_prompts(
423
+ prompts=prompt,
424
+ sft_format=self.sft_format,
425
+ system_prompt=system_prompt
426
+ )
427
+ else:
428
+ sft_format = prompt
429
+ tokenized_str, images_list, images_seq_mask, images_spatial_crop, num_image_tokens = self.tokenize_with_images(
430
+ sft_format, images, bos=True, eos=True, cropping=len(images) <= 2)
431
+ masked_tokenized_str = []
432
+ for token_index in tokenized_str:
433
+ if token_index != self.image_token_id:
434
+ masked_tokenized_str.append(token_index)
435
+ else:
436
+ masked_tokenized_str.append(self.ignore_id)
437
+
438
+ assert len(tokenized_str) == len(images_seq_mask) == len(masked_tokenized_str), \
439
+ (f"tokenized_str's length {len(tokenized_str)}, input_ids' length {len(masked_tokenized_str)}, "
440
+ f"imags_seq_mask's length {len(images_seq_mask)}, are not equal")
441
+
442
+ input_ids = torch.LongTensor(tokenized_str)
443
+ target_ids = torch.LongTensor(masked_tokenized_str)
444
+ images_seq_mask = torch.tensor(images_seq_mask, dtype=torch.bool)
445
+
446
+ # set input_ids < 0 | input_ids == self.image_token_id as ignore_id
447
+ target_ids[(input_ids < 0) | (input_ids == self.image_token_id)] = self.ignore_id
448
+ input_ids[input_ids < 0] = self.pad_id
449
+
450
+ if inference_mode:
451
+ # strip the trailing eos token
452
+ assert input_ids[-1] == self.eos_id
453
+ input_ids = input_ids[:-1]
454
+ target_ids = target_ids[:-1]
455
+ images_seq_mask = images_seq_mask[:-1]
456
+
457
+ if len(images_list) == 0:
458
+ images = torch.zeros((1, 3, self.image_size, self.image_size))
459
+ images_spatial_crop = torch.zeros((1, 2), dtype=torch.long)
460
+ else:
461
+ images = torch.stack(images_list, dim=0)
462
+ images_spatial_crop = torch.tensor(images_spatial_crop, dtype=torch.long)
463
+
464
+ prepare = VLChatProcessorOutput(
465
+ sft_format=sft_format,
466
+ input_ids=input_ids,
467
+ target_ids=target_ids,
468
+ images=images,
469
+ images_seq_mask=images_seq_mask,
470
+ images_spatial_crop=images_spatial_crop,
471
+ num_image_tokens=num_image_tokens
472
+ )
473
+
474
+ return prepare
475
+
476
+ def __call__(
477
+ self,
478
+ *,
479
+ prompt: str = None,
480
+ conversations: List[Dict[str, str]] = None,
481
+ images: List[Image.Image] = None,
482
+ apply_sft_format: bool = False,
483
+ force_batchify: bool = True,
484
+ inference_mode: bool = True,
485
+ system_prompt: str = "",
486
+ **kwargs,
487
+ ):
488
+ """
489
+
490
+ Args:
491
+ prompt (str): the formatted prompt;
492
+ conversations (List[Dict]): conversations with a list of messages;
493
+ images (List[ImageType]): the list of images;
494
+ apply_sft_format (bool): if prompt is not None, then apply the SFT format to prompt;
495
+ if conversations is not None, then it will always apply the SFT format to conversations;
496
+ force_batchify (bool): force batchify the inputs;
497
+ inference_mode (bool): if True, then remove the last eos token;
498
+ system_prompt (str): the system prompt;
499
+ **kwargs:
500
+
501
+ Returns:
502
+ outputs (BaseProcessorOutput): the output of the processor,
503
+ - input_ids (torch.LongTensor): [N + image tokens]
504
+ - images (torch.FloatTensor): [n_images, 3, H, W]
505
+ - image_id (int): the id of the image token
506
+ - num_image_tokens (List[int]): the number of image tokens
507
+ """
508
+
509
+ prepare = self.process_one(
510
+ prompt=prompt,
511
+ conversations=conversations,
512
+ images=images,
513
+ apply_sft_format=apply_sft_format,
514
+ inference_mode=inference_mode,
515
+ system_prompt=system_prompt
516
+ )
517
+
518
+ if force_batchify:
519
+ prepare = self.batchify([prepare])
520
+
521
+ return prepare
522
+
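A hedged end-to-end sketch of calling the processor (not part of the uploaded file; the checkpoint path, image path and role strings are placeholders chosen to match the special tokens registered in __init__):

# illustrative only
from PIL import Image

processor = DeepseekVLV2Processor.from_pretrained("path/to/deepseek-vl2-checkpoint")
conversation = [
    {"role": "<|User|>", "content": "<image>\nDescribe this image."},
    {"role": "<|Assistant|>", "content": ""},
]
pil_images = [Image.open("photo.jpg").convert("RGB")]
prepare_inputs = processor(
    conversations=conversation,
    images=pil_images,
    force_batchify=True,
    system_prompt="",
)  # a BatchCollateOutput with input_ids, images, attention_mask, ...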
523
+ def tokenize_with_images(
524
+ self,
525
+ conversation: str,
526
+ images: List[Image.Image],
527
+ bos: bool = True,
528
+ eos: bool = True,
529
+ cropping: bool = True,
530
+ ):
531
+ """Tokenize text with <image> tags."""
532
+ assert conversation.count(self.image_token) == len(images)
533
+ text_splits = conversation.split(self.image_token)
534
+ images_list, images_seq_mask, images_spatial_crop = [], [], []
535
+ num_image_tokens = []
536
+ tokenized_str = []
537
+ for text_sep, image in zip(text_splits, images):
538
+ """encode text_sep"""
539
+ tokenized_sep = self.encode(text_sep, bos=False, eos=False)
540
+ tokenized_str += tokenized_sep
541
+ images_seq_mask += [False] * len(tokenized_sep)
542
+
543
+ """select best resolution for anyres"""
544
+ if cropping:
545
+ best_width, best_height = select_best_resolution(image.size, self.candidate_resolutions)
546
+ else:
547
+ best_width, best_height = self.image_size, self.image_size
548
+ # print(image.size, (best_width, best_height)) # check the select_best_resolutions func
549
+
550
+ """process the global view"""
551
+ global_view = ImageOps.pad(image, (self.image_size, self.image_size),
552
+ color=tuple(int(x * 255) for x in self.image_transform.mean))
553
+ images_list.append(self.image_transform(global_view))
554
+
555
+ """process the local views"""
556
+ local_view = ImageOps.pad(image, (best_width, best_height),
557
+ color=tuple(int(x * 255) for x in self.image_transform.mean))
558
+ for i in range(0, best_height, self.image_size):
559
+ for j in range(0, best_width, self.image_size):
560
+ images_list.append(
561
+ self.image_transform(local_view.crop((j, i, j + self.image_size, i + self.image_size))))
562
+
563
+ """record height / width crop num"""
564
+ num_width_tiles, num_height_tiles = best_width // self.image_size, best_height // self.image_size
565
+ images_spatial_crop.append([num_width_tiles, num_height_tiles])
566
+
567
+ """add image tokens"""
568
+ h = w = math.ceil((self.image_size // self.patch_size) / self.downsample_ratio)
569
+ # global view tokens: h * (w + 1); the extra 1 per row is the line separator
570
+ tokenized_image = [self.image_token_id] * h * (w + 1)
571
+ # add a separator between global and local views
572
+ tokenized_image += [self.image_token_id]
573
+ # local views tokens, (num_height_tiles * h) * (num_width_tiles * w + 1)
574
+ tokenized_image += [self.image_token_id] * (num_height_tiles * h) * (num_width_tiles * w + 1)
575
+
576
+ tokenized_str += tokenized_image
577
+ images_seq_mask += [True] * len(tokenized_image)
578
+ num_image_tokens.append(len(tokenized_image))
579
+ # print(width_crop_num, height_crop_num, len(tokenized_image)) # test the correctness of the number of image-related tokens
580
+
581
+ """process the last text split"""
582
+ tokenized_sep = self.encode(text_splits[-1], bos=False, eos=False)
583
+ tokenized_str += tokenized_sep
584
+ images_seq_mask += [False] * len(tokenized_sep)
585
+
586
+ """add the bos and eos tokens"""
587
+ if bos:
588
+ tokenized_str = [self.bos_id] + tokenized_str
589
+ images_seq_mask = [False] + images_seq_mask
590
+ if eos:
591
+ tokenized_str = tokenized_str + [self.eos_id]
592
+ images_seq_mask = images_seq_mask + [False]
593
+
594
+ assert len(tokenized_str) == len(
595
+ images_seq_mask), f"tokenize_with_images func: tokenized_str's length {len(tokenized_str)} is not equal to imags_seq_mask's length {len(images_seq_mask)}"
596
+
597
+ return tokenized_str, images_list, images_seq_mask, images_spatial_crop, num_image_tokens
598
+
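A worked count of the image tokens produced above for the default configuration (image_size=384, patch_size=14, downsample_ratio=2; illustrative, not part of the uploaded file):

import math
h = w = math.ceil((384 // 14) / 2)                 # 14
global_tokens = h * (w + 1)                        # 210 for the padded global view
separator = 1                                      # one token between global and local views
local_tokens = (1 * h) * (2 * w + 1)               # 406 for a 2x1 tile grid (best fit 768x384)
total = global_tokens + separator + local_tokens   # 617, the value appended to num_image_tokens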
599
+ def batchify(
600
+ self,
601
+ sample_list: List[VLChatProcessorOutput],
602
+ padding: Literal["left", "right"] = "left"
603
+ ) -> BatchCollateOutput:
604
+ """
605
+ Preprocesses the inputs for multimodal inference.
606
+
607
+ Args:
608
+ sample_list (List[VLChatProcessorOutput]): A list of VLChatProcessorOutput.
609
+ padding (str): The padding method. Defaults to "left".
610
+
611
+ Returns:
612
+ BatchCollateOutput: A dictionary of the inputs to use for multimodal inference.
613
+ """
614
+
615
+ batched_sft_format = [sample.sft_format for sample in sample_list]
616
+ batched_input_ids = [sample.input_ids for sample in sample_list]
617
+ batched_labels = [sample.target_ids for sample in sample_list]
618
+ batched_images_seq_mask = [sample["images_seq_mask"] for sample in sample_list]
619
+ seq_lens = [len(sample) for sample in sample_list]
620
+
621
+ """padding input_ids and images_seq_mask"""
622
+ if padding == "left":
623
+ # the tokenizer defaults to left padding
624
+ ## TODO, You're using a LlamaTokenizerFast tokenizer.
625
+ # Please note that with a fast tokenizer, using the `__call__` method is faster than
626
+ # using a method to encode the text followed by a call to the `pad` method to get a padded encoding.
627
+ padded_input_ids = self.tokenizer.pad({"input_ids": batched_input_ids})
628
+ batched_input_ids, batched_attention_mask = padded_input_ids["input_ids"], padded_input_ids[
629
+ "attention_mask"].bool()
630
+ batched_labels = self.tokenizer.pad({"input_ids": batched_labels})["input_ids"]
631
+ batched_labels[batched_labels == self.pad_id] = self.ignore_id  # pad_id normally never appears in labels, so no extra protection is needed
632
+ batched_images_seq_mask = self.tokenizer.pad({"input_ids": batched_images_seq_mask})["input_ids"]
633
+ batched_images_seq_mask[batched_images_seq_mask == self.pad_id] = False
634
+ else:
635
+ batched_input_ids = pad_sequence(batched_input_ids, batch_first=True, padding_value=self.pad_id)
636
+ batched_labels = pad_sequence(batched_labels, batch_first=True, padding_value=self.ignore_id)
637
+ batched_images_seq_mask = pad_sequence(batched_images_seq_mask, batch_first=True, padding_value=0)
638
+ batched_attention_mask = batched_input_ids != self.pad_id
639
+
640
+ """padding images to max_patch_num"""
641
+ max_n_patches = max(sample["images"].shape[0] for sample in sample_list)
642
+ batched_images = []
643
+ for sample in sample_list:
644
+ images = sample["images"]
645
+ n_pads = max_n_patches - images.shape[0]
646
+ if n_pads > 0:
647
+ pad_images = torch.zeros((n_pads, *images.shape[1:]), dtype=images.dtype)
648
+ images = torch.cat([images, pad_images], dim=0)
649
+ batched_images.append(images)
650
+ batched_images = torch.stack(batched_images, dim=0)
651
+
652
+ """padding images_spatial_crop to max_n_images"""
653
+ max_n_images = max(sample["images_spatial_crop"].shape[0] for sample in sample_list)
654
+ batched_images_spatial_crop = []
655
+ for sample in sample_list:
656
+ images_spatial_crop = sample["images_spatial_crop"]
657
+ n_pads = max_n_images - sample["images_spatial_crop"].shape[0]
658
+ if n_pads > 0:
659
+ pad_images_spatial_crop = torch.full((n_pads, 2), 0, dtype=images_spatial_crop.dtype)
660
+ images_spatial_crop = torch.cat([images_spatial_crop, pad_images_spatial_crop], dim=0)
661
+ batched_images_spatial_crop.append(images_spatial_crop)
662
+ batched_images_spatial_crop = torch.stack(batched_images_spatial_crop, dim=0)
663
+
664
+ batched_samples = BatchCollateOutput(
665
+ input_ids=batched_input_ids,
666
+ attention_mask=batched_attention_mask,
667
+ labels=batched_labels,
668
+ images=batched_images,
669
+ images_seq_mask=batched_images_seq_mask,
670
+ images_spatial_crop=batched_images_spatial_crop,
671
+ sft_format=batched_sft_format,
672
+ seq_lens=seq_lens
673
+ )
674
+
675
+ return batched_samples
processor_config.json ADDED
@@ -0,0 +1,116 @@
1
+ {
2
+ "add_special_token": false,
3
+ "candidate_resolutions": [
4
+ [
5
+ 384,
6
+ 384
7
+ ],
8
+ [
9
+ 384,
10
+ 768
11
+ ],
12
+ [
13
+ 768,
14
+ 384
15
+ ],
16
+ [
17
+ 384,
18
+ 1152
19
+ ],
20
+ [
21
+ 1152,
22
+ 384
23
+ ],
24
+ [
25
+ 384,
26
+ 1536
27
+ ],
28
+ [
29
+ 1536,
30
+ 384
31
+ ],
32
+ [
33
+ 768,
34
+ 768
35
+ ],
36
+ [
37
+ 384,
38
+ 1920
39
+ ],
40
+ [
41
+ 1920,
42
+ 384
43
+ ],
44
+ [
45
+ 384,
46
+ 2304
47
+ ],
48
+ [
49
+ 2304,
50
+ 384
51
+ ],
52
+ [
53
+ 768,
54
+ 1152
55
+ ],
56
+ [
57
+ 1152,
58
+ 768
59
+ ],
60
+ [
61
+ 384,
62
+ 2688
63
+ ],
64
+ [
65
+ 2688,
66
+ 384
67
+ ],
68
+ [
69
+ 384,
70
+ 3072
71
+ ],
72
+ [
73
+ 3072,
74
+ 384
75
+ ],
76
+ [
77
+ 768,
78
+ 1536
79
+ ],
80
+ [
81
+ 1536,
82
+ 768
83
+ ],
84
+ [
85
+ 384,
86
+ 3456
87
+ ],
88
+ [
89
+ 3456,
90
+ 384
91
+ ],
92
+ [
93
+ 1152,
94
+ 1152
95
+ ]
96
+ ],
97
+ "downsample_ratio": 2,
98
+ "ignore_id": -100,
99
+ "image_mean": [
100
+ 0.5,
101
+ 0.5,
102
+ 0.5
103
+ ],
104
+ "image_std": [
105
+ 0.5,
106
+ 0.5,
107
+ 0.5
108
+ ],
109
+ "image_token": "<image>",
110
+ "mask_prompt": false,
111
+ "normalize": true,
112
+ "pad_token": "<\uff5c\u2581pad\u2581\uff5c>",
113
+ "patch_size": 14,
114
+ "processor_class": "DeepseekVLV2Processor",
115
+ "sft_format": "deepseek"
116
+ }
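Every candidate resolution above is a multiple of the 384-pixel base tile, so it maps directly to the tile grid recorded in images_spatial_crop (a small illustrative check, not part of the uploaded files):

# illustrative: tile grid implied by a candidate resolution (width, height)
tile = 384
for w, h in [(384, 384), (768, 384), (1152, 1152)]:
    print((w // tile, h // tile))  # (1, 1), (2, 1), (3, 3)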
siglip_vit.py ADDED
@@ -0,0 +1,660 @@
1
+ # https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/vision_transformer.py
2
+ from dataclasses import dataclass
3
+ import numpy as np
4
+ import torch
5
+ import torch.nn as nn
6
+ import torch.nn.functional as F
7
+ from typing import Final, Optional, Callable, Union, Tuple, List, Set, Dict, Type, Literal, Sequence
8
+ import math
9
+ import warnings
10
+ from timm.layers import (
11
+ PatchEmbed, Mlp, DropPath,
12
+ AttentionPoolLatent, PatchDropout, resample_abs_pos_embed, LayerType
13
+ )
14
+ from timm.models._manipulate import named_apply, checkpoint_seq, adapt_input_conv
15
+ from transformers.modeling_utils import is_flash_attn_2_available
16
+ from xformers.ops import memory_efficient_attention
17
+ from functools import partial
18
+
19
+
20
+ if is_flash_attn_2_available():
21
+ from flash_attn import flash_attn_qkvpacked_func
22
+
23
+
24
+ def _no_grad_trunc_normal_(tensor, mean, std, a, b):
25
+ # Cut & paste from PyTorch official master until it's in a few official releases - RW
26
+ # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
27
+ def norm_cdf(x):
28
+ # Computes standard normal cumulative distribution function
29
+ return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
30
+
31
+ if (mean < a - 2 * std) or (mean > b + 2 * std):
32
+ warnings.warn(
33
+ "mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
34
+ "The distribution of values may be incorrect.",
35
+ stacklevel=2,
36
+ )
37
+
38
+ with torch.no_grad():
39
+ # Values are generated by using a truncated uniform distribution and
40
+ # then using the inverse CDF for the normal distribution.
41
+ # Get upper and lower cdf values
42
+ l = norm_cdf((a - mean) / std) # noqa: E741
43
+ u = norm_cdf((b - mean) / std)
44
+
45
+ # Uniformly fill tensor with values from [l, u], then translate to
46
+ # [2l-1, 2u-1].
47
+ tensor.uniform_(2 * l - 1, 2 * u - 1)
48
+
49
+ # Use inverse cdf transform for normal distribution to get truncated
50
+ # standard normal
51
+ tensor.erfinv_()
52
+
53
+ # Transform to proper mean, std
54
+ tensor.mul_(std * math.sqrt(2.0))
55
+ tensor.add_(mean)
56
+
57
+ # Clamp to ensure it's in the proper range
58
+ tensor.clamp_(min=a, max=b)
59
+ return tensor
60
+
61
+
62
+ def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
63
+ # type: (torch.Tensor, float, float, float, float) -> torch.Tensor
64
+ r"""The original timm.models.layers.weight_init.trunc_normal_ can not handle bfloat16 yet, here we first
65
+ convert the tensor to float32, apply the trunc_normal_() in float32, and then convert it back to its orignal dtype.
66
+ Fills the input Tensor with values drawn from a truncated normal distribution. The values are effectively drawn
67
+ from the normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
68
+ with values outside :math:`[a, b]` redrawn until they are within
69
+ the bounds. The method used for generating the random values works
70
+ best when :math:`a \leq \text{mean} \leq b`.
71
+ Args:
72
+ tensor: an n-dimensional `torch.Tensor`
73
+ mean: the mean of the normal distribution
74
+ std: the standard deviation of the normal distribution
75
+ a: the minimum cutoff value
76
+ b: the maximum cutoff value
77
+ Examples:
78
+ >>> w = torch.empty(3, 5)
79
+ >>> nn.init.trunc_normal_(w)
80
+ """
81
+
82
+ with torch.no_grad():
83
+ dtype = tensor.dtype
84
+ tensor_fp32 = tensor.float()
85
+ tensor_fp32 = _no_grad_trunc_normal_(tensor_fp32, mean, std, a, b)
86
+ tensor_dtype = tensor_fp32.to(dtype=dtype)
87
+ tensor.copy_(tensor_dtype)
88
+
89
+
90
+ def init_weights(self):
91
+ if self.pos_embed is not None:
92
+ trunc_normal_(self.pos_embed, std=self.pos_embed.shape[1] ** -0.5)
93
+ trunc_normal_(self.latent, std=self.latent_dim ** -0.5)
94
+
95
+
96
+ def init_weights_vit_timm(module: nn.Module, name: str = '') -> None:
97
+ """ ViT weight initialization, original timm impl (for reproducibility) """
98
+ if isinstance(module, nn.Linear):
99
+ trunc_normal_(module.weight, std=.02)
100
+ if module.bias is not None:
101
+ nn.init.zeros_(module.bias)
102
+ elif hasattr(module, 'init_weights'):
103
+ module.init_weights()
104
+
105
+
106
+ class Attention(nn.Module):
107
+ fused_attn: Final[bool]
108
+
109
+ def __init__(
110
+ self,
111
+ dim: int,
112
+ num_heads: int = 8,
113
+ qkv_bias: bool = False,
114
+ qk_norm: bool = False,
115
+ attn_drop: float = 0.,
116
+ proj_drop: float = 0.,
117
+ norm_layer: nn.Module = nn.LayerNorm,
118
+ deterministic: bool = False,
119
+ ) -> None:
120
+ super().__init__()
121
+ assert dim % num_heads == 0, 'dim should be divisible by num_heads'
122
+ self.num_heads = num_heads
123
+ self.head_dim = dim // num_heads
124
+ self.scale = self.head_dim ** -0.5
125
+ self.qk_norm = qk_norm
126
+ self.fused_attn = True
127
+ self.deterministic = deterministic
128
+
129
+ self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
130
+ self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
131
+ self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
132
+ self.attn_drop = nn.Dropout(attn_drop)
133
+ self.proj = nn.Linear(dim, dim)
134
+ self.proj_drop = nn.Dropout(proj_drop) if proj_drop > 0. else nn.Identity()
135
+
136
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
137
+ B, N, C = x.shape
138
+ qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim)
139
+
140
+ if not self.qk_norm:
141
+ if self.head_dim % 32 == 0 and is_flash_attn_2_available():
142
+ # flashattn must have head_dim as a multiple of 32
143
+ x = flash_attn_qkvpacked_func(qkv, dropout_p=self.attn_drop.p if self.training else 0.,
144
+ deterministic=self.deterministic)
145
+ else:
146
+ q, k, v = qkv.unbind(2)
147
+ x = memory_efficient_attention(q, k, v, p=self.attn_drop.p if self.training else 0.)
148
+ x = x.reshape(B, N, C)
149
+ x = self.proj(x)
150
+ x = self.proj_drop(x)
151
+ return x
152
+
153
+ qkv = qkv.permute(2, 0, 3, 1, 4)
154
+ q, k, v = qkv.unbind(0)
155
+ q, k = self.q_norm(q), self.k_norm(k)
156
+
157
+ if self.fused_attn:
158
+ with torch.backends.cuda.sdp_kernel(enable_math=False, enable_mem_efficient=False):
159
+ # force flash attention via the sdp_kernel context manager
160
+ x = F.scaled_dot_product_attention(
161
+ q, k, v,
162
+ dropout_p=self.attn_drop.p if self.training else 0.,
163
+ )
164
+ else:
165
+ q = q * self.scale
166
+ attn = q @ k.transpose(-2, -1)
167
+ attn = attn.softmax(dim=-1)
168
+ attn = self.attn_drop(attn)
169
+ x = attn @ v
170
+
171
+ x = x.transpose(1, 2).reshape(B, N, C)
172
+ x = self.proj(x)
173
+ x = self.proj_drop(x)
174
+ return x
175
+
176
+
177
+ class LayerScale(nn.Module):
178
+ def __init__(
179
+ self,
180
+ dim: int,
181
+ init_values: float = 1e-5,
182
+ inplace: bool = False,
183
+ ) -> None:
184
+ super().__init__()
185
+ self.inplace = inplace
186
+ self.gamma = nn.Parameter(init_values * torch.ones(dim))
187
+
188
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
189
+ return x.mul_(self.gamma) if self.inplace else x * self.gamma
190
+
191
+
192
+ class Block(nn.Module):
193
+ def __init__(
194
+ self,
195
+ dim: int,
196
+ num_heads: int,
197
+ mlp_ratio: float = 4.,
198
+ qkv_bias: bool = False,
199
+ qk_norm: bool = False,
200
+ proj_drop: float = 0.,
201
+ attn_drop: float = 0.,
202
+ init_values: Optional[float] = None,
203
+ drop_path: float = 0.,
204
+ act_layer: nn.Module = nn.GELU,
205
+ norm_layer: nn.Module = nn.LayerNorm,
206
+ mlp_layer: nn.Module = Mlp,
207
+ deterministic: bool = False,
208
+ ) -> None:
209
+ super().__init__()
210
+ self.norm1 = norm_layer(dim)
211
+ self.attn = Attention(
212
+ dim,
213
+ num_heads=num_heads,
214
+ qkv_bias=qkv_bias,
215
+ qk_norm=qk_norm,
216
+ attn_drop=attn_drop,
217
+ proj_drop=proj_drop,
218
+ norm_layer=norm_layer,
219
+ deterministic=deterministic,
220
+ )
221
+ self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
222
+ self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
223
+
224
+ self.norm2 = norm_layer(dim)
225
+ self.mlp = mlp_layer(
226
+ in_features=dim,
227
+ hidden_features=int(dim * mlp_ratio),
228
+ act_layer=act_layer,
229
+ drop=proj_drop,
230
+ )
231
+ self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
232
+ self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
233
+
234
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
235
+ x = x + self.drop_path1(self.ls1(self.attn(self.norm1(x))))
236
+ x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
237
+ return x
238
+
239
+
240
+ class VisionTransformer(nn.Module):
241
+ """ Vision Transformer
242
+
243
+ A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`
244
+ - https://arxiv.org/abs/2010.11929
245
+ """
246
+ dynamic_img_size: Final[bool]
247
+
248
+ def __init__(
249
+ self,
250
+ img_size: Union[int, Tuple[int, int]] = 224,
251
+ patch_size: Union[int, Tuple[int, int]] = 16,
252
+ in_chans: int = 3,
253
+ num_classes: int = 1000,
254
+ global_pool: Literal['', 'avg', 'token', 'map'] = 'token',
255
+ embed_dim: int = 768,
256
+ depth: int = 12,
257
+ num_heads: int = 12,
258
+ mlp_ratio: float = 4.,
259
+ qkv_bias: bool = True,
260
+ qk_norm: bool = False,
261
+ init_values: Optional[float] = None,
262
+ class_token: bool = True,
263
+ no_embed_class: bool = False,
264
+ reg_tokens: int = 0,
265
+ pre_norm: bool = False,
266
+ fc_norm: Optional[bool] = None,
267
+ dynamic_img_size: bool = False,
268
+ dynamic_img_pad: bool = False,
269
+ drop_rate: float = 0.,
270
+ pos_drop_rate: float = 0.,
271
+ patch_drop_rate: float = 0.,
272
+ proj_drop_rate: float = 0.,
273
+ attn_drop_rate: float = 0.,
274
+ drop_path_rate: float = 0.,
275
+ weight_init: Literal['skip', 'jax', 'jax_nlhb', 'moco', ''] = '',
276
+ embed_layer: Callable = PatchEmbed,
277
+ norm_layer: Optional[LayerType] = None,
278
+ act_layer: Optional[LayerType] = None,
279
+ block_fn: Type[nn.Module] = Block,
280
+ mlp_layer: Type[nn.Module] = Mlp,
281
+ ignore_head: bool = False,
282
+ deterministic: bool = False,
283
+ num_recomputing_layers: int = 0
284
+ ) -> None:
285
+ """
286
+ Args:
287
+ img_size: Input image size.
288
+ patch_size: Patch size.
289
+ in_chans: Number of image input channels.
290
+ num_classes: Number of classes for classification head.
291
+ global_pool: Type of global pooling for final sequence (default: 'token').
292
+ embed_dim: Transformer embedding dimension.
293
+ depth: Depth of transformer.
294
+ num_heads: Number of attention heads.
295
+ mlp_ratio: Ratio of mlp hidden dim to embedding dim.
296
+ qkv_bias: Enable bias for qkv projections if True.
297
+ init_values: Layer-scale init values (layer-scale enabled if not None).
298
+ class_token: Use class token.
299
+ no_embed_class: Don't include position embeddings for class (or reg) tokens.
300
+ reg_tokens: Number of register tokens.
301
+ fc_norm: Pre head norm after pool (instead of before), if None, enabled when global_pool == 'avg'.
302
+ drop_rate: Head dropout rate.
303
+ pos_drop_rate: Position embedding dropout rate.
304
+ attn_drop_rate: Attention dropout rate.
305
+ drop_path_rate: Stochastic depth rate.
306
+ weight_init: Weight initialization scheme.
307
+ embed_layer: Patch embedding layer.
308
+ norm_layer: Normalization layer.
309
+ act_layer: MLP activation layer.
310
+ block_fn: Transformer block layer.
311
+ """
312
+ super().__init__()
313
+ assert global_pool in ('', 'avg', 'token', 'map')
314
+ assert class_token or global_pool != 'token'
315
+ use_fc_norm = global_pool == 'avg' if fc_norm is None else fc_norm
316
+ # norm_layer = get_norm_layer(norm_layer) or partial(nn.LayerNorm, eps=1e-6)
317
+ # act_layer = get_act_layer(act_layer) or nn.GELU
318
+ norm_layer = partial(nn.LayerNorm, eps=1e-6)
319
+ # SigLIP uses PytorchGELUTanh() rather than the vanilla nn.GELU()
320
+ # https://github.com/huggingface/transformers/blob/78b2929c0554b79e0489b451ce4ece14d265ead2/src/transformers/models/siglip/configuration_siglip.py#L191
321
+ act_layer = partial(nn.GELU, approximate='tanh')
322
+
323
+ self.num_classes = num_classes
324
+ self.global_pool = global_pool
325
+ self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
326
+ self.num_prefix_tokens = 1 if class_token else 0
327
+ self.num_prefix_tokens += reg_tokens
328
+ self.num_reg_tokens = reg_tokens
329
+ self.has_class_token = class_token
330
+ self.no_embed_class = no_embed_class # don't embed prefix positions (includes reg)
331
+ self.dynamic_img_size = dynamic_img_size
332
+ self.grad_checkpointing = False
333
+ self.ignore_head = ignore_head
334
+ self.num_recomputing_layers = num_recomputing_layers
335
+
336
+ embed_args = {}
337
+ if dynamic_img_size:
338
+ # flatten deferred until after pos embed
339
+ embed_args.update(dict(strict_img_size=False, output_fmt='NHWC'))
340
+ self.patch_embed = embed_layer(
341
+ img_size=img_size,
342
+ patch_size=patch_size,
343
+ in_chans=in_chans,
344
+ embed_dim=embed_dim,
345
+ bias=not pre_norm, # disable bias if pre-norm is used (e.g. CLIP)
346
+ dynamic_img_pad=dynamic_img_pad,
347
+ **embed_args,
348
+ )
349
+ num_patches = self.patch_embed.num_patches
350
+
351
+ self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if class_token else None
352
+ self.reg_token = nn.Parameter(torch.zeros(1, reg_tokens, embed_dim)) if reg_tokens else None
353
+ embed_len = num_patches if no_embed_class else num_patches + self.num_prefix_tokens
354
+ self.pos_embed = nn.Parameter(torch.randn(1, embed_len, embed_dim) * .02)
355
+ self.pos_drop = nn.Dropout(p=pos_drop_rate)
356
+ if patch_drop_rate > 0:
357
+ self.patch_drop = PatchDropout(
358
+ patch_drop_rate,
359
+ num_prefix_tokens=self.num_prefix_tokens,
360
+ )
361
+ else:
362
+ self.patch_drop = nn.Identity()
363
+ self.norm_pre = norm_layer(embed_dim) if pre_norm else nn.Identity()
364
+
365
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
366
+ self.blocks = nn.Sequential(*[
367
+ block_fn(
368
+ dim=embed_dim,
369
+ num_heads=num_heads,
370
+ mlp_ratio=mlp_ratio,
371
+ qkv_bias=qkv_bias,
372
+ qk_norm=qk_norm,
373
+ init_values=init_values,
374
+ proj_drop=proj_drop_rate,
375
+ attn_drop=attn_drop_rate,
376
+ drop_path=dpr[i],
377
+ norm_layer=norm_layer,
378
+ act_layer=act_layer,
379
+ mlp_layer=mlp_layer,
380
+ deterministic=deterministic,
381
+ )
382
+ for i in range(depth)])
383
+ self.norm = norm_layer(embed_dim) if not use_fc_norm else nn.Identity()
384
+
385
+ # Classifier Head
386
+ if global_pool == 'map':
387
+ AttentionPoolLatent.init_weights = init_weights
388
+ self.attn_pool = AttentionPoolLatent(
389
+ self.embed_dim,
390
+ num_heads=num_heads,
391
+ mlp_ratio=mlp_ratio,
392
+ norm_layer=norm_layer,
393
+ )
394
+ else:
395
+ self.attn_pool = None
396
+ self.fc_norm = norm_layer(embed_dim) if use_fc_norm else nn.Identity()
397
+ self.head_drop = nn.Dropout(drop_rate)
398
+ self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
399
+
400
+ if weight_init != 'skip':
401
+ self.init_weights(weight_init)
402
+
403
+ def init_weights(self, mode: Literal['jax', 'jax_nlhb', 'moco', ''] = '') -> None:
404
+ assert mode in ('jax', 'jax_nlhb', 'moco', '')
405
+ head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0.
406
+ trunc_normal_(self.pos_embed, std=.02)
407
+ if self.cls_token is not None:
408
+ nn.init.normal_(self.cls_token, std=1e-6)
409
+ named_apply(init_weights_vit_timm, self)
410
+
411
+ @torch.jit.ignore
412
+ def no_weight_decay(self) -> Set:
413
+ return {'pos_embed', 'cls_token', 'dist_token'}
414
+
415
+ @torch.jit.ignore
416
+ def group_matcher(self, coarse: bool = False) -> Dict:
417
+ return dict(
418
+ stem=r'^cls_token|pos_embed|patch_embed', # stem and embed
419
+ blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))]
420
+ )
421
+
422
+ @torch.jit.ignore
423
+ def set_grad_checkpointing(self, enable: bool = True) -> None:
424
+ self.grad_checkpointing = enable
425
+
426
+ @torch.jit.ignore
427
+ def get_classifier(self) -> nn.Module:
428
+ return self.head
429
+
430
+ def reset_classifier(self, num_classes: int, global_pool=None) -> None:
431
+ self.num_classes = num_classes
432
+ if global_pool is not None:
433
+ assert global_pool in ('', 'avg', 'token', 'map')
434
+ if global_pool == 'map' and self.attn_pool is None:
435
+ assert False, "Cannot currently add attention pooling in reset_classifier()."
436
+ elif global_pool != 'map' and self.attn_pool is not None:
437
+ self.attn_pool = None # remove attention pooling
438
+ self.global_pool = global_pool
439
+ self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
440
+
441
+ def _pos_embed(self, x: torch.Tensor) -> torch.Tensor:
442
+ if self.dynamic_img_size:
443
+ B, H, W, C = x.shape
444
+ pos_embed = resample_abs_pos_embed(
445
+ self.pos_embed,
446
+ (H, W),
447
+ num_prefix_tokens=0 if self.no_embed_class else self.num_prefix_tokens,
448
+ )
449
+ x = x.view(B, -1, C)
450
+ else:
451
+ pos_embed = self.pos_embed
452
+
453
+ to_cat = []
454
+ if self.cls_token is not None:
455
+ to_cat.append(self.cls_token.expand(x.shape[0], -1, -1))
456
+ if self.reg_token is not None:
457
+ to_cat.append(self.reg_token.expand(x.shape[0], -1, -1))
458
+
459
+ if self.no_embed_class:
460
+ # deit-3, updated JAX (big vision)
461
+ # position embedding does not overlap with class token, add then concat
462
+ x = x + pos_embed
463
+ if to_cat:
464
+ x = torch.cat(to_cat + [x], dim=1)
465
+ else:
466
+ # original timm, JAX, and deit vit impl
467
+ # pos_embed has entry for class token, concat then add
468
+ if to_cat:
469
+ x = torch.cat(to_cat + [x], dim=1)
470
+ x = x + pos_embed
471
+
472
+ return self.pos_drop(x)
473
+
474
+ def _intermediate_layers(
475
+ self,
476
+ x: torch.Tensor,
477
+ n: Union[int, Sequence] = 1,
478
+ ) -> List[torch.Tensor]:
479
+ outputs, num_blocks = [], len(self.blocks)
480
+ take_indices = set(range(num_blocks - n, num_blocks) if isinstance(n, int) else n)
481
+
482
+ # forward pass
483
+ x = self.patch_embed(x)
484
+ x = self._pos_embed(x)
485
+ x = self.patch_drop(x)
486
+ x = self.norm_pre(x)
487
+ for i, blk in enumerate(self.blocks):
488
+ x = blk(x)
489
+ if i in take_indices:
490
+ outputs.append(x)
491
+
492
+ return outputs
493
+
494
+ def get_intermediate_layers(
495
+ self,
496
+ x: torch.Tensor,
497
+ n: Union[int, Sequence] = 1,
498
+ reshape: bool = False,
499
+ return_prefix_tokens: bool = False,
500
+ norm: bool = False,
501
+ ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]]]:
502
+ """ Intermediate layer accessor (NOTE: This is a WIP experiment).
503
+ Inspired by DINO / DINOv2 interface
504
+ """
505
+ # take last n blocks if n is an int; if n is a sequence, select by matching indices
506
+ outputs = self._intermediate_layers(x, n)
507
+ if norm:
508
+ outputs = [self.norm(out) for out in outputs]
509
+ prefix_tokens = [out[:, 0:self.num_prefix_tokens] for out in outputs]
510
+ outputs = [out[:, self.num_prefix_tokens:] for out in outputs]
511
+
512
+ if reshape:
513
+ grid_size = self.patch_embed.grid_size
514
+ outputs = [
515
+ out.reshape(x.shape[0], grid_size[0], grid_size[1], -1).permute(0, 3, 1, 2).contiguous()
516
+ for out in outputs
517
+ ]
518
+
519
+ if return_prefix_tokens:
520
+ return tuple(zip(outputs, prefix_tokens))
521
+ return tuple(outputs)
522
+
523
+ def forward_features(self, x: torch.Tensor) -> torch.Tensor:
524
+ if getattr(self, "is_first_stage", True):
525
+ x = self.patch_embed(x)
526
+ x = self._pos_embed(x)
527
+ x = self.patch_drop(x)
528
+ x = self.norm_pre(x)
529
+ if self.grad_checkpointing and not torch.jit.is_scripting():
530
+ skip_last = max(1, len(self.blocks) - self.num_recomputing_layers)
531
+ x = checkpoint_seq(self.blocks, x, skip_last=skip_last)
532
+ else:
533
+ x = self.blocks(x)
534
+ if getattr(self, "is_last_stage", True):
535
+ x = self.norm(x)
536
+ return x
537
+
538
+ def forward_head(self, x: torch.Tensor, pre_logits: bool = False) -> torch.Tensor:
539
+ if not getattr(self, "is_last_stage", True):
540
+ return x
541
+ if self.attn_pool is not None:
542
+ x = self.attn_pool(x)
543
+ elif self.global_pool == 'avg':
544
+ x = x[:, self.num_prefix_tokens:].mean(dim=1)
545
+ elif self.global_pool:
546
+ x = x[:, 0] # class token
547
+ x = self.fc_norm(x)
548
+ x = self.head_drop(x)
549
+ return x if pre_logits else self.head(x)
550
+
551
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
552
+ x = self.forward_features(x)
553
+ if not self.ignore_head:
554
+ x = self.forward_head(x)
555
+ return x
556
+
557
+ def to_pipeline(self, pp_size, pp_rank, pp_splits: Optional[List[int]] = None):
558
+ self.is_first_stage = pp_rank == 0
559
+ self.is_last_stage = pp_rank == pp_size - 1
560
+ if not self.is_first_stage and hasattr(self, "patch_embed"):
561
+ del self.patch_embed, self.cls_token, self.reg_token, self.pos_embed, self.pos_drop, self.patch_drop, self.norm_pre
562
+ if not self.is_last_stage and hasattr(self, "norm"):
563
+ del self.norm, self.attn_pool, self.fc_norm, self.head_drop, self.head
564
+ if pp_splits is not None:
565
+ assert len(self.blocks) == sum(pp_splits)
566
+ splits = np.cumsum([0] + pp_splits)
567
+ self.blocks = self.blocks[splits[pp_rank]:splits[pp_rank + 1]]
568
+ return self
569
+
570
+
571
+ @dataclass
572
+ class SigLIPVisionCfg:
573
+ width: int = 1152
574
+ layers: Union[Tuple[int, int, int, int], int] = 27
575
+ heads: int = 16
576
+ patch_size: int = 14
577
+ image_size: Union[Tuple[int, int], int] = 336
578
+ global_pool: str = "map"
579
+ mlp_ratio: float = 3.7362
580
+ class_token: bool = False
581
+ num_classes: int = 0
582
+ use_checkpoint: bool = False
583
+
584
+
585
+ SigLIP_MODEL_CONFIG = {
586
+ "siglip_so400m_patch14_384": {
587
+ "image_size": 384,
588
+ "patch_size": 14,
589
+ "width": 1152,
590
+ "layers": 27,
591
+ "heads": 16,
592
+ "mlp_ratio": 3.7362,
593
+ "global_pool": "map",
594
+ "use_checkpoint": False
595
+ },
596
+
597
+ "siglip_so400m_patch14_224": {
598
+ "image_size": 224,
599
+ "patch_size": 14,
600
+ "width": 1152,
601
+ "layers": 27,
602
+ "heads": 16,
603
+ "mlp_ratio": 3.7362,
604
+ "global_pool": "map",
605
+ "use_checkpoint": False
606
+ },
607
+
608
+ "siglip_large_patch16_384": {
609
+ "image_size": 384,
610
+ "patch_size": 16,
611
+ "width": 1024,
612
+ "layers": 24,
613
+ "heads": 16,
614
+ "mlp_ratio": 4,
615
+ "global_pool": "map",
616
+ "use_checkpoint": False
617
+ }
618
+ }
619
+
620
+
621
+ def create_siglip_vit(
622
+ model_name: str = "siglip_so400m_patch14_384",
623
+ image_size: int = 384,
624
+ select_layer: int = -1,
625
+ ckpt_path: str = "",
626
+ **kwargs
627
+ ):
628
+ assert model_name in SigLIP_MODEL_CONFIG.keys(), f"model name should be in {SigLIP_MODEL_CONFIG.keys()}"
629
+
630
+ vision_cfg = SigLIPVisionCfg(**SigLIP_MODEL_CONFIG[model_name])
631
+
632
+ if select_layer <= 0:
633
+ layers = min(vision_cfg.layers, vision_cfg.layers + select_layer + 1)
634
+ else:
635
+ layers = min(vision_cfg.layers, select_layer)
636
+
637
+ model = VisionTransformer(
638
+ img_size=image_size,
639
+ patch_size=vision_cfg.patch_size,
640
+ embed_dim=vision_cfg.width,
641
+ depth=layers,
642
+ num_heads=vision_cfg.heads,
643
+ mlp_ratio=vision_cfg.mlp_ratio,
644
+ class_token=vision_cfg.class_token,
645
+ global_pool=vision_cfg.global_pool,
646
+ ignore_head=kwargs.get("ignore_head", True),
647
+ weight_init=kwargs.get("weight_init", "skip"),
648
+ num_classes=0,
649
+ deterministic=kwargs.get("deterministic", False),
650
+ num_recomputing_layers=kwargs.get("num_recomputing_layers", 0)
651
+ )
652
+
653
+ if ckpt_path:
654
+ state_dict = torch.load(ckpt_path, map_location="cpu")
655
+
656
+ incompatible_keys = model.load_state_dict(state_dict, strict=False)
657
+ print(f"SigLIP-ViT restores from {ckpt_path},\n"
658
+ f"\tincompatible_keys:', {incompatible_keys}.")
659
+
660
+ return model
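A minimal instantiation sketch (illustrative, not part of the uploaded file; the arguments mirror the SigLIP_MODEL_CONFIG entry above):

# illustrative only: build the SigLIP-so400m backbone used as the vision encoder
vision_tower = create_siglip_vit(model_name="siglip_so400m_patch14_384", image_size=384, ignore_head=True)
# on a GPU (the Attention module above relies on xformers / flash-attn kernels):
# features = vision_tower(pixel_values)  # expected shape roughly [B, 729, 1152] patch features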
special_tokens_map.json ADDED
@@ -0,0 +1,39 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ {
4
+ "content": "<|User|>",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false
9
+ },
10
+ {
11
+ "content": "<|Assistant|>",
12
+ "lstrip": false,
13
+ "normalized": false,
14
+ "rstrip": false,
15
+ "single_word": false
16
+ }
17
+ ],
18
+ "bos_token": {
19
+ "content": "<|begin▁of▁sentence|>",
20
+ "lstrip": false,
21
+ "normalized": false,
22
+ "rstrip": false,
23
+ "single_word": false
24
+ },
25
+ "eos_token": {
26
+ "content": "<|end▁of▁sentence|>",
27
+ "lstrip": false,
28
+ "normalized": false,
29
+ "rstrip": false,
30
+ "single_word": false
31
+ },
32
+ "pad_token": {
33
+ "content": "<|▁pad▁|>",
34
+ "lstrip": false,
35
+ "normalized": false,
36
+ "rstrip": false,
37
+ "single_word": false
38
+ }
39
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8d1800d401506f60fae88c7103df9105a6f58ab3ff4738db68cfee7e96edaa85
3
+ size 10760044
tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff