Jiafei1224 committed
Commit 2a16948 · verified · 1 Parent(s): 27062d7

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
added_tokens.json ADDED
@@ -0,0 +1,429 @@
1
+ {
2
+ "</tool_call>": 151658,
3
+ "<DEPTH_0>": 151667,
4
+ "<DEPTH_100>": 151767,
5
+ "<DEPTH_101>": 151768,
6
+ "<DEPTH_102>": 151769,
7
+ "<DEPTH_103>": 151770,
8
+ "<DEPTH_104>": 151771,
9
+ "<DEPTH_105>": 151772,
10
+ "<DEPTH_106>": 151773,
11
+ "<DEPTH_107>": 151774,
12
+ "<DEPTH_108>": 151775,
13
+ "<DEPTH_109>": 151776,
14
+ "<DEPTH_10>": 151677,
15
+ "<DEPTH_110>": 151777,
16
+ "<DEPTH_111>": 151778,
17
+ "<DEPTH_112>": 151779,
18
+ "<DEPTH_113>": 151780,
19
+ "<DEPTH_114>": 151781,
20
+ "<DEPTH_115>": 151782,
21
+ "<DEPTH_116>": 151783,
22
+ "<DEPTH_117>": 151784,
23
+ "<DEPTH_118>": 151785,
24
+ "<DEPTH_119>": 151786,
25
+ "<DEPTH_11>": 151678,
26
+ "<DEPTH_120>": 151787,
27
+ "<DEPTH_121>": 151788,
28
+ "<DEPTH_122>": 151789,
29
+ "<DEPTH_123>": 151790,
30
+ "<DEPTH_124>": 151791,
31
+ "<DEPTH_125>": 151792,
32
+ "<DEPTH_126>": 151793,
33
+ "<DEPTH_127>": 151794,
34
+ "<DEPTH_12>": 151679,
35
+ "<DEPTH_13>": 151680,
36
+ "<DEPTH_14>": 151681,
37
+ "<DEPTH_15>": 151682,
38
+ "<DEPTH_16>": 151683,
39
+ "<DEPTH_17>": 151684,
40
+ "<DEPTH_18>": 151685,
41
+ "<DEPTH_19>": 151686,
42
+ "<DEPTH_1>": 151668,
43
+ "<DEPTH_20>": 151687,
44
+ "<DEPTH_21>": 151688,
45
+ "<DEPTH_22>": 151689,
46
+ "<DEPTH_23>": 151690,
47
+ "<DEPTH_24>": 151691,
48
+ "<DEPTH_25>": 151692,
49
+ "<DEPTH_26>": 151693,
50
+ "<DEPTH_27>": 151694,
51
+ "<DEPTH_28>": 151695,
52
+ "<DEPTH_29>": 151696,
53
+ "<DEPTH_2>": 151669,
54
+ "<DEPTH_30>": 151697,
55
+ "<DEPTH_31>": 151698,
56
+ "<DEPTH_32>": 151699,
57
+ "<DEPTH_33>": 151700,
58
+ "<DEPTH_34>": 151701,
59
+ "<DEPTH_35>": 151702,
60
+ "<DEPTH_36>": 151703,
61
+ "<DEPTH_37>": 151704,
62
+ "<DEPTH_38>": 151705,
63
+ "<DEPTH_39>": 151706,
64
+ "<DEPTH_3>": 151670,
65
+ "<DEPTH_40>": 151707,
66
+ "<DEPTH_41>": 151708,
67
+ "<DEPTH_42>": 151709,
68
+ "<DEPTH_43>": 151710,
69
+ "<DEPTH_44>": 151711,
70
+ "<DEPTH_45>": 151712,
71
+ "<DEPTH_46>": 151713,
72
+ "<DEPTH_47>": 151714,
73
+ "<DEPTH_48>": 151715,
74
+ "<DEPTH_49>": 151716,
75
+ "<DEPTH_4>": 151671,
76
+ "<DEPTH_50>": 151717,
77
+ "<DEPTH_51>": 151718,
78
+ "<DEPTH_52>": 151719,
79
+ "<DEPTH_53>": 151720,
80
+ "<DEPTH_54>": 151721,
81
+ "<DEPTH_55>": 151722,
82
+ "<DEPTH_56>": 151723,
83
+ "<DEPTH_57>": 151724,
84
+ "<DEPTH_58>": 151725,
85
+ "<DEPTH_59>": 151726,
86
+ "<DEPTH_5>": 151672,
87
+ "<DEPTH_60>": 151727,
88
+ "<DEPTH_61>": 151728,
89
+ "<DEPTH_62>": 151729,
90
+ "<DEPTH_63>": 151730,
91
+ "<DEPTH_64>": 151731,
92
+ "<DEPTH_65>": 151732,
93
+ "<DEPTH_66>": 151733,
94
+ "<DEPTH_67>": 151734,
95
+ "<DEPTH_68>": 151735,
96
+ "<DEPTH_69>": 151736,
97
+ "<DEPTH_6>": 151673,
98
+ "<DEPTH_70>": 151737,
99
+ "<DEPTH_71>": 151738,
100
+ "<DEPTH_72>": 151739,
101
+ "<DEPTH_73>": 151740,
102
+ "<DEPTH_74>": 151741,
103
+ "<DEPTH_75>": 151742,
104
+ "<DEPTH_76>": 151743,
105
+ "<DEPTH_77>": 151744,
106
+ "<DEPTH_78>": 151745,
107
+ "<DEPTH_79>": 151746,
108
+ "<DEPTH_7>": 151674,
109
+ "<DEPTH_80>": 151747,
110
+ "<DEPTH_81>": 151748,
111
+ "<DEPTH_82>": 151749,
112
+ "<DEPTH_83>": 151750,
113
+ "<DEPTH_84>": 151751,
114
+ "<DEPTH_85>": 151752,
115
+ "<DEPTH_86>": 151753,
116
+ "<DEPTH_87>": 151754,
117
+ "<DEPTH_88>": 151755,
118
+ "<DEPTH_89>": 151756,
119
+ "<DEPTH_8>": 151675,
120
+ "<DEPTH_90>": 151757,
121
+ "<DEPTH_91>": 151758,
122
+ "<DEPTH_92>": 151759,
123
+ "<DEPTH_93>": 151760,
124
+ "<DEPTH_94>": 151761,
125
+ "<DEPTH_95>": 151762,
126
+ "<DEPTH_96>": 151763,
127
+ "<DEPTH_97>": 151764,
128
+ "<DEPTH_98>": 151765,
129
+ "<DEPTH_99>": 151766,
130
+ "<DEPTH_9>": 151676,
131
+ "<DEPTH_END>": 151666,
132
+ "<DEPTH_START>": 151665,
133
+ "<im_col>": 152067,
134
+ "<im_end>": 152065,
135
+ "<im_low>": 152069,
136
+ "<im_patch>": 152066,
137
+ "<im_start>": 152064,
138
+ "<tool_call>": 151657,
139
+ "<|box_end|>": 151649,
140
+ "<|box_start|>": 151648,
141
+ "<|endoftext|>": 151643,
142
+ "<|file_sep|>": 151664,
143
+ "<|fim_middle|>": 151660,
144
+ "<|fim_pad|>": 151662,
145
+ "<|fim_prefix|>": 151659,
146
+ "<|fim_suffix|>": 151661,
147
+ "<|im_end|>": 151645,
148
+ "<|im_start|>": 151644,
149
+ "<|image_pad|>": 151655,
150
+ "<|image|>": 152068,
151
+ "<|object_ref_end|>": 151647,
152
+ "<|object_ref_start|>": 151646,
153
+ "<|quad_end|>": 151651,
154
+ "<|quad_start|>": 151650,
155
+ "<|repo_name|>": 151663,
156
+ "<|video_pad|>": 151656,
157
+ "<|vision_end|>": 151653,
158
+ "<|vision_pad|>": 151654,
159
+ "<|vision_start|>": 151652,
160
+ "|<EXTRA_TOKENS_0>|": 151795,
161
+ "|<EXTRA_TOKENS_100>|": 151895,
162
+ "|<EXTRA_TOKENS_101>|": 151896,
163
+ "|<EXTRA_TOKENS_102>|": 151897,
164
+ "|<EXTRA_TOKENS_103>|": 151898,
165
+ "|<EXTRA_TOKENS_104>|": 151899,
166
+ "|<EXTRA_TOKENS_105>|": 151900,
167
+ "|<EXTRA_TOKENS_106>|": 151901,
168
+ "|<EXTRA_TOKENS_107>|": 151902,
169
+ "|<EXTRA_TOKENS_108>|": 151903,
170
+ "|<EXTRA_TOKENS_109>|": 151904,
171
+ "|<EXTRA_TOKENS_10>|": 151805,
172
+ "|<EXTRA_TOKENS_110>|": 151905,
173
+ "|<EXTRA_TOKENS_111>|": 151906,
174
+ "|<EXTRA_TOKENS_112>|": 151907,
175
+ "|<EXTRA_TOKENS_113>|": 151908,
176
+ "|<EXTRA_TOKENS_114>|": 151909,
177
+ "|<EXTRA_TOKENS_115>|": 151910,
178
+ "|<EXTRA_TOKENS_116>|": 151911,
179
+ "|<EXTRA_TOKENS_117>|": 151912,
180
+ "|<EXTRA_TOKENS_118>|": 151913,
181
+ "|<EXTRA_TOKENS_119>|": 151914,
182
+ "|<EXTRA_TOKENS_11>|": 151806,
183
+ "|<EXTRA_TOKENS_120>|": 151915,
184
+ "|<EXTRA_TOKENS_121>|": 151916,
185
+ "|<EXTRA_TOKENS_122>|": 151917,
186
+ "|<EXTRA_TOKENS_123>|": 151918,
187
+ "|<EXTRA_TOKENS_124>|": 151919,
188
+ "|<EXTRA_TOKENS_125>|": 151920,
189
+ "|<EXTRA_TOKENS_126>|": 151921,
190
+ "|<EXTRA_TOKENS_127>|": 151922,
191
+ "|<EXTRA_TOKENS_128>|": 151923,
192
+ "|<EXTRA_TOKENS_129>|": 151924,
193
+ "|<EXTRA_TOKENS_12>|": 151807,
194
+ "|<EXTRA_TOKENS_130>|": 151925,
195
+ "|<EXTRA_TOKENS_131>|": 151926,
196
+ "|<EXTRA_TOKENS_132>|": 151927,
197
+ "|<EXTRA_TOKENS_133>|": 151928,
198
+ "|<EXTRA_TOKENS_134>|": 151929,
199
+ "|<EXTRA_TOKENS_135>|": 151930,
200
+ "|<EXTRA_TOKENS_136>|": 151931,
201
+ "|<EXTRA_TOKENS_137>|": 151932,
202
+ "|<EXTRA_TOKENS_138>|": 151933,
203
+ "|<EXTRA_TOKENS_139>|": 151934,
204
+ "|<EXTRA_TOKENS_13>|": 151808,
205
+ "|<EXTRA_TOKENS_140>|": 151935,
206
+ "|<EXTRA_TOKENS_141>|": 151936,
207
+ "|<EXTRA_TOKENS_142>|": 151937,
208
+ "|<EXTRA_TOKENS_143>|": 151938,
209
+ "|<EXTRA_TOKENS_144>|": 151939,
210
+ "|<EXTRA_TOKENS_145>|": 151940,
211
+ "|<EXTRA_TOKENS_146>|": 151941,
212
+ "|<EXTRA_TOKENS_147>|": 151942,
213
+ "|<EXTRA_TOKENS_148>|": 151943,
214
+ "|<EXTRA_TOKENS_149>|": 151944,
215
+ "|<EXTRA_TOKENS_14>|": 151809,
216
+ "|<EXTRA_TOKENS_150>|": 151945,
217
+ "|<EXTRA_TOKENS_151>|": 151946,
218
+ "|<EXTRA_TOKENS_152>|": 151947,
219
+ "|<EXTRA_TOKENS_153>|": 151948,
220
+ "|<EXTRA_TOKENS_154>|": 151949,
221
+ "|<EXTRA_TOKENS_155>|": 151950,
222
+ "|<EXTRA_TOKENS_156>|": 151951,
223
+ "|<EXTRA_TOKENS_157>|": 151952,
224
+ "|<EXTRA_TOKENS_158>|": 151953,
225
+ "|<EXTRA_TOKENS_159>|": 151954,
226
+ "|<EXTRA_TOKENS_15>|": 151810,
227
+ "|<EXTRA_TOKENS_160>|": 151955,
228
+ "|<EXTRA_TOKENS_161>|": 151956,
229
+ "|<EXTRA_TOKENS_162>|": 151957,
230
+ "|<EXTRA_TOKENS_163>|": 151958,
231
+ "|<EXTRA_TOKENS_164>|": 151959,
232
+ "|<EXTRA_TOKENS_165>|": 151960,
233
+ "|<EXTRA_TOKENS_166>|": 151961,
234
+ "|<EXTRA_TOKENS_167>|": 151962,
235
+ "|<EXTRA_TOKENS_168>|": 151963,
236
+ "|<EXTRA_TOKENS_169>|": 151964,
237
+ "|<EXTRA_TOKENS_16>|": 151811,
238
+ "|<EXTRA_TOKENS_170>|": 151965,
239
+ "|<EXTRA_TOKENS_171>|": 151966,
240
+ "|<EXTRA_TOKENS_172>|": 151967,
241
+ "|<EXTRA_TOKENS_173>|": 151968,
242
+ "|<EXTRA_TOKENS_174>|": 151969,
243
+ "|<EXTRA_TOKENS_175>|": 151970,
244
+ "|<EXTRA_TOKENS_176>|": 151971,
245
+ "|<EXTRA_TOKENS_177>|": 151972,
246
+ "|<EXTRA_TOKENS_178>|": 151973,
247
+ "|<EXTRA_TOKENS_179>|": 151974,
248
+ "|<EXTRA_TOKENS_17>|": 151812,
249
+ "|<EXTRA_TOKENS_180>|": 151975,
250
+ "|<EXTRA_TOKENS_181>|": 151976,
251
+ "|<EXTRA_TOKENS_182>|": 151977,
252
+ "|<EXTRA_TOKENS_183>|": 151978,
253
+ "|<EXTRA_TOKENS_184>|": 151979,
254
+ "|<EXTRA_TOKENS_185>|": 151980,
255
+ "|<EXTRA_TOKENS_186>|": 151981,
256
+ "|<EXTRA_TOKENS_187>|": 151982,
257
+ "|<EXTRA_TOKENS_188>|": 151983,
258
+ "|<EXTRA_TOKENS_189>|": 151984,
259
+ "|<EXTRA_TOKENS_18>|": 151813,
260
+ "|<EXTRA_TOKENS_190>|": 151985,
261
+ "|<EXTRA_TOKENS_191>|": 151986,
262
+ "|<EXTRA_TOKENS_192>|": 151987,
263
+ "|<EXTRA_TOKENS_193>|": 151988,
264
+ "|<EXTRA_TOKENS_194>|": 151989,
265
+ "|<EXTRA_TOKENS_195>|": 151990,
266
+ "|<EXTRA_TOKENS_196>|": 151991,
267
+ "|<EXTRA_TOKENS_197>|": 151992,
268
+ "|<EXTRA_TOKENS_198>|": 151993,
269
+ "|<EXTRA_TOKENS_199>|": 151994,
270
+ "|<EXTRA_TOKENS_19>|": 151814,
271
+ "|<EXTRA_TOKENS_1>|": 151796,
272
+ "|<EXTRA_TOKENS_200>|": 151995,
273
+ "|<EXTRA_TOKENS_201>|": 151996,
274
+ "|<EXTRA_TOKENS_202>|": 151997,
275
+ "|<EXTRA_TOKENS_203>|": 151998,
276
+ "|<EXTRA_TOKENS_204>|": 151999,
277
+ "|<EXTRA_TOKENS_205>|": 152000,
278
+ "|<EXTRA_TOKENS_206>|": 152001,
279
+ "|<EXTRA_TOKENS_207>|": 152002,
280
+ "|<EXTRA_TOKENS_208>|": 152003,
281
+ "|<EXTRA_TOKENS_209>|": 152004,
282
+ "|<EXTRA_TOKENS_20>|": 151815,
283
+ "|<EXTRA_TOKENS_210>|": 152005,
284
+ "|<EXTRA_TOKENS_211>|": 152006,
285
+ "|<EXTRA_TOKENS_212>|": 152007,
286
+ "|<EXTRA_TOKENS_213>|": 152008,
287
+ "|<EXTRA_TOKENS_214>|": 152009,
288
+ "|<EXTRA_TOKENS_215>|": 152010,
289
+ "|<EXTRA_TOKENS_216>|": 152011,
290
+ "|<EXTRA_TOKENS_217>|": 152012,
291
+ "|<EXTRA_TOKENS_218>|": 152013,
292
+ "|<EXTRA_TOKENS_219>|": 152014,
293
+ "|<EXTRA_TOKENS_21>|": 151816,
294
+ "|<EXTRA_TOKENS_220>|": 152015,
295
+ "|<EXTRA_TOKENS_221>|": 152016,
296
+ "|<EXTRA_TOKENS_222>|": 152017,
297
+ "|<EXTRA_TOKENS_223>|": 152018,
298
+ "|<EXTRA_TOKENS_224>|": 152019,
299
+ "|<EXTRA_TOKENS_225>|": 152020,
300
+ "|<EXTRA_TOKENS_226>|": 152021,
301
+ "|<EXTRA_TOKENS_227>|": 152022,
302
+ "|<EXTRA_TOKENS_228>|": 152023,
303
+ "|<EXTRA_TOKENS_229>|": 152024,
304
+ "|<EXTRA_TOKENS_22>|": 151817,
305
+ "|<EXTRA_TOKENS_230>|": 152025,
306
+ "|<EXTRA_TOKENS_231>|": 152026,
307
+ "|<EXTRA_TOKENS_232>|": 152027,
308
+ "|<EXTRA_TOKENS_233>|": 152028,
309
+ "|<EXTRA_TOKENS_234>|": 152029,
310
+ "|<EXTRA_TOKENS_235>|": 152030,
311
+ "|<EXTRA_TOKENS_236>|": 152031,
312
+ "|<EXTRA_TOKENS_237>|": 152032,
313
+ "|<EXTRA_TOKENS_238>|": 152033,
314
+ "|<EXTRA_TOKENS_239>|": 152034,
315
+ "|<EXTRA_TOKENS_23>|": 151818,
316
+ "|<EXTRA_TOKENS_240>|": 152035,
317
+ "|<EXTRA_TOKENS_241>|": 152036,
318
+ "|<EXTRA_TOKENS_242>|": 152037,
319
+ "|<EXTRA_TOKENS_243>|": 152038,
320
+ "|<EXTRA_TOKENS_244>|": 152039,
321
+ "|<EXTRA_TOKENS_245>|": 152040,
322
+ "|<EXTRA_TOKENS_246>|": 152041,
323
+ "|<EXTRA_TOKENS_247>|": 152042,
324
+ "|<EXTRA_TOKENS_248>|": 152043,
325
+ "|<EXTRA_TOKENS_249>|": 152044,
326
+ "|<EXTRA_TOKENS_24>|": 151819,
327
+ "|<EXTRA_TOKENS_250>|": 152045,
328
+ "|<EXTRA_TOKENS_251>|": 152046,
329
+ "|<EXTRA_TOKENS_252>|": 152047,
330
+ "|<EXTRA_TOKENS_253>|": 152048,
331
+ "|<EXTRA_TOKENS_254>|": 152049,
332
+ "|<EXTRA_TOKENS_255>|": 152050,
333
+ "|<EXTRA_TOKENS_256>|": 152051,
334
+ "|<EXTRA_TOKENS_257>|": 152052,
335
+ "|<EXTRA_TOKENS_258>|": 152053,
336
+ "|<EXTRA_TOKENS_259>|": 152054,
337
+ "|<EXTRA_TOKENS_25>|": 151820,
338
+ "|<EXTRA_TOKENS_260>|": 152055,
339
+ "|<EXTRA_TOKENS_261>|": 152056,
340
+ "|<EXTRA_TOKENS_262>|": 152057,
341
+ "|<EXTRA_TOKENS_263>|": 152058,
342
+ "|<EXTRA_TOKENS_264>|": 152059,
343
+ "|<EXTRA_TOKENS_265>|": 152060,
344
+ "|<EXTRA_TOKENS_266>|": 152061,
345
+ "|<EXTRA_TOKENS_267>|": 152062,
346
+ "|<EXTRA_TOKENS_268>|": 152063,
347
+ "|<EXTRA_TOKENS_26>|": 151821,
348
+ "|<EXTRA_TOKENS_27>|": 151822,
349
+ "|<EXTRA_TOKENS_28>|": 151823,
350
+ "|<EXTRA_TOKENS_29>|": 151824,
351
+ "|<EXTRA_TOKENS_2>|": 151797,
352
+ "|<EXTRA_TOKENS_30>|": 151825,
353
+ "|<EXTRA_TOKENS_31>|": 151826,
354
+ "|<EXTRA_TOKENS_32>|": 151827,
355
+ "|<EXTRA_TOKENS_33>|": 151828,
356
+ "|<EXTRA_TOKENS_34>|": 151829,
357
+ "|<EXTRA_TOKENS_35>|": 151830,
358
+ "|<EXTRA_TOKENS_36>|": 151831,
359
+ "|<EXTRA_TOKENS_37>|": 151832,
360
+ "|<EXTRA_TOKENS_38>|": 151833,
361
+ "|<EXTRA_TOKENS_39>|": 151834,
362
+ "|<EXTRA_TOKENS_3>|": 151798,
363
+ "|<EXTRA_TOKENS_40>|": 151835,
364
+ "|<EXTRA_TOKENS_41>|": 151836,
365
+ "|<EXTRA_TOKENS_42>|": 151837,
366
+ "|<EXTRA_TOKENS_43>|": 151838,
367
+ "|<EXTRA_TOKENS_44>|": 151839,
368
+ "|<EXTRA_TOKENS_45>|": 151840,
369
+ "|<EXTRA_TOKENS_46>|": 151841,
370
+ "|<EXTRA_TOKENS_47>|": 151842,
371
+ "|<EXTRA_TOKENS_48>|": 151843,
372
+ "|<EXTRA_TOKENS_49>|": 151844,
373
+ "|<EXTRA_TOKENS_4>|": 151799,
374
+ "|<EXTRA_TOKENS_50>|": 151845,
375
+ "|<EXTRA_TOKENS_51>|": 151846,
376
+ "|<EXTRA_TOKENS_52>|": 151847,
377
+ "|<EXTRA_TOKENS_53>|": 151848,
378
+ "|<EXTRA_TOKENS_54>|": 151849,
379
+ "|<EXTRA_TOKENS_55>|": 151850,
380
+ "|<EXTRA_TOKENS_56>|": 151851,
381
+ "|<EXTRA_TOKENS_57>|": 151852,
382
+ "|<EXTRA_TOKENS_58>|": 151853,
383
+ "|<EXTRA_TOKENS_59>|": 151854,
384
+ "|<EXTRA_TOKENS_5>|": 151800,
385
+ "|<EXTRA_TOKENS_60>|": 151855,
386
+ "|<EXTRA_TOKENS_61>|": 151856,
387
+ "|<EXTRA_TOKENS_62>|": 151857,
388
+ "|<EXTRA_TOKENS_63>|": 151858,
389
+ "|<EXTRA_TOKENS_64>|": 151859,
390
+ "|<EXTRA_TOKENS_65>|": 151860,
391
+ "|<EXTRA_TOKENS_66>|": 151861,
392
+ "|<EXTRA_TOKENS_67>|": 151862,
393
+ "|<EXTRA_TOKENS_68>|": 151863,
394
+ "|<EXTRA_TOKENS_69>|": 151864,
395
+ "|<EXTRA_TOKENS_6>|": 151801,
396
+ "|<EXTRA_TOKENS_70>|": 151865,
397
+ "|<EXTRA_TOKENS_71>|": 151866,
398
+ "|<EXTRA_TOKENS_72>|": 151867,
399
+ "|<EXTRA_TOKENS_73>|": 151868,
400
+ "|<EXTRA_TOKENS_74>|": 151869,
401
+ "|<EXTRA_TOKENS_75>|": 151870,
402
+ "|<EXTRA_TOKENS_76>|": 151871,
403
+ "|<EXTRA_TOKENS_77>|": 151872,
404
+ "|<EXTRA_TOKENS_78>|": 151873,
405
+ "|<EXTRA_TOKENS_79>|": 151874,
406
+ "|<EXTRA_TOKENS_7>|": 151802,
407
+ "|<EXTRA_TOKENS_80>|": 151875,
408
+ "|<EXTRA_TOKENS_81>|": 151876,
409
+ "|<EXTRA_TOKENS_82>|": 151877,
410
+ "|<EXTRA_TOKENS_83>|": 151878,
411
+ "|<EXTRA_TOKENS_84>|": 151879,
412
+ "|<EXTRA_TOKENS_85>|": 151880,
413
+ "|<EXTRA_TOKENS_86>|": 151881,
414
+ "|<EXTRA_TOKENS_87>|": 151882,
415
+ "|<EXTRA_TOKENS_88>|": 151883,
416
+ "|<EXTRA_TOKENS_89>|": 151884,
417
+ "|<EXTRA_TOKENS_8>|": 151803,
418
+ "|<EXTRA_TOKENS_90>|": 151885,
419
+ "|<EXTRA_TOKENS_91>|": 151886,
420
+ "|<EXTRA_TOKENS_92>|": 151887,
421
+ "|<EXTRA_TOKENS_93>|": 151888,
422
+ "|<EXTRA_TOKENS_94>|": 151889,
423
+ "|<EXTRA_TOKENS_95>|": 151890,
424
+ "|<EXTRA_TOKENS_96>|": 151891,
425
+ "|<EXTRA_TOKENS_97>|": 151892,
426
+ "|<EXTRA_TOKENS_98>|": 151893,
427
+ "|<EXTRA_TOKENS_99>|": 151894,
428
+ "|<EXTRA_TOKENS_9>|": 151804
429
+ }
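
The sketch below is a quick, non-authoritative illustration of how the entries above behave once the tokenizer shipped in this commit is loaded: each added token string should map to the fixed id listed in added_tokens.json. The repository id is a placeholder, and it assumes the tokenizer loads through the standard AutoTokenizer path.

# Minimal sketch (placeholder repo id): round-trip a few added tokens to their ids.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("<this-repo-id>")

# Expected ids from added_tokens.json, e.g. "<im_patch>" -> 152066.
for token in ["<im_patch>", "<|im_start|>", "<DEPTH_START>", "|<EXTRA_TOKENS_0>|"]:
    print(token, tokenizer.convert_tokens_to_ids(token))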
chat_template.jinja ADDED
@@ -0,0 +1 @@
1
+ {% for message in messages %}{%- if (loop.index % 2 == 1 and message['role'].lower() != 'user') or (loop.index % 2 == 0 and message['role'].lower() != 'assistant') -%}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{%- endif -%}{{ message['role'].capitalize() + ': ' }}{% if message['content'] is string %}{{ message['content'] }}{% else %}{% for content in message['content'] %}{% if content['type'] == 'text' %}{{ content['text'] }}{%- if not loop.last -%}{{ ' ' }}{%- endif -%}{% endif %}{% endfor %}{% endif %}{%- if not loop.last -%}{{ ' ' }}{%- endif -%}{% endfor %}{% if add_generation_prompt %}{{ ' Assistant:' }}{% endif %}
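
As a rough sketch of what the template above produces: roles must strictly alternate user/assistant, each turn is rendered as "Role: content", and add_generation_prompt appends " Assistant:". This assumes the template is registered as the tokenizer's chat template; the repository id is a placeholder.

# Minimal sketch (placeholder repo id): render a one-turn conversation with the template above.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("<this-repo-id>")
messages = [{"role": "user", "content": "Describe this image."}]
text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(text)  # expected form: "User: Describe this image. Assistant:"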
config.json ADDED
@@ -0,0 +1,81 @@
1
+ {
2
+ "adapter_config": {
3
+ "attention_dropout": 0.0,
4
+ "float32_attention": true,
5
+ "head_dim": 72,
6
+ "hidden_act": "silu",
7
+ "hidden_size": 1152,
8
+ "image_feature_dropout": 0.0,
9
+ "initializer_range": 0.02,
10
+ "intermediate_size": 18944,
11
+ "model_type": "",
12
+ "num_attention_heads": 16,
13
+ "num_key_value_heads": 16,
14
+ "residual_dropout": 0.0,
15
+ "text_hidden_size": 3584,
16
+ "vit_layers": [
17
+ -3,
18
+ -9
19
+ ]
20
+ },
21
+ "architectures": [
22
+ "Molmo2ForConditionalGeneration"
23
+ ],
24
+ "auto_map": {
25
+ "AutoConfig": "configuration_molmo2.Molmo2Config",
26
+ "AutoModelForImageTextToText": "modeling_molmo2.Molmo2ForConditionalGeneration"
27
+ },
28
+ "image_patch_id": 152066,
29
+ "initializer_range": 0.02,
30
+ "llm_config": {
31
+ "additional_vocab_size": 128,
32
+ "attention_dropout": 0.0,
33
+ "embedding_dropout": 0.0,
34
+ "head_dim": 128,
35
+ "hidden_act": "silu",
36
+ "hidden_size": 3584,
37
+ "initializer_range": 0.02,
38
+ "intermediate_size": 18944,
39
+ "layer_norm_eps": 1e-06,
40
+ "max_position_embeddings": 4096,
41
+ "model_type": "molmo2_llm",
42
+ "norm_after": false,
43
+ "num_attention_heads": 28,
44
+ "num_hidden_layers": 28,
45
+ "num_key_value_heads": 4,
46
+ "qk_norm_type": "olmo",
47
+ "qkv_bias": true,
48
+ "residual_dropout": 0.0,
49
+ "rope_scaling": null,
50
+ "rope_theta": 1000000.0,
51
+ "use_cache": true,
52
+ "use_qk_norm": false,
53
+ "vocab_size": 152064
54
+ },
55
+ "model_type": "molmo2",
56
+ "tie_word_embeddings": false,
57
+ "torch_dtype": "bfloat16",
58
+ "transformers_version": "4.52.3",
59
+ "use_cache": true,
60
+ "vit_config": {
61
+ "attention_dropout": 0.0,
62
+ "float32_attention": true,
63
+ "head_dim": 72,
64
+ "hidden_act": "gelu_pytorch_tanh",
65
+ "hidden_size": 1152,
66
+ "image_default_input_size": [
67
+ 378,
68
+ 378
69
+ ],
70
+ "image_num_pos": 729,
71
+ "image_patch_size": 14,
72
+ "initializer_range": 0.02,
73
+ "intermediate_size": 4304,
74
+ "layer_norm_eps": 1e-06,
75
+ "model_type": "molmo2_vit",
76
+ "num_attention_heads": 16,
77
+ "num_hidden_layers": 27,
78
+ "num_key_value_heads": 16,
79
+ "residual_dropout": 0.0
80
+ }
81
+ }
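
Because "auto_map" above points AutoConfig and AutoModelForImageTextToText at configuration_molmo2.py and modeling_molmo2.py, loading goes through trust_remote_code. The sketch below is a hedged example of that path; the repository id is a placeholder and the printed values simply echo fields of this config.json.

# Minimal sketch (placeholder repo id): load the remote-code config and model.
import torch
from transformers import AutoConfig, AutoModelForImageTextToText

config = AutoConfig.from_pretrained("<this-repo-id>", trust_remote_code=True)
print(config.model_type)               # "molmo2"
print(config.llm_config.hidden_size)   # 3584

model = AutoModelForImageTextToText.from_pretrained(
    "<this-repo-id>", trust_remote_code=True, torch_dtype=torch.bfloat16
)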
configuration_molmo2.py ADDED
@@ -0,0 +1,340 @@
1
+ """
2
+ Molmo2 configuration
3
+ """
4
+
5
+ from typing import Tuple, Optional, Dict, Any
6
+
7
+ from transformers import PretrainedConfig
8
+ from transformers.modeling_rope_utils import rope_config_validation
9
+ from transformers.utils import logging
10
+
11
+ logger = logging.get_logger(__name__)
12
+
13
+
14
+ class Molmo2VitConfig(PretrainedConfig):
15
+ r"""
16
+ This is the configuration class to store the configuration of a [`Molmo2VisionTransformer`].
17
+ It is used to instantiate a `Molmo2VisionTransformer` according to the specified arguments,
18
+ defining the model architecture.
19
+
20
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
21
+ documentation from [`PretrainedConfig`] for more information.
22
+
23
+ Example:
24
+ ```python
25
+ >>> from transformers import Molmo2VitConfig, Molmo2VisionTransformer
26
+
27
+ >>> # Initializing a Molmo2VitConfig
28
+ >>> configuration = Molmo2VitConfig()
29
+
30
+ >>> # Initializing a Molmo2VisionTransformer (with random weights)
31
+ >>> model = Molmo2VisionTransformer(configuration)
32
+
33
+ >>> # Accessing the model configuration
34
+ >>> configuration = model.config
35
+ ```"""
36
+
37
+ model_type = "molmo2_vit"
38
+
39
+ def __init__(
40
+ self,
41
+ hidden_size: int = 1152,
42
+ intermediate_size: int = 4304,
43
+ num_hidden_layers: int = 27,
44
+ num_attention_heads: int = 16,
45
+ num_key_value_heads: int = 16,
46
+ head_dim: int = 72,
47
+ hidden_act: str = "gelu_pytorch_tanh",
48
+ layer_norm_eps: float = 1e-6,
49
+ image_default_input_size: Tuple[int, int] = (378, 378),
50
+ image_patch_size: int = 14,
51
+ image_num_pos: int = 577,
52
+ attention_dropout: float = 0.0,
53
+ residual_dropout: float = 0.0,
54
+ initializer_range: float = 0.02,
55
+ float32_attention: bool = True,
56
+ **kwargs,
57
+ ):
58
+ super().__init__(**kwargs)
59
+ self.hidden_size = hidden_size
60
+ self.intermediate_size = intermediate_size
61
+ self.num_hidden_layers = num_hidden_layers
62
+ self.num_attention_heads = num_attention_heads
63
+ self.num_key_value_heads = num_key_value_heads
64
+ self.head_dim = head_dim
65
+ self.hidden_act = hidden_act
66
+ self.layer_norm_eps = layer_norm_eps
67
+ self.image_default_input_size = image_default_input_size
68
+ self.image_patch_size = image_patch_size
69
+ self.image_num_pos = image_num_pos
70
+ self.attention_dropout = attention_dropout
71
+ self.residual_dropout = residual_dropout
72
+ self.initializer_range = initializer_range
73
+ self.float32_attention = float32_attention
74
+
75
+ @property
76
+ def image_num_patch(self):
77
+ h, w = self.image_default_input_size
78
+ return h // self.image_patch_size, w // self.image_patch_size
79
+
80
+
81
+ class Molmo2AdapterConfig(PretrainedConfig):
82
+ r"""
83
+ This is the configuration class to store the configuration of Molmo2Adapter. With Molmo2VitConfig,
84
+ it is used to instantiate a Molmo2VisionBackbone according to the specified arguments,
85
+ defining the model architecture.
86
+
87
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
88
+ documentation from [`PretrainedConfig`] for more information.
89
+
90
+ Example:
91
+
92
+ ```python
93
+ >>> from transformers import Molmo2VitConfig, Molmo2AdapterConfig, Molmo2VisionBackbone
94
+
95
+ >>> # Initializing a Molmo2VitConfig and a Molmo2AdapterConfig
96
+ >>> vit_config = Molmo2VitConfig()
97
+ >>> adapter_config = Molmo2AdapterConfig()
98
+
99
+ >>> # Initializing a Molmo2VisionBackbone (with random weights)
100
+ >>> model = Molmo2VisionBackbone(vit_config, adapter_config)
101
+
102
+ >>> # Accessing the model configuration
103
+ >>> vit_configuration = model.vit_config
104
+ >>> adapter_configuration = model.adapter_config
105
+ ```"""
106
+
107
+ def __init__(
108
+ self,
109
+ vit_layers: Tuple = (-3, -9),
110
+ hidden_size: int = 1152,
111
+ num_attention_heads: int = 16,
112
+ num_key_value_heads: int = 16,
113
+ head_dim: int = 72,
114
+ float32_attention: bool = True,
115
+ attention_dropout: float = 0.0,
116
+ residual_dropout: float = 0.0,
117
+ hidden_act: str = "silu",
118
+ intermediate_size: int = 18944,
119
+ text_hidden_size: int = 3584,
120
+ image_feature_dropout: float = 0.0,
121
+ initializer_range: float = 0.02,
122
+ **kwargs,
123
+ ):
124
+ super().__init__(**kwargs)
125
+ self.vit_layers = vit_layers
126
+ self.hidden_size = hidden_size
127
+ self.num_attention_heads = num_attention_heads
128
+ self.num_key_value_heads = num_key_value_heads
129
+ self.head_dim = head_dim
130
+ self.float32_attention = float32_attention
131
+ self.attention_dropout = attention_dropout
132
+ self.residual_dropout = residual_dropout
133
+ self.hidden_act = hidden_act
134
+ self.intermediate_size = intermediate_size
135
+ self.text_hidden_size = text_hidden_size
136
+ self.image_feature_dropout = image_feature_dropout
137
+ self.initializer_range = initializer_range
138
+
139
+
140
+ class Molmo2LlmConfig(PretrainedConfig):
141
+ r"""
142
+ This is the configuration class to store the configuration of a [`Molmo2Llm`]. It is used to instantiate a
143
+ `Molmo2Llm` according to the specified arguments, defining the model architecture.
144
+
145
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
146
+ documentation from [`PretrainedConfig`] for more information.
147
+
148
+ Example:
149
+ ```python
150
+ >>> from transformers import Molmo2LlmConfig, Molmo2Llm
151
+
152
+ >>> # Initializing a Molmo2LlmConfig
153
+ >>> configuration = Molmo2LlmConfig()
154
+
155
+ >>> # Initializing a Molmo2Llm (with random weights)
156
+ >>> model = Molmo2Llm(configuration)
157
+
158
+ >>> # Accessing the model configuration
159
+ >>> configuration = model.config
160
+ ```"""
161
+
162
+ model_type = "molmo2_llm"
163
+ keys_to_ignore_at_inference = ["past_key_values"]
164
+ base_model_tp_plan = {
165
+ "blocks.*.self_attn.att_proj": "colwise",
166
+ "blocks.*.self_attn.attn_out": "rowwise",
167
+ "blocks.*.mlp.ff_proj": "colwise",
168
+ "blocks.*.mlp.ff_out": "rowwise",
169
+ }
170
+ base_model_pp_plan = {
171
+ "wte": (["input_ids"], ["inputs_embeds"]),
172
+ "blocks": (["hidden_states", "attention_mask"], ["hidden_states"]),
173
+ "ln_f": (["hidden_states"], ["hidden_states"]),
174
+ }
175
+
176
+ def __init__(
177
+ self,
178
+ hidden_size: int = 3584,
179
+ num_attention_heads: int = 28,
180
+ num_key_value_heads: Optional[int] = 4,
181
+ head_dim: int = 128,
182
+ vocab_size: int = 152064,
183
+ additional_vocab_size: int = 128,
184
+ qkv_bias: bool = True,
185
+ num_hidden_layers: int = 48,
186
+ intermediate_size: int = 18944,
187
+ hidden_act: str = "silu",
188
+ embedding_dropout: float = 0.0,
189
+ attention_dropout: float = 0.0,
190
+ residual_dropout: float = 0.0,
191
+ max_position_embeddings: int = 4096,
192
+ rope_theta: float = 1000000.0,
193
+ rope_scaling: Dict[str, Any] = None,
194
+ use_qk_norm: bool = False,
195
+ qk_norm_type: str = "olmo",
196
+ layer_norm_eps: float = 1e-6,
197
+ norm_after: bool = False,
198
+ initializer_range: float = 0.02,
199
+ use_cache=True,
200
+ tie_word_embeddings=False,
201
+ **kwargs,
202
+ ):
203
+ super().__init__(
204
+ tie_word_embeddings=tie_word_embeddings,
205
+ **kwargs
206
+ )
207
+ self.hidden_size = hidden_size
208
+ self.num_attention_heads = num_attention_heads
209
+ if num_key_value_heads is None:
210
+ num_key_value_heads = num_attention_heads
211
+ self.num_key_value_heads = num_key_value_heads
212
+ self.head_dim = head_dim
213
+ self.vocab_size = vocab_size
214
+ self.additional_vocab_size = additional_vocab_size
215
+ self.qkv_bias = qkv_bias
216
+ self.num_hidden_layers = num_hidden_layers
217
+ self.intermediate_size = intermediate_size
218
+ self.hidden_act = hidden_act
219
+ self.embedding_dropout = embedding_dropout
220
+ self.attention_dropout = attention_dropout
221
+ self.residual_dropout = residual_dropout
222
+ self.max_position_embeddings = max_position_embeddings
223
+ self.rope_theta = rope_theta
224
+ self.rope_scaling = rope_scaling
225
+ self.use_qk_norm = use_qk_norm
226
+ self.qk_norm_type = qk_norm_type
227
+ self.layer_norm_eps = layer_norm_eps
228
+ self.norm_after = norm_after
229
+ self.initializer_range = initializer_range
230
+ self.use_cache = use_cache
231
+
232
+ # Validate the correctness of rotary position embeddings parameters
233
+ rope_config_validation(self)
234
+
235
+
236
+ class Molmo2Config(PretrainedConfig):
237
+ r"""
238
+ This is the configuration class to store the configuration of a [`Molmo2ForConditionalGeneration`].
239
+ It is used to instantiate a Molmo2 model according to the specified arguments, defining the model architecture.
240
+
241
+ Example:
242
+
243
+ ```python
244
+ >>> from transformers import Molmo2Config, Molmo2VitConfig, Molmo2AdapterConfig, Molmo2LlmConfig
245
+
246
+ >>> # Initializing a Molmo2VitConfig
247
+ >>> vit_config = Molmo2VitConfig()
248
+
249
+ >>> # Initializing a Molmo2AdapterConfig
250
+ >>> adapter_config = Molmo2AdapterConfig()
251
+
252
+ >>> # Initializing a Molmo2LlmConfig
253
+ >>> llm_config = Molmo2LlmConfig()
254
+
255
+ >>> # Initializing a Molmo2Config
256
+ >>> configuration = Molmo2Config(vit_config, adapter_config, llm_config, image_patch_id=152069)
257
+
258
+ >>> # Initializing a model
259
+ >>> model = Molmo2ForConditionalGeneration(configuration)
260
+
261
+ >>> # Accessing the model configuration
262
+ >>> configuration = model.config
263
+ ```"""
264
+
265
+ model_type = "molmo2"
266
+ sub_configs = {
267
+ "llm_config": Molmo2LlmConfig,
268
+ "vit_config": Molmo2VitConfig,
269
+ "adapter_config": Molmo2AdapterConfig,
270
+ }
271
+
272
+ def __init__(
273
+ self,
274
+ vit_config: Molmo2VitConfig = None,
275
+ adapter_config: Molmo2AdapterConfig = None,
276
+ llm_config: Molmo2LlmConfig = None,
277
+ image_patch_id: int = None,
278
+ initializer_range: float = 0.02,
279
+ **kwargs,
280
+ ):
281
+ super().__init__(**kwargs)
282
+ if vit_config is None:
283
+ self.vit_config = Molmo2VitConfig()
284
+ elif isinstance(vit_config, dict):
285
+ self.vit_config = Molmo2VitConfig(**vit_config)
286
+ else:
287
+ self.vit_config = vit_config
288
+ if adapter_config is None:
289
+ self.adapter_config = Molmo2AdapterConfig()
290
+ elif isinstance(adapter_config, dict):
291
+ self.adapter_config = Molmo2AdapterConfig(**adapter_config)
292
+ else:
293
+ self.adapter_config = adapter_config
294
+ if llm_config is None:
295
+ self.llm_config = Molmo2LlmConfig()
296
+ elif isinstance(llm_config, dict):
297
+ self.llm_config = Molmo2LlmConfig(**llm_config)
298
+ else:
299
+ self.llm_config = llm_config
300
+ self.image_patch_id = image_patch_id
301
+ self.initializer_range = initializer_range
302
+
303
+ @property
304
+ def image_num_patch(self):
305
+ assert self.vit_config is not None
306
+ return self.vit_config.image_num_patch
307
+
308
+ @property
309
+ def num_attention_heads(self):
310
+ return self.llm_config.num_attention_heads
311
+
312
+ @property
313
+ def num_key_value_heads(self):
314
+ return self.llm_config.num_key_value_heads
315
+
316
+ @property
317
+ def head_dim(self):
318
+ return self.llm_config.head_dim
319
+
320
+ @property
321
+ def num_hidden_layers(self):
322
+ return self.llm_config.num_hidden_layers
323
+
324
+ @property
325
+ def hidden_size(self):
326
+ return self.llm_config.hidden_size
327
+
328
+ @property
329
+ def vocab_size(self):
330
+ return self.llm_config.vocab_size
331
+
332
+ @property
333
+ def max_position_embeddings(self):
334
+ return self.llm_config.max_position_embeddings
335
+
336
+
337
+ Molmo2VitConfig.register_for_auto_class()
338
+ Molmo2AdapterConfig.register_for_auto_class()
339
+ Molmo2LlmConfig.register_for_auto_class()
340
+ Molmo2Config.register_for_auto_class()
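
Mirroring the docstring examples in the file above, the sketch below composes the three sub-configs into a Molmo2Config; it assumes configuration_molmo2.py is importable from the working directory, and the image_patch_id value is taken from added_tokens.json ("<im_patch>" = 152066).

# Minimal sketch: build a Molmo2Config from its sub-configs (defaults shown above).
from configuration_molmo2 import (
    Molmo2VitConfig, Molmo2AdapterConfig, Molmo2LlmConfig, Molmo2Config,
)

vit_config = Molmo2VitConfig()          # vision transformer defaults (hidden_size=1152)
adapter_config = Molmo2AdapterConfig()  # vision-to-text adapter (text_hidden_size=3584)
llm_config = Molmo2LlmConfig()          # text decoder defaults (hidden_size=3584)

config = Molmo2Config(
    vit_config=vit_config,
    adapter_config=adapter_config,
    llm_config=llm_config,
    image_patch_id=152066,              # "<im_patch>" id from added_tokens.json
)
print(config.hidden_size)       # delegates to llm_config -> 3584
print(config.image_num_patch)   # delegates to vit_config -> (27, 27)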
generation_config.json ADDED
@@ -0,0 +1,6 @@
1
+ {
2
+ "bos_token_id": 151643,
3
+ "eos_token_id": 151643,
4
+ "pad_token_id": 151643,
5
+ "transformers_version": "4.52.3"
6
+ }
image_processing_molmo2.py ADDED
@@ -0,0 +1,932 @@
1
+ """Image processor class for Molmo2"""
2
+ from typing import TYPE_CHECKING, Tuple, List, Optional, Union, Dict, Any
3
+ import numpy as np
4
+ import einops
5
+ import torch
6
+ import torchvision.transforms
7
+ from torchvision.transforms import InterpolationMode
8
+ from torchvision.transforms.functional import convert_image_dtype
9
+
10
+ from transformers.image_utils import (
11
+ OPENAI_CLIP_MEAN,
12
+ OPENAI_CLIP_STD,
13
+ ChannelDimension,
14
+ ImageInput,
15
+ is_valid_image,
16
+ valid_images,
17
+ to_numpy_array,
18
+ )
19
+ from transformers.image_transforms import convert_to_rgb, to_channel_dimension_format
20
+ from transformers.processing_utils import ImagesKwargs
21
+ from transformers.image_processing_utils import BaseImageProcessor
22
+ from transformers.utils import logging
23
+ from transformers.feature_extraction_utils import BatchFeature
24
+ from transformers.utils import TensorType, logging
25
+
26
+
27
+ if TYPE_CHECKING:
28
+ from transformers.utils import TensorType, logging
29
+
30
+
31
+ logger = logging.get_logger(__name__)
32
+
33
+
34
+ def is_multi_image(image: Union[ImageInput, List[ImageInput]]) -> bool:
35
+ return isinstance(image, (list, tuple))
36
+
37
+
38
+ def make_batched_images(images) -> List[ImageInput]:
39
+ """
40
+ Accepts images in list or nested list format.
41
+
42
+ Args:
43
+ images (`Union[List[List[ImageInput]], List[ImageInput], ImageInput]`):
44
+ The input image.
45
+
46
+ Returns:
47
+ list: A list of images or a list of lists of images.
48
+ """
49
+ if isinstance(images, (list, tuple)) and isinstance(images[0], (list, tuple)) and is_valid_image(images[0][0]):
50
+ return images
51
+
52
+ elif isinstance(images, (list, tuple)) and is_valid_image(images[0]):
53
+ return images
54
+
55
+ elif is_valid_image(images):
56
+ return [images]
57
+
58
+ raise ValueError(f"Could not make batched images from {images}")
59
+
60
+
61
+ def normalize_image(image: np.ndarray, normalize_mode: str) -> np.ndarray:
62
+ if normalize_mode == "openai":
63
+ image -= np.array(OPENAI_CLIP_MEAN, dtype=np.float32)[None, None, :]
64
+ image /= np.array(OPENAI_CLIP_STD, dtype=np.float32)[None, None, :]
65
+ elif normalize_mode == "siglip":
66
+ image = np.asarray(-1.0, dtype=np.float32) + image * np.asarray(2.0, dtype=np.float32)
67
+ elif normalize_mode == "dino":
68
+ image -= np.array([0.485, 0.456, 0.406], dtype=np.float32)[None, None, :]
69
+ image /= np.array([0.229, 0.224, 0.225], dtype=np.float32)[None, None, :]
70
+ else:
71
+ raise NotImplementedError(normalize_mode)
72
+ return image
73
+
74
+
75
+ def resize_and_pad(
76
+ image,
77
+ desired_output_size,
78
+ resize_method="torch-bilinear",
79
+ pad_value=0,
80
+ ):
81
+ """Resize an image while padding to preserve uts aspect ratio."""
82
+ desired_height, desired_width = desired_output_size
83
+ height, width = image.shape[:2]
84
+
85
+ # Cast into float32 since the training code did this in float32 and it (very rarely) affects
86
+ # the results after rounding.
87
+ image_scale_y = np.array(desired_height, np.float32) / np.array(height, np.float32)
88
+ image_scale_x = np.array(desired_width, np.float32) / np.array(width, np.float32)
89
+ image_scale = min(image_scale_x, image_scale_y)
90
+ scaled_height = int(np.array(height, np.float32) * image_scale)
91
+ scaled_width = int(np.array(width, np.float32) * image_scale)
92
+
93
+ if resize_method in ["torch-bilinear"]:
94
+ image = torch.permute(torch.from_numpy(image), [2, 0, 1])
95
+ image = convert_image_dtype(image) # resize in float32 to match the training code
96
+ mode = InterpolationMode.BILINEAR
97
+ image = torchvision.transforms.Resize([scaled_height, scaled_width], mode, antialias=True)(image)
98
+ image = torch.clip(image, 0.0, 1.0)
99
+ image = torch.permute(image, [1, 2, 0]).numpy()
100
+ else:
101
+ raise NotImplementedError(resize_method)
102
+
103
+ top_pad = (desired_height - scaled_height) // 2
104
+ left_pad = (desired_width - scaled_width) // 2
105
+ padding = [
106
+ [top_pad, desired_height - scaled_height - top_pad],
107
+ [left_pad, desired_width - scaled_width - left_pad],
108
+ [0, 0]
109
+ ]
110
+ image_mask = np.pad(np.ones_like(image[:, :, 0], dtype=bool), padding[:2])
111
+ image = np.pad(image, padding, constant_values=pad_value)
112
+ return image, image_mask
113
+
114
+
115
+ def metaclip_resize(image, desired_output_size):
116
+ image = torch.permute(torch.from_numpy(image), [2, 0, 1])
117
+ if torch.is_floating_point(image):
118
+ image = torchvision.transforms.Resize(
119
+ desired_output_size, InterpolationMode.BICUBIC, antialias=True)(image)
120
+ image = torch.clip(image, 0.0, 1.0)
121
+ else:
122
+ assert image.dtype == torch.uint8, "Expected float images or uint8 images, but got {}".format(image.dtype)
123
+ image = torchvision.transforms.Resize(
124
+ desired_output_size, InterpolationMode.BICUBIC, antialias=True)(image)
125
+ image = image.to(torch.float32)
126
+ image = torch.clip(image, 0, 255)
127
+ image = image / 255.0
128
+ resized = torch.permute(image, [1, 2, 0]).numpy()
129
+ image_mask = np.ones_like(resized[:, :, 0], dtype=np.bool_)
130
+ return resized, image_mask
131
+
132
+
133
+ def siglip_resize_and_pad(
134
+ image: np.ndarray,
135
+ desired_output_size: Tuple[int, int],
136
+ ) -> Tuple[np.ndarray, np.ndarray]:
137
+ if len(image.shape) == 3:
138
+ is_video = False
139
+ image = torch.permute(torch.from_numpy(image), [2, 0, 1])
140
+ else:
141
+ is_video = True
142
+ image = torch.permute(torch.from_numpy(image), [0, 3, 1, 2])
143
+ dtype = image.dtype
144
+ if torch.is_floating_point(image):
145
+ in_min = 0.0
146
+ in_max = 1.0
147
+ resized = torchvision.transforms.Resize(
148
+ desired_output_size,
149
+ InterpolationMode.BILINEAR,
150
+ antialias=False,
151
+ )(image)
152
+ resized = torch.clip(resized, 0.0, 1.0).to(dtype)
153
+ else:
154
+ assert image.dtype == torch.uint8, "SigLIP expects float images or uint8 images, but got {}".format(image.dtype)
155
+ in_min = 0.0
156
+ in_max = 255.0
157
+ resized = torchvision.transforms.Resize(
158
+ desired_output_size,
159
+ InterpolationMode.BILINEAR,
160
+ antialias=False,
161
+ )(image)
162
+ resized = torch.clip(resized, 0, 255).to(dtype)
163
+
164
+ resized = resized.to(torch.float32)
165
+ resized = (resized - in_min) / (in_max - in_min)
166
+
167
+ if is_video:
168
+ resized = torch.permute(resized, [0, 2, 3, 1]).numpy()
169
+ image_mask = None
170
+ else:
171
+ resized = torch.permute(resized, [1, 2, 0]).numpy()
172
+ image_mask = np.ones_like(resized[:, :, 0], dtype=np.bool_)
173
+
174
+ return resized, image_mask
175
+
176
+
177
+ def dino_resize_and_pad(
178
+ image: np.ndarray,
179
+ desired_output_size: Tuple[int, int],
180
+ ) -> Tuple[np.ndarray, np.ndarray]:
181
+ image = torch.permute(torch.from_numpy(image), [2, 0, 1])
182
+ dtype = image.dtype
183
+ if torch.is_floating_point(image):
184
+ resized = torchvision.transforms.Resize(
185
+ desired_output_size,
186
+ InterpolationMode.BICUBIC,
187
+ antialias=True,
188
+ )(image)
189
+ resized = torch.clip(resized, 0.0, 1.0).to(torch.float32)
190
+ else:
191
+ assert image.dtype == torch.uint8, "DINOv2 expects float images or uint8 images, but got {}".format(image.dtype)
192
+ resized = torchvision.transforms.Resize(
193
+ desired_output_size,
194
+ InterpolationMode.BICUBIC,
195
+ antialias=True,
196
+ )(image)
197
+ resized = torch.clip(resized, 0, 255).to(torch.float32)
198
+ resized = resized / 255.0
199
+
200
+ resized = torch.permute(resized, [1, 2, 0]).numpy()
201
+ image_mask = np.ones_like(resized[:, :, 0], dtype=np.bool_)
202
+
203
+ return resized, image_mask
204
+
205
+
206
+ def resize_image(
207
+ image: np.ndarray,
208
+ resize_mode: str,
209
+ output_size: Tuple[int, int],
210
+ pad_value: float,
211
+ ) -> Tuple[np.ndarray, np.ndarray]:
212
+ if resize_mode == "siglip":
213
+ return siglip_resize_and_pad(image, output_size)
214
+ elif resize_mode == "dino":
215
+ return dino_resize_and_pad(image, output_size)
216
+ elif resize_mode == "metaclip":
217
+ return metaclip_resize(image, output_size)
218
+ else:
219
+ resize = "torch-bilinear" if resize_mode == "default" else resize_mode
220
+ return resize_and_pad(
221
+ image, output_size, resize_method=resize, pad_value=pad_value,
222
+ )
223
+
224
+
225
+ def select_tiling(h, w, patch_size, max_num_crops):
226
+ """Divide in image of size [w, h] in up to max_num_patches of size patch_size"""
227
+ original_size = np.stack([h, w]) # [1, 2]
228
+ original_res = h * w
229
+ tilings = []
230
+ for i in range(1, max_num_crops + 1):
231
+ for j in range(1, max_num_crops + 1):
232
+ if i*j <= max_num_crops:
233
+ tilings.append((i, j))
234
+ # sort so argmin and argmax favour smaller tilings in the event of a tie
235
+ tilings.sort(key=lambda x: (x[0]*x[1], x[0]))
236
+ candidate_tilings = np.array(tilings, dtype=np.int32) # [n_resolutions, 2]
237
+ candidate_resolutions = candidate_tilings * patch_size # [n_resolutions, 2]
238
+
239
+ # How much we would need to scale the image to fit exactly in each tiling
240
+ original_size = np.stack([h, w], dtype=np.float32) # [1, 2]
241
+
242
+ # The original size can be zero in rare cases if the image is smaller than the margin
243
+ # In those cases letting the scale become infinite means the tiling is based on the
244
+ # other side, or falls back to the smallest tiling
245
+ with np.errstate(divide='ignore'):
246
+ required_scale_d = candidate_resolutions.astype(np.float32) / original_size
247
+ required_scale = np.min(required_scale_d, axis=-1, keepdims=True) # [n_resolutions, 1]
248
+ if np.all(required_scale < 1):
249
+ # We are forced to downscale, so try to minimize the amount of downscaling
250
+ ix = np.argmax(required_scale)
251
+ else:
252
+ # Pick the resolution that required the least upscaling so that it most closely fits the image
253
+ required_scale = np.where(required_scale < 1.0, 10e9, required_scale)
254
+ ix = np.argmin(required_scale)
255
+ return candidate_tilings[ix]
256
+
257
+
258
+ def build_resized_image(
259
+ image: np.ndarray,
260
+ resize_mode: str,
261
+ normalized_mode: str,
262
+ base_image_input_size: List[int],
263
+ pad_value: float,
264
+ image_patch_size: int,
265
+ ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
266
+ resized, resized_mask = resize_image(
267
+ image, resize_mode, base_image_input_size, pad_value,
268
+ )
269
+ resized = normalize_image(resized, normalized_mode)
270
+ if len(resized.shape) == 3:
271
+ resized = np.expand_dims(resized, 0)
272
+ resized_mask = np.expand_dims(resized_mask, 0)
273
+ crop_patch_w = base_image_input_size[1] // image_patch_size
274
+ crop_patch_h = base_image_input_size[0] // image_patch_size
275
+ resize_idx = np.arange(crop_patch_w*crop_patch_h).reshape([crop_patch_h, crop_patch_w])
276
+ return resized, resized_mask, resize_idx
277
+
278
+
279
+ def build_overlapping_crops(
280
+ image: np.ndarray,
281
+ resize_mode: str,
282
+ normalize_mode: str,
283
+ max_crops: int,
284
+ overlap_margins: List[int],
285
+ base_image_input_size: List[int],
286
+ pad_value: float,
287
+ image_patch_size: int,
288
+ ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
289
+ """Decompose an image into a set of overlapping crops
290
+
291
+ :return crop_arr: [n_crops, h, w, 3] The crops
292
+ :return mask_arr: [n_crops, h, w] The padding masks
293
+ :return patch_idx: [overlap_patch_h, overlap_patch_w] For each patch in the resized image
294
+ the crops were extracted from, the index of the patch in `crop_arr` it corresponds to
295
+ """
296
+ original_image_h, original_image_w = image.shape[:2]
297
+ crop_size = base_image_input_size[0]
298
+ assert base_image_input_size[0] == base_image_input_size[1]
299
+
300
+ left_margin, right_margin = overlap_margins
301
+ total_margin_pixels = image_patch_size * (right_margin + left_margin) # pixels removed per dim
302
+ crop_patches = base_image_input_size[0] // image_patch_size # patches per crop dim
303
+ crop_window_patches = crop_patches - (right_margin + left_margin) # usable patches
304
+ crop_window_size = crop_window_patches * image_patch_size
305
+ crop_patch_w = base_image_input_size[1] // image_patch_size
306
+ crop_patch_h = base_image_input_size[0] // image_patch_size
307
+ original_image_h, original_image_w = image.shape[:2]
308
+ crop_size = base_image_input_size[0]
309
+
310
+ # Decide how to tile the image, to account for the overlap margins we compute the tiling
311
+ # as if we had an image without the margins and were using a crop size without the margins
312
+ tiling = select_tiling(
313
+ original_image_h - total_margin_pixels,
314
+ original_image_w - total_margin_pixels,
315
+ crop_window_size,
316
+ max_crops,
317
+ )
318
+
319
+ src, img_mask = resize_image(
320
+ image,
321
+ resize_mode,
322
+ [tiling[0]*crop_window_size+total_margin_pixels, tiling[1]*crop_window_size+total_margin_pixels],
323
+ pad_value,
324
+ )
325
+ src = normalize_image(src, normalize_mode)
326
+
327
+ # Now we have to split the image into crops, and track what patches came from
328
+ # where in `patch_idx_arr`
329
+ n_crops = tiling[0] * tiling[1]
330
+ crop_arr = np.zeros([n_crops, crop_size, crop_size, 3], dtype=src.dtype)
331
+ mask_arr = np.zeros([n_crops, crop_size, crop_size], dtype=img_mask.dtype)
332
+ patch_idx_arr = np.zeros([n_crops, crop_patch_h, crop_patch_w], dtype=np.int32)
333
+ on = 0
334
+ on_crop = 0
335
+ for i in range(tiling[0]):
336
+ # Slide over `src` by `crop_window_size` steps, but extract crops of size `crop_size`
337
+ # which results in overlapping crop windows
338
+ y0 = i*crop_window_size
339
+ for j in range(tiling[1]):
340
+ x0 = j*crop_window_size
341
+ crop_arr[on_crop] = src[y0:y0+crop_size, x0:x0+crop_size]
342
+ mask_arr[on_crop] = img_mask[y0:y0+crop_size, x0:x0+crop_size]
343
+ patch_idx = np.arange(crop_patch_w*crop_patch_h).reshape(crop_patch_h, crop_patch_w)
344
+ patch_idx += on_crop * crop_patch_h * crop_patch_w
345
+
346
+ # Mask out idx that are in the overlap region
347
+ if i != 0:
348
+ patch_idx[:left_margin, :] = -1
349
+ if j != 0:
350
+ patch_idx[:, :left_margin] = -1
351
+ if i != tiling[0]-1:
352
+ patch_idx[-right_margin:, :] = -1
353
+ if j != tiling[1]-1:
354
+ patch_idx[:, -right_margin:] = -1
355
+ patch_idx_arr[on_crop] = patch_idx
356
+ on_crop += 1
357
+
358
+ # `patch_idx_arr` is ordered crop-by-crop, here we transpose `patch_idx_arr`
359
+ # so it is in left-to-right order
360
+ patch_idx_arr = np.reshape(
361
+ patch_idx_arr,
362
+ [tiling[0], tiling[1], crop_patch_h, crop_patch_w]
363
+ )
364
+ patch_idx_arr = np.transpose(patch_idx_arr, [0, 2, 1, 3])
365
+ patch_idx_arr = np.reshape(patch_idx_arr, [-1])
366
+
367
+ # Now get the parts not in the overlap region, so it should map each patch in `src`
368
+ # to the correct patch it should come from in `crop_arr`
369
+ patch_idx_arr = patch_idx_arr[patch_idx_arr >= 0].reshape(
370
+ src.shape[0]//image_patch_size,
371
+ src.shape[1]//image_patch_size,
372
+ )
373
+ return crop_arr, mask_arr, patch_idx_arr
374
+
375
+
376
+ def batch_pixels_to_patches(array: np.ndarray, patch_size: int) -> np.ndarray:
377
+ """Reshape images of [n_images, h, w, 3] -> [n_images, n_patches, pixels_per_patch]"""
378
+ if len(array.shape) == 3:
379
+ n_crops, h, w = array.shape
380
+ h_patches = h//patch_size
381
+ w_patches = w//patch_size
382
+ array = np.reshape(array, [n_crops, h_patches, patch_size, w_patches, patch_size])
383
+ array = np.transpose(array, [0, 1, 3, 2, 4])
384
+ array = np.reshape(array, [n_crops, h_patches*w_patches, patch_size*patch_size])
385
+ return array
386
+ else:
387
+ n_crops, h, w, c = array.shape
388
+ h_patches = h//patch_size
389
+ w_patches = w//patch_size
390
+ array = np.reshape(array, [n_crops, h_patches, patch_size, w_patches, patch_size, c])
391
+ array = np.transpose(array, [0, 1, 3, 2, 4, 5])
392
+ array = np.reshape(array, [n_crops, h_patches*w_patches, patch_size*patch_size*c])
393
+ return array
394
+
395
+
396
+ def arange_for_pooling(
397
+ idx_arr: np.ndarray,
398
+ pool_h: int,
399
+ pool_w: int,
400
+ ) -> np.ndarray:
401
+ h_pad = pool_h * ((idx_arr.shape[0] + pool_h - 1) // pool_h) - idx_arr.shape[0]
402
+ w_pad = pool_w * ((idx_arr.shape[1] + pool_w - 1) // pool_w) - idx_arr.shape[1]
403
+ idx_arr = np.pad(idx_arr, [[h_pad//2, (h_pad+1)//2], [w_pad//2, (w_pad+1)//2]],
404
+ mode='constant',constant_values=-1)
405
+ return einops.rearrange(
406
+ idx_arr, "(h dh) (w dw) -> h w (dh dw)", dh=pool_h, dw=pool_w)
407
+
408
+
409
+ def image_to_patches_and_grids(
410
+ image: ImageInput,
411
+ crop_mode: str,
412
+ resize_mode: str,
413
+ normalize_mode: str,
414
+ max_crops: int,
415
+ overlap_margins: List[int],
416
+ base_image_input_size: List[int],
417
+ pad_value: float,
418
+ image_patch_size: int,
419
+ image_pooling_w: int,
420
+ image_pooling_h: int,
421
+ ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
422
+ """
423
+ :return image_grids, the shape of each (low-res, high-res) image after pooling
424
+ :return crops, the image crops to processes with the ViT
425
+ :return mask, the padding mask for each crop
426
+ :return pooled_patch_idx, for each patch_id token in `image_tokens`, the indices of the
427
+ patches in `crops` to pool for that token, masked with -1
428
+ """
429
+ if isinstance(base_image_input_size, int):
430
+ base_image_input_size = (base_image_input_size, base_image_input_size)
431
+
432
+ base_image_input_d = image_patch_size
433
+ pooling_w = image_pooling_w
434
+ pooling_h = image_pooling_h
435
+ crop_patch_w = base_image_input_size[1] // base_image_input_d
436
+ crop_patch_h = base_image_input_size[0] // base_image_input_d
437
+
438
+ if crop_mode == "resize":
439
+ resized, resized_mask, resize_idx = build_resized_image(
440
+ image,
441
+ resize_mode,
442
+ normalize_mode,
443
+ base_image_input_size,
444
+ pad_value,
445
+ image_patch_size
446
+ )
447
+ pooling_idx = arange_for_pooling(resize_idx, pooling_h, pooling_w)
448
+ h, w = pooling_idx.shape[:2]
449
+ pooling_idx = pooling_idx.reshape([-1, pooling_h*pooling_w])
450
+ image_grid = [np.array([h, w])]
451
+ return (
452
+ np.stack(image_grid, 0),
453
+ batch_pixels_to_patches(resized, image_patch_size),
454
+ batch_pixels_to_patches(resized_mask, image_patch_size).mean(-1),
455
+ pooling_idx,
456
+ )
457
+
458
+ if crop_mode in ["overlap-and-resize-c2", "overlap-and-resize"]:
459
+ crop_arr, mask_arr, patch_idx_arr = build_overlapping_crops(
460
+ image,
461
+ resize_mode,
462
+ normalize_mode,
463
+ max_crops,
464
+ overlap_margins,
465
+ base_image_input_size,
466
+ pad_value,
467
+ image_patch_size,
468
+ )
469
+ pooling_idx = arange_for_pooling(patch_idx_arr, pooling_h, pooling_w)
470
+ h, w = pooling_idx.shape[:2]
471
+ pooling_idx = pooling_idx.reshape([-1, pooling_h*pooling_w])
472
+ image_grid = [np.array([h, w])]
473
+
474
+ if crop_mode == "overlap-and-resize":
475
+ crop_arr = batch_pixels_to_patches(crop_arr, image_patch_size)
476
+ mask_arr = batch_pixels_to_patches(mask_arr, image_patch_size).astype(np.float32).mean(axis=-1)
477
+ return np.stack(image_grid, 0), crop_arr, mask_arr, pooling_idx
478
+
479
+ # Finally do the same for the global image
480
+ resized, resized_mask, resize_idx = build_resized_image(
481
+ image,
482
+ resize_mode,
483
+ normalize_mode,
484
+ base_image_input_size,
485
+ pad_value,
486
+ image_patch_size
487
+ )
488
+ crop_arr = np.concatenate([resized, crop_arr], 0)
489
+
490
+ mask_arr = np.concatenate([resized_mask, mask_arr], 0)
491
+
492
+ resize_idx = arange_for_pooling(resize_idx, pooling_h, pooling_w)
493
+ h, w = resize_idx.shape[:2]
494
+ resize_idx = resize_idx.reshape([-1, pooling_h*pooling_w])
495
+
496
+ # Global image goes first, so the patch indices of the other crops are shifted up
497
+ pooling_idx = np.where(
498
+ pooling_idx >= 0,
499
+ pooling_idx + crop_patch_h*crop_patch_w,
500
+ -1
501
+ )
502
+ pooling_idx = np.concatenate([resize_idx, pooling_idx])
503
+ image_grid = [
504
+ np.array([h, w]),
505
+ ] + image_grid
506
+
507
+ mask_arr = batch_pixels_to_patches(mask_arr, image_patch_size).astype(np.float32).mean(axis=-1)
508
+ return (
509
+ np.stack(image_grid, 0),
510
+ batch_pixels_to_patches(crop_arr, image_patch_size),
511
+ mask_arr,
512
+ pooling_idx
513
+ )
514
+ else:
515
+ raise NotImplementedError(crop_mode)
516
+
517
+
518
+ def image_to_patches_and_tokens(
519
+ image: ImageInput,
520
+ crop_mode: str,
521
+ use_col_tokens: bool,
522
+ resize_mode: str,
523
+ normalize_mode: str,
524
+ max_crops: int,
525
+ overlap_margins: List[int],
526
+ base_image_input_size: List[int],
527
+ pad_value: float,
528
+ image_patch_size: int,
529
+ image_pooling_w: int,
530
+ image_pooling_h: int,
531
+ image_patch_token_id: int,
532
+ image_col_token_id: int,
533
+ image_start_token_id: int,
534
+ image_end_token_id: int,
535
+ ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
536
+ """
537
+ :return image_tokens, the token IDS for this image, including special tokens
538
+ :return crops, the image crops to process with the ViT
539
+ :return mask, the padding mask for each crop
540
+ :return pooled_patch_idx, for each patch_id token in `image_tokens`, the indices of the
541
+ patches in `crops` to pool for that token, masked with -1
542
+ """
543
+
544
+ if isinstance(base_image_input_size, int):
545
+ base_image_input_size = (base_image_input_size, base_image_input_size)
546
+
547
+ base_image_input_d = image_patch_size
548
+ pooling_w = image_pooling_w
549
+ pooling_h = image_pooling_h
550
+ patch_id = image_patch_token_id
551
+ col_id = image_col_token_id
552
+ start_id = image_start_token_id
553
+ end_id = image_end_token_id
554
+ crop_patch_w = base_image_input_size[1] // base_image_input_d
555
+ crop_patch_h = base_image_input_size[0] // base_image_input_d
556
+
557
+ if crop_mode == "resize":
558
+ resized, resized_mask, resize_idx = build_resized_image(
559
+ image,
560
+ resize_mode,
561
+ normalize_mode,
562
+ base_image_input_size,
563
+ pad_value,
564
+ image_patch_size
565
+ )
566
+ pooling_idx = arange_for_pooling(resize_idx, pooling_h, pooling_w)
567
+ h, w = pooling_idx.shape[:2]
568
+ pooling_idx = pooling_idx.reshape([-1, pooling_h*pooling_w])
569
+ per_row = np.full(
570
+ (w,),
571
+ patch_id,
572
+ dtype=np.int32
573
+ )
574
+ if use_col_tokens:
575
+ per_row = np.concatenate([per_row, [col_id]], 0)
576
+ extra_tokens = np.tile(per_row, [h])
577
+ joint = [
578
+ [start_id],
579
+ extra_tokens,
580
+ [end_id],
581
+ ]
582
+ return (
583
+ np.concatenate(joint, 0),
584
+ batch_pixels_to_patches(resized, image_patch_size),
585
+ batch_pixels_to_patches(resized_mask, image_patch_size).mean(-1),
586
+ pooling_idx,
587
+ )
588
+
589
+ if crop_mode in ["overlap-and-resize-c2", "overlap-and-resize"]:
590
+ crop_arr, mask_arr, patch_idx_arr = build_overlapping_crops(
591
+ image,
592
+ resize_mode,
593
+ normalize_mode,
594
+ max_crops,
595
+ overlap_margins,
596
+ base_image_input_size,
597
+ pad_value,
598
+ image_patch_size,
599
+ )
600
+ pooling_idx = arange_for_pooling(patch_idx_arr, pooling_h, pooling_w)
601
+ h, w = pooling_idx.shape[:2]
602
+ pooling_idx = pooling_idx.reshape([-1, pooling_h*pooling_w])
603
+
604
+ # Now build the output tokens
605
+ per_row = np.full(w, patch_id, dtype=np.int32)
606
+ if use_col_tokens:
607
+ per_row = np.concatenate([per_row, [col_id]], 0)
608
+ joint = np.tile(per_row, [h])
609
+ joint = [
610
+ [start_id],
611
+ joint,
612
+ [end_id]
613
+ ]
614
+
615
+ if crop_mode == "overlap-and-resize":
616
+ crop_arr = batch_pixels_to_patches(crop_arr, image_patch_size)
617
+ mask_arr = batch_pixels_to_patches(mask_arr, image_patch_size).astype(np.float32).mean(axis=-1)
618
+ return np.concatenate(joint, 0), crop_arr, mask_arr, pooling_idx
619
+
620
+ # Finally do the same for the global image
621
+ resized, resized_mask, resize_idx = build_resized_image(
622
+ image,
623
+ resize_mode,
624
+ normalize_mode,
625
+ base_image_input_size,
626
+ pad_value,
627
+ image_patch_size
628
+ )
629
+ crop_arr = np.concatenate([resized, crop_arr], 0)
630
+
631
+ mask_arr = np.concatenate([resized_mask, mask_arr], 0)
632
+
633
+ resize_idx = arange_for_pooling(resize_idx, pooling_h, pooling_w)
634
+ h, w = resize_idx.shape[:2]
635
+ resize_idx = resize_idx.reshape([-1, pooling_h*pooling_w])
636
+
637
+ # The global image's patches go first, so the patch indices of the earlier crops are shifted up
638
+ pooling_idx = np.where(
639
+ pooling_idx >= 0,
640
+ pooling_idx + crop_patch_h*crop_patch_w,
641
+ -1
642
+ )
643
+ pooling_idx = np.concatenate([resize_idx, pooling_idx])
644
+
645
+ per_row = np.full(
646
+ (w,),
647
+ patch_id,
648
+ dtype=np.int32
649
+ )
650
+ if use_col_tokens:
651
+ per_row = np.concatenate([per_row, [col_id]], 0)
652
+ extra_tokens = np.tile(per_row, [h])
653
+ joint = [
654
+ [start_id],
655
+ extra_tokens,
656
+ [end_id],
657
+ ] + joint
658
+ mask_arr = batch_pixels_to_patches(mask_arr, image_patch_size).astype(np.float32).mean(axis=-1)
659
+ return (
660
+ np.concatenate(joint, 0),
661
+ batch_pixels_to_patches(crop_arr, image_patch_size),
662
+ mask_arr,
663
+ pooling_idx
664
+ )
665
+ else:
666
+ raise NotImplementedError(crop_mode)
667
+
668
+
669
+ class Molmo2ImagesKwargs(ImagesKwargs, total=False):
670
+ crop_mode: Optional[str]
671
+ resize_mode: Optional[str]
672
+ normalize_mode: Optional[str]
673
+ max_crops: Optional[int]
674
+ max_multi_image_crops: Optional[int]
675
+ overlap_margins: Optional[List[int]]
676
+ base_image_input_size: Optional[List[int]]
677
+ pad_value: Optional[float]
678
+ image_patch_size: Optional[int]
679
+ image_pooling_w: Optional[int]
680
+ image_pooling_h: Optional[int]
681
+
682
+
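+ # (Editorial note, not part of the original file.) These kwargs mirror the per-call overrides that
+ # `Molmo2ImageProcessor.preprocess` accepts, e.g. `processor.preprocess(img, max_crops=12)` would
+ # override the value configured at construction time for that call only.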
683
+ class Molmo2ImageProcessor(BaseImageProcessor):
684
+
685
+ model_input_names = ["images", "pooled_patches_idx", "image_masks"]
686
+
687
+ def __init__(
688
+ self,
689
+ crop_mode: str = "overlap-and-resize-c2",
690
+ resize_mode: str = "siglip",
691
+ normalize_mode: str = "siglip",
692
+ max_crops: int = 8,
693
+ max_multi_image_crops: int = 4,
694
+ overlap_margins: List[int] = [4, 4],
695
+ base_image_input_size: List[int] = (378, 378),
696
+ pad_value: float = 0.0,
697
+ image_patch_size: int = 14,
698
+ image_pooling_w: int = 2,
699
+ image_pooling_h: int = 2,
700
+ do_convert_rgb: bool = True,
701
+ do_pad: Optional[bool] = True,
702
+ **kwargs,
703
+ ) -> None:
704
+ super().__init__(**kwargs)
705
+ self.crop_mode = crop_mode
706
+ self.resize_mode = resize_mode
707
+ self.normalize_mode = normalize_mode
708
+ self.overlap_margins = overlap_margins
709
+ self.max_crops = max_crops
710
+ self.max_multi_image_crops = max_multi_image_crops
712
+ self.base_image_input_size = base_image_input_size
713
+ self.pad_value = pad_value
714
+ self.image_patch_size = image_patch_size
715
+ self.image_pooling_w = image_pooling_w
716
+ self.image_pooling_h = image_pooling_h
717
+ self.do_convert_rgb = do_convert_rgb
718
+ self.do_pad = do_pad
719
+
720
+ def to_channel_dimension_last(
721
+ self,
722
+ images: List[ImageInput],
723
+ ) -> List[ImageInput]:
724
+ """
725
+ Convert images to channel dimension last.
726
+ """
727
+ new_images = []
728
+ for image in images:
729
+ if is_multi_image(image):
730
+ new_images.append([to_channel_dimension_format(img, ChannelDimension.LAST) for img in image])
731
+ else:
732
+ new_images.append(to_channel_dimension_format(image, ChannelDimension.LAST))
733
+ return new_images
734
+
735
+ def to_numpy_array(
736
+ self,
737
+ images: List[ImageInput],
738
+ ) -> List[np.ndarray]:
739
+ """
740
+ Convert images to numpy array.
741
+ """
742
+ new_images = []
743
+ for image in images:
744
+ if is_multi_image(image):
745
+ new_images.append([to_numpy_array(img) for img in image])
746
+ else:
747
+ new_images.append(to_numpy_array(image))
748
+ return new_images
749
+
750
+ def to_rgb(
751
+ self,
752
+ images: List[ImageInput],
753
+ ) -> List[ImageInput]:
754
+ """
755
+ Convert images to RGB.
756
+ """
757
+ new_images = []
758
+ for image in images:
759
+ if is_multi_image(image):
760
+ new_images.append([convert_to_rgb(img) for img in image])
761
+ else:
762
+ new_images.append(convert_to_rgb(image))
763
+ return new_images
764
+
765
+ def pad_arrays(self, arrays: List[np.ndarray], pad_value: float = -1) -> np.ndarray:
766
+ max_len = max(arr.shape[0] for arr in arrays)
767
+ padded_arr = np.full(
768
+ [len(arrays), max_len] + list(arrays[0].shape[1:]), pad_value, dtype=arrays[0].dtype
769
+ )
770
+ for ix, arr in enumerate(arrays):
771
+ padded_arr[ix, :len(arr)] = arr[:max_len]
772
+ return padded_arr
773
+
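+ # (Editorial sketch with hypothetical values, not part of the original file.) `pad_arrays` pads
+ # along the first axis only:
+ #   pad_arrays([np.zeros((2, 3)), np.zeros((5, 3))])  ->  shape (2, 5, 3)
+ # with the shorter entry right-padded with the pad_value (-1 by default).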
774
+ def pad_for_batching(self, data: Dict[str, Any]) -> Dict[str, Any]:
775
+ """
776
+ Pad the data for batching.
777
+ """
778
+ images = self.pad_arrays(data["images"])
779
+ pooled_patches_idx = self.pad_arrays(data["pooled_patches_idx"])
780
+ image_masks = self.pad_arrays(data["image_masks"])
781
+ image_grids = self.pad_arrays(data["image_grids"])
782
+ new_data = dict(
783
+ images=images,
784
+ pooled_patches_idx=pooled_patches_idx,
785
+ image_masks=image_masks,
786
+ image_grids=image_grids,
787
+ )
788
+ return new_data
789
+
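+ # (Editorial note with hypothetical sizes, not part of the original file.) With two examples
+ # holding 5 and 8 crops of 27*27 patches each, the padded "images" entry has shape
+ # (2, 8, 729, patch_dim); the missing crops of the shorter example are filled with -1.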
790
+ def preprocess(
791
+ self,
792
+ images: Union[ImageInput, List[ImageInput]],
793
+ crop_mode: Optional[str] = None,
794
+ resize_mode: Optional[str] = None,
795
+ normalize_mode: Optional[str] = None,
796
+ max_crops: Optional[int] = None,
797
+ max_multi_image_crops: Optional[int] = None,
798
+ overlap_margins: Optional[List[int]] = None,
799
+ base_image_input_size: Optional[List[int]] = None,
800
+ pad_value: Optional[float] = None,
801
+ image_patch_size: Optional[int] = None,
802
+ image_pooling_w: Optional[int] = None,
803
+ image_pooling_h: Optional[int] = None,
804
+ do_convert_rgb: Optional[bool] = None,
805
+ do_pad: Optional[bool] = None,
806
+ return_tensors: Optional[Union[str, TensorType]] = None,
807
+ **kwargs,
808
+ ) -> BatchFeature:
809
+ """
810
+ Preprocess an image for the model.
811
+ Args:
812
+ images: The image or batch of images to preprocess.
813
+ crop_mode: The crop mode to use. If None, use the default crop mode.
814
+ resize_mode: The resize mode to use. If None, use the default resize mode.
815
+ normalize_mode: The normalization mode to use. If None, use the default normalization mode.
816
+ max_crops: The maximum number of crops to use. If None, use the default value.
817
+ max_multi_image_crops: The maximum number of crops to use for multi-image inputs.
818
+ overlap_margins: The overlap margins to use. If None, use the default values.
819
+ base_image_input_size: The base image input size to use. If None, use the default size.
820
+ pad_value: The padding value to use. If None, use the default value.
821
+ image_patch_size: The size of the image patches. If None, use the default size.
822
+ image_pooling_h: The height of the image pooling. If None, use the default height.
823
+ image_pooling_w: The width of the image pooling. If None, use the default width.
824
+ do_convert_rgb: Whether to convert the image to RGB. If None, use the default value.
825
+ do_pad: Whether to pad image features. If None, use the default value.
826
+
827
+ Returns:
828
+ A BatchFeature containing:
829
+ - images: the preprocessed image crops
830
+ - pooled_patches_idx: the pooling indices
831
+ - image_masks: the crop padding masks
832
+ - image_grids: the image grids
833
+ """
834
+ images = make_batched_images(images)
835
+
836
+ if not valid_images(images):
837
+ raise ValueError("Invalid image input")
838
+
839
+ crop_mode = crop_mode or self.crop_mode
840
+ normalize_mode = normalize_mode or self.normalize_mode
841
+ resize_mode = resize_mode or self.resize_mode
842
+ max_crops = max_crops or self.max_crops
843
+ max_multi_image_crops = max_multi_image_crops or self.max_multi_image_crops
844
+ overlap_margins = overlap_margins or self.overlap_margins
845
+ base_image_input_size = base_image_input_size or self.base_image_input_size
846
+ pad_value = pad_value or self.pad_value
847
+ image_patch_size = image_patch_size or self.image_patch_size
848
+ image_pooling_w = image_pooling_w or self.image_pooling_w
849
+ image_pooling_h = image_pooling_h or self.image_pooling_h
850
+ do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
851
+ do_pad = do_pad if do_pad is not None else self.do_pad
852
+
853
+ if do_convert_rgb:
854
+ images = self.to_rgb(images)
855
+
856
+ # All transformations expect numpy arrays.
857
+ images = self.to_numpy_array(images)
858
+
859
+ # All transformations expect channel dimension last.
860
+ images = self.to_channel_dimension_last(images)
861
+
862
+ batch_image_grids = []
863
+ batch_crops = []
864
+ batch_crop_masks = []
865
+ batch_pooled_patches_idx = []
866
+
867
+ for image in images:
868
+ if is_multi_image(image):
869
+ all_image_grids = []
870
+ all_crops = []
871
+ all_crop_masks = []
872
+ pooled_patches_idx = []
873
+ for img in image:
874
+ image_grid, crops, img_mask, pooled_idx = image_to_patches_and_grids(
875
+ img,
876
+ crop_mode,
877
+ resize_mode,
878
+ normalize_mode,
879
+ max_multi_image_crops,
880
+ overlap_margins,
881
+ base_image_input_size,
882
+ pad_value,
883
+ image_patch_size,
884
+ image_pooling_w,
885
+ image_pooling_h,
886
+ )
887
+ pooled_patches_idx.append(np.where(pooled_idx >= 0, pooled_idx + sum(np.prod(x.shape[:2]) for x in all_crops), -1))
888
+ all_crops.append(crops)
889
+ all_crop_masks.append(img_mask)
890
+ all_image_grids.append(image_grid)
891
+ all_image_grids = np.concatenate(all_image_grids, 0)
892
+ all_crops = np.concatenate(all_crops, 0)
893
+ all_crop_masks = np.concatenate(all_crop_masks, 0)
894
+ pooled_patches_idx = np.concatenate(pooled_patches_idx, 0)
895
+
896
+ batch_image_grids.append(all_image_grids)
897
+ batch_crops.append(all_crops)
898
+ batch_crop_masks.append(all_crop_masks)
899
+ batch_pooled_patches_idx.append(pooled_patches_idx)
900
+ else:
901
+ image_grid, crops, img_mask, pooled_idx = image_to_patches_and_grids(
902
+ image,
903
+ crop_mode,
904
+ resize_mode,
905
+ normalize_mode,
906
+ max_crops,
907
+ overlap_margins,
908
+ base_image_input_size,
909
+ pad_value,
910
+ image_patch_size,
911
+ image_pooling_w,
912
+ image_pooling_h,
913
+ )
914
+ batch_image_grids.append(image_grid)
915
+ batch_crops.append(crops)
916
+ batch_crop_masks.append(img_mask)
917
+ batch_pooled_patches_idx.append(pooled_idx)
918
+
919
+ data = dict(
920
+ images=batch_crops,
921
+ pooled_patches_idx=batch_pooled_patches_idx,
922
+ image_masks=batch_crop_masks,
923
+ image_grids=batch_image_grids,
924
+ )
925
+
926
+ if do_pad:
927
+ data = self.pad_for_batching(data)
928
+
929
+ return BatchFeature(data, tensor_type=return_tensors)
930
+
931
+
932
+ Molmo2ImageProcessor.register_for_auto_class()
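For reference, a minimal usage sketch of the processor defined above (illustrative only: the repository id is a placeholder, and loading through the auto class registered above is assumed to require trust_remote_code):

    import numpy as np
    from transformers import AutoImageProcessor

    # load the remote-code image processor registered above (placeholder repo id)
    processor = AutoImageProcessor.from_pretrained("<this-repo-id>", trust_remote_code=True)
    image = np.zeros((480, 640, 3), dtype=np.uint8)  # any RGB image as an HxWx3 uint8 array
    features = processor.preprocess(image, return_tensors="np")
    print(features["images"].shape, features["pooled_patches_idx"].shape)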
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fd470f844557010e15783657a6a8154c71d72436e63b33a4fd4788f8ab9a0aff
3
+ size 4878581216
model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f36867a723e2147f55779a0dd38cffe6b35470328f65c2f54a03a1e5670e1710
3
+ size 4932745864
model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:85d5e7328e6bea486600bbd5d79217166b6892c92417b12555fd7742fe7ec914
3
+ size 4994552920
model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4b484370143a4297bfa46cd1de7719f4f7876551fc26eb741b3cd6f60e01a867
3
+ size 1433042592
model.safetensors.index.json ADDED
@@ -0,0 +1,621 @@
1
+ {
2
+ "metadata": {
3
+ "total_size": 16238835616
4
+ },
5
+ "weight_map": {
6
+ "lm_head.weight": "model-00004-of-00004.safetensors",
7
+ "model.transformer.blocks.0.attn_norm.weight": "model-00001-of-00004.safetensors",
8
+ "model.transformer.blocks.0.ff_norm.weight": "model-00001-of-00004.safetensors",
9
+ "model.transformer.blocks.0.mlp.ff_out.weight": "model-00001-of-00004.safetensors",
10
+ "model.transformer.blocks.0.mlp.ff_proj.weight": "model-00001-of-00004.safetensors",
11
+ "model.transformer.blocks.0.self_attn.att_proj.bias": "model-00001-of-00004.safetensors",
12
+ "model.transformer.blocks.0.self_attn.att_proj.weight": "model-00001-of-00004.safetensors",
13
+ "model.transformer.blocks.0.self_attn.attn_out.weight": "model-00001-of-00004.safetensors",
14
+ "model.transformer.blocks.1.attn_norm.weight": "model-00001-of-00004.safetensors",
15
+ "model.transformer.blocks.1.ff_norm.weight": "model-00001-of-00004.safetensors",
16
+ "model.transformer.blocks.1.mlp.ff_out.weight": "model-00001-of-00004.safetensors",
17
+ "model.transformer.blocks.1.mlp.ff_proj.weight": "model-00001-of-00004.safetensors",
18
+ "model.transformer.blocks.1.self_attn.att_proj.bias": "model-00001-of-00004.safetensors",
19
+ "model.transformer.blocks.1.self_attn.att_proj.weight": "model-00001-of-00004.safetensors",
20
+ "model.transformer.blocks.1.self_attn.attn_out.weight": "model-00001-of-00004.safetensors",
21
+ "model.transformer.blocks.10.attn_norm.weight": "model-00002-of-00004.safetensors",
22
+ "model.transformer.blocks.10.ff_norm.weight": "model-00002-of-00004.safetensors",
23
+ "model.transformer.blocks.10.mlp.ff_out.weight": "model-00002-of-00004.safetensors",
24
+ "model.transformer.blocks.10.mlp.ff_proj.weight": "model-00002-of-00004.safetensors",
25
+ "model.transformer.blocks.10.self_attn.att_proj.bias": "model-00002-of-00004.safetensors",
26
+ "model.transformer.blocks.10.self_attn.att_proj.weight": "model-00002-of-00004.safetensors",
27
+ "model.transformer.blocks.10.self_attn.attn_out.weight": "model-00002-of-00004.safetensors",
28
+ "model.transformer.blocks.11.attn_norm.weight": "model-00002-of-00004.safetensors",
29
+ "model.transformer.blocks.11.ff_norm.weight": "model-00002-of-00004.safetensors",
30
+ "model.transformer.blocks.11.mlp.ff_out.weight": "model-00002-of-00004.safetensors",
31
+ "model.transformer.blocks.11.mlp.ff_proj.weight": "model-00002-of-00004.safetensors",
32
+ "model.transformer.blocks.11.self_attn.att_proj.bias": "model-00002-of-00004.safetensors",
33
+ "model.transformer.blocks.11.self_attn.att_proj.weight": "model-00002-of-00004.safetensors",
34
+ "model.transformer.blocks.11.self_attn.attn_out.weight": "model-00002-of-00004.safetensors",
35
+ "model.transformer.blocks.12.attn_norm.weight": "model-00002-of-00004.safetensors",
36
+ "model.transformer.blocks.12.ff_norm.weight": "model-00002-of-00004.safetensors",
37
+ "model.transformer.blocks.12.mlp.ff_out.weight": "model-00002-of-00004.safetensors",
38
+ "model.transformer.blocks.12.mlp.ff_proj.weight": "model-00002-of-00004.safetensors",
39
+ "model.transformer.blocks.12.self_attn.att_proj.bias": "model-00002-of-00004.safetensors",
40
+ "model.transformer.blocks.12.self_attn.att_proj.weight": "model-00002-of-00004.safetensors",
41
+ "model.transformer.blocks.12.self_attn.attn_out.weight": "model-00002-of-00004.safetensors",
42
+ "model.transformer.blocks.13.attn_norm.weight": "model-00002-of-00004.safetensors",
43
+ "model.transformer.blocks.13.ff_norm.weight": "model-00002-of-00004.safetensors",
44
+ "model.transformer.blocks.13.mlp.ff_out.weight": "model-00002-of-00004.safetensors",
45
+ "model.transformer.blocks.13.mlp.ff_proj.weight": "model-00002-of-00004.safetensors",
46
+ "model.transformer.blocks.13.self_attn.att_proj.bias": "model-00002-of-00004.safetensors",
47
+ "model.transformer.blocks.13.self_attn.att_proj.weight": "model-00002-of-00004.safetensors",
48
+ "model.transformer.blocks.13.self_attn.attn_out.weight": "model-00002-of-00004.safetensors",
49
+ "model.transformer.blocks.14.attn_norm.weight": "model-00002-of-00004.safetensors",
50
+ "model.transformer.blocks.14.ff_norm.weight": "model-00002-of-00004.safetensors",
51
+ "model.transformer.blocks.14.mlp.ff_out.weight": "model-00002-of-00004.safetensors",
52
+ "model.transformer.blocks.14.mlp.ff_proj.weight": "model-00002-of-00004.safetensors",
53
+ "model.transformer.blocks.14.self_attn.att_proj.bias": "model-00002-of-00004.safetensors",
54
+ "model.transformer.blocks.14.self_attn.att_proj.weight": "model-00002-of-00004.safetensors",
55
+ "model.transformer.blocks.14.self_attn.attn_out.weight": "model-00002-of-00004.safetensors",
56
+ "model.transformer.blocks.15.attn_norm.weight": "model-00002-of-00004.safetensors",
57
+ "model.transformer.blocks.15.ff_norm.weight": "model-00002-of-00004.safetensors",
58
+ "model.transformer.blocks.15.mlp.ff_out.weight": "model-00002-of-00004.safetensors",
59
+ "model.transformer.blocks.15.mlp.ff_proj.weight": "model-00002-of-00004.safetensors",
60
+ "model.transformer.blocks.15.self_attn.att_proj.bias": "model-00002-of-00004.safetensors",
61
+ "model.transformer.blocks.15.self_attn.att_proj.weight": "model-00002-of-00004.safetensors",
62
+ "model.transformer.blocks.15.self_attn.attn_out.weight": "model-00002-of-00004.safetensors",
63
+ "model.transformer.blocks.16.attn_norm.weight": "model-00002-of-00004.safetensors",
64
+ "model.transformer.blocks.16.ff_norm.weight": "model-00002-of-00004.safetensors",
65
+ "model.transformer.blocks.16.mlp.ff_out.weight": "model-00002-of-00004.safetensors",
66
+ "model.transformer.blocks.16.mlp.ff_proj.weight": "model-00002-of-00004.safetensors",
67
+ "model.transformer.blocks.16.self_attn.att_proj.bias": "model-00002-of-00004.safetensors",
68
+ "model.transformer.blocks.16.self_attn.att_proj.weight": "model-00002-of-00004.safetensors",
69
+ "model.transformer.blocks.16.self_attn.attn_out.weight": "model-00002-of-00004.safetensors",
70
+ "model.transformer.blocks.17.attn_norm.weight": "model-00002-of-00004.safetensors",
71
+ "model.transformer.blocks.17.ff_norm.weight": "model-00002-of-00004.safetensors",
72
+ "model.transformer.blocks.17.mlp.ff_out.weight": "model-00002-of-00004.safetensors",
73
+ "model.transformer.blocks.17.mlp.ff_proj.weight": "model-00002-of-00004.safetensors",
74
+ "model.transformer.blocks.17.self_attn.att_proj.bias": "model-00002-of-00004.safetensors",
75
+ "model.transformer.blocks.17.self_attn.att_proj.weight": "model-00002-of-00004.safetensors",
76
+ "model.transformer.blocks.17.self_attn.attn_out.weight": "model-00002-of-00004.safetensors",
77
+ "model.transformer.blocks.18.attn_norm.weight": "model-00002-of-00004.safetensors",
78
+ "model.transformer.blocks.18.ff_norm.weight": "model-00003-of-00004.safetensors",
79
+ "model.transformer.blocks.18.mlp.ff_out.weight": "model-00003-of-00004.safetensors",
80
+ "model.transformer.blocks.18.mlp.ff_proj.weight": "model-00002-of-00004.safetensors",
81
+ "model.transformer.blocks.18.self_attn.att_proj.bias": "model-00002-of-00004.safetensors",
82
+ "model.transformer.blocks.18.self_attn.att_proj.weight": "model-00002-of-00004.safetensors",
83
+ "model.transformer.blocks.18.self_attn.attn_out.weight": "model-00002-of-00004.safetensors",
84
+ "model.transformer.blocks.19.attn_norm.weight": "model-00003-of-00004.safetensors",
85
+ "model.transformer.blocks.19.ff_norm.weight": "model-00003-of-00004.safetensors",
86
+ "model.transformer.blocks.19.mlp.ff_out.weight": "model-00003-of-00004.safetensors",
87
+ "model.transformer.blocks.19.mlp.ff_proj.weight": "model-00003-of-00004.safetensors",
88
+ "model.transformer.blocks.19.self_attn.att_proj.bias": "model-00003-of-00004.safetensors",
89
+ "model.transformer.blocks.19.self_attn.att_proj.weight": "model-00003-of-00004.safetensors",
90
+ "model.transformer.blocks.19.self_attn.attn_out.weight": "model-00003-of-00004.safetensors",
91
+ "model.transformer.blocks.2.attn_norm.weight": "model-00001-of-00004.safetensors",
92
+ "model.transformer.blocks.2.ff_norm.weight": "model-00001-of-00004.safetensors",
93
+ "model.transformer.blocks.2.mlp.ff_out.weight": "model-00001-of-00004.safetensors",
94
+ "model.transformer.blocks.2.mlp.ff_proj.weight": "model-00001-of-00004.safetensors",
95
+ "model.transformer.blocks.2.self_attn.att_proj.bias": "model-00001-of-00004.safetensors",
96
+ "model.transformer.blocks.2.self_attn.att_proj.weight": "model-00001-of-00004.safetensors",
97
+ "model.transformer.blocks.2.self_attn.attn_out.weight": "model-00001-of-00004.safetensors",
98
+ "model.transformer.blocks.20.attn_norm.weight": "model-00003-of-00004.safetensors",
99
+ "model.transformer.blocks.20.ff_norm.weight": "model-00003-of-00004.safetensors",
100
+ "model.transformer.blocks.20.mlp.ff_out.weight": "model-00003-of-00004.safetensors",
101
+ "model.transformer.blocks.20.mlp.ff_proj.weight": "model-00003-of-00004.safetensors",
102
+ "model.transformer.blocks.20.self_attn.att_proj.bias": "model-00003-of-00004.safetensors",
103
+ "model.transformer.blocks.20.self_attn.att_proj.weight": "model-00003-of-00004.safetensors",
104
+ "model.transformer.blocks.20.self_attn.attn_out.weight": "model-00003-of-00004.safetensors",
105
+ "model.transformer.blocks.21.attn_norm.weight": "model-00003-of-00004.safetensors",
106
+ "model.transformer.blocks.21.ff_norm.weight": "model-00003-of-00004.safetensors",
107
+ "model.transformer.blocks.21.mlp.ff_out.weight": "model-00003-of-00004.safetensors",
108
+ "model.transformer.blocks.21.mlp.ff_proj.weight": "model-00003-of-00004.safetensors",
109
+ "model.transformer.blocks.21.self_attn.att_proj.bias": "model-00003-of-00004.safetensors",
110
+ "model.transformer.blocks.21.self_attn.att_proj.weight": "model-00003-of-00004.safetensors",
111
+ "model.transformer.blocks.21.self_attn.attn_out.weight": "model-00003-of-00004.safetensors",
112
+ "model.transformer.blocks.22.attn_norm.weight": "model-00003-of-00004.safetensors",
113
+ "model.transformer.blocks.22.ff_norm.weight": "model-00003-of-00004.safetensors",
114
+ "model.transformer.blocks.22.mlp.ff_out.weight": "model-00003-of-00004.safetensors",
115
+ "model.transformer.blocks.22.mlp.ff_proj.weight": "model-00003-of-00004.safetensors",
116
+ "model.transformer.blocks.22.self_attn.att_proj.bias": "model-00003-of-00004.safetensors",
117
+ "model.transformer.blocks.22.self_attn.att_proj.weight": "model-00003-of-00004.safetensors",
118
+ "model.transformer.blocks.22.self_attn.attn_out.weight": "model-00003-of-00004.safetensors",
119
+ "model.transformer.blocks.23.attn_norm.weight": "model-00003-of-00004.safetensors",
120
+ "model.transformer.blocks.23.ff_norm.weight": "model-00003-of-00004.safetensors",
121
+ "model.transformer.blocks.23.mlp.ff_out.weight": "model-00003-of-00004.safetensors",
122
+ "model.transformer.blocks.23.mlp.ff_proj.weight": "model-00003-of-00004.safetensors",
123
+ "model.transformer.blocks.23.self_attn.att_proj.bias": "model-00003-of-00004.safetensors",
124
+ "model.transformer.blocks.23.self_attn.att_proj.weight": "model-00003-of-00004.safetensors",
125
+ "model.transformer.blocks.23.self_attn.attn_out.weight": "model-00003-of-00004.safetensors",
126
+ "model.transformer.blocks.24.attn_norm.weight": "model-00003-of-00004.safetensors",
127
+ "model.transformer.blocks.24.ff_norm.weight": "model-00003-of-00004.safetensors",
128
+ "model.transformer.blocks.24.mlp.ff_out.weight": "model-00003-of-00004.safetensors",
129
+ "model.transformer.blocks.24.mlp.ff_proj.weight": "model-00003-of-00004.safetensors",
130
+ "model.transformer.blocks.24.self_attn.att_proj.bias": "model-00003-of-00004.safetensors",
131
+ "model.transformer.blocks.24.self_attn.att_proj.weight": "model-00003-of-00004.safetensors",
132
+ "model.transformer.blocks.24.self_attn.attn_out.weight": "model-00003-of-00004.safetensors",
133
+ "model.transformer.blocks.25.attn_norm.weight": "model-00003-of-00004.safetensors",
134
+ "model.transformer.blocks.25.ff_norm.weight": "model-00003-of-00004.safetensors",
135
+ "model.transformer.blocks.25.mlp.ff_out.weight": "model-00003-of-00004.safetensors",
136
+ "model.transformer.blocks.25.mlp.ff_proj.weight": "model-00003-of-00004.safetensors",
137
+ "model.transformer.blocks.25.self_attn.att_proj.bias": "model-00003-of-00004.safetensors",
138
+ "model.transformer.blocks.25.self_attn.att_proj.weight": "model-00003-of-00004.safetensors",
139
+ "model.transformer.blocks.25.self_attn.attn_out.weight": "model-00003-of-00004.safetensors",
140
+ "model.transformer.blocks.26.attn_norm.weight": "model-00003-of-00004.safetensors",
141
+ "model.transformer.blocks.26.ff_norm.weight": "model-00003-of-00004.safetensors",
142
+ "model.transformer.blocks.26.mlp.ff_out.weight": "model-00003-of-00004.safetensors",
143
+ "model.transformer.blocks.26.mlp.ff_proj.weight": "model-00003-of-00004.safetensors",
144
+ "model.transformer.blocks.26.self_attn.att_proj.bias": "model-00003-of-00004.safetensors",
145
+ "model.transformer.blocks.26.self_attn.att_proj.weight": "model-00003-of-00004.safetensors",
146
+ "model.transformer.blocks.26.self_attn.attn_out.weight": "model-00003-of-00004.safetensors",
147
+ "model.transformer.blocks.27.attn_norm.weight": "model-00003-of-00004.safetensors",
148
+ "model.transformer.blocks.27.ff_norm.weight": "model-00003-of-00004.safetensors",
149
+ "model.transformer.blocks.27.mlp.ff_out.weight": "model-00003-of-00004.safetensors",
150
+ "model.transformer.blocks.27.mlp.ff_proj.weight": "model-00003-of-00004.safetensors",
151
+ "model.transformer.blocks.27.self_attn.att_proj.bias": "model-00003-of-00004.safetensors",
152
+ "model.transformer.blocks.27.self_attn.att_proj.weight": "model-00003-of-00004.safetensors",
153
+ "model.transformer.blocks.27.self_attn.attn_out.weight": "model-00003-of-00004.safetensors",
154
+ "model.transformer.blocks.3.attn_norm.weight": "model-00001-of-00004.safetensors",
155
+ "model.transformer.blocks.3.ff_norm.weight": "model-00001-of-00004.safetensors",
156
+ "model.transformer.blocks.3.mlp.ff_out.weight": "model-00001-of-00004.safetensors",
157
+ "model.transformer.blocks.3.mlp.ff_proj.weight": "model-00001-of-00004.safetensors",
158
+ "model.transformer.blocks.3.self_attn.att_proj.bias": "model-00001-of-00004.safetensors",
159
+ "model.transformer.blocks.3.self_attn.att_proj.weight": "model-00001-of-00004.safetensors",
160
+ "model.transformer.blocks.3.self_attn.attn_out.weight": "model-00001-of-00004.safetensors",
161
+ "model.transformer.blocks.4.attn_norm.weight": "model-00001-of-00004.safetensors",
162
+ "model.transformer.blocks.4.ff_norm.weight": "model-00001-of-00004.safetensors",
163
+ "model.transformer.blocks.4.mlp.ff_out.weight": "model-00001-of-00004.safetensors",
164
+ "model.transformer.blocks.4.mlp.ff_proj.weight": "model-00001-of-00004.safetensors",
165
+ "model.transformer.blocks.4.self_attn.att_proj.bias": "model-00001-of-00004.safetensors",
166
+ "model.transformer.blocks.4.self_attn.att_proj.weight": "model-00001-of-00004.safetensors",
167
+ "model.transformer.blocks.4.self_attn.attn_out.weight": "model-00001-of-00004.safetensors",
168
+ "model.transformer.blocks.5.attn_norm.weight": "model-00001-of-00004.safetensors",
169
+ "model.transformer.blocks.5.ff_norm.weight": "model-00001-of-00004.safetensors",
170
+ "model.transformer.blocks.5.mlp.ff_out.weight": "model-00001-of-00004.safetensors",
171
+ "model.transformer.blocks.5.mlp.ff_proj.weight": "model-00001-of-00004.safetensors",
172
+ "model.transformer.blocks.5.self_attn.att_proj.bias": "model-00001-of-00004.safetensors",
173
+ "model.transformer.blocks.5.self_attn.att_proj.weight": "model-00001-of-00004.safetensors",
174
+ "model.transformer.blocks.5.self_attn.attn_out.weight": "model-00001-of-00004.safetensors",
175
+ "model.transformer.blocks.6.attn_norm.weight": "model-00001-of-00004.safetensors",
176
+ "model.transformer.blocks.6.ff_norm.weight": "model-00001-of-00004.safetensors",
177
+ "model.transformer.blocks.6.mlp.ff_out.weight": "model-00001-of-00004.safetensors",
178
+ "model.transformer.blocks.6.mlp.ff_proj.weight": "model-00001-of-00004.safetensors",
179
+ "model.transformer.blocks.6.self_attn.att_proj.bias": "model-00001-of-00004.safetensors",
180
+ "model.transformer.blocks.6.self_attn.att_proj.weight": "model-00001-of-00004.safetensors",
181
+ "model.transformer.blocks.6.self_attn.attn_out.weight": "model-00001-of-00004.safetensors",
182
+ "model.transformer.blocks.7.attn_norm.weight": "model-00001-of-00004.safetensors",
183
+ "model.transformer.blocks.7.ff_norm.weight": "model-00001-of-00004.safetensors",
184
+ "model.transformer.blocks.7.mlp.ff_out.weight": "model-00001-of-00004.safetensors",
185
+ "model.transformer.blocks.7.mlp.ff_proj.weight": "model-00001-of-00004.safetensors",
186
+ "model.transformer.blocks.7.self_attn.att_proj.bias": "model-00001-of-00004.safetensors",
187
+ "model.transformer.blocks.7.self_attn.att_proj.weight": "model-00001-of-00004.safetensors",
188
+ "model.transformer.blocks.7.self_attn.attn_out.weight": "model-00001-of-00004.safetensors",
189
+ "model.transformer.blocks.8.attn_norm.weight": "model-00001-of-00004.safetensors",
190
+ "model.transformer.blocks.8.ff_norm.weight": "model-00002-of-00004.safetensors",
191
+ "model.transformer.blocks.8.mlp.ff_out.weight": "model-00002-of-00004.safetensors",
192
+ "model.transformer.blocks.8.mlp.ff_proj.weight": "model-00002-of-00004.safetensors",
193
+ "model.transformer.blocks.8.self_attn.att_proj.bias": "model-00001-of-00004.safetensors",
194
+ "model.transformer.blocks.8.self_attn.att_proj.weight": "model-00001-of-00004.safetensors",
195
+ "model.transformer.blocks.8.self_attn.attn_out.weight": "model-00001-of-00004.safetensors",
196
+ "model.transformer.blocks.9.attn_norm.weight": "model-00002-of-00004.safetensors",
197
+ "model.transformer.blocks.9.ff_norm.weight": "model-00002-of-00004.safetensors",
198
+ "model.transformer.blocks.9.mlp.ff_out.weight": "model-00002-of-00004.safetensors",
199
+ "model.transformer.blocks.9.mlp.ff_proj.weight": "model-00002-of-00004.safetensors",
200
+ "model.transformer.blocks.9.self_attn.att_proj.bias": "model-00002-of-00004.safetensors",
201
+ "model.transformer.blocks.9.self_attn.att_proj.weight": "model-00002-of-00004.safetensors",
202
+ "model.transformer.blocks.9.self_attn.attn_out.weight": "model-00002-of-00004.safetensors",
203
+ "model.transformer.ln_f.weight": "model-00003-of-00004.safetensors",
204
+ "model.transformer.wte.embedding": "model-00001-of-00004.safetensors",
205
+ "model.transformer.wte.new_embedding": "model-00001-of-00004.safetensors",
206
+ "model.vision_backbone.image_pooling_2d.wk.bias": "model-00004-of-00004.safetensors",
207
+ "model.vision_backbone.image_pooling_2d.wk.weight": "model-00004-of-00004.safetensors",
208
+ "model.vision_backbone.image_pooling_2d.wo.bias": "model-00004-of-00004.safetensors",
209
+ "model.vision_backbone.image_pooling_2d.wo.weight": "model-00004-of-00004.safetensors",
210
+ "model.vision_backbone.image_pooling_2d.wq.bias": "model-00004-of-00004.safetensors",
211
+ "model.vision_backbone.image_pooling_2d.wq.weight": "model-00004-of-00004.safetensors",
212
+ "model.vision_backbone.image_pooling_2d.wv.bias": "model-00004-of-00004.safetensors",
213
+ "model.vision_backbone.image_pooling_2d.wv.weight": "model-00004-of-00004.safetensors",
214
+ "model.vision_backbone.image_projector.w1.weight": "model-00004-of-00004.safetensors",
215
+ "model.vision_backbone.image_projector.w2.weight": "model-00004-of-00004.safetensors",
216
+ "model.vision_backbone.image_projector.w3.weight": "model-00004-of-00004.safetensors",
217
+ "model.vision_backbone.image_vit.patch_embedding.bias": "model-00003-of-00004.safetensors",
218
+ "model.vision_backbone.image_vit.patch_embedding.weight": "model-00003-of-00004.safetensors",
219
+ "model.vision_backbone.image_vit.positional_embedding": "model-00003-of-00004.safetensors",
220
+ "model.vision_backbone.image_vit.transformer.resblocks.0.attention.wk.bias": "model-00003-of-00004.safetensors",
221
+ "model.vision_backbone.image_vit.transformer.resblocks.0.attention.wk.weight": "model-00003-of-00004.safetensors",
222
+ "model.vision_backbone.image_vit.transformer.resblocks.0.attention.wo.bias": "model-00003-of-00004.safetensors",
223
+ "model.vision_backbone.image_vit.transformer.resblocks.0.attention.wo.weight": "model-00003-of-00004.safetensors",
224
+ "model.vision_backbone.image_vit.transformer.resblocks.0.attention.wq.bias": "model-00003-of-00004.safetensors",
225
+ "model.vision_backbone.image_vit.transformer.resblocks.0.attention.wq.weight": "model-00003-of-00004.safetensors",
226
+ "model.vision_backbone.image_vit.transformer.resblocks.0.attention.wv.bias": "model-00003-of-00004.safetensors",
227
+ "model.vision_backbone.image_vit.transformer.resblocks.0.attention.wv.weight": "model-00003-of-00004.safetensors",
228
+ "model.vision_backbone.image_vit.transformer.resblocks.0.attention_norm.bias": "model-00003-of-00004.safetensors",
229
+ "model.vision_backbone.image_vit.transformer.resblocks.0.attention_norm.weight": "model-00003-of-00004.safetensors",
230
+ "model.vision_backbone.image_vit.transformer.resblocks.0.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
231
+ "model.vision_backbone.image_vit.transformer.resblocks.0.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
232
+ "model.vision_backbone.image_vit.transformer.resblocks.0.feed_forward.w2.bias": "model-00003-of-00004.safetensors",
233
+ "model.vision_backbone.image_vit.transformer.resblocks.0.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
234
+ "model.vision_backbone.image_vit.transformer.resblocks.0.ffn_norm.bias": "model-00003-of-00004.safetensors",
235
+ "model.vision_backbone.image_vit.transformer.resblocks.0.ffn_norm.weight": "model-00003-of-00004.safetensors",
236
+ "model.vision_backbone.image_vit.transformer.resblocks.1.attention.wk.bias": "model-00003-of-00004.safetensors",
237
+ "model.vision_backbone.image_vit.transformer.resblocks.1.attention.wk.weight": "model-00003-of-00004.safetensors",
238
+ "model.vision_backbone.image_vit.transformer.resblocks.1.attention.wo.bias": "model-00003-of-00004.safetensors",
239
+ "model.vision_backbone.image_vit.transformer.resblocks.1.attention.wo.weight": "model-00003-of-00004.safetensors",
240
+ "model.vision_backbone.image_vit.transformer.resblocks.1.attention.wq.bias": "model-00003-of-00004.safetensors",
241
+ "model.vision_backbone.image_vit.transformer.resblocks.1.attention.wq.weight": "model-00003-of-00004.safetensors",
242
+ "model.vision_backbone.image_vit.transformer.resblocks.1.attention.wv.bias": "model-00003-of-00004.safetensors",
243
+ "model.vision_backbone.image_vit.transformer.resblocks.1.attention.wv.weight": "model-00003-of-00004.safetensors",
244
+ "model.vision_backbone.image_vit.transformer.resblocks.1.attention_norm.bias": "model-00003-of-00004.safetensors",
245
+ "model.vision_backbone.image_vit.transformer.resblocks.1.attention_norm.weight": "model-00003-of-00004.safetensors",
246
+ "model.vision_backbone.image_vit.transformer.resblocks.1.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
247
+ "model.vision_backbone.image_vit.transformer.resblocks.1.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
248
+ "model.vision_backbone.image_vit.transformer.resblocks.1.feed_forward.w2.bias": "model-00003-of-00004.safetensors",
249
+ "model.vision_backbone.image_vit.transformer.resblocks.1.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
250
+ "model.vision_backbone.image_vit.transformer.resblocks.1.ffn_norm.bias": "model-00003-of-00004.safetensors",
251
+ "model.vision_backbone.image_vit.transformer.resblocks.1.ffn_norm.weight": "model-00003-of-00004.safetensors",
252
+ "model.vision_backbone.image_vit.transformer.resblocks.10.attention.wk.bias": "model-00003-of-00004.safetensors",
253
+ "model.vision_backbone.image_vit.transformer.resblocks.10.attention.wk.weight": "model-00003-of-00004.safetensors",
254
+ "model.vision_backbone.image_vit.transformer.resblocks.10.attention.wo.bias": "model-00003-of-00004.safetensors",
255
+ "model.vision_backbone.image_vit.transformer.resblocks.10.attention.wo.weight": "model-00003-of-00004.safetensors",
256
+ "model.vision_backbone.image_vit.transformer.resblocks.10.attention.wq.bias": "model-00003-of-00004.safetensors",
257
+ "model.vision_backbone.image_vit.transformer.resblocks.10.attention.wq.weight": "model-00003-of-00004.safetensors",
258
+ "model.vision_backbone.image_vit.transformer.resblocks.10.attention.wv.bias": "model-00003-of-00004.safetensors",
259
+ "model.vision_backbone.image_vit.transformer.resblocks.10.attention.wv.weight": "model-00003-of-00004.safetensors",
260
+ "model.vision_backbone.image_vit.transformer.resblocks.10.attention_norm.bias": "model-00003-of-00004.safetensors",
261
+ "model.vision_backbone.image_vit.transformer.resblocks.10.attention_norm.weight": "model-00003-of-00004.safetensors",
262
+ "model.vision_backbone.image_vit.transformer.resblocks.10.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
263
+ "model.vision_backbone.image_vit.transformer.resblocks.10.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
264
+ "model.vision_backbone.image_vit.transformer.resblocks.10.feed_forward.w2.bias": "model-00003-of-00004.safetensors",
265
+ "model.vision_backbone.image_vit.transformer.resblocks.10.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
266
+ "model.vision_backbone.image_vit.transformer.resblocks.10.ffn_norm.bias": "model-00003-of-00004.safetensors",
267
+ "model.vision_backbone.image_vit.transformer.resblocks.10.ffn_norm.weight": "model-00003-of-00004.safetensors",
268
+ "model.vision_backbone.image_vit.transformer.resblocks.11.attention.wk.bias": "model-00003-of-00004.safetensors",
269
+ "model.vision_backbone.image_vit.transformer.resblocks.11.attention.wk.weight": "model-00003-of-00004.safetensors",
270
+ "model.vision_backbone.image_vit.transformer.resblocks.11.attention.wo.bias": "model-00003-of-00004.safetensors",
271
+ "model.vision_backbone.image_vit.transformer.resblocks.11.attention.wo.weight": "model-00003-of-00004.safetensors",
272
+ "model.vision_backbone.image_vit.transformer.resblocks.11.attention.wq.bias": "model-00003-of-00004.safetensors",
273
+ "model.vision_backbone.image_vit.transformer.resblocks.11.attention.wq.weight": "model-00003-of-00004.safetensors",
274
+ "model.vision_backbone.image_vit.transformer.resblocks.11.attention.wv.bias": "model-00003-of-00004.safetensors",
275
+ "model.vision_backbone.image_vit.transformer.resblocks.11.attention.wv.weight": "model-00003-of-00004.safetensors",
276
+ "model.vision_backbone.image_vit.transformer.resblocks.11.attention_norm.bias": "model-00003-of-00004.safetensors",
277
+ "model.vision_backbone.image_vit.transformer.resblocks.11.attention_norm.weight": "model-00003-of-00004.safetensors",
278
+ "model.vision_backbone.image_vit.transformer.resblocks.11.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
279
+ "model.vision_backbone.image_vit.transformer.resblocks.11.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
280
+ "model.vision_backbone.image_vit.transformer.resblocks.11.feed_forward.w2.bias": "model-00003-of-00004.safetensors",
281
+ "model.vision_backbone.image_vit.transformer.resblocks.11.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
282
+ "model.vision_backbone.image_vit.transformer.resblocks.11.ffn_norm.bias": "model-00003-of-00004.safetensors",
283
+ "model.vision_backbone.image_vit.transformer.resblocks.11.ffn_norm.weight": "model-00003-of-00004.safetensors",
284
+ "model.vision_backbone.image_vit.transformer.resblocks.12.attention.wk.bias": "model-00003-of-00004.safetensors",
285
+ "model.vision_backbone.image_vit.transformer.resblocks.12.attention.wk.weight": "model-00003-of-00004.safetensors",
286
+ "model.vision_backbone.image_vit.transformer.resblocks.12.attention.wo.bias": "model-00003-of-00004.safetensors",
287
+ "model.vision_backbone.image_vit.transformer.resblocks.12.attention.wo.weight": "model-00003-of-00004.safetensors",
288
+ "model.vision_backbone.image_vit.transformer.resblocks.12.attention.wq.bias": "model-00003-of-00004.safetensors",
289
+ "model.vision_backbone.image_vit.transformer.resblocks.12.attention.wq.weight": "model-00003-of-00004.safetensors",
290
+ "model.vision_backbone.image_vit.transformer.resblocks.12.attention.wv.bias": "model-00003-of-00004.safetensors",
291
+ "model.vision_backbone.image_vit.transformer.resblocks.12.attention.wv.weight": "model-00003-of-00004.safetensors",
292
+ "model.vision_backbone.image_vit.transformer.resblocks.12.attention_norm.bias": "model-00003-of-00004.safetensors",
293
+ "model.vision_backbone.image_vit.transformer.resblocks.12.attention_norm.weight": "model-00003-of-00004.safetensors",
294
+ "model.vision_backbone.image_vit.transformer.resblocks.12.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
295
+ "model.vision_backbone.image_vit.transformer.resblocks.12.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
296
+ "model.vision_backbone.image_vit.transformer.resblocks.12.feed_forward.w2.bias": "model-00003-of-00004.safetensors",
297
+ "model.vision_backbone.image_vit.transformer.resblocks.12.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
298
+ "model.vision_backbone.image_vit.transformer.resblocks.12.ffn_norm.bias": "model-00003-of-00004.safetensors",
299
+ "model.vision_backbone.image_vit.transformer.resblocks.12.ffn_norm.weight": "model-00003-of-00004.safetensors",
300
+ "model.vision_backbone.image_vit.transformer.resblocks.13.attention.wk.bias": "model-00003-of-00004.safetensors",
301
+ "model.vision_backbone.image_vit.transformer.resblocks.13.attention.wk.weight": "model-00003-of-00004.safetensors",
302
+ "model.vision_backbone.image_vit.transformer.resblocks.13.attention.wo.bias": "model-00003-of-00004.safetensors",
303
+ "model.vision_backbone.image_vit.transformer.resblocks.13.attention.wo.weight": "model-00003-of-00004.safetensors",
304
+ "model.vision_backbone.image_vit.transformer.resblocks.13.attention.wq.bias": "model-00003-of-00004.safetensors",
305
+ "model.vision_backbone.image_vit.transformer.resblocks.13.attention.wq.weight": "model-00003-of-00004.safetensors",
306
+ "model.vision_backbone.image_vit.transformer.resblocks.13.attention.wv.bias": "model-00003-of-00004.safetensors",
307
+ "model.vision_backbone.image_vit.transformer.resblocks.13.attention.wv.weight": "model-00003-of-00004.safetensors",
308
+ "model.vision_backbone.image_vit.transformer.resblocks.13.attention_norm.bias": "model-00003-of-00004.safetensors",
309
+ "model.vision_backbone.image_vit.transformer.resblocks.13.attention_norm.weight": "model-00003-of-00004.safetensors",
310
+ "model.vision_backbone.image_vit.transformer.resblocks.13.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
311
+ "model.vision_backbone.image_vit.transformer.resblocks.13.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
312
+ "model.vision_backbone.image_vit.transformer.resblocks.13.feed_forward.w2.bias": "model-00003-of-00004.safetensors",
313
+ "model.vision_backbone.image_vit.transformer.resblocks.13.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
314
+ "model.vision_backbone.image_vit.transformer.resblocks.13.ffn_norm.bias": "model-00003-of-00004.safetensors",
315
+ "model.vision_backbone.image_vit.transformer.resblocks.13.ffn_norm.weight": "model-00003-of-00004.safetensors",
316
+ "model.vision_backbone.image_vit.transformer.resblocks.14.attention.wk.bias": "model-00003-of-00004.safetensors",
317
+ "model.vision_backbone.image_vit.transformer.resblocks.14.attention.wk.weight": "model-00003-of-00004.safetensors",
318
+ "model.vision_backbone.image_vit.transformer.resblocks.14.attention.wo.bias": "model-00003-of-00004.safetensors",
319
+ "model.vision_backbone.image_vit.transformer.resblocks.14.attention.wo.weight": "model-00003-of-00004.safetensors",
320
+ "model.vision_backbone.image_vit.transformer.resblocks.14.attention.wq.bias": "model-00003-of-00004.safetensors",
321
+ "model.vision_backbone.image_vit.transformer.resblocks.14.attention.wq.weight": "model-00003-of-00004.safetensors",
322
+ "model.vision_backbone.image_vit.transformer.resblocks.14.attention.wv.bias": "model-00003-of-00004.safetensors",
323
+ "model.vision_backbone.image_vit.transformer.resblocks.14.attention.wv.weight": "model-00003-of-00004.safetensors",
324
+ "model.vision_backbone.image_vit.transformer.resblocks.14.attention_norm.bias": "model-00003-of-00004.safetensors",
325
+ "model.vision_backbone.image_vit.transformer.resblocks.14.attention_norm.weight": "model-00003-of-00004.safetensors",
326
+ "model.vision_backbone.image_vit.transformer.resblocks.14.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
327
+ "model.vision_backbone.image_vit.transformer.resblocks.14.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
328
+ "model.vision_backbone.image_vit.transformer.resblocks.14.feed_forward.w2.bias": "model-00003-of-00004.safetensors",
329
+ "model.vision_backbone.image_vit.transformer.resblocks.14.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
330
+ "model.vision_backbone.image_vit.transformer.resblocks.14.ffn_norm.bias": "model-00003-of-00004.safetensors",
331
+ "model.vision_backbone.image_vit.transformer.resblocks.14.ffn_norm.weight": "model-00003-of-00004.safetensors",
332
+ "model.vision_backbone.image_vit.transformer.resblocks.15.attention.wk.bias": "model-00003-of-00004.safetensors",
333
+ "model.vision_backbone.image_vit.transformer.resblocks.15.attention.wk.weight": "model-00003-of-00004.safetensors",
334
+ "model.vision_backbone.image_vit.transformer.resblocks.15.attention.wo.bias": "model-00003-of-00004.safetensors",
335
+ "model.vision_backbone.image_vit.transformer.resblocks.15.attention.wo.weight": "model-00003-of-00004.safetensors",
336
+ "model.vision_backbone.image_vit.transformer.resblocks.15.attention.wq.bias": "model-00003-of-00004.safetensors",
337
+ "model.vision_backbone.image_vit.transformer.resblocks.15.attention.wq.weight": "model-00003-of-00004.safetensors",
338
+ "model.vision_backbone.image_vit.transformer.resblocks.15.attention.wv.bias": "model-00003-of-00004.safetensors",
339
+ "model.vision_backbone.image_vit.transformer.resblocks.15.attention.wv.weight": "model-00003-of-00004.safetensors",
340
+ "model.vision_backbone.image_vit.transformer.resblocks.15.attention_norm.bias": "model-00003-of-00004.safetensors",
341
+ "model.vision_backbone.image_vit.transformer.resblocks.15.attention_norm.weight": "model-00003-of-00004.safetensors",
342
+ "model.vision_backbone.image_vit.transformer.resblocks.15.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
343
+ "model.vision_backbone.image_vit.transformer.resblocks.15.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
344
+ "model.vision_backbone.image_vit.transformer.resblocks.15.feed_forward.w2.bias": "model-00003-of-00004.safetensors",
345
+ "model.vision_backbone.image_vit.transformer.resblocks.15.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
346
+ "model.vision_backbone.image_vit.transformer.resblocks.15.ffn_norm.bias": "model-00003-of-00004.safetensors",
347
+ "model.vision_backbone.image_vit.transformer.resblocks.15.ffn_norm.weight": "model-00003-of-00004.safetensors",
348
+ "model.vision_backbone.image_vit.transformer.resblocks.16.attention.wk.bias": "model-00003-of-00004.safetensors",
349
+ "model.vision_backbone.image_vit.transformer.resblocks.16.attention.wk.weight": "model-00003-of-00004.safetensors",
350
+ "model.vision_backbone.image_vit.transformer.resblocks.16.attention.wo.bias": "model-00003-of-00004.safetensors",
351
+ "model.vision_backbone.image_vit.transformer.resblocks.16.attention.wo.weight": "model-00003-of-00004.safetensors",
352
+ "model.vision_backbone.image_vit.transformer.resblocks.16.attention.wq.bias": "model-00003-of-00004.safetensors",
353
+ "model.vision_backbone.image_vit.transformer.resblocks.16.attention.wq.weight": "model-00003-of-00004.safetensors",
354
+ "model.vision_backbone.image_vit.transformer.resblocks.16.attention.wv.bias": "model-00003-of-00004.safetensors",
355
+ "model.vision_backbone.image_vit.transformer.resblocks.16.attention.wv.weight": "model-00003-of-00004.safetensors",
356
+ "model.vision_backbone.image_vit.transformer.resblocks.16.attention_norm.bias": "model-00003-of-00004.safetensors",
357
+ "model.vision_backbone.image_vit.transformer.resblocks.16.attention_norm.weight": "model-00003-of-00004.safetensors",
358
+ "model.vision_backbone.image_vit.transformer.resblocks.16.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
359
+ "model.vision_backbone.image_vit.transformer.resblocks.16.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
360
+ "model.vision_backbone.image_vit.transformer.resblocks.16.feed_forward.w2.bias": "model-00003-of-00004.safetensors",
361
+ "model.vision_backbone.image_vit.transformer.resblocks.16.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
362
+ "model.vision_backbone.image_vit.transformer.resblocks.16.ffn_norm.bias": "model-00003-of-00004.safetensors",
363
+ "model.vision_backbone.image_vit.transformer.resblocks.16.ffn_norm.weight": "model-00003-of-00004.safetensors",
364
+ "model.vision_backbone.image_vit.transformer.resblocks.17.attention.wk.bias": "model-00003-of-00004.safetensors",
365
+ "model.vision_backbone.image_vit.transformer.resblocks.17.attention.wk.weight": "model-00003-of-00004.safetensors",
366
+ "model.vision_backbone.image_vit.transformer.resblocks.17.attention.wo.bias": "model-00003-of-00004.safetensors",
367
+ "model.vision_backbone.image_vit.transformer.resblocks.17.attention.wo.weight": "model-00003-of-00004.safetensors",
368
+ "model.vision_backbone.image_vit.transformer.resblocks.17.attention.wq.bias": "model-00003-of-00004.safetensors",
369
+ "model.vision_backbone.image_vit.transformer.resblocks.17.attention.wq.weight": "model-00003-of-00004.safetensors",
370
+ "model.vision_backbone.image_vit.transformer.resblocks.17.attention.wv.bias": "model-00003-of-00004.safetensors",
371
+ "model.vision_backbone.image_vit.transformer.resblocks.17.attention.wv.weight": "model-00003-of-00004.safetensors",
372
+ "model.vision_backbone.image_vit.transformer.resblocks.17.attention_norm.bias": "model-00003-of-00004.safetensors",
373
+ "model.vision_backbone.image_vit.transformer.resblocks.17.attention_norm.weight": "model-00003-of-00004.safetensors",
374
+ "model.vision_backbone.image_vit.transformer.resblocks.17.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
375
+ "model.vision_backbone.image_vit.transformer.resblocks.17.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
376
+ "model.vision_backbone.image_vit.transformer.resblocks.17.feed_forward.w2.bias": "model-00003-of-00004.safetensors",
377
+ "model.vision_backbone.image_vit.transformer.resblocks.17.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
378
+ "model.vision_backbone.image_vit.transformer.resblocks.17.ffn_norm.bias": "model-00003-of-00004.safetensors",
379
+ "model.vision_backbone.image_vit.transformer.resblocks.17.ffn_norm.weight": "model-00003-of-00004.safetensors",
380
+ "model.vision_backbone.image_vit.transformer.resblocks.18.attention.wk.bias": "model-00003-of-00004.safetensors",
381
+ "model.vision_backbone.image_vit.transformer.resblocks.18.attention.wk.weight": "model-00003-of-00004.safetensors",
382
+ "model.vision_backbone.image_vit.transformer.resblocks.18.attention.wo.bias": "model-00003-of-00004.safetensors",
383
+ "model.vision_backbone.image_vit.transformer.resblocks.18.attention.wo.weight": "model-00003-of-00004.safetensors",
384
+ "model.vision_backbone.image_vit.transformer.resblocks.18.attention.wq.bias": "model-00003-of-00004.safetensors",
385
+ "model.vision_backbone.image_vit.transformer.resblocks.18.attention.wq.weight": "model-00003-of-00004.safetensors",
386
+ "model.vision_backbone.image_vit.transformer.resblocks.18.attention.wv.bias": "model-00003-of-00004.safetensors",
387
+ "model.vision_backbone.image_vit.transformer.resblocks.18.attention.wv.weight": "model-00003-of-00004.safetensors",
388
+ "model.vision_backbone.image_vit.transformer.resblocks.18.attention_norm.bias": "model-00003-of-00004.safetensors",
389
+ "model.vision_backbone.image_vit.transformer.resblocks.18.attention_norm.weight": "model-00003-of-00004.safetensors",
390
+ "model.vision_backbone.image_vit.transformer.resblocks.18.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
391
+ "model.vision_backbone.image_vit.transformer.resblocks.18.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
392
+ "model.vision_backbone.image_vit.transformer.resblocks.18.feed_forward.w2.bias": "model-00003-of-00004.safetensors",
393
+ "model.vision_backbone.image_vit.transformer.resblocks.18.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
394
+ "model.vision_backbone.image_vit.transformer.resblocks.18.ffn_norm.bias": "model-00003-of-00004.safetensors",
395
+ "model.vision_backbone.image_vit.transformer.resblocks.18.ffn_norm.weight": "model-00003-of-00004.safetensors",
396
+ "model.vision_backbone.image_vit.transformer.resblocks.19.attention.wk.bias": "model-00003-of-00004.safetensors",
397
+ "model.vision_backbone.image_vit.transformer.resblocks.19.attention.wk.weight": "model-00003-of-00004.safetensors",
398
+ "model.vision_backbone.image_vit.transformer.resblocks.19.attention.wo.bias": "model-00003-of-00004.safetensors",
399
+ "model.vision_backbone.image_vit.transformer.resblocks.19.attention.wo.weight": "model-00003-of-00004.safetensors",
400
+ "model.vision_backbone.image_vit.transformer.resblocks.19.attention.wq.bias": "model-00003-of-00004.safetensors",
401
+ "model.vision_backbone.image_vit.transformer.resblocks.19.attention.wq.weight": "model-00003-of-00004.safetensors",
402
+ "model.vision_backbone.image_vit.transformer.resblocks.19.attention.wv.bias": "model-00003-of-00004.safetensors",
403
+ "model.vision_backbone.image_vit.transformer.resblocks.19.attention.wv.weight": "model-00003-of-00004.safetensors",
404
+ "model.vision_backbone.image_vit.transformer.resblocks.19.attention_norm.bias": "model-00003-of-00004.safetensors",
405
+ "model.vision_backbone.image_vit.transformer.resblocks.19.attention_norm.weight": "model-00003-of-00004.safetensors",
406
+ "model.vision_backbone.image_vit.transformer.resblocks.19.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
407
+ "model.vision_backbone.image_vit.transformer.resblocks.19.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
408
+ "model.vision_backbone.image_vit.transformer.resblocks.19.feed_forward.w2.bias": "model-00003-of-00004.safetensors",
409
+ "model.vision_backbone.image_vit.transformer.resblocks.19.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
410
+ "model.vision_backbone.image_vit.transformer.resblocks.19.ffn_norm.bias": "model-00003-of-00004.safetensors",
411
+ "model.vision_backbone.image_vit.transformer.resblocks.19.ffn_norm.weight": "model-00003-of-00004.safetensors",
412
+ "model.vision_backbone.image_vit.transformer.resblocks.2.attention.wk.bias": "model-00003-of-00004.safetensors",
413
+ "model.vision_backbone.image_vit.transformer.resblocks.2.attention.wk.weight": "model-00003-of-00004.safetensors",
414
+ "model.vision_backbone.image_vit.transformer.resblocks.2.attention.wo.bias": "model-00003-of-00004.safetensors",
415
+ "model.vision_backbone.image_vit.transformer.resblocks.2.attention.wo.weight": "model-00003-of-00004.safetensors",
416
+ "model.vision_backbone.image_vit.transformer.resblocks.2.attention.wq.bias": "model-00003-of-00004.safetensors",
417
+ "model.vision_backbone.image_vit.transformer.resblocks.2.attention.wq.weight": "model-00003-of-00004.safetensors",
418
+ "model.vision_backbone.image_vit.transformer.resblocks.2.attention.wv.bias": "model-00003-of-00004.safetensors",
419
+ "model.vision_backbone.image_vit.transformer.resblocks.2.attention.wv.weight": "model-00003-of-00004.safetensors",
420
+ "model.vision_backbone.image_vit.transformer.resblocks.2.attention_norm.bias": "model-00003-of-00004.safetensors",
421
+ "model.vision_backbone.image_vit.transformer.resblocks.2.attention_norm.weight": "model-00003-of-00004.safetensors",
422
+ "model.vision_backbone.image_vit.transformer.resblocks.2.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
423
+ "model.vision_backbone.image_vit.transformer.resblocks.2.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
424
+ "model.vision_backbone.image_vit.transformer.resblocks.2.feed_forward.w2.bias": "model-00003-of-00004.safetensors",
425
+ "model.vision_backbone.image_vit.transformer.resblocks.2.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
426
+ "model.vision_backbone.image_vit.transformer.resblocks.2.ffn_norm.bias": "model-00003-of-00004.safetensors",
427
+ "model.vision_backbone.image_vit.transformer.resblocks.2.ffn_norm.weight": "model-00003-of-00004.safetensors",
428
+ "model.vision_backbone.image_vit.transformer.resblocks.20.attention.wk.bias": "model-00003-of-00004.safetensors",
429
+ "model.vision_backbone.image_vit.transformer.resblocks.20.attention.wk.weight": "model-00003-of-00004.safetensors",
430
+ "model.vision_backbone.image_vit.transformer.resblocks.20.attention.wo.bias": "model-00003-of-00004.safetensors",
431
+ "model.vision_backbone.image_vit.transformer.resblocks.20.attention.wo.weight": "model-00003-of-00004.safetensors",
432
+ "model.vision_backbone.image_vit.transformer.resblocks.20.attention.wq.bias": "model-00003-of-00004.safetensors",
433
+ "model.vision_backbone.image_vit.transformer.resblocks.20.attention.wq.weight": "model-00003-of-00004.safetensors",
434
+ "model.vision_backbone.image_vit.transformer.resblocks.20.attention.wv.bias": "model-00003-of-00004.safetensors",
435
+ "model.vision_backbone.image_vit.transformer.resblocks.20.attention.wv.weight": "model-00003-of-00004.safetensors",
436
+ "model.vision_backbone.image_vit.transformer.resblocks.20.attention_norm.bias": "model-00003-of-00004.safetensors",
437
+ "model.vision_backbone.image_vit.transformer.resblocks.20.attention_norm.weight": "model-00003-of-00004.safetensors",
438
+ "model.vision_backbone.image_vit.transformer.resblocks.20.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
439
+ "model.vision_backbone.image_vit.transformer.resblocks.20.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
440
+ "model.vision_backbone.image_vit.transformer.resblocks.20.feed_forward.w2.bias": "model-00003-of-00004.safetensors",
441
+ "model.vision_backbone.image_vit.transformer.resblocks.20.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
442
+ "model.vision_backbone.image_vit.transformer.resblocks.20.ffn_norm.bias": "model-00003-of-00004.safetensors",
443
+ "model.vision_backbone.image_vit.transformer.resblocks.20.ffn_norm.weight": "model-00003-of-00004.safetensors",
444
+ "model.vision_backbone.image_vit.transformer.resblocks.21.attention.wk.bias": "model-00003-of-00004.safetensors",
445
+ "model.vision_backbone.image_vit.transformer.resblocks.21.attention.wk.weight": "model-00003-of-00004.safetensors",
446
+ "model.vision_backbone.image_vit.transformer.resblocks.21.attention.wo.bias": "model-00003-of-00004.safetensors",
447
+ "model.vision_backbone.image_vit.transformer.resblocks.21.attention.wo.weight": "model-00003-of-00004.safetensors",
448
+ "model.vision_backbone.image_vit.transformer.resblocks.21.attention.wq.bias": "model-00003-of-00004.safetensors",
449
+ "model.vision_backbone.image_vit.transformer.resblocks.21.attention.wq.weight": "model-00003-of-00004.safetensors",
450
+ "model.vision_backbone.image_vit.transformer.resblocks.21.attention.wv.bias": "model-00003-of-00004.safetensors",
451
+ "model.vision_backbone.image_vit.transformer.resblocks.21.attention.wv.weight": "model-00003-of-00004.safetensors",
452
+ "model.vision_backbone.image_vit.transformer.resblocks.21.attention_norm.bias": "model-00004-of-00004.safetensors",
453
+ "model.vision_backbone.image_vit.transformer.resblocks.21.attention_norm.weight": "model-00004-of-00004.safetensors",
454
+ "model.vision_backbone.image_vit.transformer.resblocks.21.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
455
+ "model.vision_backbone.image_vit.transformer.resblocks.21.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
456
+ "model.vision_backbone.image_vit.transformer.resblocks.21.feed_forward.w2.bias": "model-00004-of-00004.safetensors",
457
+ "model.vision_backbone.image_vit.transformer.resblocks.21.feed_forward.w2.weight": "model-00004-of-00004.safetensors",
458
+ "model.vision_backbone.image_vit.transformer.resblocks.21.ffn_norm.bias": "model-00004-of-00004.safetensors",
459
+ "model.vision_backbone.image_vit.transformer.resblocks.21.ffn_norm.weight": "model-00004-of-00004.safetensors",
460
+ "model.vision_backbone.image_vit.transformer.resblocks.22.attention.wk.bias": "model-00004-of-00004.safetensors",
461
+ "model.vision_backbone.image_vit.transformer.resblocks.22.attention.wk.weight": "model-00004-of-00004.safetensors",
462
+ "model.vision_backbone.image_vit.transformer.resblocks.22.attention.wo.bias": "model-00004-of-00004.safetensors",
463
+ "model.vision_backbone.image_vit.transformer.resblocks.22.attention.wo.weight": "model-00004-of-00004.safetensors",
464
+ "model.vision_backbone.image_vit.transformer.resblocks.22.attention.wq.bias": "model-00004-of-00004.safetensors",
465
+ "model.vision_backbone.image_vit.transformer.resblocks.22.attention.wq.weight": "model-00004-of-00004.safetensors",
466
+ "model.vision_backbone.image_vit.transformer.resblocks.22.attention.wv.bias": "model-00004-of-00004.safetensors",
467
+ "model.vision_backbone.image_vit.transformer.resblocks.22.attention.wv.weight": "model-00004-of-00004.safetensors",
468
+ "model.vision_backbone.image_vit.transformer.resblocks.22.attention_norm.bias": "model-00004-of-00004.safetensors",
469
+ "model.vision_backbone.image_vit.transformer.resblocks.22.attention_norm.weight": "model-00004-of-00004.safetensors",
470
+ "model.vision_backbone.image_vit.transformer.resblocks.22.feed_forward.w1.bias": "model-00004-of-00004.safetensors",
471
+ "model.vision_backbone.image_vit.transformer.resblocks.22.feed_forward.w1.weight": "model-00004-of-00004.safetensors",
472
+ "model.vision_backbone.image_vit.transformer.resblocks.22.feed_forward.w2.bias": "model-00004-of-00004.safetensors",
473
+ "model.vision_backbone.image_vit.transformer.resblocks.22.feed_forward.w2.weight": "model-00004-of-00004.safetensors",
474
+ "model.vision_backbone.image_vit.transformer.resblocks.22.ffn_norm.bias": "model-00004-of-00004.safetensors",
475
+ "model.vision_backbone.image_vit.transformer.resblocks.22.ffn_norm.weight": "model-00004-of-00004.safetensors",
476
+ "model.vision_backbone.image_vit.transformer.resblocks.23.attention.wk.bias": "model-00004-of-00004.safetensors",
477
+ "model.vision_backbone.image_vit.transformer.resblocks.23.attention.wk.weight": "model-00004-of-00004.safetensors",
478
+ "model.vision_backbone.image_vit.transformer.resblocks.23.attention.wo.bias": "model-00004-of-00004.safetensors",
479
+ "model.vision_backbone.image_vit.transformer.resblocks.23.attention.wo.weight": "model-00004-of-00004.safetensors",
480
+ "model.vision_backbone.image_vit.transformer.resblocks.23.attention.wq.bias": "model-00004-of-00004.safetensors",
481
+ "model.vision_backbone.image_vit.transformer.resblocks.23.attention.wq.weight": "model-00004-of-00004.safetensors",
482
+ "model.vision_backbone.image_vit.transformer.resblocks.23.attention.wv.bias": "model-00004-of-00004.safetensors",
483
+ "model.vision_backbone.image_vit.transformer.resblocks.23.attention.wv.weight": "model-00004-of-00004.safetensors",
484
+ "model.vision_backbone.image_vit.transformer.resblocks.23.attention_norm.bias": "model-00004-of-00004.safetensors",
485
+ "model.vision_backbone.image_vit.transformer.resblocks.23.attention_norm.weight": "model-00004-of-00004.safetensors",
486
+ "model.vision_backbone.image_vit.transformer.resblocks.23.feed_forward.w1.bias": "model-00004-of-00004.safetensors",
487
+ "model.vision_backbone.image_vit.transformer.resblocks.23.feed_forward.w1.weight": "model-00004-of-00004.safetensors",
488
+ "model.vision_backbone.image_vit.transformer.resblocks.23.feed_forward.w2.bias": "model-00004-of-00004.safetensors",
489
+ "model.vision_backbone.image_vit.transformer.resblocks.23.feed_forward.w2.weight": "model-00004-of-00004.safetensors",
490
+ "model.vision_backbone.image_vit.transformer.resblocks.23.ffn_norm.bias": "model-00004-of-00004.safetensors",
491
+ "model.vision_backbone.image_vit.transformer.resblocks.23.ffn_norm.weight": "model-00004-of-00004.safetensors",
492
+ "model.vision_backbone.image_vit.transformer.resblocks.24.attention.wk.bias": "model-00004-of-00004.safetensors",
493
+ "model.vision_backbone.image_vit.transformer.resblocks.24.attention.wk.weight": "model-00004-of-00004.safetensors",
494
+ "model.vision_backbone.image_vit.transformer.resblocks.24.attention.wo.bias": "model-00004-of-00004.safetensors",
495
+ "model.vision_backbone.image_vit.transformer.resblocks.24.attention.wo.weight": "model-00004-of-00004.safetensors",
496
+ "model.vision_backbone.image_vit.transformer.resblocks.24.attention.wq.bias": "model-00004-of-00004.safetensors",
497
+ "model.vision_backbone.image_vit.transformer.resblocks.24.attention.wq.weight": "model-00004-of-00004.safetensors",
498
+ "model.vision_backbone.image_vit.transformer.resblocks.24.attention.wv.bias": "model-00004-of-00004.safetensors",
499
+ "model.vision_backbone.image_vit.transformer.resblocks.24.attention.wv.weight": "model-00004-of-00004.safetensors",
500
+ "model.vision_backbone.image_vit.transformer.resblocks.24.attention_norm.bias": "model-00004-of-00004.safetensors",
501
+ "model.vision_backbone.image_vit.transformer.resblocks.24.attention_norm.weight": "model-00004-of-00004.safetensors",
502
+ "model.vision_backbone.image_vit.transformer.resblocks.24.feed_forward.w1.bias": "model-00004-of-00004.safetensors",
503
+ "model.vision_backbone.image_vit.transformer.resblocks.24.feed_forward.w1.weight": "model-00004-of-00004.safetensors",
504
+ "model.vision_backbone.image_vit.transformer.resblocks.24.feed_forward.w2.bias": "model-00004-of-00004.safetensors",
505
+ "model.vision_backbone.image_vit.transformer.resblocks.24.feed_forward.w2.weight": "model-00004-of-00004.safetensors",
506
+ "model.vision_backbone.image_vit.transformer.resblocks.24.ffn_norm.bias": "model-00004-of-00004.safetensors",
507
+ "model.vision_backbone.image_vit.transformer.resblocks.24.ffn_norm.weight": "model-00004-of-00004.safetensors",
508
+ "model.vision_backbone.image_vit.transformer.resblocks.3.attention.wk.bias": "model-00003-of-00004.safetensors",
509
+ "model.vision_backbone.image_vit.transformer.resblocks.3.attention.wk.weight": "model-00003-of-00004.safetensors",
510
+ "model.vision_backbone.image_vit.transformer.resblocks.3.attention.wo.bias": "model-00003-of-00004.safetensors",
511
+ "model.vision_backbone.image_vit.transformer.resblocks.3.attention.wo.weight": "model-00003-of-00004.safetensors",
512
+ "model.vision_backbone.image_vit.transformer.resblocks.3.attention.wq.bias": "model-00003-of-00004.safetensors",
513
+ "model.vision_backbone.image_vit.transformer.resblocks.3.attention.wq.weight": "model-00003-of-00004.safetensors",
514
+ "model.vision_backbone.image_vit.transformer.resblocks.3.attention.wv.bias": "model-00003-of-00004.safetensors",
515
+ "model.vision_backbone.image_vit.transformer.resblocks.3.attention.wv.weight": "model-00003-of-00004.safetensors",
516
+ "model.vision_backbone.image_vit.transformer.resblocks.3.attention_norm.bias": "model-00003-of-00004.safetensors",
517
+ "model.vision_backbone.image_vit.transformer.resblocks.3.attention_norm.weight": "model-00003-of-00004.safetensors",
518
+ "model.vision_backbone.image_vit.transformer.resblocks.3.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
519
+ "model.vision_backbone.image_vit.transformer.resblocks.3.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
520
+ "model.vision_backbone.image_vit.transformer.resblocks.3.feed_forward.w2.bias": "model-00003-of-00004.safetensors",
521
+ "model.vision_backbone.image_vit.transformer.resblocks.3.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
522
+ "model.vision_backbone.image_vit.transformer.resblocks.3.ffn_norm.bias": "model-00003-of-00004.safetensors",
523
+ "model.vision_backbone.image_vit.transformer.resblocks.3.ffn_norm.weight": "model-00003-of-00004.safetensors",
524
+ "model.vision_backbone.image_vit.transformer.resblocks.4.attention.wk.bias": "model-00003-of-00004.safetensors",
525
+ "model.vision_backbone.image_vit.transformer.resblocks.4.attention.wk.weight": "model-00003-of-00004.safetensors",
526
+ "model.vision_backbone.image_vit.transformer.resblocks.4.attention.wo.bias": "model-00003-of-00004.safetensors",
527
+ "model.vision_backbone.image_vit.transformer.resblocks.4.attention.wo.weight": "model-00003-of-00004.safetensors",
528
+ "model.vision_backbone.image_vit.transformer.resblocks.4.attention.wq.bias": "model-00003-of-00004.safetensors",
529
+ "model.vision_backbone.image_vit.transformer.resblocks.4.attention.wq.weight": "model-00003-of-00004.safetensors",
530
+ "model.vision_backbone.image_vit.transformer.resblocks.4.attention.wv.bias": "model-00003-of-00004.safetensors",
531
+ "model.vision_backbone.image_vit.transformer.resblocks.4.attention.wv.weight": "model-00003-of-00004.safetensors",
532
+ "model.vision_backbone.image_vit.transformer.resblocks.4.attention_norm.bias": "model-00003-of-00004.safetensors",
533
+ "model.vision_backbone.image_vit.transformer.resblocks.4.attention_norm.weight": "model-00003-of-00004.safetensors",
534
+ "model.vision_backbone.image_vit.transformer.resblocks.4.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
535
+ "model.vision_backbone.image_vit.transformer.resblocks.4.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
536
+ "model.vision_backbone.image_vit.transformer.resblocks.4.feed_forward.w2.bias": "model-00003-of-00004.safetensors",
537
+ "model.vision_backbone.image_vit.transformer.resblocks.4.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
538
+ "model.vision_backbone.image_vit.transformer.resblocks.4.ffn_norm.bias": "model-00003-of-00004.safetensors",
539
+ "model.vision_backbone.image_vit.transformer.resblocks.4.ffn_norm.weight": "model-00003-of-00004.safetensors",
540
+ "model.vision_backbone.image_vit.transformer.resblocks.5.attention.wk.bias": "model-00003-of-00004.safetensors",
541
+ "model.vision_backbone.image_vit.transformer.resblocks.5.attention.wk.weight": "model-00003-of-00004.safetensors",
542
+ "model.vision_backbone.image_vit.transformer.resblocks.5.attention.wo.bias": "model-00003-of-00004.safetensors",
543
+ "model.vision_backbone.image_vit.transformer.resblocks.5.attention.wo.weight": "model-00003-of-00004.safetensors",
544
+ "model.vision_backbone.image_vit.transformer.resblocks.5.attention.wq.bias": "model-00003-of-00004.safetensors",
545
+ "model.vision_backbone.image_vit.transformer.resblocks.5.attention.wq.weight": "model-00003-of-00004.safetensors",
546
+ "model.vision_backbone.image_vit.transformer.resblocks.5.attention.wv.bias": "model-00003-of-00004.safetensors",
547
+ "model.vision_backbone.image_vit.transformer.resblocks.5.attention.wv.weight": "model-00003-of-00004.safetensors",
548
+ "model.vision_backbone.image_vit.transformer.resblocks.5.attention_norm.bias": "model-00003-of-00004.safetensors",
549
+ "model.vision_backbone.image_vit.transformer.resblocks.5.attention_norm.weight": "model-00003-of-00004.safetensors",
550
+ "model.vision_backbone.image_vit.transformer.resblocks.5.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
551
+ "model.vision_backbone.image_vit.transformer.resblocks.5.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
552
+ "model.vision_backbone.image_vit.transformer.resblocks.5.feed_forward.w2.bias": "model-00003-of-00004.safetensors",
553
+ "model.vision_backbone.image_vit.transformer.resblocks.5.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
554
+ "model.vision_backbone.image_vit.transformer.resblocks.5.ffn_norm.bias": "model-00003-of-00004.safetensors",
555
+ "model.vision_backbone.image_vit.transformer.resblocks.5.ffn_norm.weight": "model-00003-of-00004.safetensors",
556
+ "model.vision_backbone.image_vit.transformer.resblocks.6.attention.wk.bias": "model-00003-of-00004.safetensors",
557
+ "model.vision_backbone.image_vit.transformer.resblocks.6.attention.wk.weight": "model-00003-of-00004.safetensors",
558
+ "model.vision_backbone.image_vit.transformer.resblocks.6.attention.wo.bias": "model-00003-of-00004.safetensors",
559
+ "model.vision_backbone.image_vit.transformer.resblocks.6.attention.wo.weight": "model-00003-of-00004.safetensors",
560
+ "model.vision_backbone.image_vit.transformer.resblocks.6.attention.wq.bias": "model-00003-of-00004.safetensors",
561
+ "model.vision_backbone.image_vit.transformer.resblocks.6.attention.wq.weight": "model-00003-of-00004.safetensors",
562
+ "model.vision_backbone.image_vit.transformer.resblocks.6.attention.wv.bias": "model-00003-of-00004.safetensors",
563
+ "model.vision_backbone.image_vit.transformer.resblocks.6.attention.wv.weight": "model-00003-of-00004.safetensors",
564
+ "model.vision_backbone.image_vit.transformer.resblocks.6.attention_norm.bias": "model-00003-of-00004.safetensors",
565
+ "model.vision_backbone.image_vit.transformer.resblocks.6.attention_norm.weight": "model-00003-of-00004.safetensors",
566
+ "model.vision_backbone.image_vit.transformer.resblocks.6.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
567
+ "model.vision_backbone.image_vit.transformer.resblocks.6.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
568
+ "model.vision_backbone.image_vit.transformer.resblocks.6.feed_forward.w2.bias": "model-00003-of-00004.safetensors",
569
+ "model.vision_backbone.image_vit.transformer.resblocks.6.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
570
+ "model.vision_backbone.image_vit.transformer.resblocks.6.ffn_norm.bias": "model-00003-of-00004.safetensors",
571
+ "model.vision_backbone.image_vit.transformer.resblocks.6.ffn_norm.weight": "model-00003-of-00004.safetensors",
572
+ "model.vision_backbone.image_vit.transformer.resblocks.7.attention.wk.bias": "model-00003-of-00004.safetensors",
573
+ "model.vision_backbone.image_vit.transformer.resblocks.7.attention.wk.weight": "model-00003-of-00004.safetensors",
574
+ "model.vision_backbone.image_vit.transformer.resblocks.7.attention.wo.bias": "model-00003-of-00004.safetensors",
575
+ "model.vision_backbone.image_vit.transformer.resblocks.7.attention.wo.weight": "model-00003-of-00004.safetensors",
576
+ "model.vision_backbone.image_vit.transformer.resblocks.7.attention.wq.bias": "model-00003-of-00004.safetensors",
577
+ "model.vision_backbone.image_vit.transformer.resblocks.7.attention.wq.weight": "model-00003-of-00004.safetensors",
578
+ "model.vision_backbone.image_vit.transformer.resblocks.7.attention.wv.bias": "model-00003-of-00004.safetensors",
579
+ "model.vision_backbone.image_vit.transformer.resblocks.7.attention.wv.weight": "model-00003-of-00004.safetensors",
580
+ "model.vision_backbone.image_vit.transformer.resblocks.7.attention_norm.bias": "model-00003-of-00004.safetensors",
581
+ "model.vision_backbone.image_vit.transformer.resblocks.7.attention_norm.weight": "model-00003-of-00004.safetensors",
582
+ "model.vision_backbone.image_vit.transformer.resblocks.7.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
583
+ "model.vision_backbone.image_vit.transformer.resblocks.7.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
584
+ "model.vision_backbone.image_vit.transformer.resblocks.7.feed_forward.w2.bias": "model-00003-of-00004.safetensors",
585
+ "model.vision_backbone.image_vit.transformer.resblocks.7.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
586
+ "model.vision_backbone.image_vit.transformer.resblocks.7.ffn_norm.bias": "model-00003-of-00004.safetensors",
587
+ "model.vision_backbone.image_vit.transformer.resblocks.7.ffn_norm.weight": "model-00003-of-00004.safetensors",
588
+ "model.vision_backbone.image_vit.transformer.resblocks.8.attention.wk.bias": "model-00003-of-00004.safetensors",
589
+ "model.vision_backbone.image_vit.transformer.resblocks.8.attention.wk.weight": "model-00003-of-00004.safetensors",
590
+ "model.vision_backbone.image_vit.transformer.resblocks.8.attention.wo.bias": "model-00003-of-00004.safetensors",
591
+ "model.vision_backbone.image_vit.transformer.resblocks.8.attention.wo.weight": "model-00003-of-00004.safetensors",
592
+ "model.vision_backbone.image_vit.transformer.resblocks.8.attention.wq.bias": "model-00003-of-00004.safetensors",
593
+ "model.vision_backbone.image_vit.transformer.resblocks.8.attention.wq.weight": "model-00003-of-00004.safetensors",
594
+ "model.vision_backbone.image_vit.transformer.resblocks.8.attention.wv.bias": "model-00003-of-00004.safetensors",
595
+ "model.vision_backbone.image_vit.transformer.resblocks.8.attention.wv.weight": "model-00003-of-00004.safetensors",
596
+ "model.vision_backbone.image_vit.transformer.resblocks.8.attention_norm.bias": "model-00003-of-00004.safetensors",
597
+ "model.vision_backbone.image_vit.transformer.resblocks.8.attention_norm.weight": "model-00003-of-00004.safetensors",
598
+ "model.vision_backbone.image_vit.transformer.resblocks.8.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
599
+ "model.vision_backbone.image_vit.transformer.resblocks.8.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
600
+ "model.vision_backbone.image_vit.transformer.resblocks.8.feed_forward.w2.bias": "model-00003-of-00004.safetensors",
601
+ "model.vision_backbone.image_vit.transformer.resblocks.8.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
602
+ "model.vision_backbone.image_vit.transformer.resblocks.8.ffn_norm.bias": "model-00003-of-00004.safetensors",
603
+ "model.vision_backbone.image_vit.transformer.resblocks.8.ffn_norm.weight": "model-00003-of-00004.safetensors",
604
+ "model.vision_backbone.image_vit.transformer.resblocks.9.attention.wk.bias": "model-00003-of-00004.safetensors",
605
+ "model.vision_backbone.image_vit.transformer.resblocks.9.attention.wk.weight": "model-00003-of-00004.safetensors",
606
+ "model.vision_backbone.image_vit.transformer.resblocks.9.attention.wo.bias": "model-00003-of-00004.safetensors",
607
+ "model.vision_backbone.image_vit.transformer.resblocks.9.attention.wo.weight": "model-00003-of-00004.safetensors",
608
+ "model.vision_backbone.image_vit.transformer.resblocks.9.attention.wq.bias": "model-00003-of-00004.safetensors",
609
+ "model.vision_backbone.image_vit.transformer.resblocks.9.attention.wq.weight": "model-00003-of-00004.safetensors",
610
+ "model.vision_backbone.image_vit.transformer.resblocks.9.attention.wv.bias": "model-00003-of-00004.safetensors",
611
+ "model.vision_backbone.image_vit.transformer.resblocks.9.attention.wv.weight": "model-00003-of-00004.safetensors",
612
+ "model.vision_backbone.image_vit.transformer.resblocks.9.attention_norm.bias": "model-00003-of-00004.safetensors",
613
+ "model.vision_backbone.image_vit.transformer.resblocks.9.attention_norm.weight": "model-00003-of-00004.safetensors",
614
+ "model.vision_backbone.image_vit.transformer.resblocks.9.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
615
+ "model.vision_backbone.image_vit.transformer.resblocks.9.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
616
+ "model.vision_backbone.image_vit.transformer.resblocks.9.feed_forward.w2.bias": "model-00003-of-00004.safetensors",
617
+ "model.vision_backbone.image_vit.transformer.resblocks.9.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
618
+ "model.vision_backbone.image_vit.transformer.resblocks.9.ffn_norm.bias": "model-00003-of-00004.safetensors",
619
+ "model.vision_backbone.image_vit.transformer.resblocks.9.ffn_norm.weight": "model-00003-of-00004.safetensors"
620
+ }
621
+ }
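The `weight_map` above ties each parameter name to the shard file that stores it; `from_pretrained` resolves this automatically, but a single tensor can also be read by hand. A minimal sketch, assuming the `safetensors` library, the standard `model.safetensors.index.json` filename, and shards in the current directory:

import json
from safetensors import safe_open

# Look up which shard holds a given parameter, then read only that tensor.
with open("model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.vision_backbone.image_vit.transformer.resblocks.18.attention.wq.weight"
shard = index["weight_map"][name]
with safe_open(shard, framework="pt") as f:
    tensor = f.get_tensor(name)
print(tensor.shape)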
modeling_molmo2.py ADDED
@@ -0,0 +1,1858 @@
1
+ import math
2
+ from copy import deepcopy
3
+ from dataclasses import dataclass
4
+ from typing import List, Optional, Tuple, Union, Dict, Any, Sequence, Callable
5
+
6
+ import torch
7
+ from torch import nn
8
+ from torch.nn import functional as F
9
+
10
+ from transformers.models.auto import AutoModelForCausalLM, AutoModelForImageTextToText
11
+ from transformers.activations import ACT2FN
12
+ from transformers.cache_utils import Cache, DynamicCache
13
+ from transformers.generation import GenerationMixin
14
+ from transformers.generation.configuration_utils import GenerationConfig
15
+ from transformers.generation.utils import GenerateOutput
16
+ from transformers.integrations import use_kernel_forward_from_hub
17
+ from transformers.modeling_attn_mask_utils import AttentionMaskConverter
18
+ from transformers.modeling_flash_attention_utils import _flash_attention_forward, FlashAttentionKwargs
19
+ from transformers import GradientCheckpointingLayer
20
+ from transformers.modeling_outputs import (
21
+ BaseModelOutput,
22
+ BaseModelOutputWithPast,
23
+ BaseModelOutputWithPooling,
24
+ CausalLMOutputWithPast,
25
+ )
26
+ from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
27
+ from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
28
+ from transformers.processing_utils import Unpack
29
+ from transformers.utils import (
30
+ ModelOutput,
31
+ can_return_tuple,
32
+ is_torch_flex_attn_available,
33
+ logging,
34
+ add_start_docstrings,
35
+ add_start_docstrings_to_model_forward,
36
+ )
37
+
38
+ from .configuration_molmo2 import Molmo2Config, Molmo2VitConfig, Molmo2AdapterConfig, Molmo2LlmConfig
39
+
40
+
41
+ if is_torch_flex_attn_available():
42
+ from torch.nn.attention.flex_attention import BlockMask
43
+
44
+ from transformers.integrations.flex_attention import make_flex_block_causal_mask
45
+
46
+
47
+ logger = logging.get_logger(__name__)
48
+
49
+
50
+ MOLMO_START_DOCSTRING = r"""
51
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
52
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
53
+ etc.)
54
+
55
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
56
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
57
+ and behavior.
58
+
59
+ Parameters:
60
+ config ([`Molmo2Config`]):
61
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
62
+ load the weights associated with the model, only the configuration. Check out the
63
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
64
+ """
65
+
66
+
67
+ @dataclass
68
+ class Molmo2CausalLMOutputWithPast(ModelOutput):
69
+ """
70
+ Base class for Molmo2 causal language model (or autoregressive) outputs.
71
+
72
+ Args:
73
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
74
+ Language modeling loss (for next-token prediction).
75
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
76
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
77
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
78
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
79
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`
80
+
81
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
82
+ `past_key_values` input) to speed up sequential decoding.
83
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
84
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
85
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
86
+
87
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
88
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
89
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
90
+ sequence_length)`.
91
+
92
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
93
+ heads.
94
+ image_hidden_states (`torch.FloatTensor`, *optional*):
95
+ A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
96
+ image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
97
+ """
98
+
99
+ loss: Optional[torch.FloatTensor] = None
100
+ logits: Optional[torch.FloatTensor] = None
101
+ past_key_values: Optional[List[torch.FloatTensor]] = None
102
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
103
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
104
+ image_hidden_states: Optional[torch.FloatTensor] = None
105
+
106
+
107
+ @dataclass
108
+ class Molmo2ModelOutputWithPast(BaseModelOutputWithPast):
109
+ """
110
+ Base class for Molmo2 outputs, with hidden states and attentions.
111
+
112
+ Args:
113
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
114
+ Sequence of hidden-states at the output of the last layer of the model.
115
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
116
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
117
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`
118
+
119
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
120
+ `past_key_values` input) to speed up sequential decoding.
121
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
122
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
123
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
124
+
125
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
126
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
127
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
128
+ sequence_length)`.
129
+
130
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
131
+ heads.
132
+ image_hidden_states (`torch.FloatTensor`, *optional*):
133
+ A `torch.FloatTensor` of size `(batch_num_patches, hidden_size)`.
134
+ image_hidden_states of the model produced by the vision backbone.
135
+ """
136
+
137
+ image_hidden_states: Optional[torch.FloatTensor] = None
138
+ logits: Optional[torch.FloatTensor] = None
139
+
140
+
141
+ class Molmo2PreTrainedModel(PreTrainedModel):
142
+ config_class = Molmo2LlmConfig
143
+ base_model_prefix = "model"
144
+ supports_gradient_checkpointing = True
145
+ _no_split_modules = ["Molmo2DecoderLayer", "Molmo2PostNormDecoderLayer"]
146
+ _skip_keys_device_placement = ["past_key_values"]
147
+ _supports_flash_attn_2 = True
148
+ _supports_sdpa = True
149
+ _supports_flex_attn = False
150
+ _supports_cache_class = True
151
+ _supports_quantized_cache = True
152
+ _supports_static_cache = True
153
+ _supports_attention_backend = True
154
+
155
+ def _init_weights(self, module):
156
+ std = self.config.initializer_range
157
+ if isinstance(module, (nn.Linear,)):
158
+ module.weight.data.normal_(mean=0.0, std=std)
159
+ if module.bias is not None:
160
+ module.bias.data.zero_()
161
+ elif isinstance(module, Molmo2Embedding):
162
+ module.embedding.data.normal_(mean=0.0, std=std)
163
+ module.new_embedding.data.normal_(mean=0.0, std=std)
164
+ elif isinstance(module, nn.Embedding):
165
+ module.weight.data.normal_(mean=0.0, std=std)
166
+ if module.padding_idx is not None:
167
+ module.weight.data[module.padding_idx].zero_()
168
+ elif isinstance(module, Molmo2RMSNorm):
169
+ module.weight.data.fill_(1.0)
170
+ elif isinstance(module, nn.LayerNorm):
171
+ module.weight.data.fill_(1.0)
172
+ if module.bias is not None:
173
+ module.bias.data.zero_()
174
+
175
+
176
+ class ViTMLP(nn.Module):
177
+ def __init__(self, dim: int, hidden_dim: int, hidden_act: str, device: Union[str, torch.device] = None):
178
+ super().__init__()
179
+ self.w1 = nn.Linear(dim, hidden_dim, bias=True, device=device)
180
+ self.act = ACT2FN[hidden_act]
181
+ self.w2 = nn.Linear(hidden_dim, dim, bias=True, device=device)
182
+
183
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
184
+ return self.w2(self.act(self.w1(x)))
185
+
186
+
187
+ class ViTMultiHeadDotProductAttention(nn.Module):
188
+ def __init__(
189
+ self,
190
+ hidden_size: int,
191
+ num_heads: int,
192
+ num_key_value_heads: int,
193
+ head_dim: int,
194
+ use_bias: bool = True,
195
+ input_dim: Optional[int] = None,
196
+ float32_attention: bool = True,
197
+ attention_dropout: float = 0.0,
198
+ residual_dropout: float = 0.0,
199
+ device: Union[str, torch.device] = None,
200
+ attn_implementation: str = "eager",
201
+ ):
202
+ super().__init__()
203
+
204
+ self.hidden_size = hidden_size
205
+ self.num_heads = num_heads
206
+ self.head_dim = head_dim
207
+ self.num_key_value_heads = num_key_value_heads
208
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
209
+ self.attn_implementation = attn_implementation
210
+ self.is_causal = False
211
+
212
+ input_dim = input_dim or hidden_size
213
+
214
+ self.wq = nn.Linear(
215
+ input_dim,
216
+ self.num_heads * self.head_dim,
217
+ bias=use_bias,
218
+ device=device,
219
+ )
220
+ self.wk = nn.Linear(
221
+ input_dim,
222
+ self.num_key_value_heads * self.head_dim,
223
+ bias=use_bias,
224
+ device=device,
225
+ )
226
+ self.wv = nn.Linear(
227
+ input_dim,
228
+ self.num_key_value_heads * self.head_dim,
229
+ bias=use_bias,
230
+ device=device,
231
+ )
232
+ self.wo = nn.Linear(
233
+ self.num_heads * self.head_dim,
234
+ self.hidden_size,
235
+ )
236
+ self.float32_attention = float32_attention
237
+ self.attention_dropout = attention_dropout
238
+ self.residual_dropout = nn.Dropout(residual_dropout)
239
+
240
+ def _split_heads(self, hidden_states, num_heads) -> torch.Tensor:
241
+ return hidden_states.reshape(hidden_states.shape[:2] + (num_heads, self.head_dim))
242
+
243
+ def _merge_heads(self, hidden_states) -> torch.Tensor:
244
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.hidden_size,))
245
+
246
+ def forward(
247
+ self,
248
+ inputs_q: torch.Tensor,
249
+ inputs_kv: Optional[torch.Tensor] = None,
250
+ attn_mask: Optional[torch.Tensor] = None,
251
+ ) -> torch.Tensor:
252
+
253
+ if inputs_kv is not None:
254
+ inputs_k = inputs_kv
255
+ inputs_v = inputs_kv
256
+ else:
257
+ inputs_k = inputs_q
258
+ inputs_v = inputs_q
259
+
260
+ xq, xk, xv = self.wq(inputs_q), self.wk(inputs_k), self.wv(inputs_v)
261
+
262
+ xq = self._split_heads(xq, self.num_heads)
263
+ xk = self._split_heads(xk, self.num_key_value_heads)
264
+ xv = self._split_heads(xv, self.num_key_value_heads)
265
+
266
+ if self.num_heads != self.num_key_value_heads:
267
+ xk = xk.repeat_interleave(self.num_key_value_groups, dim=2, output_size=self.num_heads)
268
+ xv = xv.repeat_interleave(self.num_key_value_groups, dim=2, output_size=self.num_heads)
269
+
270
+ og_dtype = xq.dtype
271
+
272
+ if self.float32_attention:
273
+ xq = xq.to(torch.float)
274
+ xk = xk.to(torch.float)
275
+
276
+ dropout_p = 0.0 if not self.training else self.attention_dropout
277
+
278
+ if self.attn_implementation == "eager":
279
+ attn_weights = torch.einsum("...qhd,...khd->...hqk", xq / math.sqrt(xq.size(-1)), xk)
280
+ attn_weights = F.softmax(attn_weights, dim=-1)
281
+ attn_weights = F.dropout(
282
+ attn_weights,
283
+ p=dropout_p,
284
+ training=self.training
285
+ )
286
+ attn_output = torch.einsum("...hqk,...khd->...qhd", attn_weights.to(xv.dtype), xv)
287
+
288
+ elif self.attn_implementation == "sdpa":
289
+ if not torch.is_autocast_enabled():
290
+ xv = xv.to(torch.float)
291
+
292
+ attn_output = F.scaled_dot_product_attention(
293
+ xq.transpose(1, 2).contiguous(),
294
+ xk.transpose(1, 2).contiguous(),
295
+ xv.transpose(1, 2).contiguous(),
296
+ attn_mask=attn_mask,
297
+ is_causal=False,
298
+ dropout_p=dropout_p,
299
+ ).transpose(1, 2)
300
+
301
+ elif self.attn_implementation == "flash_attention_2":
302
+ assert not self.float32_attention
303
+ # Downcast in case we are running with fp32 hidden states
304
+ attn_output = _flash_attention_forward(
305
+ xq.transpose(1, 2).to(torch.bfloat16),
306
+ xk.transpose(1, 2).to(torch.bfloat16),
307
+ xv.transpose(1, 2).to(torch.bfloat16),
308
+ attention_mask=None,
309
+ query_length=inputs_q.shape[1],
310
+ is_causal=False,
311
+ dropout=dropout_p,
312
+ )
313
+ else:
314
+ raise ValueError(f"Attention implementation {self.attn_implementation} not supported")
315
+
316
+ attn_output = attn_output.to(og_dtype)
317
+ attn_output = self._merge_heads(attn_output)
318
+ attn_output = self.wo(attn_output)
319
+ attn_output = self.residual_dropout(attn_output)
320
+
321
+ return attn_output
322
+
323
+
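The attention module above uses grouped-query attention: keys and values are produced with fewer heads and repeat-interleaved up to the query head count before the attention call. A toy sketch of that pattern with made-up shapes (not the module's real configuration):

import torch
import torch.nn.functional as F

q = torch.randn(1, 16, 8, 64)   # (batch, seq, num_heads, head_dim)
k = torch.randn(1, 16, 2, 64)   # (batch, seq, num_key_value_heads, head_dim)
v = torch.randn(1, 16, 2, 64)
groups = q.shape[2] // k.shape[2]
k = k.repeat_interleave(groups, dim=2)   # expand kv heads to match query heads
v = v.repeat_interleave(groups, dim=2)
out = F.scaled_dot_product_attention(
    q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2), is_causal=False
).transpose(1, 2)                        # back to (batch, seq, num_heads, head_dim)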
324
+ class Molmo2VisionBlock(nn.Module):
325
+
326
+ def __init__(self, config: Molmo2VitConfig, device: Union[str, torch.device] = None):
327
+ super().__init__()
328
+ self.attention = ViTMultiHeadDotProductAttention(
329
+ hidden_size=config.hidden_size,
330
+ num_heads=config.num_attention_heads,
331
+ num_key_value_heads=config.num_key_value_heads,
332
+ head_dim=config.head_dim,
333
+ float32_attention=config.float32_attention,
334
+ attention_dropout=config.attention_dropout,
335
+ residual_dropout=config.residual_dropout,
336
+ device=device,
337
+ attn_implementation=config._attn_implementation,
338
+ )
339
+ self.feed_forward = ViTMLP(config.hidden_size, config.intermediate_size, config.hidden_act, device=device)
340
+ self.attention_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps, device=device)
341
+ self.ffn_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps, device=device)
342
+
343
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
344
+ x = x + self.attention(self.attention_norm(x))
345
+ x = x + self.feed_forward(self.ffn_norm(x))
346
+ return x
347
+
348
+
349
+ class Molmo2VisionBlockCollection(nn.Module):
350
+
351
+ def __init__(self, config: Molmo2VitConfig, device: Union[str, torch.device] = None):
352
+ super().__init__()
353
+ self.config = config
354
+ self.resblocks = nn.ModuleList([
355
+ Molmo2VisionBlock(config, device) for _ in range(config.num_hidden_layers)
356
+ ])
357
+
358
+ def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
359
+ hidden_states = []
360
+ for r in self.resblocks:
361
+ x = r(x)
362
+ hidden_states.append(x)
363
+ return hidden_states
364
+
365
+
366
+ class Molmo2VisionTransformer(nn.Module):
367
+
368
+ def __init__(self, config: Molmo2VitConfig, device: Union[str, torch.device] = None):
369
+ super().__init__()
370
+ self.config = config
371
+
372
+ # positional embeddings
373
+ self.scale = config.hidden_size ** -0.5
374
+ self.num_prefix_tokens: int = 0 # no class embeddings
375
+ self.positional_embedding = nn.Parameter(
376
+ torch.zeros(config.image_num_pos, config.hidden_size, device=device),
377
+ )
378
+
379
+ image_patch_size = config.image_patch_size
380
+ self.patch_embedding = nn.Linear(
381
+ image_patch_size * image_patch_size * 3,
382
+ config.hidden_size,
383
+ bias=True,
384
+ device=device,
385
+ )
386
+
387
+ self.transformer = Molmo2VisionBlockCollection(config, device)
388
+
389
+ def add_pos_emb(self, x: torch.Tensor, patch_num: int) -> torch.Tensor:
390
+ pos_emb = self.positional_embedding
391
+
392
+ pos_emb = pos_emb.reshape(
393
+ (int(math.sqrt(pos_emb.shape[0])), int(math.sqrt(pos_emb.shape[0])), pos_emb.shape[1])
394
+ )
395
+
396
+ (patch_num_0, patch_num_1) = patch_num
397
+
398
+ if pos_emb.shape[0] != patch_num_0 or pos_emb.shape[1] != patch_num_1:
399
+ # Derived from https://github.com/facebookresearch/mae/blob/main/util/pos_embed.py
400
+ # antialias: default True in jax.image.resize
401
+ pos_emb = pos_emb.unsqueeze(0).permute(0, 3, 1, 2)
402
+ pos_emb = F.interpolate(
403
+ pos_emb, size=(patch_num_0, patch_num_1), mode="bicubic", align_corners=False, antialias=True,
404
+ )
405
+ pos_emb = pos_emb.permute(0, 2, 3, 1).squeeze(0)
406
+
407
+ pos_emb = pos_emb.reshape(-1, pos_emb.shape[-1])
408
+ x = x + pos_emb[None, :, :].to(x.dtype)
409
+ return x
410
+
411
+ def forward(self, x: torch.Tensor, patch_num: Optional[Tuple[int, int]] = None) -> List[torch.Tensor]:
412
+ """
413
+ :param x: (batch_size, num_patch, n_pixels)
414
+ """
415
+ if patch_num is None:
416
+ patch_num = self.config.image_num_patch
417
+
418
+ B, N, D = x.shape
419
+
420
+ x = self.patch_embedding(x)
421
+
422
+ # positional embeddings (no class embeddings are used)
423
+ x = self.add_pos_emb(x, patch_num)
424
+
425
+ hidden_states = self.transformer(x)
426
+ return hidden_states
427
+
428
+
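`add_pos_emb` resizes the square grid of learned position embeddings with bicubic interpolation whenever the incoming patch grid does not match it. A standalone sketch of that resize, with illustrative sizes:

import math
import torch
import torch.nn.functional as F

pos_emb = torch.randn(576, 1024)                      # learned table for a 24x24 patch grid
side = int(math.sqrt(pos_emb.shape[0]))
grid = pos_emb.reshape(side, side, -1)                # (24, 24, dim)
grid = grid.unsqueeze(0).permute(0, 3, 1, 2)          # (1, dim, 24, 24) for interpolate
grid = F.interpolate(grid, size=(18, 12), mode="bicubic",
                     align_corners=False, antialias=True)
resized = grid.permute(0, 2, 3, 1).reshape(-1, 1024)  # (18*12, dim), added to patch features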
429
+ class ImageProjectorMLP(nn.Module):
430
+
431
+ def __init__(
432
+ self,
433
+ input_dim: int,
434
+ hidden_dim: int,
435
+ output_dim: int,
436
+ hidden_act: str,
437
+ device: Union[str, torch.device] = None,
438
+ ):
439
+ super().__init__()
440
+ self.w1 = nn.Linear(input_dim, hidden_dim, bias=False, device=device)
441
+ self.w2 = nn.Linear(hidden_dim, output_dim, bias=False, device=device)
442
+ self.w3 = nn.Linear(input_dim, hidden_dim, bias=False, device=device)
443
+ self.act = ACT2FN[hidden_act]
444
+
445
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
446
+ return self.w2(self.act(self.w1(x)) * self.w3(x))
447
+
448
+
449
+ class Molmo2VisionBackbone(nn.Module):
450
+ def __init__(self, vit_config: Molmo2VitConfig, adapter_config: Molmo2AdapterConfig):
451
+ super().__init__()
452
+ self.vit_config = vit_config
453
+ self.adapter_config = adapter_config
454
+
455
+ self.vit_layers = []
456
+ for layer in adapter_config.vit_layers:
457
+ if layer >= 0:
458
+ self.vit_layers.append(layer)
459
+ else:
460
+ self.vit_layers.append(layer + vit_config.num_hidden_layers)
461
+
462
+ last_layer_needed = max(self.vit_layers) + 1
463
+ if last_layer_needed < vit_config.num_hidden_layers:
464
+ new_vit_config = deepcopy(vit_config)
465
+ new_vit_config.num_hidden_layers = last_layer_needed
466
+ self.image_vit = Molmo2VisionTransformer(new_vit_config)
467
+ else:
468
+ self.image_vit = Molmo2VisionTransformer(vit_config)
469
+
470
+ self.num_prefix_tokens: int = self.image_vit.num_prefix_tokens
471
+
472
+ pool_dim = vit_config.hidden_size * len(adapter_config.vit_layers)
473
+ self.image_pooling_2d = ViTMultiHeadDotProductAttention(
474
+ hidden_size=adapter_config.hidden_size,
475
+ num_heads=adapter_config.num_attention_heads,
476
+ num_key_value_heads=adapter_config.num_key_value_heads,
477
+ head_dim=adapter_config.head_dim,
478
+ input_dim=pool_dim,
479
+ float32_attention=adapter_config.float32_attention,
480
+ attention_dropout=adapter_config.attention_dropout,
481
+ residual_dropout=adapter_config.residual_dropout,
482
+ attn_implementation=adapter_config._attn_implementation,
483
+ )
484
+ self.image_projector = ImageProjectorMLP(
485
+ adapter_config.hidden_size,
486
+ adapter_config.intermediate_size,
487
+ adapter_config.text_hidden_size,
488
+ adapter_config.hidden_act,
489
+ )
490
+ self.image_feature_dropout = nn.Dropout(adapter_config.image_feature_dropout)
491
+
492
+ def encode_image(self, images: torch.Tensor) -> torch.Tensor:
493
+ """
494
+ :param images: (batch_size, num_crops, num_patch, n_pixels)
495
+ """
496
+ B, T, N, D = images.shape
497
+ images = images.view(B * T, N, D)
498
+ image_features = self.image_vit(images)
499
+
500
+ features = []
501
+ for layer in self.vit_layers:
502
+ features.append(image_features[layer])
503
+ image_features = torch.cat(features, dim=-1)
504
+
505
+ if self.num_prefix_tokens > 0:
506
+ image_features = image_features[:, 1:]
507
+ image_features = image_features.view(B, T, N, -1)
508
+ return image_features
509
+
510
+ @property
511
+ def dtype(self) -> torch.dtype:
512
+ return self.image_vit.patch_embedding.weight.dtype
513
+
514
+ @property
515
+ def device(self) -> torch.device:
516
+ return self.image_vit.patch_embedding.weight.device
517
+
518
+ def forward(
519
+ self,
520
+ images: torch.Tensor,
521
+ pooled_patches_idx: torch.Tensor,
522
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
523
+
524
+ # image_features: (batch_size, num_crops (= num_image), num_patch, num_vit_layers x image_emb_dim)
525
+ batch_size, num_image = images.shape[:2]
526
+ images = images.to(device=self.device, dtype=self.dtype)
527
+ image_features = self.encode_image(images)
528
+
529
+ image_features = self.image_feature_dropout(image_features)
530
+ dim = image_features.shape[-1]
531
+ valid = pooled_patches_idx >= 0
532
+ valid_token = torch.any(valid, -1)
533
+
534
+ # Use `pooled_patches_idx` to arrange the features for image pooling
535
+ batch_idx = torch.arange(pooled_patches_idx.shape[0], dtype=torch.long, device=pooled_patches_idx.device)
536
+ batch_idx = torch.tile(batch_idx.view(batch_size, 1, 1), [1, pooled_patches_idx.shape[1], pooled_patches_idx.shape[2]])
537
+
538
+ # Now [batch, num_high_res_features, pool_dim, dim]
539
+ to_pool = image_features.reshape(batch_size, -1, dim)[batch_idx, torch.clip(pooled_patches_idx, 0)]
540
+ to_pool = to_pool * valid.to(self.dtype)[:, :, :, None]
541
+ to_pool = to_pool.reshape([-1, pooled_patches_idx.shape[-1], dim])
542
+
543
+ query = to_pool.mean(-2, keepdim=True)
544
+ pooled_features = self.image_pooling_2d(query, to_pool)
545
+ pooled_features = pooled_features.reshape([batch_size, -1, pooled_features.shape[-1]])
546
+
547
+ # MLP layer to map the feature.
548
+ pooled_features = self.image_projector(pooled_features)
549
+ return pooled_features.view(-1, pooled_features.shape[-1])[valid_token.flatten()]
550
+
551
+
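The pooling step in `Molmo2VisionBackbone.forward` gathers groups of patch features by index, treating -1 entries as padding: they are clipped to 0 for the gather and zeroed out afterwards with the validity mask. A toy illustration of that gather (shapes are made up):

import torch

feats = torch.randn(2, 12, 4)                        # (batch, total_patches, dim)
idx = torch.tensor([[[0, 1], [2, -1]],
                    [[3, 4], [-1, -1]]])             # (batch, num_pooled, pool_size), -1 = pad
batch = torch.arange(2).view(2, 1, 1).expand_as(idx)
gathered = feats[batch, idx.clamp(min=0)]            # (batch, num_pooled, pool_size, dim)
gathered = gathered * (idx >= 0).unsqueeze(-1)       # zero out the padded slots
query = gathered.mean(-2, keepdim=True)              # mean-pooled query per group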
552
+ # Copied from transformers.models.llama.modeling_llama.rotate_half
553
+ def rotate_half(x):
554
+ """Rotates half the hidden dims of the input."""
555
+ x1 = x[..., : x.shape[-1] // 2]
556
+ x2 = x[..., x.shape[-1] // 2 :]
557
+ return torch.cat((-x2, x1), dim=-1)
558
+
559
+
560
+ # Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb
561
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
562
+ """Applies Rotary Position Embedding to the query and key tensors.
563
+
564
+ Args:
565
+ q (`torch.Tensor`): The query tensor.
566
+ k (`torch.Tensor`): The key tensor.
567
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
568
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
569
+ position_ids (`torch.Tensor`, *optional*):
570
+ Deprecated and unused.
571
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
572
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
573
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
574
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
575
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
576
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
577
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
578
+ Returns:
579
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
580
+ """
581
+ cos = cos.unsqueeze(unsqueeze_dim)
582
+ sin = sin.unsqueeze(unsqueeze_dim)
583
+ q_embed = (q * cos) + (rotate_half(q) * sin)
584
+ k_embed = (k * cos) + (rotate_half(k) * sin)
585
+ return q_embed, k_embed
586
+
587
+
588
+ # Copied from transformers.models.llama.modeling_llama.LlamaRotaryEmbedding
589
+ class Molmo2RotaryEmbedding(nn.Module):
590
+
591
+ def __init__(self, config: Molmo2LlmConfig, device: Union[str, torch.device] = None):
592
+ super().__init__()
593
+ # BC: "rope_type" was originally "type"
594
+ if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
595
+ self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
596
+ else:
597
+ self.rope_type = "default"
598
+ self.max_seq_len_cached = config.max_position_embeddings
599
+ self.original_max_seq_len = config.max_position_embeddings
600
+
601
+ self.config = config
602
+ self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
603
+
604
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
605
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
606
+ self.original_inv_freq = self.inv_freq
607
+
608
+ @torch.no_grad()
609
+ @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
610
+ def forward(self, x, position_ids: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
611
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
612
+ position_ids_expanded = position_ids[:, None, :].float()
613
+
614
+ device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
615
+ with torch.autocast(device_type=device_type, enabled=False): # Force float32
616
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
617
+ emb = torch.cat((freqs, freqs), dim=-1)
618
+ cos = emb.cos() * self.attention_scaling
619
+ sin = emb.sin() * self.attention_scaling
620
+
621
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
622
+
623
+
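`Molmo2RotaryEmbedding` builds per-position cos/sin tables from inverse frequencies, and `apply_rotary_pos_emb` applies them to queries and keys through `rotate_half`. A small sketch of that pipeline with illustrative sizes, reusing the helpers defined earlier in this file (the base of 10000 is an assumption, not read from the config):

import torch

head_dim, seq_len = 64, 16
inv_freq = 1.0 / (10000 ** (torch.arange(0, head_dim, 2).float() / head_dim))
pos = torch.arange(seq_len).float()
freqs = torch.outer(pos, inv_freq)                   # (seq_len, head_dim/2)
emb = torch.cat((freqs, freqs), dim=-1)              # (seq_len, head_dim)
cos, sin = emb.cos()[None], emb.sin()[None]          # add a batch dimension

q = torch.randn(1, 8, seq_len, head_dim)             # (batch, heads, seq, head_dim)
k = torch.randn(1, 2, seq_len, head_dim)
q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin)  # rotated queries and keys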
624
+ @use_kernel_forward_from_hub("RMSNorm")
625
+ class Molmo2RMSNorm(nn.Module):
626
+
627
+ def __init__(
628
+ self,
629
+ size: int,
630
+ eps: float = 1e-6,
631
+ device: Union[str, torch.device] = None,
632
+ ):
633
+ super().__init__()
634
+ self.weight = nn.Parameter(torch.ones(size, device=device))
635
+ self.eps = eps
636
+
637
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
638
+ with torch.autocast(enabled=False, device_type=x.device.type):
639
+ og_dtype = x.dtype
640
+ x = x.to(torch.float32)
641
+ variance = x.pow(2).mean(-1, keepdim=True)
642
+ x = x * torch.rsqrt(variance + self.eps)
643
+ x = x.to(og_dtype)
644
+
645
+ return self.weight * x
646
+
647
+ def extra_repr(self):
648
+ return f"{tuple(self.weight.shape)}, eps={self.eps}"
649
+
650
+
651
+ # Copied from transformers.models.llama.modeling_llama.repeat_kv
652
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
653
+ """
654
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
655
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
656
+ """
657
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
658
+ if n_rep == 1:
659
+ return hidden_states
660
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
661
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
662
+
663
+
664
+ def eager_attention_forward(
665
+ module: nn.Module,
666
+ query: torch.Tensor,
667
+ key: torch.Tensor,
668
+ value: torch.Tensor,
669
+ attention_mask: Optional[torch.Tensor],
670
+ scaling: float,
671
+ dropout: float = 0.0,
672
+ **kwargs,
673
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
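+ # Plain-PyTorch scaled dot-product attention: expand the KV heads to match the query
+ # heads (grouped-query attention), add the causal/padding mask, softmax in float32,
+ # and weight the values.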
674
+ key_states = repeat_kv(key, module.num_key_value_groups)
675
+ value_states = repeat_kv(value, module.num_key_value_groups)
676
+
677
+ attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
678
+ if attention_mask is not None:
679
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
680
+ attn_weights = attn_weights + causal_mask
681
+
682
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
683
+ attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
684
+ attn_output = torch.matmul(attn_weights, value_states)
685
+ attn_output = attn_output.transpose(1, 2).contiguous()
686
+
687
+ return attn_output, attn_weights
688
+
689
+
690
+ class Molmo2Attention(nn.Module):
691
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
692
+
693
+ # copied from transformers.models.llama.modeling_llama.LlamaAttention.__init__ with Llama->Molmo2
694
+ def __init__(self, config: Molmo2LlmConfig, layer_idx: Optional[int] = None) -> None:
695
+ super().__init__()
696
+ self.config = config
697
+ self.layer_idx = layer_idx
698
+ if layer_idx is None:
699
+ logger.warning_once(
700
+ f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
701
+ "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
702
+ "when creating this class."
703
+ )
704
+
705
+ self.num_heads = config.num_attention_heads
706
+ self.num_key_value_heads = config.num_key_value_heads
707
+ self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
708
+ self.head_dim = config.head_dim
709
+ self.scaling = self.head_dim**-0.5
710
+ self.is_causal = True
711
+
712
+ if (config.head_dim * config.num_attention_heads) != config.hidden_size:
713
+ raise ValueError(
714
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {config.hidden_size}"
715
+ f" and `num_attention_heads`: {config.num_attention_heads})."
716
+ )
717
+
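+ # Q, K and V come from a single fused projection; `fused_dims` records the chunk
+ # sizes used to split the projection back into query, key and value states.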
718
+ self.fused_dims = (
719
+ config.hidden_size,
720
+ config.head_dim * config.num_key_value_heads,
721
+ config.head_dim * config.num_key_value_heads,
722
+ )
723
+ self.att_proj = nn.Linear(
724
+ config.hidden_size,
725
+ sum(self.fused_dims),
726
+ bias=config.qkv_bias,
727
+ )
728
+
729
+ # Layer norms.
730
+ self.k_norm: Optional[Molmo2RMSNorm] = None
731
+ self.q_norm: Optional[Molmo2RMSNorm] = None
732
+ self.qk_norm_type: Optional[str] = None
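+ # Optional QK normalization: with the "olmo" style the norm is sized per head
+ # (head_dim), otherwise it spans the full flattened key/query projection.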
733
+ if config.use_qk_norm:
734
+ k_norm_size = (
735
+ config.head_dim
736
+ if config.qk_norm_type == "olmo" else
737
+ config.num_key_value_heads * config.head_dim
738
+ )
739
+ self.k_norm = Molmo2RMSNorm(k_norm_size, eps=config.layer_norm_eps)
740
+ q_norm_size = (
741
+ config.head_dim
742
+ if config.qk_norm_type == "olmo" else
743
+ config.num_attention_heads * config.head_dim
744
+ )
745
+ self.q_norm = Molmo2RMSNorm(q_norm_size, eps=config.layer_norm_eps)
746
+ self.qk_norm_type = config.qk_norm_type
747
+
748
+ self.attention_dropout = config.attention_dropout
749
+
750
+ self.attn_out = nn.Linear(
751
+ config.hidden_size,
752
+ config.hidden_size,
753
+ bias=False,
754
+ )
755
+
756
+ def forward(
757
+ self,
758
+ hidden_states: torch.Tensor,
759
+ position_embeddings: Tuple[torch.Tensor, torch.Tensor],
760
+ attention_mask: Optional[torch.Tensor],
761
+ past_key_value: Optional[Cache] = None,
762
+ cache_position: Optional[torch.LongTensor] = None,
763
+ **kwargs: Unpack[FlashAttentionKwargs],
764
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
765
+ input_shape = hidden_states.shape[:-1]
766
+ hidden_shape = (*input_shape, -1, self.head_dim)
767
+
768
+ qkv = self.att_proj(hidden_states)
769
+ query_states, key_states, value_states = qkv.split(self.fused_dims, dim=-1)
770
+ value_states = value_states.view(hidden_shape)
771
+
772
+ # Optionally apply layer norm to keys and queries.
773
+ if self.q_norm is not None and self.k_norm is not None and self.qk_norm_type != "qwen3":
774
+ query_states = self.q_norm(query_states)
775
+ key_states = self.k_norm(key_states)
776
+
777
+ query_states = query_states.view(hidden_shape)
778
+ key_states = key_states.view(hidden_shape)
779
+ if self.q_norm is not None and self.k_norm is not None and self.qk_norm_type == "qwen3":
780
+ query_states = self.q_norm(query_states)
781
+ key_states = self.k_norm(key_states)
782
+ query_states = query_states.transpose(1, 2)
783
+ key_states = key_states.transpose(1, 2)
784
+ value_states = value_states.transpose(1, 2)
785
+
786
+ cos, sin = position_embeddings
787
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
788
+
789
+ if past_key_value is not None:
790
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
791
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
792
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
793
+
794
+ attention_interface: Callable = eager_attention_forward
795
+ if self.config._attn_implementation != "eager":
796
+ if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
797
+ logger.warning_once(
798
+ "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
799
+ 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
800
+ )
801
+ else:
802
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
803
+
804
+ attn_output, attn_weights = attention_interface(
805
+ self,
806
+ query_states,
807
+ key_states,
808
+ value_states,
809
+ attention_mask,
810
+ dropout=0.0 if not self.training else self.attention_dropout,
811
+ scaling=self.scaling,
812
+ **kwargs,
813
+ )
814
+
815
+ attn_output = attn_output.reshape(*input_shape, -1).contiguous()
816
+ attn_output = self.attn_out(attn_output)
817
+
818
+ return attn_output, attn_weights
819
+
820
+
821
+ class LanguageModelMLP(nn.Module):
822
+
823
+ def __init__(
824
+ self,
825
+ input_dim: int,
826
+ intermediate_size: int,
827
+ hidden_act: str,
828
+ device: Union[str, torch.device] = None,
829
+ ):
830
+ super().__init__()
831
+ self.ff_proj = nn.Linear(input_dim, intermediate_size * 2, bias=False, device=device)
832
+ self.ff_out = nn.Linear(intermediate_size, input_dim, bias=False, device=device)
833
+ self.act = ACT2FN[hidden_act]
834
+
835
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
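+ # Gated MLP (SwiGLU-style when `hidden_act` is silu): one up-projection is split
+ # into value and gate halves, the activation gates the value, then project down.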
836
+ x = self.ff_proj(x)
837
+ x, gate = x.chunk(2, dim=-1)
838
+ x = self.act(gate) * x
839
+ x = self.ff_out(x)
840
+ return x
841
+
842
+
843
+ class Molmo2DecoderLayer(GradientCheckpointingLayer):
844
+
845
+ def __init__(
846
+ self,
847
+ config: Molmo2LlmConfig,
848
+ layer_idx: Optional[int] = None,
849
+ device: Union[str, torch.device] = None
850
+ ):
851
+ super().__init__()
852
+ self.config = config
853
+
854
+ self.self_attn = Molmo2Attention(config, layer_idx)
855
+ self.attn_norm = Molmo2RMSNorm(
856
+ config.hidden_size, eps=config.layer_norm_eps, device=device)
857
+ self.dropout = nn.Dropout(config.residual_dropout)
858
+ self.mlp = LanguageModelMLP(
859
+ config.hidden_size, config.intermediate_size, config.hidden_act, device=device)
860
+ self.ff_norm = Molmo2RMSNorm(
861
+ config.hidden_size, eps=config.layer_norm_eps, device=device)
862
+
863
+ def forward(
864
+ self,
865
+ hidden_states: torch.Tensor,
866
+ attention_mask: Optional[torch.Tensor] = None,
867
+ position_ids: Optional[torch.LongTensor] = None,
868
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
869
+ output_attentions: Optional[bool] = False,
870
+ use_cache: Optional[bool] = False,
871
+ cache_position: Optional[torch.LongTensor] = None,
872
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.46
873
+ **kwargs,
874
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
875
+ """
876
+ Args:
877
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
878
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
879
+ `(batch, sequence_length)` where padding elements are indicated by 0.
880
+ output_attentions (`bool`, *optional*):
881
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
882
+ returned tensors for more detail.
883
+ use_cache (`bool`, *optional*):
884
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
885
+ (see `past_key_values`).
886
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
887
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
888
+ Indices depicting the position of the input sequence tokens in the sequence.
889
+ position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
890
+ Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
891
+ with `head_dim` being the embedding dimension of each attention head.
892
+ kwargs (`dict`, *optional*):
893
+ Arbitrary kwargs to be ignored, used for FSDP and other methods that inject code
894
+ into the model
895
+ """
896
+
897
+ residual = hidden_states
898
+ hidden_states = self.attn_norm(hidden_states)
899
+
900
+ # Self Attention
901
+ hidden_states, self_attn_weights = self.self_attn(
902
+ hidden_states=hidden_states,
903
+ attention_mask=attention_mask,
904
+ position_ids=position_ids,
905
+ past_key_value=past_key_value,
906
+ output_attentions=output_attentions,
907
+ use_cache=use_cache,
908
+ cache_position=cache_position,
909
+ position_embeddings=position_embeddings,
910
+ )
911
+
912
+ hidden_states = residual + self.dropout(hidden_states)
913
+
914
+ # Fully Connected
915
+ residual = hidden_states
916
+ hidden_states = self.ff_norm(hidden_states)
917
+ hidden_states = self.mlp(hidden_states)
918
+
919
+ hidden_states = residual + self.dropout(hidden_states)
920
+
921
+ outputs = (hidden_states,)
922
+
923
+ if output_attentions:
924
+ outputs += (self_attn_weights,)
925
+
926
+ return outputs
927
+
928
+
929
+ class Molmo2PostNormDecoderLayer(Molmo2DecoderLayer):
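+ """Post-norm variant of the decoder layer: the norms are applied after the attention
+ and MLP blocks rather than before them (selected via `config.norm_after`)."""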
930
+ def forward(
931
+ self,
932
+ hidden_states: torch.Tensor,
933
+ attention_mask: Optional[torch.Tensor] = None,
934
+ position_ids: Optional[torch.LongTensor] = None,
935
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
936
+ output_attentions: Optional[bool] = False,
937
+ use_cache: Optional[bool] = False,
938
+ cache_position: Optional[torch.LongTensor] = None,
939
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.46
940
+ **kwargs,
941
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
942
+ """
943
+ Args:
944
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
945
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
946
+ `(batch, sequence_length)` where padding elements are indicated by 0.
947
+ output_attentions (`bool`, *optional*):
948
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
949
+ returned tensors for more detail.
950
+ use_cache (`bool`, *optional*):
951
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
952
+ (see `past_key_values`).
953
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
954
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
955
+ Indices depicting the position of the input sequence tokens in the sequence.
956
+ position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
957
+ Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
958
+ with `head_dim` being the embedding dimension of each attention head.
959
+ kwargs (`dict`, *optional*):
960
+ Arbitrary kwargs to be ignored, used for FSDP and other methods that inject code
961
+ into the model
962
+ """
963
+
964
+ residual = hidden_states
965
+
966
+ # Self Attention
967
+ hidden_states, self_attn_weights = self.self_attn(
968
+ hidden_states=hidden_states,
969
+ attention_mask=attention_mask,
970
+ position_ids=position_ids,
971
+ past_key_value=past_key_value,
972
+ output_attentions=output_attentions,
973
+ use_cache=use_cache,
974
+ cache_position=cache_position,
975
+ position_embeddings=position_embeddings,
976
+ )
977
+ hidden_states = self.attn_norm(hidden_states)
978
+
979
+ hidden_states = residual + self.dropout(hidden_states)
980
+
981
+ # Fully Connected
982
+ residual = hidden_states
983
+ hidden_states = self.mlp(hidden_states)
984
+ hidden_states = self.ff_norm(hidden_states)
985
+
986
+ hidden_states = residual + self.dropout(hidden_states)
987
+
988
+ outputs = (hidden_states,)
989
+
990
+ if output_attentions:
991
+ outputs += (self_attn_weights,)
992
+
993
+ return outputs
994
+
995
+
996
+ class Molmo2Embedding(nn.Module):
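+ """Embedding with a base table plus a separate table for additional tokens; the two
+ are concatenated at lookup time, so ids >= num_embeddings index into `new_embedding`."""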
997
+ def __init__(
998
+ self,
999
+ num_embeddings: int,
1000
+ num_new_embeddings: int,
1001
+ features: int,
1002
+ device: Union[str, torch.device] = None,
1003
+ ):
1004
+ super().__init__()
1005
+ self.embedding = nn.Parameter(
1006
+ torch.zeros(num_embeddings, features, device=device),
1007
+ )
1008
+ self.new_embedding = nn.Parameter(
1009
+ torch.zeros(num_new_embeddings, features, device=device),
1010
+ )
1011
+
1012
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
1013
+ return F.embedding(x, torch.cat([self.embedding, self.new_embedding], dim=0))
1014
+
1015
+
1016
+ MOLMO2_TEXT_ONLY_INPUTS_DOCSTRING = r"""
1017
+ Args:
1018
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
1019
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
1020
+ it.
1021
+
1022
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1023
+ [`PreTrainedTokenizer.__call__`] for details.
1024
+
1025
+ [What are input IDs?](../glossary#input-ids)
1026
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1027
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1028
+
1029
+ - 1 for tokens that are **not masked**,
1030
+ - 0 for tokens that are **masked**.
1031
+
1032
+ [What are attention masks?](../glossary#attention-mask)
1033
+
1034
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1035
+ [`PreTrainedTokenizer.__call__`] for details.
1036
+
1037
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
1038
+ `past_key_values`).
1039
+
1040
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
1041
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
1042
+ information on the default strategy.
1043
+
1044
+ - 1 indicates the head is **not masked**,
1045
+ - 0 indicates the head is **masked**.
1046
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1047
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
1048
+ config.n_positions - 1]`.
1049
+
1050
+ [What are position IDs?](../glossary#position-ids)
1051
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
1052
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
1053
+ blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`
1054
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
1055
+
1056
+ Two formats are allowed:
1057
+ - a [`~cache_utils.Cache`] instance, see our
1058
+ [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache);
1059
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
1060
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
1061
+ cache format.
1062
+
1063
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
1064
+ legacy cache format will be returned.
1065
+
1066
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
1067
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
1068
+ of shape `(batch_size, sequence_length)`.
1069
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1070
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1071
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
1072
+ model's internal embedding lookup matrix.
1073
+ use_cache (`bool`, *optional*):
1074
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1075
+ `past_key_values`).
1076
+ output_attentions (`bool`, *optional*):
1077
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1078
+ tensors for more detail.
1079
+ output_hidden_states (`bool`, *optional*):
1080
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1081
+ more detail.
1082
+ return_dict (`bool`, *optional*):
1083
+ Whether or not to return a [`CausalLMOutputWithPast`] instead of a plain tuple.
1084
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
1085
+ Indices depicting the position of the input sequence tokens in the sequence. Contrary to `position_ids`,
1086
+ this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
1087
+ the complete sequence length.
1088
+ """
1089
+
1090
+
1091
+ @add_start_docstrings(
1092
+ "The bare Molmo2 text-only model outputting raw hidden-states without any specific head on top.",
1093
+ MOLMO_START_DOCSTRING,
1094
+ )
1095
+ class Molmo2Llm(Molmo2PreTrainedModel):
1096
+ def __init__(self, config: Molmo2LlmConfig):
1097
+ super().__init__(config)
1098
+ self.config = config
1099
+ if config.additional_vocab_size is not None:
1100
+ self.wte = Molmo2Embedding(
1101
+ config.vocab_size,
1102
+ config.additional_vocab_size,
1103
+ config.hidden_size,
1104
+ )
1105
+ else:
1106
+ self.wte = nn.Embedding(config.vocab_size, config.hidden_size)
1107
+ self.emb_drop = nn.Dropout(config.embedding_dropout)
1108
+ decoder_layer = Molmo2PostNormDecoderLayer if config.norm_after else Molmo2DecoderLayer
1109
+ self.blocks = nn.ModuleList(
1110
+ [decoder_layer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
1111
+ )
1112
+ self.ln_f = Molmo2RMSNorm(config.hidden_size, eps=config.layer_norm_eps)
1113
+ self.rotary_emb = Molmo2RotaryEmbedding(config)
1114
+ self.gradient_checkpointing = False
1115
+
1116
+ # Initialize weights and apply final processing
1117
+ self.post_init()
1118
+
1119
+ def get_input_embeddings(self) -> torch.nn.Module:
1120
+ return self.wte
1121
+
1122
+ def set_input_embeddings(self, value: torch.nn.Module) -> None:
1123
+ self.wte = value
1124
+
1125
+ @can_return_tuple
1126
+ def forward(
1127
+ self,
1128
+ input_ids: Optional[torch.LongTensor] = None,
1129
+ attention_mask: Optional[torch.Tensor] = None,
1130
+ position_ids: Optional[torch.LongTensor] = None,
1131
+ past_key_values: Optional[Cache] = None,
1132
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1133
+ use_cache: Optional[bool] = None,
1134
+ output_attentions: Optional[bool] = None,
1135
+ output_hidden_states: Optional[bool] = None,
1136
+ cache_position: Optional[torch.LongTensor] = None,
1137
+ **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
1138
+ ) -> BaseModelOutputWithPast:
1139
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1140
+ output_hidden_states = (
1141
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1142
+ )
1143
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1144
+
1145
+ if (input_ids is None) ^ (inputs_embeds is not None):
1146
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
1147
+
1148
+ if self.gradient_checkpointing and self.training and use_cache:
1149
+ logger.warning_once(
1150
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
1151
+ )
1152
+ use_cache = False
1153
+
1154
+ # TODO (joao): remove this exception in v4.56 -- it exists for users that try to pass a legacy cache
1155
+ if not isinstance(past_key_values, (type(None), Cache)):
1156
+ raise ValueError("The `past_key_values` should be either a `Cache` object or `None`.")
1157
+
1158
+ if inputs_embeds is None:
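+ # Replace -1 placeholder ids with 0 so the embedding lookup stays in range.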
1159
+ input_ids = input_ids * (input_ids != -1).to(input_ids.dtype)
1160
+ inputs_embeds = self.wte(input_ids)
1161
+
1162
+ if use_cache and past_key_values is None:
1163
+ past_key_values = DynamicCache()
1164
+
1165
+ if cache_position is None:
1166
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
1167
+ cache_position = torch.arange(
1168
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
1169
+ )
1170
+
1171
+ if position_ids is None:
1172
+ position_ids = cache_position.unsqueeze(0)
1173
+
1174
+ causal_mask = self._update_causal_mask(
1175
+ attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
1176
+ )
1177
+
1178
+ hidden_states = inputs_embeds
1179
+
1180
+ # create position embeddings to be shared across the decoder layers
1181
+ position_embeddings = self.rotary_emb(hidden_states, position_ids)
1182
+
1183
+ # decoder layers
1184
+ all_hidden_states = () if output_hidden_states else None
1185
+ all_self_attns = () if output_attentions else None
1186
+
1187
+ for decoder_block in self.blocks[: self.config.num_hidden_layers]:
1188
+ if output_hidden_states:
1189
+ all_hidden_states += (hidden_states,)
1190
+
1191
+ layer_outputs = decoder_block(
1192
+ hidden_states,
1193
+ attention_mask=causal_mask,
1194
+ position_ids=position_ids,
1195
+ past_key_value=past_key_values,
1196
+ output_attentions=output_attentions,
1197
+ use_cache=use_cache,
1198
+ cache_position=cache_position,
1199
+ position_embeddings=position_embeddings,
1200
+ **flash_attn_kwargs,
1201
+ )
1202
+
1203
+ hidden_states = layer_outputs[0]
1204
+
1205
+ if output_attentions:
1206
+ all_self_attns += (layer_outputs[1],)
1207
+
1208
+ hidden_states = self.ln_f(hidden_states)
1209
+
1210
+ # add hidden states from the last decoder layer
1211
+ if output_hidden_states:
1212
+ all_hidden_states += (hidden_states,)
1213
+
1214
+ return BaseModelOutputWithPast(
1215
+ last_hidden_state=hidden_states,
1216
+ past_key_values=past_key_values if use_cache else None,
1217
+ hidden_states=all_hidden_states,
1218
+ attentions=all_self_attns,
1219
+ )
1220
+
1221
+ def _update_causal_mask(
1222
+ self,
1223
+ attention_mask: Union[torch.Tensor, "BlockMask"],
1224
+ input_tensor: torch.Tensor,
1225
+ cache_position: torch.Tensor,
1226
+ past_key_values: Cache,
1227
+ output_attentions: bool = False,
1228
+ ):
1229
+ if self.config._attn_implementation == "flash_attention_2":
1230
+ if attention_mask is not None and (attention_mask == 0.0).any():
1231
+ return attention_mask
1232
+ return None
1233
+ if self.config._attn_implementation == "flex_attention":
1234
+ if isinstance(attention_mask, torch.Tensor):
1235
+ attention_mask = make_flex_block_causal_mask(attention_mask)
1236
+ return attention_mask
1237
+
1238
+ # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
1239
+ # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
1240
+ # to infer the attention mask.
1241
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
1242
+ using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False
1243
+
1244
+ # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
1245
+ if self.config._attn_implementation == "sdpa" and not using_compilable_cache and not output_attentions:
1246
+ if AttentionMaskConverter._ignore_causal_mask_sdpa(
1247
+ attention_mask,
1248
+ inputs_embeds=input_tensor,
1249
+ past_key_values_length=past_seen_tokens,
1250
+ is_training=self.training,
1251
+ ):
1252
+ return None
1253
+
1254
+ dtype = input_tensor.dtype
1255
+ sequence_length = input_tensor.shape[1]
1256
+ if using_compilable_cache:
1257
+ target_length = past_key_values.get_max_cache_shape()
1258
+ else:
1259
+ target_length = (
1260
+ attention_mask.shape[-1]
1261
+ if isinstance(attention_mask, torch.Tensor)
1262
+ else past_seen_tokens + sequence_length + 1
1263
+ )
1264
+
1265
+ # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
1266
+ causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
1267
+ attention_mask,
1268
+ sequence_length=sequence_length,
1269
+ target_length=target_length,
1270
+ dtype=dtype,
1271
+ cache_position=cache_position,
1272
+ batch_size=input_tensor.shape[0],
1273
+ )
1274
+
1275
+ if (
1276
+ self.config._attn_implementation == "sdpa"
1277
+ and attention_mask is not None
1278
+ and attention_mask.device.type in ["cuda", "xpu", "npu"]
1279
+ and not output_attentions
1280
+ ):
1281
+ # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
1282
+ # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
1283
+ # Details: https://github.com/pytorch/pytorch/issues/110213
1284
+ min_dtype = torch.finfo(dtype).min
1285
+ causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
1286
+
1287
+ return causal_mask
1288
+
1289
+ @staticmethod
1290
+ def _prepare_4d_causal_attention_mask_with_cache_position(
1291
+ attention_mask: torch.Tensor,
1292
+ sequence_length: int,
1293
+ target_length: int,
1294
+ dtype: torch.dtype,
1295
+ cache_position: torch.Tensor,
1296
+ batch_size: int,
1297
+ **kwargs,
1298
+ ):
1299
+ """
1300
+ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
1301
+ `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
1302
+
1303
+ Args:
1304
+ attention_mask (`torch.Tensor`):
1305
+ A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
1306
+ `(batch_size, 1, query_length, key_value_length)`.
1307
+ sequence_length (`int`):
1308
+ The sequence length being processed.
1309
+ target_length (`int`):
1310
+ The target length: when generating with static cache, the mask should be as long as the static cache,
1311
+ to account for the 0 padding, the part of the cache that is not filled yet.
1312
+ dtype (`torch.dtype`):
1313
+ The dtype to use for the 4D attention mask.
1314
+ cache_position (`torch.Tensor`):
1315
+ Indices depicting the position of the input sequence tokens in the sequence.
1316
+ batch_size (`torch.Tensor`):
1317
+ Batch size.
1318
+ """
1319
+ if attention_mask is not None and attention_mask.dim() == 4:
1320
+ # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
1321
+ causal_mask = attention_mask
1322
+ else:
1323
+ min_dtype = torch.finfo(dtype).min
1324
+ causal_mask = torch.full(
1325
+ (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
1326
+ )
1327
+ if sequence_length != 1:
1328
+ causal_mask = torch.triu(causal_mask, diagonal=1)
1329
+ causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
1330
+ causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
1331
+ if attention_mask is not None:
1332
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
1333
+ mask_length = attention_mask.shape[-1]
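+ # A position is padding iff it is allowed by the causal mask (value 0) and masked in
+ # the 2D attention mask (value 0); those entries sum to exactly 0 and are then filled
+ # with the most negative representable value.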
1334
+ padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
1335
+ causal_mask.device
1336
+ )
1337
+ padding_mask = padding_mask == 0
1338
+ causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
1339
+ padding_mask, min_dtype
1340
+ )
1341
+
1342
+ return causal_mask
1343
+
1344
+
1345
+ @add_start_docstrings(
1346
+ "The Molmo2 text-only model which consists of a language model + lm head.",
1347
+ MOLMO_START_DOCSTRING,
1348
+ )
1349
+ class Molmo2ForCausalLM(Molmo2PreTrainedModel, GenerationMixin):
1350
+ _tied_weights_keys = [] # Weights are not tied
1351
+ _tp_plan = {"lm_head": "colwise_rep"}
1352
+ _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
1353
+ base_model_prefix = "model"
1354
+
1355
+ def __init__(self, config: Molmo2LlmConfig):
1356
+ super().__init__(config)
1357
+ self.model = Molmo2Llm(config)
1358
+ self.vocab_size = config.vocab_size
1359
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1360
+
1361
+ # Initialize weights and apply final processing
1362
+ self.post_init()
1363
+
1364
+ def get_input_embeddings(self) -> torch.nn.Module:
1365
+ return self.model.wte
1366
+
1367
+ def set_input_embeddings(self, value: torch.nn.Module) -> None:
1368
+ self.model.wte = value
1369
+
1370
+ def get_output_embeddings(self):
1371
+ return self.lm_head
1372
+
1373
+ def set_output_embeddings(self, value: torch.nn.Module) -> None:
1374
+ self.lm_head = value
1375
+
1376
+ def set_decoder(self, decoder: torch.nn.Module) -> None:
1377
+ self.model = decoder
1378
+
1379
+ def get_decoder(self) -> torch.nn.Module:
1380
+ return self.model
1381
+
1382
+ @can_return_tuple
1383
+ @add_start_docstrings_to_model_forward(MOLMO2_TEXT_ONLY_INPUTS_DOCSTRING)
1384
+ def forward(
1385
+ self,
1386
+ input_ids: Optional[torch.LongTensor] = None,
1387
+ attention_mask: Optional[torch.Tensor] = None,
1388
+ position_ids: Optional[torch.LongTensor] = None,
1389
+ past_key_values: Optional[Cache] = None,
1390
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1391
+ labels: Optional[torch.LongTensor] = None,
1392
+ use_cache: Optional[bool] = None,
1393
+ output_attentions: Optional[bool] = None,
1394
+ output_hidden_states: Optional[bool] = None,
1395
+ cache_position: Optional[torch.LongTensor] = None,
1396
+ logits_to_keep: Union[int, torch.Tensor] = 0,
1397
+ **kwargs,
1398
+ ) -> CausalLMOutputWithPast:
1399
+ r"""
1400
+ ```python
1401
+ >>> from transformers import AutoTokenizer, Molmo2ForCausalLM
1402
+
1403
+ >>> model = Molmo2ForCausalLM.from_pretrained("...")
1404
+ >>> tokenizer = AutoTokenizer.from_pretrained("...")
1405
+
1406
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
1407
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
1408
+
1409
+ >>> # Generate
1410
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
1411
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1412
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
1413
+ ```"""
1414
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1415
+ output_hidden_states = (
1416
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1417
+ )
1418
+
1419
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1420
+ outputs: BaseModelOutputWithPast = self.model(
1421
+ input_ids=input_ids,
1422
+ attention_mask=attention_mask,
1423
+ position_ids=position_ids,
1424
+ past_key_values=past_key_values,
1425
+ inputs_embeds=inputs_embeds,
1426
+ use_cache=use_cache,
1427
+ output_attentions=output_attentions,
1428
+ output_hidden_states=output_hidden_states,
1429
+ cache_position=cache_position,
1430
+ **kwargs,
1431
+ )
1432
+
1433
+ hidden_states = outputs.last_hidden_state
1434
+ # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
1435
+ slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
1436
+ logits = self.lm_head(hidden_states[:, slice_indices, :])
1437
+
1438
+ loss = None
1439
+ if labels is not None:
1440
+ loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
1441
+
1442
+ return CausalLMOutputWithPast(
1443
+ loss=loss,
1444
+ logits=logits,
1445
+ past_key_values=outputs.past_key_values,
1446
+ hidden_states=outputs.hidden_states,
1447
+ attentions=outputs.attentions,
1448
+ )
1449
+
1450
+
1451
+ MOLMO2_INPUTS_DOCSTRING = r"""
1452
+ Args:
1453
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
1454
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
1455
+ it.
1456
+
1457
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1458
+ [`PreTrainedTokenizer.__call__`] for details.
1459
+
1460
+ [What are input IDs?](../glossary#input-ids)
1461
+ images (`torch.FloatTensor` of shape `(batch_size, n_crops, 27*27, 3*14*14)`, *optional*):
1462
+ The input crops, with pixel values between 0 and 1, normalized with the SigLIP2 mean/std.
1463
+
1464
+ Each crop contains 27x27 patches with 14*14*3 pixel values
1465
+ image_masks (`torch.FloatTensor` of shape `(batch_size, n_crops, n_patches, n_features)`, *optional*):
1466
+ Image masks showing what percent of each patch is padding
1467
+ pooled_patches_idx (`torch.LongTensor` of shape `(batch_size, n_image_tokens, n_pooled_patches)`):
1468
+ For each image-patch token in `input_ids`, the indices of the patches in `images`
1469
+ to pool for that token; an index of -1
1470
+ means the patch is ignored.
1471
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1472
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1473
+
1474
+ - 1 for tokens that are **not masked**,
1475
+ - 0 for tokens that are **masked**.
1476
+
1477
+ [What are attention masks?](../glossary#attention-mask)
1478
+
1479
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1480
+ [`PreTrainedTokenizer.__call__`] for details.
1481
+
1482
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
1483
+ `past_key_values`).
1484
+
1485
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
1486
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
1487
+ information on the default strategy.
1488
+
1489
+ - 1 indicates the head is **not masked**,
1490
+ - 0 indicates the head is **masked**.
1491
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1492
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
1493
+ config.n_positions - 1]`.
1494
+
1495
+ [What are position IDs?](../glossary#position-ids)
1496
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
1497
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
1498
+ blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`
1499
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
1500
+
1501
+ Two formats are allowed:
1502
+ - a [`~cache_utils.Cache`] instance, see our
1503
+ [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache);
1504
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
1505
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
1506
+ cache format.
1507
+
1508
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
1509
+ legacy cache format will be returned.
1510
+
1511
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
1512
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
1513
+ of shape `(batch_size, sequence_length)`.
1514
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1515
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1516
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
1517
+ model's internal embedding lookup matrix.
1518
+ use_cache (`bool`, *optional*):
1519
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1520
+ `past_key_values`).
1521
+ output_attentions (`bool`, *optional*):
1522
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1523
+ tensors for more detail.
1524
+ output_hidden_states (`bool`, *optional*):
1525
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1526
+ more detail.
1527
+ return_dict (`bool`, *optional*):
1528
+ Whether or not to return a [`Molmo2CausalLMOutputWithPast`] instead of a plain tuple.
1529
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
1530
+ Indices depicting the position of the input sequence tokens in the sequence. Contrary to `position_ids`,
1531
+ this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
1532
+ the complete sequence length.
1533
+ """
1534
+
1535
+
1536
+ @add_start_docstrings(
1537
+ "The bare Molmo2 model outputting raw hidden-states without any specific head on top.",
1538
+ MOLMO_START_DOCSTRING,
1539
+ )
1540
+ class Molmo2Model(Molmo2PreTrainedModel):
1541
+ _checkpoint_conversion_mapping = {}
1542
+
1543
+ def __init__(self, config: Molmo2Config):
1544
+ super().__init__(config)
1545
+ self.transformer: Molmo2Llm = Molmo2Llm(config.llm_config)
1546
+ self.vision_backbone: Optional[Molmo2VisionBackbone] = None
1547
+ if config.vit_config is not None and config.adapter_config is not None:
1548
+ self.vision_backbone = Molmo2VisionBackbone(config.vit_config, config.adapter_config)
1549
+
1550
+ # Initialize weights and apply final processing
1551
+ self.post_init()
1552
+
1553
+ def get_input_embeddings(self) -> torch.nn.Module:
1554
+ return self.transformer.wte
1555
+
1556
+ def set_input_embeddings(self, value: torch.nn.Module) -> None:
1557
+ self.transformer.wte = value
1558
+
1559
+ @property
1560
+ def device(self) -> torch.device:
1561
+ return self.transformer.ln_f.weight.device
1562
+
1563
+ def build_input_embeddings(
1564
+ self,
1565
+ input_ids: torch.LongTensor,
1566
+ images: Optional[torch.FloatTensor] = None, # image inputs
1567
+ image_masks: Optional[torch.Tensor] = None,
1568
+ pooled_patches_idx: Optional[torch.LongTensor] = None,
1569
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
1570
+
1571
+ # Get embeddings of input.
1572
+ # shape: (batch_size, seq_len, d_model)
1573
+ input_ids = input_ids * (input_ids != -1).to(input_ids.dtype)
1574
+ x = self.transformer.wte(input_ids)
1575
+
1576
+ image_features: Optional[torch.FloatTensor] = None
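+ # Vision path: encode the crops, then add each pooled image feature onto the text
+ # embedding of its corresponding image-patch placeholder token (matched in order).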
1577
+ if images is not None:
1578
+ image_features = self.vision_backbone(images, pooled_patches_idx)
1579
+ is_image_patch = input_ids.view(-1) == self.config.image_patch_id
1580
+ assert is_image_patch.sum() == len(image_features)
1581
+ x.view(-1, x.shape[-1])[is_image_patch] += image_features
1582
+
1583
+ # shape: (batch_size, seq_len, d_model)
1584
+ x = self.transformer.emb_drop(x) # type: ignore
1585
+
1586
+ return x, image_features
1587
+
1588
+ @can_return_tuple
1589
+ def forward(
1590
+ self,
1591
+ input_ids: Optional[torch.LongTensor] = None,
1592
+ images: Optional[torch.FloatTensor] = None,
1593
+ image_masks: Optional[torch.Tensor] = None,
1594
+ pooled_patches_idx: Optional[torch.Tensor] = None,
1595
+ attention_mask: Optional[torch.Tensor] = None,
1596
+ position_ids: Optional[torch.Tensor] = None,
1597
+ past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
1598
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1599
+ use_cache: Optional[bool] = None,
1600
+ output_attentions: Optional[bool] = None,
1601
+ output_hidden_states: Optional[bool] = None,
1602
+ cache_position: Optional[torch.LongTensor] = None,
1603
+ ) -> Union[Tuple, Molmo2ModelOutputWithPast]:
1604
+
1605
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1606
+ output_hidden_states = (
1607
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1608
+ )
1609
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1610
+
1611
+ if (input_ids is None) ^ (inputs_embeds is not None):
1612
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
1613
+
1614
+ if images is not None and inputs_embeds is not None:
1615
+ raise ValueError(
1616
+ "You cannot specify both images and inputs_embeds at the same time."
1617
+ )
1618
+
1619
+ if inputs_embeds is None:
1620
+ inputs_embeds, image_features = self.build_input_embeddings(
1621
+ input_ids, images, image_masks, pooled_patches_idx)
1622
+
1623
+ outputs = self.transformer(
1624
+ attention_mask=attention_mask,
1625
+ position_ids=position_ids,
1626
+ past_key_values=past_key_values,
1627
+ inputs_embeds=inputs_embeds,
1628
+ use_cache=use_cache,
1629
+ output_attentions=output_attentions,
1630
+ output_hidden_states=output_hidden_states,
1631
+ cache_position=cache_position,
1632
+ )
1633
+
1634
+ return Molmo2ModelOutputWithPast(
1635
+ last_hidden_state=outputs.last_hidden_state,
1636
+ past_key_values=outputs.past_key_values,
1637
+ hidden_states=outputs.hidden_states,
1638
+ attentions=outputs.attentions,
1639
+ image_hidden_states=image_features if images is not None else None,
1640
+ )
1641
+
1642
+ @add_start_docstrings(
1643
+ "The Molmo2 model which consists of a vision backbone and a language model + lm head.",
1644
+ MOLMO_START_DOCSTRING,
1645
+ )
1646
+ class Molmo2ForConditionalGeneration(Molmo2PreTrainedModel, GenerationMixin):
1647
+ _checkpoint_conversion_mapping = {}
1648
+ _tied_weights_keys = [] # Weights are not tied
1649
+ config_class = Molmo2Config
1650
+
1651
+ def __init__(self, config: Molmo2Config):
1652
+ super().__init__(config)
1653
+
1654
+ self.model = Molmo2Model(config)
1655
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1656
+ self.vocab_size = config.vocab_size
1657
+
1658
+ # Initialize weights and apply final processing
1659
+ self.post_init()
1660
+
1661
+ def get_input_embeddings(self) -> torch.nn.Module:
1662
+ return self.model.transformer.wte
1663
+
1664
+ def set_input_embeddings(self, value: torch.nn.Module) -> None:
1665
+ self.model.transformer.wte = value
1666
+
1667
+ def get_output_embeddings(self):
1668
+ return self.lm_head
1669
+
1670
+ def set_output_embeddings(self, value: torch.nn.Module) -> None:
1671
+ self.lm_head = value
1672
+
1673
+ # Make modules available through the conditional class for BC
1674
+ @property
1675
+ def language_model(self) -> torch.nn.Module:
1676
+ return self.model.transformer
1677
+
1678
+ @property
1679
+ def vision_backbone(self) -> torch.nn.Module:
1680
+ return self.model.vision_backbone
1681
+
1682
+ @can_return_tuple
1683
+ @add_start_docstrings_to_model_forward(MOLMO2_INPUTS_DOCSTRING)
1684
+ def forward(
1685
+ self,
1686
+ input_ids: torch.LongTensor = None,
1687
+ images: Optional[torch.Tensor] = None,
1688
+ image_masks: Optional[torch.Tensor] = None,
1689
+ pooled_patches_idx: Optional[torch.Tensor] = None,
1690
+ attention_mask: Optional[torch.Tensor] = None,
1691
+ position_ids: Optional[torch.LongTensor] = None,
1692
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1693
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1694
+ labels: Optional[torch.LongTensor] = None,
1695
+ use_cache: Optional[bool] = None,
1696
+ output_attentions: Optional[bool] = None,
1697
+ output_hidden_states: Optional[bool] = None,
1698
+ cache_position: Optional[torch.LongTensor] = None,
1699
+ logits_to_keep: Union[int, torch.Tensor] = 0,
1700
+ **kwargs,
1701
+ ) -> Union[Tuple, Molmo2CausalLMOutputWithPast]:
1702
+ r"""
1703
+ ```python
1704
+ >>> from PIL import Image
1705
+ >>> import requests
1706
+ >>> from transformers import AutoProcessor, Molmo2ForConditionalGeneration
1707
+
1708
+ >>> model = Molmo2ForConditionalGeneration.from_pretrained("...")
1709
+ >>> processor = AutoProcessor.from_pretrained("...")
1710
+
1711
+ >>> prompt = "What's the content of the image?"
1712
+ >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
1713
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1714
+
1715
+ >>> inputs = processor(images=image, text=prompt, apply_chat_template=True, return_tensors="pt")
1716
+
1717
+ >>> # Generate
1718
+ >>> generated_ids = model.generate(**inputs, max_new_tokens=15)
1719
+ >>> generated_tokens = generated_ids[:, inputs['input_ids'].size(1):]
1720
+ >>> processor.batch_decode(generated_tokens, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1721
+ "The image features a busy city street with a stop sign prominently displayed"
1722
+ ```"""
1723
+ outputs = self.model(
1724
+ input_ids=input_ids,
1725
+ images=images,
1726
+ image_masks=image_masks,
1727
+ pooled_patches_idx=pooled_patches_idx,
1728
+ attention_mask=attention_mask,
1729
+ position_ids=position_ids,
1730
+ past_key_values=past_key_values,
1731
+ inputs_embeds=inputs_embeds,
1732
+ use_cache=use_cache,
1733
+ output_attentions=output_attentions,
1734
+ output_hidden_states=output_hidden_states,
1735
+ cache_position=cache_position,
1736
+ )
1737
+
1738
+ hidden_states = outputs.last_hidden_state
1739
+ slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
1740
+ logits = self.lm_head(hidden_states[:, slice_indices, :])
1741
+
1742
+ loss = None
1743
+ if labels is not None:
1744
+ loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.vocab_size)
1745
+
1746
+ return Molmo2CausalLMOutputWithPast(
1747
+ loss=loss,
1748
+ logits=logits,
1749
+ past_key_values=outputs.past_key_values,
1750
+ hidden_states=outputs.hidden_states,
1751
+ attentions=outputs.attentions,
1752
+ image_hidden_states=outputs.image_hidden_states,
1753
+ )
1754
+
1755
+ def prepare_inputs_for_generation(
1756
+ self,
1757
+ input_ids: torch.LongTensor,
1758
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1759
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1760
+ images: Optional[torch.FloatTensor] = None,
1761
+ image_masks: Optional[torch.Tensor] = None,
1762
+ pooled_patches_idx: Optional[torch.Tensor] = None,
1763
+ attention_mask: Optional[torch.Tensor] = None,
1764
+ cache_position: Optional[torch.LongTensor] = None,
1765
+ logits_to_keep: Optional[Union[int, torch.Tensor]] = None,
1766
+ **kwargs,
1767
+ ):
1768
+
1769
+ model_inputs = super().prepare_inputs_for_generation(
1770
+ input_ids,
1771
+ past_key_values=past_key_values,
1772
+ inputs_embeds=inputs_embeds,
1773
+ attention_mask=attention_mask,
1774
+ cache_position=cache_position,
1775
+ logits_to_keep=logits_to_keep,
1776
+ **kwargs,
1777
+ )
1778
+
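+ # Images are only needed for the prefill step (cache_position starts at 0); later
+ # decoding steps rely on the image tokens already stored in the KV cache.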
1779
+ if cache_position[0] == 0:
1780
+ model_inputs["images"] = images
1781
+ model_inputs["pooled_patches_idx"] = pooled_patches_idx
1782
+ model_inputs["image_masks"] = image_masks
1783
+
1784
+ return model_inputs
1785
+
1786
+ def _update_model_kwargs_for_generation(
1787
+ self,
1788
+ outputs: ModelOutput,
1789
+ model_kwargs: Dict[str, Any],
1790
+ is_encoder_decoder: bool = False,
1791
+ num_new_tokens: int = 1,
1792
+ ) -> Dict[str, Any]:
1793
+ if model_kwargs["use_cache"] and "images" in model_kwargs:
1794
+ # After the first step, do not pass the images into forward anymore since the image tokens
1795
+ # are already cached
1796
+ for k in ["images", "image_masks", "pooled_patches_idx"]:
1797
+ del model_kwargs[k]
1798
+ return super()._update_model_kwargs_for_generation(outputs, model_kwargs, is_encoder_decoder, num_new_tokens)
1799
+
1800
+ @staticmethod
1801
+ def _prepare_4d_causal_attention_mask_with_cache_position(
1802
+ attention_mask: torch.Tensor,
1803
+ sequence_length: int,
1804
+ target_length: int,
1805
+ dtype: torch.dtype,
1806
+ cache_position: torch.Tensor,
1807
+ batch_size: int,
1808
+ **kwargs,
1809
+ ):
1810
+ """
1811
+ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
1812
+ `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
1813
+
1814
+ Args:
1815
+ attention_mask (`torch.Tensor`):
1816
+ A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
1817
+ `(batch_size, 1, query_length, key_value_length)`.
1818
+ sequence_length (`int`):
1819
+ The sequence length being processed.
1820
+ target_length (`int`):
1821
+ The target length: when generating with static cache, the mask should be as long as the static cache,
1822
+ to account for the 0 padding, the part of the cache that is not filled yet.
1823
+ dtype (`torch.dtype`):
1824
+ The dtype to use for the 4D attention mask.
1825
+ cache_position (`torch.Tensor`):
1826
+ Indices depicting the position of the input sequence tokens in the sequence.
1827
+ batch_size (`torch.Tensor`):
1828
+ Batch size.
1829
+ """
1830
+ if attention_mask is not None and attention_mask.dim() == 4:
1831
+ # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
1832
+ causal_mask = attention_mask
1833
+ else:
1834
+ min_dtype = torch.finfo(dtype).min
1835
+ causal_mask = torch.full(
1836
+ (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
1837
+ )
1838
+ if sequence_length != 1:
1839
+ causal_mask = torch.triu(causal_mask, diagonal=1)
1840
+ causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
1841
+ causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
1842
+ if attention_mask is not None:
1843
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
1844
+ mask_length = attention_mask.shape[-1]
1845
+ padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
1846
+ causal_mask.device
1847
+ )
1848
+ padding_mask = padding_mask == 0
1849
+ causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
1850
+ padding_mask, min_dtype
1851
+ )
1852
+
1853
+ return causal_mask
1854
+
1855
+
1856
+ # Always register for multi-modal features
1857
+ AutoModelForImageTextToText.register(Molmo2Config, Molmo2ForConditionalGeneration)
1858
+ AutoModelForCausalLM.register(Molmo2LlmConfig, Molmo2ForCausalLM)
preprocessor_config.json ADDED
@@ -0,0 +1,27 @@
1
+ {
2
+ "auto_map": {
3
+ "AutoImageProcessor": "image_processing_molmo2.Molmo2ImageProcessor",
4
+ "AutoProcessor": "processing_molmo2.Molmo2Processor"
5
+ },
6
+ "base_image_input_size": [
7
+ 378,
8
+ 378
9
+ ],
10
+ "crop_mode": "overlap-and-resize-c2",
11
+ "do_convert_rgb": true,
12
+ "do_pad": true,
13
+ "image_patch_size": 14,
14
+ "image_pooling_h": 2,
15
+ "image_pooling_w": 2,
16
+ "image_processor_type": "Molmo2ImageProcessor",
17
+ "max_crops": 8,
18
+ "max_multi_image_crops": 8,
19
+ "normalize_mode": "siglip",
20
+ "overlap_margins": [
21
+ 4,
22
+ 4
23
+ ],
24
+ "pad_value": 0.0,
25
+ "processor_class": "Molmo2Processor",
26
+ "resize_mode": "siglip"
27
+ }
processing_molmo2.py ADDED
@@ -0,0 +1,465 @@
1
+ """
2
+ Processor class for Molmo2.
3
+ """
4
+ from typing import List, Optional, Union, Dict, Tuple
5
+
6
+ import PIL
7
+ from PIL import ImageFile, ImageOps
8
+
9
+ try:
10
+ from typing import Unpack
11
+ except ImportError:
12
+ from typing_extensions import Unpack
13
+
14
+ import numpy as np
15
+ import torch
16
+
17
+ from transformers.image_utils import ImageInput
18
+ from transformers.processing_utils import (
19
+ ProcessingKwargs,
20
+ ProcessorMixin,
21
+ )
22
+ from transformers.feature_extraction_utils import BatchFeature
23
+ from transformers.tokenization_utils_base import TextInput, PreTokenizedInput
24
+ from transformers.utils import logging
25
+
26
+ from transformers import AutoTokenizer
27
+ from .image_processing_molmo2 import Molmo2ImagesKwargs, Molmo2ImageProcessor
28
+
29
+
30
+ logger = logging.get_logger(__name__)
31
+
32
+
33
+ # Special tokens; these must be present in any tokenizer we use since the preprocessor relies on them
34
+ IMAGE_PATCH_TOKEN = "<im_patch>"  # Where to insert high-res tokens
35
+ IMAGE_LOW_RES_TOKEN = "<im_low>"  # Where to insert low-res tokens
36
+ IM_START_TOKEN = "<im_start>"
37
+ IM_END_TOKEN = "<im_end>"
38
+ IM_COL_TOKEN = "<im_col>"
39
+ IMAGE_PROMPT = "<|image|>"
40
+
41
+ EXTRA_TOKENS = (IM_START_TOKEN, IM_END_TOKEN, IMAGE_PATCH_TOKEN,
42
+ IM_COL_TOKEN, IMAGE_PROMPT, IMAGE_LOW_RES_TOKEN)
43
+
44
+
45
+ DEMO_STYLES = [
46
+ "point_count",
47
+ "pointing",
48
+ "cosyn_point",
49
+ "user_qa",
50
+ "long_caption",
51
+ "short_caption",
52
+ "video_long_caption",
53
+ "video_short_caption",
54
+ "correction_qa",
55
+ "demo",
56
+ "android_control",
57
+ ]
58
+
59
+
60
+ def setup_pil():
61
+ PIL.Image.MAX_IMAGE_PIXELS = None
62
+ ImageFile.LOAD_TRUNCATED_IMAGES = True
63
+
64
+
65
+ def get_special_token_ids(tokenizer: AutoTokenizer) -> Dict[str, int]:
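+ # Encode all special tokens in one pass; each must map to exactly one id, which the
+ # assert below enforces.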
66
+ ids = tokenizer.encode("".join(EXTRA_TOKENS), add_special_tokens=False)
67
+ assert len(ids) == len(EXTRA_TOKENS)
68
+ return {k: i for k, i in zip(EXTRA_TOKENS, ids)}
69
+
70
+
71
+ def load_image(image: Union[PIL.Image.Image, np.ndarray]) -> np.ndarray:
72
+ """Load image"""
73
+ setup_pil()
74
+ if isinstance(image, PIL.Image.Image):
75
+ image = image.convert("RGB")
76
+ image = ImageOps.exif_transpose(image)
77
+ return np.array(image)
78
+ elif isinstance(image, np.ndarray):
79
+ assert len(image.shape) == 3, "Image should have 3 dimensions"
80
+ assert image.shape[2] == 3, "Image should have 3 channels"
81
+ assert image.dtype == np.uint8, "Image should have uint8 type"
82
+ return image
83
+ else:
84
+ raise ValueError("Image should be PIL.Image or np.ndarray")
85
+
86
+
87
+ class Molmo2ProcessorKwargs(ProcessingKwargs, total=False):
88
+ """Molmo2 processor kwargs"""
89
+ images_kwargs: Molmo2ImagesKwargs
90
+ _defaults = {
91
+ "text_kwargs": {
92
+ "padding": False,
93
+ },
94
+ }
95
+
96
+
97
+ class Molmo2Processor(ProcessorMixin):
98
+ attributes = ["image_processor", "tokenizer"]
99
+ optional_attributes = [
100
+ "chat_template",
101
+ "prompt_templates",
102
+ "message_format",
103
+ "system_prompt",
104
+ "style",
105
+ "always_start_with_space",
106
+ "default_inference_len",
107
+ "use_col_tokens",
108
+ "image_padding_mask",
109
+ ]
110
+ image_processor_class = "AutoImageProcessor"
111
+ tokenizer_class = "AutoTokenizer"
112
+
113
+ def __init__(
114
+ self,
115
+ image_processor: Molmo2ImageProcessor = None,
116
+ tokenizer: AutoTokenizer = None,
117
+ chat_template: Optional[str] = None,
118
+ prompt_templates: Optional[str] = "uber_model",
119
+ message_format: Optional[str] = "role",
120
+ system_prompt: Optional[str] = "demo_or_style",
121
+ style: Optional[str] = "demo",
122
+ always_start_with_space: Optional[bool] = False,
123
+ default_inference_len: Optional[int] = 65,
124
+ use_col_tokens: Optional[bool] = True,
125
+ image_padding_mask: bool = False,
126
+ **kwargs
127
+ ) -> None:
128
+ if tokenizer.padding_side != "left":
129
+ logger.warning(f"Tokenizer {tokenizer.name_or_path} is not left-padded, padding side will be set to left")
130
+ tokenizer.padding_side = "left" # type: ignore
131
+ super().__init__(
132
+ image_processor,
133
+ tokenizer,
134
+ chat_template=chat_template,
135
+ prompt_templates=prompt_templates,
136
+ message_format=message_format,
137
+ system_prompt=system_prompt,
138
+ style=style,
139
+ always_start_with_space=always_start_with_space,
140
+ default_inference_len=default_inference_len,
141
+ use_col_tokens=use_col_tokens,
142
+ image_padding_mask=image_padding_mask,
143
+ )
144
+ self._special_tokens = None
145
+
146
+ @property
147
+ def special_token_ids(self):
148
+ if self._special_tokens is None:
149
+ self._special_tokens = get_special_token_ids(self.tokenizer)
150
+ return self._special_tokens
151
+
152
+ def get_user_prompt(self, text: TextInput) -> str:
153
+ """Get user prompt"""
154
+ if self.prompt_templates == "none":
155
+ return ""
156
+ elif self.prompt_templates == "uber_model":
157
+ return text
158
+ else:
159
+ raise NotImplementedError(self.prompt_templates)
160
+
161
+ def get_prefix(self) -> str:
162
+ """Get prefix"""
163
+ if self.system_prompt == "style_and_length": # captioner
164
+ assert self.style in ["long_caption"]
165
+ style = self.style
166
+ n = None if self.default_inference_len is None else str(self.default_inference_len)
167
+ if n is not None and len(n) > 0: # allow empty string to signal unconditioned
168
+ prefix = style + " " + n + ":"
169
+ else:
170
+ prefix = style + " :"
171
+ elif self.system_prompt == "demo_or_style": # demo model
172
+ if self.style in DEMO_STYLES:
173
+ prefix = ""
174
+ else:
175
+ prefix = self.style + ":"
176
+ else:
177
+ raise NotImplementedError(self.system_prompt)
178
+ return prefix
179
+
180
+ def format_prompt(self, prompt: str) -> str:
181
+ """Format prompt"""
182
+ if self.message_format == "none":
183
+ pass
184
+ elif self.message_format == "role":
185
+ prompt = "User: " + prompt + " Assistant:"
186
+ else:
187
+ raise NotImplementedError(self.message_format)
188
+
189
+ if self.always_start_with_space:
190
+ prompt = " " + prompt
191
+
192
+ return prompt
193
+
194
+ def get_prompt(self, text: TextInput) -> str:
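+ # Illustrative example (not from the original source): with the defaults message_format="role",
+ # system_prompt="demo_or_style" and style="demo", get_prompt("Describe the image") returns
+ # "User: Describe the image Assistant:" (the "demo" style adds no prefix).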
195
+ prompt = self.get_user_prompt(text)
196
+ if self.system_prompt and self.system_prompt != "none":
197
+ prefix = self.get_prefix()
198
+ if len(prefix) > 0 and len(prompt) > 0:
199
+ prompt = prefix + " " + prompt
200
+ elif len(prefix) > 0:
201
+ prompt = prefix
202
+ prompt = self.format_prompt(prompt)
203
+ return prompt
204
+
205
+ def get_image_tokens(self, image_grid: np.ndarray):
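+ # Illustrative example (not from the original source): for image_grid [[2, 3]] with use_col_tokens=True,
+ # this returns [<im_start>, <im_patch> x3, <im_col>, <im_patch> x3, <im_col>, <im_end>] as a flat array.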
206
+ joint = []
207
+ for h, w in image_grid:
208
+ per_row = np.full(w, IMAGE_PATCH_TOKEN)
209
+ if self.use_col_tokens:
210
+ per_row = np.concatenate([per_row, [IM_COL_TOKEN]], 0)
211
+ extra_tokens = np.tile(per_row, [h])
212
+ joint += [
213
+ [IM_START_TOKEN],
214
+ extra_tokens,
215
+ [IM_END_TOKEN],
216
+ ]
217
+ return np.concatenate(joint)
218
+
219
+ def insert_bos_numpy(
220
+ self,
221
+ input_ids: np.ndarray,
222
+ attention_mask: np.ndarray,
223
+ bos_token_id: int,
224
+ pad_token_id: int,
225
+ ):
226
+ """
227
+ Args:
228
+ input_ids: [B, S] array with left padding
229
+ attention_mask: [B, S] array (0 for pad, 1 for valid)
230
+ bos_token_id: int
231
+ pad_token_id: int
232
+ Returns:
233
+ input_ids_out: [B, S] or [B, S+1] array with bos inserted if needed
234
+ attention_mask_out: same shape as input_ids_out
235
+ """
236
+
237
+ need_to_expand = len(input_ids.shape) == 1
238
+ if need_to_expand:
239
+ input_ids = input_ids[None, :]
240
+ attention_mask = attention_mask[None, :]
241
+
242
+ B, S = input_ids.shape
243
+
244
+ # Handle zero-length sequence
245
+ if S == 0:
246
+ new_input_ids = np.full((B, 1), bos_token_id, dtype=input_ids.dtype)
247
+ new_attention_mask = np.ones((B, 1), dtype=attention_mask.dtype)
248
+ if need_to_expand:
249
+ new_input_ids = new_input_ids[0]
250
+ new_attention_mask = new_attention_mask[0]
251
+ return new_input_ids, new_attention_mask
252
+
253
+ first_valid_index = (attention_mask == 1).argmax(axis=-1) # [B]
254
+ bos_already_present = np.all(input_ids[np.arange(B), first_valid_index] == bos_token_id)
255
+
256
+ if bos_already_present:
257
+ if need_to_expand:
258
+ input_ids = input_ids[0]
259
+ attention_mask = attention_mask[0]
260
+ return input_ids, attention_mask
261
+ else:
262
+ new_input_ids = np.full((B, S+1), pad_token_id, dtype=input_ids.dtype)
263
+ new_attention_mask = np.zeros((B, S+1), dtype=attention_mask.dtype)
264
+
265
+ src_idx = np.tile(np.arange(S), (B, 1)) # [B, S]
266
+ valid_mask = src_idx >= first_valid_index[:, None] # [B, S]
267
+ tgt_idx = src_idx + 1 # shift right
268
+ batch_idx = np.tile(np.arange(B)[:, None], (1, S)) # [B, S]
269
+
270
+ # flatten valid_positions
271
+ flat_vals = input_ids[valid_mask]
272
+ flat_batch = batch_idx[valid_mask]
273
+ flat_tgt = tgt_idx[valid_mask]
274
+
275
+ new_input_ids[flat_batch, flat_tgt] = flat_vals
276
+ new_attention_mask[flat_batch, flat_tgt] = 1
277
+
278
+ insert_pos = first_valid_index
279
+ new_input_ids[np.arange(B), insert_pos] = bos_token_id
280
+ new_attention_mask[np.arange(B), insert_pos] = 1
281
+
282
+ if need_to_expand:
283
+ new_input_ids = new_input_ids[0]
284
+ new_attention_mask = new_attention_mask[0]
285
+
286
+ return new_input_ids, new_attention_mask
287
+
288
+ def insert_bos_torch(
289
+ self,
290
+ input_ids: torch.Tensor,
291
+ attention_mask: torch.Tensor,
292
+ bos_token_id: int,
293
+ pad_token_id: int,
294
+ ):
295
+ """
296
+ Args:
297
+ input_ids: [B, S] tensor with left padding
298
+ attention_mask: [B, S] tensor (0 for pad, 1 for valid)
299
+ bos_token_id: int
300
+ pad_token_id: int
301
+ Returns:
302
+ input_ids_out: [B, S] or [B, S+1] tensor with bos inserted if needed
303
+ attention_mask_out: same shape as input_ids_out
304
+ """
305
+
306
+ B, S = input_ids.shape
307
+ device = input_ids.device
308
+
309
+ # Handle zero-length sequence
310
+ if S == 0:
311
+ new_input_ids = torch.full((B, 1), bos_token_id, dtype=input_ids.dtype, device=device)
312
+ new_attention_mask = torch.ones((B, 1), dtype=attention_mask.dtype, device=device)
313
+ return new_input_ids, new_attention_mask
314
+
315
+ first_valid_index = (attention_mask == 1).long().argmax(dim=-1) # [B]
316
+ bos_already_present = (input_ids[torch.arange(B), first_valid_index] == bos_token_id).all()
317
+
318
+ if bos_already_present:
319
+ return input_ids, attention_mask
320
+ else:
321
+ new_input_ids = torch.full((B, S+1), pad_token_id, dtype=input_ids.dtype, device=device)
322
+ new_attention_mask = torch.zeros((B, S+1), dtype=attention_mask.dtype, device=device)
323
+
324
+ src_idx = torch.arange(S, device=device).expand(B, S) # [B, S]
325
+ valid_mask = src_idx >= first_valid_index.unsqueeze(1) # [B, S]
326
+ tgt_idx = src_idx + 1 # shift right
327
+ batch_idx = torch.arange(B, device=device).unsqueeze(1).expand_as(src_idx)
328
+
329
+ flat_vals = input_ids[valid_mask]
330
+ flat_batch = batch_idx[valid_mask]
331
+ flat_tgt = tgt_idx[valid_mask]
332
+
333
+ new_input_ids[flat_batch, flat_tgt] = flat_vals
334
+ new_attention_mask[flat_batch, flat_tgt] = 1
335
+
336
+ insert_pos = first_valid_index
337
+ batch_indices = torch.arange(B, device=device)
338
+ new_input_ids[batch_indices, insert_pos] = bos_token_id
339
+ new_attention_mask[batch_indices, insert_pos] = 1
340
+
341
+ return new_input_ids, new_attention_mask
342
+
343
+ def __call__(
344
+ self,
345
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
346
+ images: Union[ImageInput, List[ImageInput]] = None,
347
+ apply_chat_template: bool = False,
348
+ **kwargs: Unpack[Molmo2ProcessorKwargs],
349
+ ) -> BatchFeature:
350
+ if images is None and text is None:
351
+ raise ValueError("You have to specify at least one of `images` or `text`.")
352
+
353
+ output_kwargs = self._merge_kwargs(
354
+ Molmo2ProcessorKwargs,
355
+ tokenizer_init_kwargs=self.tokenizer.init_kwargs,
356
+ **kwargs,
357
+ )
358
+
359
+ if isinstance(text, (list, tuple)) and isinstance(images, (list, tuple)):
360
+ if len(text) != len(images):
361
+ raise ValueError("You have to provide the same number of text and images")
362
+ if len(text) > 1 and not output_kwargs["text_kwargs"].get("padding", False):
363
+ raise ValueError("You have to specify padding when you have multiple text inputs")
364
+
365
+ if isinstance(text, str):
366
+ text = [text]
367
+ elif not isinstance(text, (list, tuple)) or not isinstance(text[0], str):
368
+ raise ValueError("Invalid input text. Please provide a string, or a list of strings")
369
+
370
+ if images is not None:
371
+ image_inputs = self.image_processor(images, **output_kwargs["images_kwargs"])
372
+ else:
373
+ image_inputs = {}
374
+
375
+ if apply_chat_template:
376
+ text = [self.get_prompt(t) for t in text]
377
+
378
+ prompt_strings = text
379
+ if image_inputs.get("images", None) is not None:
380
+
381
+ prompt_strings = []
382
+ for idx, image_grids in enumerate(image_inputs.pop("image_grids")):
383
+ if isinstance(image_grids, torch.Tensor):
384
+ image_grids = image_grids.cpu().numpy()
385
+ if isinstance(images, (list, tuple)) and isinstance(images[idx], (list, tuple)):
386
+ image_grids = image_grids[~np.all(image_grids == -1, axis=-1)]
387
+ offset = 2 if len(images[idx]) < len(image_grids) else 1 # whether to use both low and high res images
388
+ all_image_strings = []
389
+ for i in range(0, len(image_grids), offset):
390
+ image_grids_i = image_grids[i:i+offset]
391
+ image_tokens = self.get_image_tokens(image_grids_i)
392
+ img_ix = i // offset
393
+ all_image_strings.append(f"Image {img_ix + 1}" + "".join(image_tokens))
394
+ image_string = "".join(all_image_strings)
395
+ prompt_strings.append(image_string + text[idx])
396
+ else:
397
+ image_grids = image_grids[~np.all(image_grids == -1, axis=-1)]
398
+ assert len(image_grids) in [1, 2], "Only one or two crops are supported for single image inputs"
399
+ image_tokens = self.get_image_tokens(image_grids)
400
+ image_string = "".join(image_tokens)
401
+ prompt_strings.append(image_string + text[idx])
402
+
403
+ text_inputs = self.tokenizer(prompt_strings, **output_kwargs["text_kwargs"])
404
+
405
+ input_ids = text_inputs["input_ids"]
406
+ attention_mask = text_inputs["attention_mask"]
407
+
408
+ is_list = isinstance(input_ids, (list, tuple))
409
+ if is_list:
410
+ input_ids = np.array(input_ids)
411
+ attention_mask = np.array(attention_mask)
412
+
413
+ use_numpy = isinstance(attention_mask, np.ndarray)
414
+
415
+ if use_numpy and np.issubdtype(input_ids.dtype, np.floating):
416
+ input_ids = input_ids.astype(np.int64)
417
+ attention_mask = attention_mask.astype(np.int64)
418
+ elif not use_numpy and torch.is_floating_point(input_ids):
419
+ input_ids = input_ids.to(torch.int64)
420
+ attention_mask = attention_mask.to(torch.int64)
421
+
422
+ bos = self.tokenizer.bos_token_id or self.tokenizer.eos_token_id
423
+ if use_numpy:
424
+ input_ids, attention_mask = self.insert_bos_numpy(
425
+ input_ids, attention_mask, bos, self.tokenizer.pad_token_id
426
+ )
427
+ else:
428
+ input_ids, attention_mask = self.insert_bos_torch(
429
+ input_ids, attention_mask, bos, self.tokenizer.pad_token_id
430
+ )
431
+ if is_list:
432
+ input_ids = input_ids.tolist() # type: ignore
433
+ attention_mask = attention_mask.tolist() # type: ignore
434
+ text_inputs["input_ids"] = input_ids
435
+ text_inputs["attention_mask"] = attention_mask
436
+
437
+ if kwargs.get("device", None) is not None:
438
+ text_inputs = text_inputs.to(device=kwargs.get("device"), non_blocking=True)
439
+ # note: the Qwen tokenizer has no bos token, which is why eos_token_id is used as a bos fallback above
440
+ return BatchFeature(
441
+ data={**text_inputs, **image_inputs}, tensor_type=output_kwargs["common_kwargs"]["return_tensors"]
442
+ )
443
+
444
+ def batch_decode(self, *args, **kwargs):
445
+ """
446
+ This method forwards all its arguments to the underlying tokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please
447
+ refer to the docstring of this method for more information.
448
+ """
449
+ return self.tokenizer.batch_decode(*args, **kwargs)
450
+
451
+ def decode(self, *args, **kwargs):
452
+ """
453
+ This method forwards all its arguments to the underlying tokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to
454
+ the docstring of this method for more information.
455
+ """
456
+ return self.tokenizer.decode(*args, **kwargs)
457
+
458
+ @property
459
+ def model_input_names(self):
460
+ tokenizer_input_names = self.tokenizer.model_input_names
461
+ image_processor_input_names = self.image_processor.model_input_names
462
+ return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
463
+
464
+
465
+ Molmo2Processor.register_for_auto_class()
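+ # Usage sketch (the repo id below is a placeholder, not taken from this upload; pil_image is any PIL.Image):
+ # from transformers import AutoProcessor
+ # processor = AutoProcessor.from_pretrained("<this-repo-id>", trust_remote_code=True)
+ # inputs = processor(text="Describe this image.", images=pil_image, apply_chat_template=True, return_tensors="pt")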
processor_config.json ADDED
@@ -0,0 +1,14 @@
1
+ {
2
+ "always_start_with_space": false,
3
+ "auto_map": {
4
+ "AutoProcessor": "processing_molmo2.Molmo2Processor"
5
+ },
6
+ "default_inference_len": 65,
7
+ "image_padding_mask": false,
8
+ "message_format": "role",
9
+ "processor_class": "Molmo2Processor",
10
+ "prompt_templates": "uber_model",
11
+ "style": "demo_role",
12
+ "system_prompt": "demo_or_style",
13
+ "use_col_tokens": true
14
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,1944 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ {
4
+ "content": "|<EXTRA_TOKENS_0>|",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false
9
+ },
10
+ {
11
+ "content": "|<EXTRA_TOKENS_1>|",
12
+ "lstrip": false,
13
+ "normalized": false,
14
+ "rstrip": false,
15
+ "single_word": false
16
+ },
17
+ {
18
+ "content": "|<EXTRA_TOKENS_2>|",
19
+ "lstrip": false,
20
+ "normalized": false,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ },
24
+ {
25
+ "content": "|<EXTRA_TOKENS_3>|",
26
+ "lstrip": false,
27
+ "normalized": false,
28
+ "rstrip": false,
29
+ "single_word": false
30
+ },
31
+ {
32
+ "content": "|<EXTRA_TOKENS_4>|",
33
+ "lstrip": false,
34
+ "normalized": false,
35
+ "rstrip": false,
36
+ "single_word": false
37
+ },
38
+ {
39
+ "content": "|<EXTRA_TOKENS_5>|",
40
+ "lstrip": false,
41
+ "normalized": false,
42
+ "rstrip": false,
43
+ "single_word": false
44
+ },
45
+ {
46
+ "content": "|<EXTRA_TOKENS_6>|",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false
51
+ },
52
+ {
53
+ "content": "|<EXTRA_TOKENS_7>|",
54
+ "lstrip": false,
55
+ "normalized": false,
56
+ "rstrip": false,
57
+ "single_word": false
58
+ },
59
+ {
60
+ "content": "|<EXTRA_TOKENS_8>|",
61
+ "lstrip": false,
62
+ "normalized": false,
63
+ "rstrip": false,
64
+ "single_word": false
65
+ },
66
+ {
67
+ "content": "|<EXTRA_TOKENS_9>|",
68
+ "lstrip": false,
69
+ "normalized": false,
70
+ "rstrip": false,
71
+ "single_word": false
72
+ },
73
+ {
74
+ "content": "|<EXTRA_TOKENS_10>|",
75
+ "lstrip": false,
76
+ "normalized": false,
77
+ "rstrip": false,
78
+ "single_word": false
79
+ },
80
+ {
81
+ "content": "|<EXTRA_TOKENS_11>|",
82
+ "lstrip": false,
83
+ "normalized": false,
84
+ "rstrip": false,
85
+ "single_word": false
86
+ },
87
+ {
88
+ "content": "|<EXTRA_TOKENS_12>|",
89
+ "lstrip": false,
90
+ "normalized": false,
91
+ "rstrip": false,
92
+ "single_word": false
93
+ },
94
+ {
95
+ "content": "|<EXTRA_TOKENS_13>|",
96
+ "lstrip": false,
97
+ "normalized": false,
98
+ "rstrip": false,
99
+ "single_word": false
100
+ },
101
+ {
102
+ "content": "|<EXTRA_TOKENS_14>|",
103
+ "lstrip": false,
104
+ "normalized": false,
105
+ "rstrip": false,
106
+ "single_word": false
107
+ },
108
+ {
109
+ "content": "|<EXTRA_TOKENS_15>|",
110
+ "lstrip": false,
111
+ "normalized": false,
112
+ "rstrip": false,
113
+ "single_word": false
114
+ },
115
+ {
116
+ "content": "|<EXTRA_TOKENS_16>|",
117
+ "lstrip": false,
118
+ "normalized": false,
119
+ "rstrip": false,
120
+ "single_word": false
121
+ },
122
+ {
123
+ "content": "|<EXTRA_TOKENS_17>|",
124
+ "lstrip": false,
125
+ "normalized": false,
126
+ "rstrip": false,
127
+ "single_word": false
128
+ },
129
+ {
130
+ "content": "|<EXTRA_TOKENS_18>|",
131
+ "lstrip": false,
132
+ "normalized": false,
133
+ "rstrip": false,
134
+ "single_word": false
135
+ },
136
+ {
137
+ "content": "|<EXTRA_TOKENS_19>|",
138
+ "lstrip": false,
139
+ "normalized": false,
140
+ "rstrip": false,
141
+ "single_word": false
142
+ },
143
+ {
144
+ "content": "|<EXTRA_TOKENS_20>|",
145
+ "lstrip": false,
146
+ "normalized": false,
147
+ "rstrip": false,
148
+ "single_word": false
149
+ },
150
+ {
151
+ "content": "|<EXTRA_TOKENS_21>|",
152
+ "lstrip": false,
153
+ "normalized": false,
154
+ "rstrip": false,
155
+ "single_word": false
156
+ },
157
+ {
158
+ "content": "|<EXTRA_TOKENS_22>|",
159
+ "lstrip": false,
160
+ "normalized": false,
161
+ "rstrip": false,
162
+ "single_word": false
163
+ },
164
+ {
165
+ "content": "|<EXTRA_TOKENS_23>|",
166
+ "lstrip": false,
167
+ "normalized": false,
168
+ "rstrip": false,
169
+ "single_word": false
170
+ },
171
+ {
172
+ "content": "|<EXTRA_TOKENS_24>|",
173
+ "lstrip": false,
174
+ "normalized": false,
175
+ "rstrip": false,
176
+ "single_word": false
177
+ },
178
+ {
179
+ "content": "|<EXTRA_TOKENS_25>|",
180
+ "lstrip": false,
181
+ "normalized": false,
182
+ "rstrip": false,
183
+ "single_word": false
184
+ },
185
+ {
186
+ "content": "|<EXTRA_TOKENS_26>|",
187
+ "lstrip": false,
188
+ "normalized": false,
189
+ "rstrip": false,
190
+ "single_word": false
191
+ },
192
+ {
193
+ "content": "|<EXTRA_TOKENS_27>|",
194
+ "lstrip": false,
195
+ "normalized": false,
196
+ "rstrip": false,
197
+ "single_word": false
198
+ },
199
+ {
200
+ "content": "|<EXTRA_TOKENS_28>|",
201
+ "lstrip": false,
202
+ "normalized": false,
203
+ "rstrip": false,
204
+ "single_word": false
205
+ },
206
+ {
207
+ "content": "|<EXTRA_TOKENS_29>|",
208
+ "lstrip": false,
209
+ "normalized": false,
210
+ "rstrip": false,
211
+ "single_word": false
212
+ },
213
+ {
214
+ "content": "|<EXTRA_TOKENS_30>|",
215
+ "lstrip": false,
216
+ "normalized": false,
217
+ "rstrip": false,
218
+ "single_word": false
219
+ },
220
+ {
221
+ "content": "|<EXTRA_TOKENS_31>|",
222
+ "lstrip": false,
223
+ "normalized": false,
224
+ "rstrip": false,
225
+ "single_word": false
226
+ },
227
+ {
228
+ "content": "|<EXTRA_TOKENS_32>|",
229
+ "lstrip": false,
230
+ "normalized": false,
231
+ "rstrip": false,
232
+ "single_word": false
233
+ },
234
+ {
235
+ "content": "|<EXTRA_TOKENS_33>|",
236
+ "lstrip": false,
237
+ "normalized": false,
238
+ "rstrip": false,
239
+ "single_word": false
240
+ },
241
+ {
242
+ "content": "|<EXTRA_TOKENS_34>|",
243
+ "lstrip": false,
244
+ "normalized": false,
245
+ "rstrip": false,
246
+ "single_word": false
247
+ },
248
+ {
249
+ "content": "|<EXTRA_TOKENS_35>|",
250
+ "lstrip": false,
251
+ "normalized": false,
252
+ "rstrip": false,
253
+ "single_word": false
254
+ },
255
+ {
256
+ "content": "|<EXTRA_TOKENS_36>|",
257
+ "lstrip": false,
258
+ "normalized": false,
259
+ "rstrip": false,
260
+ "single_word": false
261
+ },
262
+ {
263
+ "content": "|<EXTRA_TOKENS_37>|",
264
+ "lstrip": false,
265
+ "normalized": false,
266
+ "rstrip": false,
267
+ "single_word": false
268
+ },
269
+ {
270
+ "content": "|<EXTRA_TOKENS_38>|",
271
+ "lstrip": false,
272
+ "normalized": false,
273
+ "rstrip": false,
274
+ "single_word": false
275
+ },
276
+ {
277
+ "content": "|<EXTRA_TOKENS_39>|",
278
+ "lstrip": false,
279
+ "normalized": false,
280
+ "rstrip": false,
281
+ "single_word": false
282
+ },
283
+ {
284
+ "content": "|<EXTRA_TOKENS_40>|",
285
+ "lstrip": false,
286
+ "normalized": false,
287
+ "rstrip": false,
288
+ "single_word": false
289
+ },
290
+ {
291
+ "content": "|<EXTRA_TOKENS_41>|",
292
+ "lstrip": false,
293
+ "normalized": false,
294
+ "rstrip": false,
295
+ "single_word": false
296
+ },
297
+ {
298
+ "content": "|<EXTRA_TOKENS_42>|",
299
+ "lstrip": false,
300
+ "normalized": false,
301
+ "rstrip": false,
302
+ "single_word": false
303
+ },
304
+ {
305
+ "content": "|<EXTRA_TOKENS_43>|",
306
+ "lstrip": false,
307
+ "normalized": false,
308
+ "rstrip": false,
309
+ "single_word": false
310
+ },
311
+ {
312
+ "content": "|<EXTRA_TOKENS_44>|",
313
+ "lstrip": false,
314
+ "normalized": false,
315
+ "rstrip": false,
316
+ "single_word": false
317
+ },
318
+ {
319
+ "content": "|<EXTRA_TOKENS_45>|",
320
+ "lstrip": false,
321
+ "normalized": false,
322
+ "rstrip": false,
323
+ "single_word": false
324
+ },
325
+ {
326
+ "content": "|<EXTRA_TOKENS_46>|",
327
+ "lstrip": false,
328
+ "normalized": false,
329
+ "rstrip": false,
330
+ "single_word": false
331
+ },
332
+ {
333
+ "content": "|<EXTRA_TOKENS_47>|",
334
+ "lstrip": false,
335
+ "normalized": false,
336
+ "rstrip": false,
337
+ "single_word": false
338
+ },
339
+ {
340
+ "content": "|<EXTRA_TOKENS_48>|",
341
+ "lstrip": false,
342
+ "normalized": false,
343
+ "rstrip": false,
344
+ "single_word": false
345
+ },
346
+ {
347
+ "content": "|<EXTRA_TOKENS_49>|",
348
+ "lstrip": false,
349
+ "normalized": false,
350
+ "rstrip": false,
351
+ "single_word": false
352
+ },
353
+ {
354
+ "content": "|<EXTRA_TOKENS_50>|",
355
+ "lstrip": false,
356
+ "normalized": false,
357
+ "rstrip": false,
358
+ "single_word": false
359
+ },
360
+ {
361
+ "content": "|<EXTRA_TOKENS_51>|",
362
+ "lstrip": false,
363
+ "normalized": false,
364
+ "rstrip": false,
365
+ "single_word": false
366
+ },
367
+ {
368
+ "content": "|<EXTRA_TOKENS_52>|",
369
+ "lstrip": false,
370
+ "normalized": false,
371
+ "rstrip": false,
372
+ "single_word": false
373
+ },
374
+ {
375
+ "content": "|<EXTRA_TOKENS_53>|",
376
+ "lstrip": false,
377
+ "normalized": false,
378
+ "rstrip": false,
379
+ "single_word": false
380
+ },
381
+ {
382
+ "content": "|<EXTRA_TOKENS_54>|",
383
+ "lstrip": false,
384
+ "normalized": false,
385
+ "rstrip": false,
386
+ "single_word": false
387
+ },
388
+ {
389
+ "content": "|<EXTRA_TOKENS_55>|",
390
+ "lstrip": false,
391
+ "normalized": false,
392
+ "rstrip": false,
393
+ "single_word": false
394
+ },
395
+ {
396
+ "content": "|<EXTRA_TOKENS_56>|",
397
+ "lstrip": false,
398
+ "normalized": false,
399
+ "rstrip": false,
400
+ "single_word": false
401
+ },
402
+ {
403
+ "content": "|<EXTRA_TOKENS_57>|",
404
+ "lstrip": false,
405
+ "normalized": false,
406
+ "rstrip": false,
407
+ "single_word": false
408
+ },
409
+ {
410
+ "content": "|<EXTRA_TOKENS_58>|",
411
+ "lstrip": false,
412
+ "normalized": false,
413
+ "rstrip": false,
414
+ "single_word": false
415
+ },
416
+ {
417
+ "content": "|<EXTRA_TOKENS_59>|",
418
+ "lstrip": false,
419
+ "normalized": false,
420
+ "rstrip": false,
421
+ "single_word": false
422
+ },
423
+ {
424
+ "content": "|<EXTRA_TOKENS_60>|",
425
+ "lstrip": false,
426
+ "normalized": false,
427
+ "rstrip": false,
428
+ "single_word": false
429
+ },
430
+ {
431
+ "content": "|<EXTRA_TOKENS_61>|",
432
+ "lstrip": false,
433
+ "normalized": false,
434
+ "rstrip": false,
435
+ "single_word": false
436
+ },
437
+ {
438
+ "content": "|<EXTRA_TOKENS_62>|",
439
+ "lstrip": false,
440
+ "normalized": false,
441
+ "rstrip": false,
442
+ "single_word": false
443
+ },
444
+ {
445
+ "content": "|<EXTRA_TOKENS_63>|",
446
+ "lstrip": false,
447
+ "normalized": false,
448
+ "rstrip": false,
449
+ "single_word": false
450
+ },
451
+ {
452
+ "content": "|<EXTRA_TOKENS_64>|",
453
+ "lstrip": false,
454
+ "normalized": false,
455
+ "rstrip": false,
456
+ "single_word": false
457
+ },
458
+ {
459
+ "content": "|<EXTRA_TOKENS_65>|",
460
+ "lstrip": false,
461
+ "normalized": false,
462
+ "rstrip": false,
463
+ "single_word": false
464
+ },
465
+ {
466
+ "content": "|<EXTRA_TOKENS_66>|",
467
+ "lstrip": false,
468
+ "normalized": false,
469
+ "rstrip": false,
470
+ "single_word": false
471
+ },
472
+ {
473
+ "content": "|<EXTRA_TOKENS_67>|",
474
+ "lstrip": false,
475
+ "normalized": false,
476
+ "rstrip": false,
477
+ "single_word": false
478
+ },
479
+ {
480
+ "content": "|<EXTRA_TOKENS_68>|",
481
+ "lstrip": false,
482
+ "normalized": false,
483
+ "rstrip": false,
484
+ "single_word": false
485
+ },
486
+ {
487
+ "content": "|<EXTRA_TOKENS_69>|",
488
+ "lstrip": false,
489
+ "normalized": false,
490
+ "rstrip": false,
491
+ "single_word": false
492
+ },
493
+ {
494
+ "content": "|<EXTRA_TOKENS_70>|",
495
+ "lstrip": false,
496
+ "normalized": false,
497
+ "rstrip": false,
498
+ "single_word": false
499
+ },
500
+ {
501
+ "content": "|<EXTRA_TOKENS_71>|",
502
+ "lstrip": false,
503
+ "normalized": false,
504
+ "rstrip": false,
505
+ "single_word": false
506
+ },
507
+ {
508
+ "content": "|<EXTRA_TOKENS_72>|",
509
+ "lstrip": false,
510
+ "normalized": false,
511
+ "rstrip": false,
512
+ "single_word": false
513
+ },
514
+ {
515
+ "content": "|<EXTRA_TOKENS_73>|",
516
+ "lstrip": false,
517
+ "normalized": false,
518
+ "rstrip": false,
519
+ "single_word": false
520
+ },
521
+ {
522
+ "content": "|<EXTRA_TOKENS_74>|",
523
+ "lstrip": false,
524
+ "normalized": false,
525
+ "rstrip": false,
526
+ "single_word": false
527
+ },
528
+ {
529
+ "content": "|<EXTRA_TOKENS_75>|",
530
+ "lstrip": false,
531
+ "normalized": false,
532
+ "rstrip": false,
533
+ "single_word": false
534
+ },
535
+ {
536
+ "content": "|<EXTRA_TOKENS_76>|",
537
+ "lstrip": false,
538
+ "normalized": false,
539
+ "rstrip": false,
540
+ "single_word": false
541
+ },
542
+ {
543
+ "content": "|<EXTRA_TOKENS_77>|",
544
+ "lstrip": false,
545
+ "normalized": false,
546
+ "rstrip": false,
547
+ "single_word": false
548
+ },
549
+ {
550
+ "content": "|<EXTRA_TOKENS_78>|",
551
+ "lstrip": false,
552
+ "normalized": false,
553
+ "rstrip": false,
554
+ "single_word": false
555
+ },
556
+ {
557
+ "content": "|<EXTRA_TOKENS_79>|",
558
+ "lstrip": false,
559
+ "normalized": false,
560
+ "rstrip": false,
561
+ "single_word": false
562
+ },
563
+ {
564
+ "content": "|<EXTRA_TOKENS_80>|",
565
+ "lstrip": false,
566
+ "normalized": false,
567
+ "rstrip": false,
568
+ "single_word": false
569
+ },
570
+ {
571
+ "content": "|<EXTRA_TOKENS_81>|",
572
+ "lstrip": false,
573
+ "normalized": false,
574
+ "rstrip": false,
575
+ "single_word": false
576
+ },
577
+ {
578
+ "content": "|<EXTRA_TOKENS_82>|",
579
+ "lstrip": false,
580
+ "normalized": false,
581
+ "rstrip": false,
582
+ "single_word": false
583
+ },
584
+ {
585
+ "content": "|<EXTRA_TOKENS_83>|",
586
+ "lstrip": false,
587
+ "normalized": false,
588
+ "rstrip": false,
589
+ "single_word": false
590
+ },
591
+ {
592
+ "content": "|<EXTRA_TOKENS_84>|",
593
+ "lstrip": false,
594
+ "normalized": false,
595
+ "rstrip": false,
596
+ "single_word": false
597
+ },
598
+ {
599
+ "content": "|<EXTRA_TOKENS_85>|",
600
+ "lstrip": false,
601
+ "normalized": false,
602
+ "rstrip": false,
603
+ "single_word": false
604
+ },
605
+ {
606
+ "content": "|<EXTRA_TOKENS_86>|",
607
+ "lstrip": false,
608
+ "normalized": false,
609
+ "rstrip": false,
610
+ "single_word": false
611
+ },
612
+ {
613
+ "content": "|<EXTRA_TOKENS_87>|",
614
+ "lstrip": false,
615
+ "normalized": false,
616
+ "rstrip": false,
617
+ "single_word": false
618
+ },
619
+ {
620
+ "content": "|<EXTRA_TOKENS_88>|",
621
+ "lstrip": false,
622
+ "normalized": false,
623
+ "rstrip": false,
624
+ "single_word": false
625
+ },
626
+ {
627
+ "content": "|<EXTRA_TOKENS_89>|",
628
+ "lstrip": false,
629
+ "normalized": false,
630
+ "rstrip": false,
631
+ "single_word": false
632
+ },
633
+ {
634
+ "content": "|<EXTRA_TOKENS_90>|",
635
+ "lstrip": false,
636
+ "normalized": false,
637
+ "rstrip": false,
638
+ "single_word": false
639
+ },
640
+ {
641
+ "content": "|<EXTRA_TOKENS_91>|",
642
+ "lstrip": false,
643
+ "normalized": false,
644
+ "rstrip": false,
645
+ "single_word": false
646
+ },
647
+ {
648
+ "content": "|<EXTRA_TOKENS_92>|",
649
+ "lstrip": false,
650
+ "normalized": false,
651
+ "rstrip": false,
652
+ "single_word": false
653
+ },
654
+ {
655
+ "content": "|<EXTRA_TOKENS_93>|",
656
+ "lstrip": false,
657
+ "normalized": false,
658
+ "rstrip": false,
659
+ "single_word": false
660
+ },
661
+ {
662
+ "content": "|<EXTRA_TOKENS_94>|",
663
+ "lstrip": false,
664
+ "normalized": false,
665
+ "rstrip": false,
666
+ "single_word": false
667
+ },
668
+ {
669
+ "content": "|<EXTRA_TOKENS_95>|",
670
+ "lstrip": false,
671
+ "normalized": false,
672
+ "rstrip": false,
673
+ "single_word": false
674
+ },
675
+ {
676
+ "content": "|<EXTRA_TOKENS_96>|",
677
+ "lstrip": false,
678
+ "normalized": false,
679
+ "rstrip": false,
680
+ "single_word": false
681
+ },
682
+ {
683
+ "content": "|<EXTRA_TOKENS_97>|",
684
+ "lstrip": false,
685
+ "normalized": false,
686
+ "rstrip": false,
687
+ "single_word": false
688
+ },
689
+ {
690
+ "content": "|<EXTRA_TOKENS_98>|",
691
+ "lstrip": false,
692
+ "normalized": false,
693
+ "rstrip": false,
694
+ "single_word": false
695
+ },
696
+ {
697
+ "content": "|<EXTRA_TOKENS_99>|",
698
+ "lstrip": false,
699
+ "normalized": false,
700
+ "rstrip": false,
701
+ "single_word": false
702
+ },
703
+ {
704
+ "content": "|<EXTRA_TOKENS_100>|",
705
+ "lstrip": false,
706
+ "normalized": false,
707
+ "rstrip": false,
708
+ "single_word": false
709
+ },
710
+ {
711
+ "content": "|<EXTRA_TOKENS_101>|",
712
+ "lstrip": false,
713
+ "normalized": false,
714
+ "rstrip": false,
715
+ "single_word": false
716
+ },
717
+ {
718
+ "content": "|<EXTRA_TOKENS_102>|",
719
+ "lstrip": false,
720
+ "normalized": false,
721
+ "rstrip": false,
722
+ "single_word": false
723
+ },
724
+ {
725
+ "content": "|<EXTRA_TOKENS_103>|",
726
+ "lstrip": false,
727
+ "normalized": false,
728
+ "rstrip": false,
729
+ "single_word": false
730
+ },
731
+ {
732
+ "content": "|<EXTRA_TOKENS_104>|",
733
+ "lstrip": false,
734
+ "normalized": false,
735
+ "rstrip": false,
736
+ "single_word": false
737
+ },
738
+ {
739
+ "content": "|<EXTRA_TOKENS_105>|",
740
+ "lstrip": false,
741
+ "normalized": false,
742
+ "rstrip": false,
743
+ "single_word": false
744
+ },
745
+ {
746
+ "content": "|<EXTRA_TOKENS_106>|",
747
+ "lstrip": false,
748
+ "normalized": false,
749
+ "rstrip": false,
750
+ "single_word": false
751
+ },
752
+ {
753
+ "content": "|<EXTRA_TOKENS_107>|",
754
+ "lstrip": false,
755
+ "normalized": false,
756
+ "rstrip": false,
757
+ "single_word": false
758
+ },
759
+ {
760
+ "content": "|<EXTRA_TOKENS_108>|",
761
+ "lstrip": false,
762
+ "normalized": false,
763
+ "rstrip": false,
764
+ "single_word": false
765
+ },
766
+ {
767
+ "content": "|<EXTRA_TOKENS_109>|",
768
+ "lstrip": false,
769
+ "normalized": false,
770
+ "rstrip": false,
771
+ "single_word": false
772
+ },
773
+ {
774
+ "content": "|<EXTRA_TOKENS_110>|",
775
+ "lstrip": false,
776
+ "normalized": false,
777
+ "rstrip": false,
778
+ "single_word": false
779
+ },
780
+ {
781
+ "content": "|<EXTRA_TOKENS_111>|",
782
+ "lstrip": false,
783
+ "normalized": false,
784
+ "rstrip": false,
785
+ "single_word": false
786
+ },
787
+ {
788
+ "content": "|<EXTRA_TOKENS_112>|",
789
+ "lstrip": false,
790
+ "normalized": false,
791
+ "rstrip": false,
792
+ "single_word": false
793
+ },
794
+ {
795
+ "content": "|<EXTRA_TOKENS_113>|",
796
+ "lstrip": false,
797
+ "normalized": false,
798
+ "rstrip": false,
799
+ "single_word": false
800
+ },
801
+ {
802
+ "content": "|<EXTRA_TOKENS_114>|",
803
+ "lstrip": false,
804
+ "normalized": false,
805
+ "rstrip": false,
806
+ "single_word": false
807
+ },
808
+ {
809
+ "content": "|<EXTRA_TOKENS_115>|",
810
+ "lstrip": false,
811
+ "normalized": false,
812
+ "rstrip": false,
813
+ "single_word": false
814
+ },
815
+ {
816
+ "content": "|<EXTRA_TOKENS_116>|",
817
+ "lstrip": false,
818
+ "normalized": false,
819
+ "rstrip": false,
820
+ "single_word": false
821
+ },
822
+ {
823
+ "content": "|<EXTRA_TOKENS_117>|",
824
+ "lstrip": false,
825
+ "normalized": false,
826
+ "rstrip": false,
827
+ "single_word": false
828
+ },
829
+ {
830
+ "content": "|<EXTRA_TOKENS_118>|",
831
+ "lstrip": false,
832
+ "normalized": false,
833
+ "rstrip": false,
834
+ "single_word": false
835
+ },
836
+ {
837
+ "content": "|<EXTRA_TOKENS_119>|",
838
+ "lstrip": false,
839
+ "normalized": false,
840
+ "rstrip": false,
841
+ "single_word": false
842
+ },
843
+ {
844
+ "content": "|<EXTRA_TOKENS_120>|",
845
+ "lstrip": false,
846
+ "normalized": false,
847
+ "rstrip": false,
848
+ "single_word": false
849
+ },
850
+ {
851
+ "content": "|<EXTRA_TOKENS_121>|",
852
+ "lstrip": false,
853
+ "normalized": false,
854
+ "rstrip": false,
855
+ "single_word": false
856
+ },
857
+ {
858
+ "content": "|<EXTRA_TOKENS_122>|",
859
+ "lstrip": false,
860
+ "normalized": false,
861
+ "rstrip": false,
862
+ "single_word": false
863
+ },
864
+ {
865
+ "content": "|<EXTRA_TOKENS_123>|",
866
+ "lstrip": false,
867
+ "normalized": false,
868
+ "rstrip": false,
869
+ "single_word": false
870
+ },
871
+ {
872
+ "content": "|<EXTRA_TOKENS_124>|",
873
+ "lstrip": false,
874
+ "normalized": false,
875
+ "rstrip": false,
876
+ "single_word": false
877
+ },
878
+ {
879
+ "content": "|<EXTRA_TOKENS_125>|",
880
+ "lstrip": false,
881
+ "normalized": false,
882
+ "rstrip": false,
883
+ "single_word": false
884
+ },
885
+ {
886
+ "content": "|<EXTRA_TOKENS_126>|",
887
+ "lstrip": false,
888
+ "normalized": false,
889
+ "rstrip": false,
890
+ "single_word": false
891
+ },
892
+ {
893
+ "content": "|<EXTRA_TOKENS_127>|",
894
+ "lstrip": false,
895
+ "normalized": false,
896
+ "rstrip": false,
897
+ "single_word": false
898
+ },
899
+ {
900
+ "content": "|<EXTRA_TOKENS_128>|",
901
+ "lstrip": false,
902
+ "normalized": false,
903
+ "rstrip": false,
904
+ "single_word": false
905
+ },
906
+ {
907
+ "content": "|<EXTRA_TOKENS_129>|",
908
+ "lstrip": false,
909
+ "normalized": false,
910
+ "rstrip": false,
911
+ "single_word": false
912
+ },
913
+ {
914
+ "content": "|<EXTRA_TOKENS_130>|",
915
+ "lstrip": false,
916
+ "normalized": false,
917
+ "rstrip": false,
918
+ "single_word": false
919
+ },
920
+ {
921
+ "content": "|<EXTRA_TOKENS_131>|",
922
+ "lstrip": false,
923
+ "normalized": false,
924
+ "rstrip": false,
925
+ "single_word": false
926
+ },
927
+ {
928
+ "content": "|<EXTRA_TOKENS_132>|",
929
+ "lstrip": false,
930
+ "normalized": false,
931
+ "rstrip": false,
932
+ "single_word": false
933
+ },
934
+ {
935
+ "content": "|<EXTRA_TOKENS_133>|",
936
+ "lstrip": false,
937
+ "normalized": false,
938
+ "rstrip": false,
939
+ "single_word": false
940
+ },
941
+ {
942
+ "content": "|<EXTRA_TOKENS_134>|",
943
+ "lstrip": false,
944
+ "normalized": false,
945
+ "rstrip": false,
946
+ "single_word": false
947
+ },
948
+ {
949
+ "content": "|<EXTRA_TOKENS_135>|",
950
+ "lstrip": false,
951
+ "normalized": false,
952
+ "rstrip": false,
953
+ "single_word": false
954
+ },
955
+ {
956
+ "content": "|<EXTRA_TOKENS_136>|",
957
+ "lstrip": false,
958
+ "normalized": false,
959
+ "rstrip": false,
960
+ "single_word": false
961
+ },
962
+ {
963
+ "content": "|<EXTRA_TOKENS_137>|",
964
+ "lstrip": false,
965
+ "normalized": false,
966
+ "rstrip": false,
967
+ "single_word": false
968
+ },
969
+ {
970
+ "content": "|<EXTRA_TOKENS_138>|",
971
+ "lstrip": false,
972
+ "normalized": false,
973
+ "rstrip": false,
974
+ "single_word": false
975
+ },
976
+ {
977
+ "content": "|<EXTRA_TOKENS_139>|",
978
+ "lstrip": false,
979
+ "normalized": false,
980
+ "rstrip": false,
981
+ "single_word": false
982
+ },
983
+ {
984
+ "content": "|<EXTRA_TOKENS_140>|",
985
+ "lstrip": false,
986
+ "normalized": false,
987
+ "rstrip": false,
988
+ "single_word": false
989
+ },
990
+ {
991
+ "content": "|<EXTRA_TOKENS_141>|",
992
+ "lstrip": false,
993
+ "normalized": false,
994
+ "rstrip": false,
995
+ "single_word": false
996
+ },
997
+ {
998
+ "content": "|<EXTRA_TOKENS_142>|",
999
+ "lstrip": false,
1000
+ "normalized": false,
1001
+ "rstrip": false,
1002
+ "single_word": false
1003
+ },
1004
+ {
1005
+ "content": "|<EXTRA_TOKENS_143>|",
1006
+ "lstrip": false,
1007
+ "normalized": false,
1008
+ "rstrip": false,
1009
+ "single_word": false
1010
+ },
1011
+ {
1012
+ "content": "|<EXTRA_TOKENS_144>|",
1013
+ "lstrip": false,
1014
+ "normalized": false,
1015
+ "rstrip": false,
1016
+ "single_word": false
1017
+ },
1018
+ {
1019
+ "content": "|<EXTRA_TOKENS_145>|",
1020
+ "lstrip": false,
1021
+ "normalized": false,
1022
+ "rstrip": false,
1023
+ "single_word": false
1024
+ },
1025
+ {
1026
+ "content": "|<EXTRA_TOKENS_146>|",
1027
+ "lstrip": false,
1028
+ "normalized": false,
1029
+ "rstrip": false,
1030
+ "single_word": false
1031
+ },
1032
+ {
1033
+ "content": "|<EXTRA_TOKENS_147>|",
1034
+ "lstrip": false,
1035
+ "normalized": false,
1036
+ "rstrip": false,
1037
+ "single_word": false
1038
+ },
1039
+ {
1040
+ "content": "|<EXTRA_TOKENS_148>|",
1041
+ "lstrip": false,
1042
+ "normalized": false,
1043
+ "rstrip": false,
1044
+ "single_word": false
1045
+ },
1046
+ {
1047
+ "content": "|<EXTRA_TOKENS_149>|",
1048
+ "lstrip": false,
1049
+ "normalized": false,
1050
+ "rstrip": false,
1051
+ "single_word": false
1052
+ },
1053
+ {
1054
+ "content": "|<EXTRA_TOKENS_150>|",
1055
+ "lstrip": false,
1056
+ "normalized": false,
1057
+ "rstrip": false,
1058
+ "single_word": false
1059
+ },
1060
+ {
1061
+ "content": "|<EXTRA_TOKENS_151>|",
1062
+ "lstrip": false,
1063
+ "normalized": false,
1064
+ "rstrip": false,
1065
+ "single_word": false
1066
+ },
1067
+ {
1068
+ "content": "|<EXTRA_TOKENS_152>|",
1069
+ "lstrip": false,
1070
+ "normalized": false,
1071
+ "rstrip": false,
1072
+ "single_word": false
1073
+ },
1074
+ {
1075
+ "content": "|<EXTRA_TOKENS_153>|",
1076
+ "lstrip": false,
1077
+ "normalized": false,
1078
+ "rstrip": false,
1079
+ "single_word": false
1080
+ },
1081
+ {
1082
+ "content": "|<EXTRA_TOKENS_154>|",
1083
+ "lstrip": false,
1084
+ "normalized": false,
1085
+ "rstrip": false,
1086
+ "single_word": false
1087
+ },
1088
+ {
1089
+ "content": "|<EXTRA_TOKENS_155>|",
1090
+ "lstrip": false,
1091
+ "normalized": false,
1092
+ "rstrip": false,
1093
+ "single_word": false
1094
+ },
1095
+ {
1096
+ "content": "|<EXTRA_TOKENS_156>|",
1097
+ "lstrip": false,
1098
+ "normalized": false,
1099
+ "rstrip": false,
1100
+ "single_word": false
1101
+ },
1102
+ {
1103
+ "content": "|<EXTRA_TOKENS_157>|",
1104
+ "lstrip": false,
1105
+ "normalized": false,
1106
+ "rstrip": false,
1107
+ "single_word": false
1108
+ },
1109
+ {
1110
+ "content": "|<EXTRA_TOKENS_158>|",
1111
+ "lstrip": false,
1112
+ "normalized": false,
1113
+ "rstrip": false,
1114
+ "single_word": false
1115
+ },
1116
+ {
1117
+ "content": "|<EXTRA_TOKENS_159>|",
1118
+ "lstrip": false,
1119
+ "normalized": false,
1120
+ "rstrip": false,
1121
+ "single_word": false
1122
+ },
1123
+ {
1124
+ "content": "|<EXTRA_TOKENS_160>|",
1125
+ "lstrip": false,
1126
+ "normalized": false,
1127
+ "rstrip": false,
1128
+ "single_word": false
1129
+ },
1130
+ {
1131
+ "content": "|<EXTRA_TOKENS_161>|",
1132
+ "lstrip": false,
1133
+ "normalized": false,
1134
+ "rstrip": false,
1135
+ "single_word": false
1136
+ },
1137
+ {
1138
+ "content": "|<EXTRA_TOKENS_162>|",
1139
+ "lstrip": false,
1140
+ "normalized": false,
1141
+ "rstrip": false,
1142
+ "single_word": false
1143
+ },
1144
+ {
1145
+ "content": "|<EXTRA_TOKENS_163>|",
1146
+ "lstrip": false,
1147
+ "normalized": false,
1148
+ "rstrip": false,
1149
+ "single_word": false
1150
+ },
1151
+ {
1152
+ "content": "|<EXTRA_TOKENS_164>|",
1153
+ "lstrip": false,
1154
+ "normalized": false,
1155
+ "rstrip": false,
1156
+ "single_word": false
1157
+ },
1158
+ {
1159
+ "content": "|<EXTRA_TOKENS_165>|",
1160
+ "lstrip": false,
1161
+ "normalized": false,
1162
+ "rstrip": false,
1163
+ "single_word": false
1164
+ },
1165
+ {
1166
+ "content": "|<EXTRA_TOKENS_166>|",
1167
+ "lstrip": false,
1168
+ "normalized": false,
1169
+ "rstrip": false,
1170
+ "single_word": false
1171
+ },
1172
+ {
1173
+ "content": "|<EXTRA_TOKENS_167>|",
1174
+ "lstrip": false,
1175
+ "normalized": false,
1176
+ "rstrip": false,
1177
+ "single_word": false
1178
+ },
1179
+ {
1180
+ "content": "|<EXTRA_TOKENS_168>|",
1181
+ "lstrip": false,
1182
+ "normalized": false,
1183
+ "rstrip": false,
1184
+ "single_word": false
1185
+ },
1186
+ {
1187
+ "content": "|<EXTRA_TOKENS_169>|",
1188
+ "lstrip": false,
1189
+ "normalized": false,
1190
+ "rstrip": false,
1191
+ "single_word": false
1192
+ },
1193
+ {
1194
+ "content": "|<EXTRA_TOKENS_170>|",
1195
+ "lstrip": false,
1196
+ "normalized": false,
1197
+ "rstrip": false,
1198
+ "single_word": false
1199
+ },
1200
+ {
1201
+ "content": "|<EXTRA_TOKENS_171>|",
1202
+ "lstrip": false,
1203
+ "normalized": false,
1204
+ "rstrip": false,
1205
+ "single_word": false
1206
+ },
1207
+ {
1208
+ "content": "|<EXTRA_TOKENS_172>|",
1209
+ "lstrip": false,
1210
+ "normalized": false,
1211
+ "rstrip": false,
1212
+ "single_word": false
1213
+ },
1214
+ {
1215
+ "content": "|<EXTRA_TOKENS_173>|",
1216
+ "lstrip": false,
1217
+ "normalized": false,
1218
+ "rstrip": false,
1219
+ "single_word": false
1220
+ },
1221
+ {
1222
+ "content": "|<EXTRA_TOKENS_174>|",
1223
+ "lstrip": false,
1224
+ "normalized": false,
1225
+ "rstrip": false,
1226
+ "single_word": false
1227
+ },
1228
+ {
1229
+ "content": "|<EXTRA_TOKENS_175>|",
1230
+ "lstrip": false,
1231
+ "normalized": false,
1232
+ "rstrip": false,
1233
+ "single_word": false
1234
+ },
1235
+ {
1236
+ "content": "|<EXTRA_TOKENS_176>|",
1237
+ "lstrip": false,
1238
+ "normalized": false,
1239
+ "rstrip": false,
1240
+ "single_word": false
1241
+ },
1242
+ {
1243
+ "content": "|<EXTRA_TOKENS_177>|",
1244
+ "lstrip": false,
1245
+ "normalized": false,
1246
+ "rstrip": false,
1247
+ "single_word": false
1248
+ },
1249
+ {
1250
+ "content": "|<EXTRA_TOKENS_178>|",
1251
+ "lstrip": false,
1252
+ "normalized": false,
1253
+ "rstrip": false,
1254
+ "single_word": false
1255
+ },
1256
+ {
1257
+ "content": "|<EXTRA_TOKENS_179>|",
1258
+ "lstrip": false,
1259
+ "normalized": false,
1260
+ "rstrip": false,
1261
+ "single_word": false
1262
+ },
1263
+ {
1264
+ "content": "|<EXTRA_TOKENS_180>|",
1265
+ "lstrip": false,
1266
+ "normalized": false,
1267
+ "rstrip": false,
1268
+ "single_word": false
1269
+ },
1270
+ {
1271
+ "content": "|<EXTRA_TOKENS_181>|",
1272
+ "lstrip": false,
1273
+ "normalized": false,
1274
+ "rstrip": false,
1275
+ "single_word": false
1276
+ },
1277
+ {
1278
+ "content": "|<EXTRA_TOKENS_182>|",
1279
+ "lstrip": false,
1280
+ "normalized": false,
1281
+ "rstrip": false,
1282
+ "single_word": false
1283
+ },
1284
+ {
1285
+ "content": "|<EXTRA_TOKENS_183>|",
1286
+ "lstrip": false,
1287
+ "normalized": false,
1288
+ "rstrip": false,
1289
+ "single_word": false
1290
+ },
1291
+ {
1292
+ "content": "|<EXTRA_TOKENS_184>|",
1293
+ "lstrip": false,
1294
+ "normalized": false,
1295
+ "rstrip": false,
1296
+ "single_word": false
1297
+ },
1298
+ {
1299
+ "content": "|<EXTRA_TOKENS_185>|",
1300
+ "lstrip": false,
1301
+ "normalized": false,
1302
+ "rstrip": false,
1303
+ "single_word": false
1304
+ },
1305
+ {
1306
+ "content": "|<EXTRA_TOKENS_186>|",
1307
+ "lstrip": false,
1308
+ "normalized": false,
1309
+ "rstrip": false,
1310
+ "single_word": false
1311
+ },
1312
+ {
1313
+ "content": "|<EXTRA_TOKENS_187>|",
1314
+ "lstrip": false,
1315
+ "normalized": false,
1316
+ "rstrip": false,
1317
+ "single_word": false
1318
+ },
1319
+ {
1320
+ "content": "|<EXTRA_TOKENS_188>|",
1321
+ "lstrip": false,
1322
+ "normalized": false,
1323
+ "rstrip": false,
1324
+ "single_word": false
1325
+ },
1326
+ {
1327
+ "content": "|<EXTRA_TOKENS_189>|",
1328
+ "lstrip": false,
1329
+ "normalized": false,
1330
+ "rstrip": false,
1331
+ "single_word": false
1332
+ },
1333
+ {
1334
+ "content": "|<EXTRA_TOKENS_190>|",
1335
+ "lstrip": false,
1336
+ "normalized": false,
1337
+ "rstrip": false,
1338
+ "single_word": false
1339
+ },
1340
+ {
1341
+ "content": "|<EXTRA_TOKENS_191>|",
1342
+ "lstrip": false,
1343
+ "normalized": false,
1344
+ "rstrip": false,
1345
+ "single_word": false
1346
+ },
1347
+ {
1348
+ "content": "|<EXTRA_TOKENS_192>|",
1349
+ "lstrip": false,
1350
+ "normalized": false,
1351
+ "rstrip": false,
1352
+ "single_word": false
1353
+ },
1354
+ {
1355
+ "content": "|<EXTRA_TOKENS_193>|",
1356
+ "lstrip": false,
1357
+ "normalized": false,
1358
+ "rstrip": false,
1359
+ "single_word": false
1360
+ },
1361
+ {
1362
+ "content": "|<EXTRA_TOKENS_194>|",
1363
+ "lstrip": false,
1364
+ "normalized": false,
1365
+ "rstrip": false,
1366
+ "single_word": false
1367
+ },
1368
+ {
1369
+ "content": "|<EXTRA_TOKENS_195>|",
1370
+ "lstrip": false,
1371
+ "normalized": false,
1372
+ "rstrip": false,
1373
+ "single_word": false
1374
+ },
1375
+ {
1376
+ "content": "|<EXTRA_TOKENS_196>|",
1377
+ "lstrip": false,
1378
+ "normalized": false,
1379
+ "rstrip": false,
1380
+ "single_word": false
1381
+ },
1382
+ {
1383
+ "content": "|<EXTRA_TOKENS_197>|",
1384
+ "lstrip": false,
1385
+ "normalized": false,
1386
+ "rstrip": false,
1387
+ "single_word": false
1388
+ },
1389
+ {
1390
+ "content": "|<EXTRA_TOKENS_198>|",
1391
+ "lstrip": false,
1392
+ "normalized": false,
1393
+ "rstrip": false,
1394
+ "single_word": false
1395
+ },
1396
+ {
1397
+ "content": "|<EXTRA_TOKENS_199>|",
1398
+ "lstrip": false,
1399
+ "normalized": false,
1400
+ "rstrip": false,
1401
+ "single_word": false
1402
+ },
1403
+ {
1404
+ "content": "|<EXTRA_TOKENS_200>|",
1405
+ "lstrip": false,
1406
+ "normalized": false,
1407
+ "rstrip": false,
1408
+ "single_word": false
1409
+ },
1410
+ {
1411
+ "content": "|<EXTRA_TOKENS_201>|",
1412
+ "lstrip": false,
1413
+ "normalized": false,
1414
+ "rstrip": false,
1415
+ "single_word": false
1416
+ },
1417
+ {
1418
+ "content": "|<EXTRA_TOKENS_202>|",
1419
+ "lstrip": false,
1420
+ "normalized": false,
1421
+ "rstrip": false,
1422
+ "single_word": false
1423
+ },
1424
+ {
1425
+ "content": "|<EXTRA_TOKENS_203>|",
1426
+ "lstrip": false,
1427
+ "normalized": false,
1428
+ "rstrip": false,
1429
+ "single_word": false
1430
+ },
1431
+ {
1432
+ "content": "|<EXTRA_TOKENS_204>|",
1433
+ "lstrip": false,
1434
+ "normalized": false,
1435
+ "rstrip": false,
1436
+ "single_word": false
1437
+ },
1438
+ {
1439
+ "content": "|<EXTRA_TOKENS_205>|",
1440
+ "lstrip": false,
1441
+ "normalized": false,
1442
+ "rstrip": false,
1443
+ "single_word": false
1444
+ },
1445
+ {
1446
+ "content": "|<EXTRA_TOKENS_206>|",
1447
+ "lstrip": false,
1448
+ "normalized": false,
1449
+ "rstrip": false,
1450
+ "single_word": false
1451
+ },
1452
+ {
1453
+ "content": "|<EXTRA_TOKENS_207>|",
1454
+ "lstrip": false,
1455
+ "normalized": false,
1456
+ "rstrip": false,
1457
+ "single_word": false
1458
+ },
1459
+ {
1460
+ "content": "|<EXTRA_TOKENS_208>|",
1461
+ "lstrip": false,
1462
+ "normalized": false,
1463
+ "rstrip": false,
1464
+ "single_word": false
1465
+ },
1466
+ {
1467
+ "content": "|<EXTRA_TOKENS_209>|",
1468
+ "lstrip": false,
1469
+ "normalized": false,
1470
+ "rstrip": false,
1471
+ "single_word": false
1472
+ },
1473
+ {
1474
+ "content": "|<EXTRA_TOKENS_210>|",
1475
+ "lstrip": false,
1476
+ "normalized": false,
1477
+ "rstrip": false,
1478
+ "single_word": false
1479
+ },
1480
+ {
1481
+ "content": "|<EXTRA_TOKENS_211>|",
1482
+ "lstrip": false,
1483
+ "normalized": false,
1484
+ "rstrip": false,
1485
+ "single_word": false
1486
+ },
1487
+ {
1488
+ "content": "|<EXTRA_TOKENS_212>|",
1489
+ "lstrip": false,
1490
+ "normalized": false,
1491
+ "rstrip": false,
1492
+ "single_word": false
1493
+ },
1494
+ {
1495
+ "content": "|<EXTRA_TOKENS_213>|",
1496
+ "lstrip": false,
1497
+ "normalized": false,
1498
+ "rstrip": false,
1499
+ "single_word": false
1500
+ },
1501
+ {
1502
+ "content": "|<EXTRA_TOKENS_214>|",
1503
+ "lstrip": false,
1504
+ "normalized": false,
1505
+ "rstrip": false,
1506
+ "single_word": false
1507
+ },
1508
+ {
1509
+ "content": "|<EXTRA_TOKENS_215>|",
1510
+ "lstrip": false,
1511
+ "normalized": false,
1512
+ "rstrip": false,
1513
+ "single_word": false
1514
+ },
1515
+ {
1516
+ "content": "|<EXTRA_TOKENS_216>|",
1517
+ "lstrip": false,
1518
+ "normalized": false,
1519
+ "rstrip": false,
1520
+ "single_word": false
1521
+ },
1522
+ {
1523
+ "content": "|<EXTRA_TOKENS_217>|",
1524
+ "lstrip": false,
1525
+ "normalized": false,
1526
+ "rstrip": false,
1527
+ "single_word": false
1528
+ },
1529
+ {
1530
+ "content": "|<EXTRA_TOKENS_218>|",
1531
+ "lstrip": false,
1532
+ "normalized": false,
1533
+ "rstrip": false,
1534
+ "single_word": false
1535
+ },
1536
+ {
1537
+ "content": "|<EXTRA_TOKENS_219>|",
1538
+ "lstrip": false,
1539
+ "normalized": false,
1540
+ "rstrip": false,
1541
+ "single_word": false
1542
+ },
1543
+ {
1544
+ "content": "|<EXTRA_TOKENS_220>|",
1545
+ "lstrip": false,
1546
+ "normalized": false,
1547
+ "rstrip": false,
1548
+ "single_word": false
1549
+ },
1550
+ {
1551
+ "content": "|<EXTRA_TOKENS_221>|",
1552
+ "lstrip": false,
1553
+ "normalized": false,
1554
+ "rstrip": false,
1555
+ "single_word": false
1556
+ },
1557
+ {
1558
+ "content": "|<EXTRA_TOKENS_222>|",
1559
+ "lstrip": false,
1560
+ "normalized": false,
1561
+ "rstrip": false,
1562
+ "single_word": false
1563
+ },
1564
+ {
1565
+ "content": "|<EXTRA_TOKENS_223>|",
1566
+ "lstrip": false,
1567
+ "normalized": false,
1568
+ "rstrip": false,
1569
+ "single_word": false
1570
+ },
1571
+ {
1572
+ "content": "|<EXTRA_TOKENS_224>|",
1573
+ "lstrip": false,
1574
+ "normalized": false,
1575
+ "rstrip": false,
1576
+ "single_word": false
1577
+ },
1578
+ {
1579
+ "content": "|<EXTRA_TOKENS_225>|",
1580
+ "lstrip": false,
1581
+ "normalized": false,
1582
+ "rstrip": false,
1583
+ "single_word": false
1584
+ },
1585
+ {
1586
+ "content": "|<EXTRA_TOKENS_226>|",
1587
+ "lstrip": false,
1588
+ "normalized": false,
1589
+ "rstrip": false,
1590
+ "single_word": false
1591
+ },
1592
+ {
1593
+ "content": "|<EXTRA_TOKENS_227>|",
1594
+ "lstrip": false,
1595
+ "normalized": false,
1596
+ "rstrip": false,
1597
+ "single_word": false
1598
+ },
1599
+ {
1600
+ "content": "|<EXTRA_TOKENS_228>|",
1601
+ "lstrip": false,
1602
+ "normalized": false,
1603
+ "rstrip": false,
1604
+ "single_word": false
1605
+ },
1606
+ {
1607
+ "content": "|<EXTRA_TOKENS_229>|",
1608
+ "lstrip": false,
1609
+ "normalized": false,
1610
+ "rstrip": false,
1611
+ "single_word": false
1612
+ },
1613
+ {
1614
+ "content": "|<EXTRA_TOKENS_230>|",
1615
+ "lstrip": false,
1616
+ "normalized": false,
1617
+ "rstrip": false,
1618
+ "single_word": false
1619
+ },
1620
+ {
1621
+ "content": "|<EXTRA_TOKENS_231>|",
1622
+ "lstrip": false,
1623
+ "normalized": false,
1624
+ "rstrip": false,
1625
+ "single_word": false
1626
+ },
1627
+ {
1628
+ "content": "|<EXTRA_TOKENS_232>|",
1629
+ "lstrip": false,
1630
+ "normalized": false,
1631
+ "rstrip": false,
1632
+ "single_word": false
1633
+ },
1634
+ {
1635
+ "content": "|<EXTRA_TOKENS_233>|",
1636
+ "lstrip": false,
1637
+ "normalized": false,
1638
+ "rstrip": false,
1639
+ "single_word": false
1640
+ },
1641
+ {
1642
+ "content": "|<EXTRA_TOKENS_234>|",
1643
+ "lstrip": false,
1644
+ "normalized": false,
1645
+ "rstrip": false,
1646
+ "single_word": false
1647
+ },
1648
+ {
1649
+ "content": "|<EXTRA_TOKENS_235>|",
1650
+ "lstrip": false,
1651
+ "normalized": false,
1652
+ "rstrip": false,
1653
+ "single_word": false
1654
+ },
1655
+ {
1656
+ "content": "|<EXTRA_TOKENS_236>|",
1657
+ "lstrip": false,
1658
+ "normalized": false,
1659
+ "rstrip": false,
1660
+ "single_word": false
1661
+ },
1662
+ {
1663
+ "content": "|<EXTRA_TOKENS_237>|",
1664
+ "lstrip": false,
1665
+ "normalized": false,
1666
+ "rstrip": false,
1667
+ "single_word": false
1668
+ },
1669
+ {
1670
+ "content": "|<EXTRA_TOKENS_238>|",
1671
+ "lstrip": false,
1672
+ "normalized": false,
1673
+ "rstrip": false,
1674
+ "single_word": false
1675
+ },
1676
+ {
1677
+ "content": "|<EXTRA_TOKENS_239>|",
1678
+ "lstrip": false,
1679
+ "normalized": false,
1680
+ "rstrip": false,
1681
+ "single_word": false
1682
+ },
1683
+ {
1684
+ "content": "|<EXTRA_TOKENS_240>|",
1685
+ "lstrip": false,
1686
+ "normalized": false,
1687
+ "rstrip": false,
1688
+ "single_word": false
1689
+ },
1690
+ {
1691
+ "content": "|<EXTRA_TOKENS_241>|",
1692
+ "lstrip": false,
1693
+ "normalized": false,
1694
+ "rstrip": false,
1695
+ "single_word": false
1696
+ },
1697
+ {
1698
+ "content": "|<EXTRA_TOKENS_242>|",
1699
+ "lstrip": false,
1700
+ "normalized": false,
1701
+ "rstrip": false,
1702
+ "single_word": false
1703
+ },
1704
+ {
1705
+ "content": "|<EXTRA_TOKENS_243>|",
1706
+ "lstrip": false,
1707
+ "normalized": false,
1708
+ "rstrip": false,
1709
+ "single_word": false
1710
+ },
1711
+ {
1712
+ "content": "|<EXTRA_TOKENS_244>|",
1713
+ "lstrip": false,
1714
+ "normalized": false,
1715
+ "rstrip": false,
1716
+ "single_word": false
1717
+ },
1718
+ {
1719
+ "content": "|<EXTRA_TOKENS_245>|",
1720
+ "lstrip": false,
1721
+ "normalized": false,
1722
+ "rstrip": false,
1723
+ "single_word": false
1724
+ },
1725
+ {
1726
+ "content": "|<EXTRA_TOKENS_246>|",
1727
+ "lstrip": false,
1728
+ "normalized": false,
1729
+ "rstrip": false,
1730
+ "single_word": false
1731
+ },
1732
+ {
1733
+ "content": "|<EXTRA_TOKENS_247>|",
1734
+ "lstrip": false,
1735
+ "normalized": false,
1736
+ "rstrip": false,
1737
+ "single_word": false
1738
+ },
1739
+ {
1740
+ "content": "|<EXTRA_TOKENS_248>|",
1741
+ "lstrip": false,
1742
+ "normalized": false,
1743
+ "rstrip": false,
1744
+ "single_word": false
1745
+ },
1746
+ {
1747
+ "content": "|<EXTRA_TOKENS_249>|",
1748
+ "lstrip": false,
1749
+ "normalized": false,
1750
+ "rstrip": false,
1751
+ "single_word": false
1752
+ },
1753
+ {
1754
+ "content": "|<EXTRA_TOKENS_250>|",
1755
+ "lstrip": false,
1756
+ "normalized": false,
1757
+ "rstrip": false,
1758
+ "single_word": false
1759
+ },
1760
+ {
1761
+ "content": "|<EXTRA_TOKENS_251>|",
1762
+ "lstrip": false,
1763
+ "normalized": false,
1764
+ "rstrip": false,
1765
+ "single_word": false
1766
+ },
1767
+ {
1768
+ "content": "|<EXTRA_TOKENS_252>|",
1769
+ "lstrip": false,
1770
+ "normalized": false,
1771
+ "rstrip": false,
1772
+ "single_word": false
1773
+ },
1774
+ {
1775
+ "content": "|<EXTRA_TOKENS_253>|",
1776
+ "lstrip": false,
1777
+ "normalized": false,
1778
+ "rstrip": false,
1779
+ "single_word": false
1780
+ },
1781
+ {
1782
+ "content": "|<EXTRA_TOKENS_254>|",
1783
+ "lstrip": false,
1784
+ "normalized": false,
1785
+ "rstrip": false,
1786
+ "single_word": false
1787
+ },
1788
+ {
1789
+ "content": "|<EXTRA_TOKENS_255>|",
1790
+ "lstrip": false,
1791
+ "normalized": false,
1792
+ "rstrip": false,
1793
+ "single_word": false
1794
+ },
1795
+ {
1796
+ "content": "|<EXTRA_TOKENS_256>|",
1797
+ "lstrip": false,
1798
+ "normalized": false,
1799
+ "rstrip": false,
1800
+ "single_word": false
1801
+ },
1802
+ {
1803
+ "content": "|<EXTRA_TOKENS_257>|",
1804
+ "lstrip": false,
1805
+ "normalized": false,
1806
+ "rstrip": false,
1807
+ "single_word": false
1808
+ },
1809
+ {
1810
+ "content": "|<EXTRA_TOKENS_258>|",
1811
+ "lstrip": false,
1812
+ "normalized": false,
1813
+ "rstrip": false,
1814
+ "single_word": false
1815
+ },
1816
+ {
1817
+ "content": "|<EXTRA_TOKENS_259>|",
1818
+ "lstrip": false,
1819
+ "normalized": false,
1820
+ "rstrip": false,
1821
+ "single_word": false
1822
+ },
1823
+ {
1824
+ "content": "|<EXTRA_TOKENS_260>|",
1825
+ "lstrip": false,
1826
+ "normalized": false,
1827
+ "rstrip": false,
1828
+ "single_word": false
1829
+ },
1830
+ {
1831
+ "content": "|<EXTRA_TOKENS_261>|",
1832
+ "lstrip": false,
1833
+ "normalized": false,
1834
+ "rstrip": false,
1835
+ "single_word": false
1836
+ },
1837
+ {
1838
+ "content": "|<EXTRA_TOKENS_262>|",
1839
+ "lstrip": false,
1840
+ "normalized": false,
1841
+ "rstrip": false,
1842
+ "single_word": false
1843
+ },
1844
+ {
1845
+ "content": "|<EXTRA_TOKENS_263>|",
1846
+ "lstrip": false,
1847
+ "normalized": false,
1848
+ "rstrip": false,
1849
+ "single_word": false
1850
+ },
1851
+ {
1852
+ "content": "|<EXTRA_TOKENS_264>|",
1853
+ "lstrip": false,
1854
+ "normalized": false,
1855
+ "rstrip": false,
1856
+ "single_word": false
1857
+ },
1858
+ {
1859
+ "content": "|<EXTRA_TOKENS_265>|",
1860
+ "lstrip": false,
1861
+ "normalized": false,
1862
+ "rstrip": false,
1863
+ "single_word": false
1864
+ },
1865
+ {
1866
+ "content": "|<EXTRA_TOKENS_266>|",
1867
+ "lstrip": false,
1868
+ "normalized": false,
1869
+ "rstrip": false,
1870
+ "single_word": false
1871
+ },
1872
+ {
1873
+ "content": "|<EXTRA_TOKENS_267>|",
1874
+ "lstrip": false,
1875
+ "normalized": false,
1876
+ "rstrip": false,
1877
+ "single_word": false
1878
+ },
1879
+ {
1880
+ "content": "|<EXTRA_TOKENS_268>|",
1881
+ "lstrip": false,
1882
+ "normalized": false,
1883
+ "rstrip": false,
1884
+ "single_word": false
1885
+ },
1886
+ {
1887
+ "content": "<im_start>",
1888
+ "lstrip": false,
1889
+ "normalized": false,
1890
+ "rstrip": false,
1891
+ "single_word": false
1892
+ },
1893
+ {
1894
+ "content": "<im_end>",
1895
+ "lstrip": false,
1896
+ "normalized": false,
1897
+ "rstrip": false,
1898
+ "single_word": false
1899
+ },
1900
+ {
1901
+ "content": "<im_patch>",
1902
+ "lstrip": false,
1903
+ "normalized": false,
1904
+ "rstrip": false,
1905
+ "single_word": false
1906
+ },
1907
+ {
1908
+ "content": "<im_col>",
1909
+ "lstrip": false,
1910
+ "normalized": false,
1911
+ "rstrip": false,
1912
+ "single_word": false
1913
+ },
1914
+ {
1915
+ "content": "<|image|>",
1916
+ "lstrip": false,
1917
+ "normalized": false,
1918
+ "rstrip": false,
1919
+ "single_word": false
1920
+ },
1921
+ {
1922
+ "content": "<im_low>",
1923
+ "lstrip": false,
1924
+ "normalized": false,
1925
+ "rstrip": false,
1926
+ "single_word": false
1927
+ }
1928
+ ],
1929
+ "bos_token": "<|endoftext|>",
1930
+ "eos_token": {
1931
+ "content": "<|endoftext|>",
1932
+ "lstrip": false,
1933
+ "normalized": false,
1934
+ "rstrip": false,
1935
+ "single_word": false
1936
+ },
1937
+ "pad_token": {
1938
+ "content": "<|endoftext|>",
1939
+ "lstrip": false,
1940
+ "normalized": false,
1941
+ "rstrip": false,
1942
+ "single_word": false
1943
+ }
1944
+ }
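For context on the file that closes above (which, judging by its structure, appears to be the special-tokens map): a minimal sketch, assuming a placeholder repository path, of how these definitions are typically surfaced once the folder is loaded with Hugging Face `transformers`. The bos/eos/pad entries all point at `<|endoftext|>`, and the image markers (`<im_start>`, `<im_patch>`, `<|image|>`, `<im_low>`) are registered as additional special tokens, so each encodes to a single id and is never split.

```python
# Minimal sketch, not part of this commit: inspect the special tokens defined above.
# "path/to/this-repo" is a placeholder, not the actual repository id.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/this-repo")

# bos_token, eos_token and pad_token are all mapped to <|endoftext|> above.
print(tok.bos_token, tok.eos_token, tok.pad_token)

# The image-related markers are additional special tokens, so each one maps to a
# single id and is protected from splitting by the tokenizer.
for t in ["<im_start>", "<im_patch>", "<|image|>", "<im_low>"]:
    print(t, tok.convert_tokens_to_ids(t))
```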
tokenizer.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:70522ad61c51fe8b137105665e222eea81d787e5603c75641b02ba5480628ad6
3
+ size 11500226
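The three lines above are the entire committed content of `tokenizer.json`: a Git LFS pointer (the `.gitattributes` rule added in this commit routes `tokenizer.json` through LFS), with the real ~11.5 MB payload stored in LFS under the given sha256 oid. A minimal sketch of reading such a pointer file, with the pointer text inlined for illustration:

```python
# Minimal sketch, not part of this commit: parse a Git LFS pointer file like the
# tokenizer.json stub above. The payload itself lives in LFS storage, keyed by the oid.
def parse_lfs_pointer(text: str) -> dict:
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:70522ad61c51fe8b137105665e222eea81d787e5603c75641b02ba5480628ad6\n"
    "size 11500226\n"
)

info = parse_lfs_pointer(pointer)
print(info["version"], info["oid"], int(info["size"]))
```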
tokenizer_config.json ADDED
@@ -0,0 +1,3713 @@
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "added_tokens_decoder": {
5
+ "151643": {
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "151644": {
14
+ "content": "<|im_start|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "151645": {
22
+ "content": "<|im_end|>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "151646": {
30
+ "content": "<|object_ref_start|>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "151647": {
38
+ "content": "<|object_ref_end|>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "151648": {
46
+ "content": "<|box_start|>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "151649": {
54
+ "content": "<|box_end|>",
55
+ "lstrip": false,
56
+ "normalized": false,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ },
61
+ "151650": {
62
+ "content": "<|quad_start|>",
63
+ "lstrip": false,
64
+ "normalized": false,
65
+ "rstrip": false,
66
+ "single_word": false,
67
+ "special": true
68
+ },
69
+ "151651": {
70
+ "content": "<|quad_end|>",
71
+ "lstrip": false,
72
+ "normalized": false,
73
+ "rstrip": false,
74
+ "single_word": false,
75
+ "special": true
76
+ },
77
+ "151652": {
78
+ "content": "<|vision_start|>",
79
+ "lstrip": false,
80
+ "normalized": false,
81
+ "rstrip": false,
82
+ "single_word": false,
83
+ "special": true
84
+ },
85
+ "151653": {
86
+ "content": "<|vision_end|>",
87
+ "lstrip": false,
88
+ "normalized": false,
89
+ "rstrip": false,
90
+ "single_word": false,
91
+ "special": true
92
+ },
93
+ "151654": {
94
+ "content": "<|vision_pad|>",
95
+ "lstrip": false,
96
+ "normalized": false,
97
+ "rstrip": false,
98
+ "single_word": false,
99
+ "special": true
100
+ },
101
+ "151655": {
102
+ "content": "<|image_pad|>",
103
+ "lstrip": false,
104
+ "normalized": false,
105
+ "rstrip": false,
106
+ "single_word": false,
107
+ "special": true
108
+ },
109
+ "151656": {
110
+ "content": "<|video_pad|>",
111
+ "lstrip": false,
112
+ "normalized": false,
113
+ "rstrip": false,
114
+ "single_word": false,
115
+ "special": true
116
+ },
117
+ "151657": {
118
+ "content": "<tool_call>",
119
+ "lstrip": false,
120
+ "normalized": false,
121
+ "rstrip": false,
122
+ "single_word": false,
123
+ "special": false
124
+ },
125
+ "151658": {
126
+ "content": "</tool_call>",
127
+ "lstrip": false,
128
+ "normalized": false,
129
+ "rstrip": false,
130
+ "single_word": false,
131
+ "special": false
132
+ },
133
+ "151659": {
134
+ "content": "<|fim_prefix|>",
135
+ "lstrip": false,
136
+ "normalized": false,
137
+ "rstrip": false,
138
+ "single_word": false,
139
+ "special": false
140
+ },
141
+ "151660": {
142
+ "content": "<|fim_middle|>",
143
+ "lstrip": false,
144
+ "normalized": false,
145
+ "rstrip": false,
146
+ "single_word": false,
147
+ "special": false
148
+ },
149
+ "151661": {
150
+ "content": "<|fim_suffix|>",
151
+ "lstrip": false,
152
+ "normalized": false,
153
+ "rstrip": false,
154
+ "single_word": false,
155
+ "special": false
156
+ },
157
+ "151662": {
158
+ "content": "<|fim_pad|>",
159
+ "lstrip": false,
160
+ "normalized": false,
161
+ "rstrip": false,
162
+ "single_word": false,
163
+ "special": false
164
+ },
165
+ "151663": {
166
+ "content": "<|repo_name|>",
167
+ "lstrip": false,
168
+ "normalized": false,
169
+ "rstrip": false,
170
+ "single_word": false,
171
+ "special": false
172
+ },
173
+ "151664": {
174
+ "content": "<|file_sep|>",
175
+ "lstrip": false,
176
+ "normalized": false,
177
+ "rstrip": false,
178
+ "single_word": false,
179
+ "special": false
180
+ },
181
+ "151665": {
182
+ "content": "<DEPTH_START>",
183
+ "lstrip": false,
184
+ "normalized": true,
185
+ "rstrip": false,
186
+ "single_word": false,
187
+ "special": false
188
+ },
189
+ "151666": {
190
+ "content": "<DEPTH_END>",
191
+ "lstrip": false,
192
+ "normalized": true,
193
+ "rstrip": false,
194
+ "single_word": false,
195
+ "special": false
196
+ },
197
+ "151667": {
198
+ "content": "<DEPTH_0>",
199
+ "lstrip": false,
200
+ "normalized": true,
201
+ "rstrip": false,
202
+ "single_word": false,
203
+ "special": false
204
+ },
205
+ "151668": {
206
+ "content": "<DEPTH_1>",
207
+ "lstrip": false,
208
+ "normalized": true,
209
+ "rstrip": false,
210
+ "single_word": false,
211
+ "special": false
212
+ },
213
+ "151669": {
214
+ "content": "<DEPTH_2>",
215
+ "lstrip": false,
216
+ "normalized": true,
217
+ "rstrip": false,
218
+ "single_word": false,
219
+ "special": false
220
+ },
221
+ "151670": {
222
+ "content": "<DEPTH_3>",
223
+ "lstrip": false,
224
+ "normalized": true,
225
+ "rstrip": false,
226
+ "single_word": false,
227
+ "special": false
228
+ },
229
+ "151671": {
230
+ "content": "<DEPTH_4>",
231
+ "lstrip": false,
232
+ "normalized": true,
233
+ "rstrip": false,
234
+ "single_word": false,
235
+ "special": false
236
+ },
237
+ "151672": {
238
+ "content": "<DEPTH_5>",
239
+ "lstrip": false,
240
+ "normalized": true,
241
+ "rstrip": false,
242
+ "single_word": false,
243
+ "special": false
244
+ },
245
+ "151673": {
246
+ "content": "<DEPTH_6>",
247
+ "lstrip": false,
248
+ "normalized": true,
249
+ "rstrip": false,
250
+ "single_word": false,
251
+ "special": false
252
+ },
253
+ "151674": {
254
+ "content": "<DEPTH_7>",
255
+ "lstrip": false,
256
+ "normalized": true,
257
+ "rstrip": false,
258
+ "single_word": false,
259
+ "special": false
260
+ },
261
+ "151675": {
262
+ "content": "<DEPTH_8>",
263
+ "lstrip": false,
264
+ "normalized": true,
265
+ "rstrip": false,
266
+ "single_word": false,
267
+ "special": false
268
+ },
269
+ "151676": {
270
+ "content": "<DEPTH_9>",
271
+ "lstrip": false,
272
+ "normalized": true,
273
+ "rstrip": false,
274
+ "single_word": false,
275
+ "special": false
276
+ },
277
+ "151677": {
278
+ "content": "<DEPTH_10>",
279
+ "lstrip": false,
280
+ "normalized": true,
281
+ "rstrip": false,
282
+ "single_word": false,
283
+ "special": false
284
+ },
285
+ "151678": {
286
+ "content": "<DEPTH_11>",
287
+ "lstrip": false,
288
+ "normalized": true,
289
+ "rstrip": false,
290
+ "single_word": false,
291
+ "special": false
292
+ },
293
+ "151679": {
294
+ "content": "<DEPTH_12>",
295
+ "lstrip": false,
296
+ "normalized": true,
297
+ "rstrip": false,
298
+ "single_word": false,
299
+ "special": false
300
+ },
301
+ "151680": {
302
+ "content": "<DEPTH_13>",
303
+ "lstrip": false,
304
+ "normalized": true,
305
+ "rstrip": false,
306
+ "single_word": false,
307
+ "special": false
308
+ },
309
+ "151681": {
310
+ "content": "<DEPTH_14>",
311
+ "lstrip": false,
312
+ "normalized": true,
313
+ "rstrip": false,
314
+ "single_word": false,
315
+ "special": false
316
+ },
317
+ "151682": {
318
+ "content": "<DEPTH_15>",
319
+ "lstrip": false,
320
+ "normalized": true,
321
+ "rstrip": false,
322
+ "single_word": false,
323
+ "special": false
324
+ },
325
+ "151683": {
326
+ "content": "<DEPTH_16>",
327
+ "lstrip": false,
328
+ "normalized": true,
329
+ "rstrip": false,
330
+ "single_word": false,
331
+ "special": false
332
+ },
333
+ "151684": {
334
+ "content": "<DEPTH_17>",
335
+ "lstrip": false,
336
+ "normalized": true,
337
+ "rstrip": false,
338
+ "single_word": false,
339
+ "special": false
340
+ },
341
+ "151685": {
342
+ "content": "<DEPTH_18>",
343
+ "lstrip": false,
344
+ "normalized": true,
345
+ "rstrip": false,
346
+ "single_word": false,
347
+ "special": false
348
+ },
349
+ "151686": {
350
+ "content": "<DEPTH_19>",
351
+ "lstrip": false,
352
+ "normalized": true,
353
+ "rstrip": false,
354
+ "single_word": false,
355
+ "special": false
356
+ },
357
+ "151687": {
358
+ "content": "<DEPTH_20>",
359
+ "lstrip": false,
360
+ "normalized": true,
361
+ "rstrip": false,
362
+ "single_word": false,
363
+ "special": false
364
+ },
365
+ "151688": {
366
+ "content": "<DEPTH_21>",
367
+ "lstrip": false,
368
+ "normalized": true,
369
+ "rstrip": false,
370
+ "single_word": false,
371
+ "special": false
372
+ },
373
+ "151689": {
374
+ "content": "<DEPTH_22>",
375
+ "lstrip": false,
376
+ "normalized": true,
377
+ "rstrip": false,
378
+ "single_word": false,
379
+ "special": false
380
+ },
381
+ "151690": {
382
+ "content": "<DEPTH_23>",
383
+ "lstrip": false,
384
+ "normalized": true,
385
+ "rstrip": false,
386
+ "single_word": false,
387
+ "special": false
388
+ },
389
+ "151691": {
390
+ "content": "<DEPTH_24>",
391
+ "lstrip": false,
392
+ "normalized": true,
393
+ "rstrip": false,
394
+ "single_word": false,
395
+ "special": false
396
+ },
397
+ "151692": {
398
+ "content": "<DEPTH_25>",
399
+ "lstrip": false,
400
+ "normalized": true,
401
+ "rstrip": false,
402
+ "single_word": false,
403
+ "special": false
404
+ },
405
+ "151693": {
406
+ "content": "<DEPTH_26>",
407
+ "lstrip": false,
408
+ "normalized": true,
409
+ "rstrip": false,
410
+ "single_word": false,
411
+ "special": false
412
+ },
413
+ "151694": {
414
+ "content": "<DEPTH_27>",
415
+ "lstrip": false,
416
+ "normalized": true,
417
+ "rstrip": false,
418
+ "single_word": false,
419
+ "special": false
420
+ },
421
+ "151695": {
422
+ "content": "<DEPTH_28>",
423
+ "lstrip": false,
424
+ "normalized": true,
425
+ "rstrip": false,
426
+ "single_word": false,
427
+ "special": false
428
+ },
429
+ "151696": {
430
+ "content": "<DEPTH_29>",
431
+ "lstrip": false,
432
+ "normalized": true,
433
+ "rstrip": false,
434
+ "single_word": false,
435
+ "special": false
436
+ },
437
+ "151697": {
438
+ "content": "<DEPTH_30>",
439
+ "lstrip": false,
440
+ "normalized": true,
441
+ "rstrip": false,
442
+ "single_word": false,
443
+ "special": false
444
+ },
445
+ "151698": {
446
+ "content": "<DEPTH_31>",
447
+ "lstrip": false,
448
+ "normalized": true,
449
+ "rstrip": false,
450
+ "single_word": false,
451
+ "special": false
452
+ },
453
+ "151699": {
454
+ "content": "<DEPTH_32>",
455
+ "lstrip": false,
456
+ "normalized": true,
457
+ "rstrip": false,
458
+ "single_word": false,
459
+ "special": false
460
+ },
461
+ "151700": {
462
+ "content": "<DEPTH_33>",
463
+ "lstrip": false,
464
+ "normalized": true,
465
+ "rstrip": false,
466
+ "single_word": false,
467
+ "special": false
468
+ },
469
+ "151701": {
470
+ "content": "<DEPTH_34>",
471
+ "lstrip": false,
472
+ "normalized": true,
473
+ "rstrip": false,
474
+ "single_word": false,
475
+ "special": false
476
+ },
477
+ "151702": {
478
+ "content": "<DEPTH_35>",
479
+ "lstrip": false,
480
+ "normalized": true,
481
+ "rstrip": false,
482
+ "single_word": false,
483
+ "special": false
484
+ },
485
+ "151703": {
486
+ "content": "<DEPTH_36>",
487
+ "lstrip": false,
488
+ "normalized": true,
489
+ "rstrip": false,
490
+ "single_word": false,
491
+ "special": false
492
+ },
493
+ "151704": {
494
+ "content": "<DEPTH_37>",
495
+ "lstrip": false,
496
+ "normalized": true,
497
+ "rstrip": false,
498
+ "single_word": false,
499
+ "special": false
500
+ },
501
+ "151705": {
502
+ "content": "<DEPTH_38>",
503
+ "lstrip": false,
504
+ "normalized": true,
505
+ "rstrip": false,
506
+ "single_word": false,
507
+ "special": false
508
+ },
509
+ "151706": {
510
+ "content": "<DEPTH_39>",
511
+ "lstrip": false,
512
+ "normalized": true,
513
+ "rstrip": false,
514
+ "single_word": false,
515
+ "special": false
516
+ },
517
+ "151707": {
518
+ "content": "<DEPTH_40>",
519
+ "lstrip": false,
520
+ "normalized": true,
521
+ "rstrip": false,
522
+ "single_word": false,
523
+ "special": false
524
+ },
525
+ "151708": {
526
+ "content": "<DEPTH_41>",
527
+ "lstrip": false,
528
+ "normalized": true,
529
+ "rstrip": false,
530
+ "single_word": false,
531
+ "special": false
532
+ },
533
+ "151709": {
534
+ "content": "<DEPTH_42>",
535
+ "lstrip": false,
536
+ "normalized": true,
537
+ "rstrip": false,
538
+ "single_word": false,
539
+ "special": false
540
+ },
541
+ "151710": {
542
+ "content": "<DEPTH_43>",
543
+ "lstrip": false,
544
+ "normalized": true,
545
+ "rstrip": false,
546
+ "single_word": false,
547
+ "special": false
548
+ },
549
+ "151711": {
550
+ "content": "<DEPTH_44>",
551
+ "lstrip": false,
552
+ "normalized": true,
553
+ "rstrip": false,
554
+ "single_word": false,
555
+ "special": false
556
+ },
557
+ "151712": {
558
+ "content": "<DEPTH_45>",
559
+ "lstrip": false,
560
+ "normalized": true,
561
+ "rstrip": false,
562
+ "single_word": false,
563
+ "special": false
564
+ },
565
+ "151713": {
566
+ "content": "<DEPTH_46>",
567
+ "lstrip": false,
568
+ "normalized": true,
569
+ "rstrip": false,
570
+ "single_word": false,
571
+ "special": false
572
+ },
573
+ "151714": {
574
+ "content": "<DEPTH_47>",
575
+ "lstrip": false,
576
+ "normalized": true,
577
+ "rstrip": false,
578
+ "single_word": false,
579
+ "special": false
580
+ },
581
+ "151715": {
582
+ "content": "<DEPTH_48>",
583
+ "lstrip": false,
584
+ "normalized": true,
585
+ "rstrip": false,
586
+ "single_word": false,
587
+ "special": false
588
+ },
589
+ "151716": {
590
+ "content": "<DEPTH_49>",
591
+ "lstrip": false,
592
+ "normalized": true,
593
+ "rstrip": false,
594
+ "single_word": false,
595
+ "special": false
596
+ },
597
+ "151717": {
598
+ "content": "<DEPTH_50>",
599
+ "lstrip": false,
600
+ "normalized": true,
601
+ "rstrip": false,
602
+ "single_word": false,
603
+ "special": false
604
+ },
605
+ "151718": {
606
+ "content": "<DEPTH_51>",
607
+ "lstrip": false,
608
+ "normalized": true,
609
+ "rstrip": false,
610
+ "single_word": false,
611
+ "special": false
612
+ },
613
+ "151719": {
614
+ "content": "<DEPTH_52>",
615
+ "lstrip": false,
616
+ "normalized": true,
617
+ "rstrip": false,
618
+ "single_word": false,
619
+ "special": false
620
+ },
621
+ "151720": {
622
+ "content": "<DEPTH_53>",
623
+ "lstrip": false,
624
+ "normalized": true,
625
+ "rstrip": false,
626
+ "single_word": false,
627
+ "special": false
628
+ },
629
+ "151721": {
630
+ "content": "<DEPTH_54>",
631
+ "lstrip": false,
632
+ "normalized": true,
633
+ "rstrip": false,
634
+ "single_word": false,
635
+ "special": false
636
+ },
637
+ "151722": {
638
+ "content": "<DEPTH_55>",
639
+ "lstrip": false,
640
+ "normalized": true,
641
+ "rstrip": false,
642
+ "single_word": false,
643
+ "special": false
644
+ },
645
+ "151723": {
646
+ "content": "<DEPTH_56>",
647
+ "lstrip": false,
648
+ "normalized": true,
649
+ "rstrip": false,
650
+ "single_word": false,
651
+ "special": false
652
+ },
653
+ "151724": {
654
+ "content": "<DEPTH_57>",
655
+ "lstrip": false,
656
+ "normalized": true,
657
+ "rstrip": false,
658
+ "single_word": false,
659
+ "special": false
660
+ },
661
+ "151725": {
662
+ "content": "<DEPTH_58>",
663
+ "lstrip": false,
664
+ "normalized": true,
665
+ "rstrip": false,
666
+ "single_word": false,
667
+ "special": false
668
+ },
669
+ "151726": {
670
+ "content": "<DEPTH_59>",
671
+ "lstrip": false,
672
+ "normalized": true,
673
+ "rstrip": false,
674
+ "single_word": false,
675
+ "special": false
676
+ },
677
+ "151727": {
678
+ "content": "<DEPTH_60>",
679
+ "lstrip": false,
680
+ "normalized": true,
681
+ "rstrip": false,
682
+ "single_word": false,
683
+ "special": false
684
+ },
685
+ "151728": {
686
+ "content": "<DEPTH_61>",
687
+ "lstrip": false,
688
+ "normalized": true,
689
+ "rstrip": false,
690
+ "single_word": false,
691
+ "special": false
692
+ },
693
+ "151729": {
694
+ "content": "<DEPTH_62>",
695
+ "lstrip": false,
696
+ "normalized": true,
697
+ "rstrip": false,
698
+ "single_word": false,
699
+ "special": false
700
+ },
701
+ "151730": {
702
+ "content": "<DEPTH_63>",
703
+ "lstrip": false,
704
+ "normalized": true,
705
+ "rstrip": false,
706
+ "single_word": false,
707
+ "special": false
708
+ },
709
+ "151731": {
710
+ "content": "<DEPTH_64>",
711
+ "lstrip": false,
712
+ "normalized": true,
713
+ "rstrip": false,
714
+ "single_word": false,
715
+ "special": false
716
+ },
717
+ "151732": {
718
+ "content": "<DEPTH_65>",
719
+ "lstrip": false,
720
+ "normalized": true,
721
+ "rstrip": false,
722
+ "single_word": false,
723
+ "special": false
724
+ },
725
+ "151733": {
726
+ "content": "<DEPTH_66>",
727
+ "lstrip": false,
728
+ "normalized": true,
729
+ "rstrip": false,
730
+ "single_word": false,
731
+ "special": false
732
+ },
733
+ "151734": {
734
+ "content": "<DEPTH_67>",
735
+ "lstrip": false,
736
+ "normalized": true,
737
+ "rstrip": false,
738
+ "single_word": false,
739
+ "special": false
740
+ },
741
+ "151735": {
742
+ "content": "<DEPTH_68>",
743
+ "lstrip": false,
744
+ "normalized": true,
745
+ "rstrip": false,
746
+ "single_word": false,
747
+ "special": false
748
+ },
749
+ "151736": {
750
+ "content": "<DEPTH_69>",
751
+ "lstrip": false,
752
+ "normalized": true,
753
+ "rstrip": false,
754
+ "single_word": false,
755
+ "special": false
756
+ },
757
+ "151737": {
758
+ "content": "<DEPTH_70>",
759
+ "lstrip": false,
760
+ "normalized": true,
761
+ "rstrip": false,
762
+ "single_word": false,
763
+ "special": false
764
+ },
765
+ "151738": {
766
+ "content": "<DEPTH_71>",
767
+ "lstrip": false,
768
+ "normalized": true,
769
+ "rstrip": false,
770
+ "single_word": false,
771
+ "special": false
772
+ },
773
+ "151739": {
774
+ "content": "<DEPTH_72>",
775
+ "lstrip": false,
776
+ "normalized": true,
777
+ "rstrip": false,
778
+ "single_word": false,
779
+ "special": false
780
+ },
781
+ "151740": {
782
+ "content": "<DEPTH_73>",
783
+ "lstrip": false,
784
+ "normalized": true,
785
+ "rstrip": false,
786
+ "single_word": false,
787
+ "special": false
788
+ },
789
+ "151741": {
790
+ "content": "<DEPTH_74>",
791
+ "lstrip": false,
792
+ "normalized": true,
793
+ "rstrip": false,
794
+ "single_word": false,
795
+ "special": false
796
+ },
797
+ "151742": {
798
+ "content": "<DEPTH_75>",
799
+ "lstrip": false,
800
+ "normalized": true,
801
+ "rstrip": false,
802
+ "single_word": false,
803
+ "special": false
804
+ },
805
+ "151743": {
806
+ "content": "<DEPTH_76>",
807
+ "lstrip": false,
808
+ "normalized": true,
809
+ "rstrip": false,
810
+ "single_word": false,
811
+ "special": false
812
+ },
813
+ "151744": {
814
+ "content": "<DEPTH_77>",
815
+ "lstrip": false,
816
+ "normalized": true,
817
+ "rstrip": false,
818
+ "single_word": false,
819
+ "special": false
820
+ },
821
+ "151745": {
822
+ "content": "<DEPTH_78>",
823
+ "lstrip": false,
824
+ "normalized": true,
825
+ "rstrip": false,
826
+ "single_word": false,
827
+ "special": false
828
+ },
829
+ "151746": {
830
+ "content": "<DEPTH_79>",
831
+ "lstrip": false,
832
+ "normalized": true,
833
+ "rstrip": false,
834
+ "single_word": false,
835
+ "special": false
836
+ },
837
+ "151747": {
838
+ "content": "<DEPTH_80>",
839
+ "lstrip": false,
840
+ "normalized": true,
841
+ "rstrip": false,
842
+ "single_word": false,
843
+ "special": false
844
+ },
845
+ "151748": {
846
+ "content": "<DEPTH_81>",
847
+ "lstrip": false,
848
+ "normalized": true,
849
+ "rstrip": false,
850
+ "single_word": false,
851
+ "special": false
852
+ },
853
+ "151749": {
854
+ "content": "<DEPTH_82>",
855
+ "lstrip": false,
856
+ "normalized": true,
857
+ "rstrip": false,
858
+ "single_word": false,
859
+ "special": false
860
+ },
861
+ "151750": {
862
+ "content": "<DEPTH_83>",
863
+ "lstrip": false,
864
+ "normalized": true,
865
+ "rstrip": false,
866
+ "single_word": false,
867
+ "special": false
868
+ },
869
+ "151751": {
870
+ "content": "<DEPTH_84>",
871
+ "lstrip": false,
872
+ "normalized": true,
873
+ "rstrip": false,
874
+ "single_word": false,
875
+ "special": false
876
+ },
877
+ "151752": {
878
+ "content": "<DEPTH_85>",
879
+ "lstrip": false,
880
+ "normalized": true,
881
+ "rstrip": false,
882
+ "single_word": false,
883
+ "special": false
884
+ },
885
+ "151753": {
886
+ "content": "<DEPTH_86>",
887
+ "lstrip": false,
888
+ "normalized": true,
889
+ "rstrip": false,
890
+ "single_word": false,
891
+ "special": false
892
+ },
893
+ "151754": {
894
+ "content": "<DEPTH_87>",
895
+ "lstrip": false,
896
+ "normalized": true,
897
+ "rstrip": false,
898
+ "single_word": false,
899
+ "special": false
900
+ },
901
+ "151755": {
902
+ "content": "<DEPTH_88>",
903
+ "lstrip": false,
904
+ "normalized": true,
905
+ "rstrip": false,
906
+ "single_word": false,
907
+ "special": false
908
+ },
909
+ "151756": {
910
+ "content": "<DEPTH_89>",
911
+ "lstrip": false,
912
+ "normalized": true,
913
+ "rstrip": false,
914
+ "single_word": false,
915
+ "special": false
916
+ },
917
+ "151757": {
918
+ "content": "<DEPTH_90>",
919
+ "lstrip": false,
920
+ "normalized": true,
921
+ "rstrip": false,
922
+ "single_word": false,
923
+ "special": false
924
+ },
925
+ "151758": {
926
+ "content": "<DEPTH_91>",
927
+ "lstrip": false,
928
+ "normalized": true,
929
+ "rstrip": false,
930
+ "single_word": false,
931
+ "special": false
932
+ },
933
+ "151759": {
934
+ "content": "<DEPTH_92>",
935
+ "lstrip": false,
936
+ "normalized": true,
937
+ "rstrip": false,
938
+ "single_word": false,
939
+ "special": false
940
+ },
941
+ "151760": {
942
+ "content": "<DEPTH_93>",
943
+ "lstrip": false,
944
+ "normalized": true,
945
+ "rstrip": false,
946
+ "single_word": false,
947
+ "special": false
948
+ },
949
+ "151761": {
950
+ "content": "<DEPTH_94>",
951
+ "lstrip": false,
952
+ "normalized": true,
953
+ "rstrip": false,
954
+ "single_word": false,
955
+ "special": false
956
+ },
957
+ "151762": {
958
+ "content": "<DEPTH_95>",
959
+ "lstrip": false,
960
+ "normalized": true,
961
+ "rstrip": false,
962
+ "single_word": false,
963
+ "special": false
964
+ },
965
+ "151763": {
966
+ "content": "<DEPTH_96>",
967
+ "lstrip": false,
968
+ "normalized": true,
969
+ "rstrip": false,
970
+ "single_word": false,
971
+ "special": false
972
+ },
973
+ "151764": {
974
+ "content": "<DEPTH_97>",
975
+ "lstrip": false,
976
+ "normalized": true,
977
+ "rstrip": false,
978
+ "single_word": false,
979
+ "special": false
980
+ },
981
+ "151765": {
982
+ "content": "<DEPTH_98>",
983
+ "lstrip": false,
984
+ "normalized": true,
985
+ "rstrip": false,
986
+ "single_word": false,
987
+ "special": false
988
+ },
989
+ "151766": {
990
+ "content": "<DEPTH_99>",
991
+ "lstrip": false,
992
+ "normalized": true,
993
+ "rstrip": false,
994
+ "single_word": false,
995
+ "special": false
996
+ },
997
+ "151767": {
998
+ "content": "<DEPTH_100>",
999
+ "lstrip": false,
1000
+ "normalized": true,
1001
+ "rstrip": false,
1002
+ "single_word": false,
1003
+ "special": false
1004
+ },
1005
+ "151768": {
1006
+ "content": "<DEPTH_101>",
1007
+ "lstrip": false,
1008
+ "normalized": true,
1009
+ "rstrip": false,
1010
+ "single_word": false,
1011
+ "special": false
1012
+ },
1013
+ "151769": {
1014
+ "content": "<DEPTH_102>",
1015
+ "lstrip": false,
1016
+ "normalized": true,
1017
+ "rstrip": false,
1018
+ "single_word": false,
1019
+ "special": false
1020
+ },
1021
+ "151770": {
1022
+ "content": "<DEPTH_103>",
1023
+ "lstrip": false,
1024
+ "normalized": true,
1025
+ "rstrip": false,
1026
+ "single_word": false,
1027
+ "special": false
1028
+ },
1029
+ "151771": {
1030
+ "content": "<DEPTH_104>",
1031
+ "lstrip": false,
1032
+ "normalized": true,
1033
+ "rstrip": false,
1034
+ "single_word": false,
1035
+ "special": false
1036
+ },
1037
+ "151772": {
1038
+ "content": "<DEPTH_105>",
1039
+ "lstrip": false,
1040
+ "normalized": true,
1041
+ "rstrip": false,
1042
+ "single_word": false,
1043
+ "special": false
1044
+ },
1045
+ "151773": {
1046
+ "content": "<DEPTH_106>",
1047
+ "lstrip": false,
1048
+ "normalized": true,
1049
+ "rstrip": false,
1050
+ "single_word": false,
1051
+ "special": false
1052
+ },
1053
+ "151774": {
1054
+ "content": "<DEPTH_107>",
1055
+ "lstrip": false,
1056
+ "normalized": true,
1057
+ "rstrip": false,
1058
+ "single_word": false,
1059
+ "special": false
1060
+ },
1061
+ "151775": {
1062
+ "content": "<DEPTH_108>",
1063
+ "lstrip": false,
1064
+ "normalized": true,
1065
+ "rstrip": false,
1066
+ "single_word": false,
1067
+ "special": false
1068
+ },
1069
+ "151776": {
1070
+ "content": "<DEPTH_109>",
1071
+ "lstrip": false,
1072
+ "normalized": true,
1073
+ "rstrip": false,
1074
+ "single_word": false,
1075
+ "special": false
1076
+ },
1077
+ "151777": {
1078
+ "content": "<DEPTH_110>",
1079
+ "lstrip": false,
1080
+ "normalized": true,
1081
+ "rstrip": false,
1082
+ "single_word": false,
1083
+ "special": false
1084
+ },
1085
+ "151778": {
1086
+ "content": "<DEPTH_111>",
1087
+ "lstrip": false,
1088
+ "normalized": true,
1089
+ "rstrip": false,
1090
+ "single_word": false,
1091
+ "special": false
1092
+ },
1093
+ "151779": {
1094
+ "content": "<DEPTH_112>",
1095
+ "lstrip": false,
1096
+ "normalized": true,
1097
+ "rstrip": false,
1098
+ "single_word": false,
1099
+ "special": false
1100
+ },
1101
+ "151780": {
1102
+ "content": "<DEPTH_113>",
1103
+ "lstrip": false,
1104
+ "normalized": true,
1105
+ "rstrip": false,
1106
+ "single_word": false,
1107
+ "special": false
1108
+ },
1109
+ "151781": {
1110
+ "content": "<DEPTH_114>",
1111
+ "lstrip": false,
1112
+ "normalized": true,
1113
+ "rstrip": false,
1114
+ "single_word": false,
1115
+ "special": false
1116
+ },
1117
+ "151782": {
1118
+ "content": "<DEPTH_115>",
1119
+ "lstrip": false,
1120
+ "normalized": true,
1121
+ "rstrip": false,
1122
+ "single_word": false,
1123
+ "special": false
1124
+ },
1125
+ "151783": {
1126
+ "content": "<DEPTH_116>",
1127
+ "lstrip": false,
1128
+ "normalized": true,
1129
+ "rstrip": false,
1130
+ "single_word": false,
1131
+ "special": false
1132
+ },
1133
+ "151784": {
1134
+ "content": "<DEPTH_117>",
1135
+ "lstrip": false,
1136
+ "normalized": true,
1137
+ "rstrip": false,
1138
+ "single_word": false,
1139
+ "special": false
1140
+ },
1141
+ "151785": {
1142
+ "content": "<DEPTH_118>",
1143
+ "lstrip": false,
1144
+ "normalized": true,
1145
+ "rstrip": false,
1146
+ "single_word": false,
1147
+ "special": false
1148
+ },
1149
+ "151786": {
1150
+ "content": "<DEPTH_119>",
1151
+ "lstrip": false,
1152
+ "normalized": true,
1153
+ "rstrip": false,
1154
+ "single_word": false,
1155
+ "special": false
1156
+ },
1157
+ "151787": {
1158
+ "content": "<DEPTH_120>",
1159
+ "lstrip": false,
1160
+ "normalized": true,
1161
+ "rstrip": false,
1162
+ "single_word": false,
1163
+ "special": false
1164
+ },
1165
+ "151788": {
1166
+ "content": "<DEPTH_121>",
1167
+ "lstrip": false,
1168
+ "normalized": true,
1169
+ "rstrip": false,
1170
+ "single_word": false,
1171
+ "special": false
1172
+ },
1173
+ "151789": {
1174
+ "content": "<DEPTH_122>",
1175
+ "lstrip": false,
1176
+ "normalized": true,
1177
+ "rstrip": false,
1178
+ "single_word": false,
1179
+ "special": false
1180
+ },
1181
+ "151790": {
1182
+ "content": "<DEPTH_123>",
1183
+ "lstrip": false,
1184
+ "normalized": true,
1185
+ "rstrip": false,
1186
+ "single_word": false,
1187
+ "special": false
1188
+ },
1189
+ "151791": {
1190
+ "content": "<DEPTH_124>",
1191
+ "lstrip": false,
1192
+ "normalized": true,
1193
+ "rstrip": false,
1194
+ "single_word": false,
1195
+ "special": false
1196
+ },
1197
+ "151792": {
1198
+ "content": "<DEPTH_125>",
1199
+ "lstrip": false,
1200
+ "normalized": true,
1201
+ "rstrip": false,
1202
+ "single_word": false,
1203
+ "special": false
1204
+ },
1205
+ "151793": {
1206
+ "content": "<DEPTH_126>",
1207
+ "lstrip": false,
1208
+ "normalized": true,
1209
+ "rstrip": false,
1210
+ "single_word": false,
1211
+ "special": false
1212
+ },
1213
+ "151794": {
1214
+ "content": "<DEPTH_127>",
1215
+ "lstrip": false,
1216
+ "normalized": true,
1217
+ "rstrip": false,
1218
+ "single_word": false,
1219
+ "special": false
1220
+ },
1221
+ "151795": {
1222
+ "content": "|<EXTRA_TOKENS_0>|",
1223
+ "lstrip": false,
1224
+ "normalized": false,
1225
+ "rstrip": false,
1226
+ "single_word": false,
1227
+ "special": true
1228
+ },
1229
+ "151796": {
1230
+ "content": "|<EXTRA_TOKENS_1>|",
1231
+ "lstrip": false,
1232
+ "normalized": false,
1233
+ "rstrip": false,
1234
+ "single_word": false,
1235
+ "special": true
1236
+ },
1237
+ "151797": {
1238
+ "content": "|<EXTRA_TOKENS_2>|",
1239
+ "lstrip": false,
1240
+ "normalized": false,
1241
+ "rstrip": false,
1242
+ "single_word": false,
1243
+ "special": true
1244
+ },
1245
+ "151798": {
1246
+ "content": "|<EXTRA_TOKENS_3>|",
1247
+ "lstrip": false,
1248
+ "normalized": false,
1249
+ "rstrip": false,
1250
+ "single_word": false,
1251
+ "special": true
1252
+ },
1253
+ "151799": {
1254
+ "content": "|<EXTRA_TOKENS_4>|",
1255
+ "lstrip": false,
1256
+ "normalized": false,
1257
+ "rstrip": false,
1258
+ "single_word": false,
1259
+ "special": true
1260
+ },
1261
+ "151800": {
1262
+ "content": "|<EXTRA_TOKENS_5>|",
1263
+ "lstrip": false,
1264
+ "normalized": false,
1265
+ "rstrip": false,
1266
+ "single_word": false,
1267
+ "special": true
1268
+ },
1269
+ "151801": {
1270
+ "content": "|<EXTRA_TOKENS_6>|",
1271
+ "lstrip": false,
1272
+ "normalized": false,
1273
+ "rstrip": false,
1274
+ "single_word": false,
1275
+ "special": true
1276
+ },
1277
+ "151802": {
1278
+ "content": "|<EXTRA_TOKENS_7>|",
1279
+ "lstrip": false,
1280
+ "normalized": false,
1281
+ "rstrip": false,
1282
+ "single_word": false,
1283
+ "special": true
1284
+ },
1285
+ "151803": {
1286
+ "content": "|<EXTRA_TOKENS_8>|",
1287
+ "lstrip": false,
1288
+ "normalized": false,
1289
+ "rstrip": false,
1290
+ "single_word": false,
1291
+ "special": true
1292
+ },
1293
+ "151804": {
1294
+ "content": "|<EXTRA_TOKENS_9>|",
1295
+ "lstrip": false,
1296
+ "normalized": false,
1297
+ "rstrip": false,
1298
+ "single_word": false,
1299
+ "special": true
1300
+ },
1301
+ "151805": {
1302
+ "content": "|<EXTRA_TOKENS_10>|",
1303
+ "lstrip": false,
1304
+ "normalized": false,
1305
+ "rstrip": false,
1306
+ "single_word": false,
1307
+ "special": true
1308
+ },
1309
+ "151806": {
1310
+ "content": "|<EXTRA_TOKENS_11>|",
1311
+ "lstrip": false,
1312
+ "normalized": false,
1313
+ "rstrip": false,
1314
+ "single_word": false,
1315
+ "special": true
1316
+ },
1317
+ "151807": {
1318
+ "content": "|<EXTRA_TOKENS_12>|",
1319
+ "lstrip": false,
1320
+ "normalized": false,
1321
+ "rstrip": false,
1322
+ "single_word": false,
1323
+ "special": true
1324
+ },
1325
+ "151808": {
1326
+ "content": "|<EXTRA_TOKENS_13>|",
1327
+ "lstrip": false,
1328
+ "normalized": false,
1329
+ "rstrip": false,
1330
+ "single_word": false,
1331
+ "special": true
1332
+ },
1333
+ "151809": {
1334
+ "content": "|<EXTRA_TOKENS_14>|",
1335
+ "lstrip": false,
1336
+ "normalized": false,
1337
+ "rstrip": false,
1338
+ "single_word": false,
1339
+ "special": true
1340
+ },
1341
+ "151810": {
1342
+ "content": "|<EXTRA_TOKENS_15>|",
1343
+ "lstrip": false,
1344
+ "normalized": false,
1345
+ "rstrip": false,
1346
+ "single_word": false,
1347
+ "special": true
1348
+ },
1349
+ "151811": {
1350
+ "content": "|<EXTRA_TOKENS_16>|",
1351
+ "lstrip": false,
1352
+ "normalized": false,
1353
+ "rstrip": false,
1354
+ "single_word": false,
1355
+ "special": true
1356
+ },
1357
+ "151812": {
1358
+ "content": "|<EXTRA_TOKENS_17>|",
1359
+ "lstrip": false,
1360
+ "normalized": false,
1361
+ "rstrip": false,
1362
+ "single_word": false,
1363
+ "special": true
1364
+ },
1365
+ "151813": {
1366
+ "content": "|<EXTRA_TOKENS_18>|",
1367
+ "lstrip": false,
1368
+ "normalized": false,
1369
+ "rstrip": false,
1370
+ "single_word": false,
1371
+ "special": true
1372
+ },
1373
+ "151814": {
1374
+ "content": "|<EXTRA_TOKENS_19>|",
1375
+ "lstrip": false,
1376
+ "normalized": false,
1377
+ "rstrip": false,
1378
+ "single_word": false,
1379
+ "special": true
1380
+ },
1381
+ "151815": {
1382
+ "content": "|<EXTRA_TOKENS_20>|",
1383
+ "lstrip": false,
1384
+ "normalized": false,
1385
+ "rstrip": false,
1386
+ "single_word": false,
1387
+ "special": true
1388
+ },
1389
+ "151816": {
1390
+ "content": "|<EXTRA_TOKENS_21>|",
1391
+ "lstrip": false,
1392
+ "normalized": false,
1393
+ "rstrip": false,
1394
+ "single_word": false,
1395
+ "special": true
1396
+ },
1397
+ "151817": {
1398
+ "content": "|<EXTRA_TOKENS_22>|",
1399
+ "lstrip": false,
1400
+ "normalized": false,
1401
+ "rstrip": false,
1402
+ "single_word": false,
1403
+ "special": true
1404
+ },
1405
+ "151818": {
1406
+ "content": "|<EXTRA_TOKENS_23>|",
1407
+ "lstrip": false,
1408
+ "normalized": false,
1409
+ "rstrip": false,
1410
+ "single_word": false,
1411
+ "special": true
1412
+ },
1413
+ "151819": {
1414
+ "content": "|<EXTRA_TOKENS_24>|",
1415
+ "lstrip": false,
1416
+ "normalized": false,
1417
+ "rstrip": false,
1418
+ "single_word": false,
1419
+ "special": true
1420
+ },
1421
+ "151820": {
1422
+ "content": "|<EXTRA_TOKENS_25>|",
1423
+ "lstrip": false,
1424
+ "normalized": false,
1425
+ "rstrip": false,
1426
+ "single_word": false,
1427
+ "special": true
1428
+ },
1429
+ "151821": {
1430
+ "content": "|<EXTRA_TOKENS_26>|",
1431
+ "lstrip": false,
1432
+ "normalized": false,
1433
+ "rstrip": false,
1434
+ "single_word": false,
1435
+ "special": true
1436
+ },
1437
+ "151822": {
1438
+ "content": "|<EXTRA_TOKENS_27>|",
1439
+ "lstrip": false,
1440
+ "normalized": false,
1441
+ "rstrip": false,
1442
+ "single_word": false,
1443
+ "special": true
1444
+ },
1445
+ "151823": {
1446
+ "content": "|<EXTRA_TOKENS_28>|",
1447
+ "lstrip": false,
1448
+ "normalized": false,
1449
+ "rstrip": false,
1450
+ "single_word": false,
1451
+ "special": true
1452
+ },
1453
+ "151824": {
1454
+ "content": "|<EXTRA_TOKENS_29>|",
1455
+ "lstrip": false,
1456
+ "normalized": false,
1457
+ "rstrip": false,
1458
+ "single_word": false,
1459
+ "special": true
1460
+ },
1461
+ "151825": {
1462
+ "content": "|<EXTRA_TOKENS_30>|",
1463
+ "lstrip": false,
1464
+ "normalized": false,
1465
+ "rstrip": false,
1466
+ "single_word": false,
1467
+ "special": true
1468
+ },
1469
+ "151826": {
1470
+ "content": "|<EXTRA_TOKENS_31>|",
1471
+ "lstrip": false,
1472
+ "normalized": false,
1473
+ "rstrip": false,
1474
+ "single_word": false,
1475
+ "special": true
1476
+ },
1477
+ "151827": {
1478
+ "content": "|<EXTRA_TOKENS_32>|",
1479
+ "lstrip": false,
1480
+ "normalized": false,
1481
+ "rstrip": false,
1482
+ "single_word": false,
1483
+ "special": true
1484
+ },
1485
+ "151828": {
1486
+ "content": "|<EXTRA_TOKENS_33>|",
1487
+ "lstrip": false,
1488
+ "normalized": false,
1489
+ "rstrip": false,
1490
+ "single_word": false,
1491
+ "special": true
1492
+ },
1493
+ "151829": {
1494
+ "content": "|<EXTRA_TOKENS_34>|",
1495
+ "lstrip": false,
1496
+ "normalized": false,
1497
+ "rstrip": false,
1498
+ "single_word": false,
1499
+ "special": true
1500
+ },
1501
+ "151830": {
1502
+ "content": "|<EXTRA_TOKENS_35>|",
1503
+ "lstrip": false,
1504
+ "normalized": false,
1505
+ "rstrip": false,
1506
+ "single_word": false,
1507
+ "special": true
1508
+ },
1509
+ "151831": {
1510
+ "content": "|<EXTRA_TOKENS_36>|",
1511
+ "lstrip": false,
1512
+ "normalized": false,
1513
+ "rstrip": false,
1514
+ "single_word": false,
1515
+ "special": true
1516
+ },
1517
+ "151832": {
1518
+ "content": "|<EXTRA_TOKENS_37>|",
1519
+ "lstrip": false,
1520
+ "normalized": false,
1521
+ "rstrip": false,
1522
+ "single_word": false,
1523
+ "special": true
1524
+ },
1525
+ "151833": {
1526
+ "content": "|<EXTRA_TOKENS_38>|",
1527
+ "lstrip": false,
1528
+ "normalized": false,
1529
+ "rstrip": false,
1530
+ "single_word": false,
1531
+ "special": true
1532
+ },
1533
+ "151834": {
1534
+ "content": "|<EXTRA_TOKENS_39>|",
1535
+ "lstrip": false,
1536
+ "normalized": false,
1537
+ "rstrip": false,
1538
+ "single_word": false,
1539
+ "special": true
1540
+ },
1541
+ "151835": {
1542
+ "content": "|<EXTRA_TOKENS_40>|",
1543
+ "lstrip": false,
1544
+ "normalized": false,
1545
+ "rstrip": false,
1546
+ "single_word": false,
1547
+ "special": true
1548
+ },
1549
+ "151836": {
1550
+ "content": "|<EXTRA_TOKENS_41>|",
1551
+ "lstrip": false,
1552
+ "normalized": false,
1553
+ "rstrip": false,
1554
+ "single_word": false,
1555
+ "special": true
1556
+ },
1557
+ "151837": {
1558
+ "content": "|<EXTRA_TOKENS_42>|",
1559
+ "lstrip": false,
1560
+ "normalized": false,
1561
+ "rstrip": false,
1562
+ "single_word": false,
1563
+ "special": true
1564
+ },
1565
+ "151838": {
1566
+ "content": "|<EXTRA_TOKENS_43>|",
1567
+ "lstrip": false,
1568
+ "normalized": false,
1569
+ "rstrip": false,
1570
+ "single_word": false,
1571
+ "special": true
1572
+ },
1573
+ "151839": {
1574
+ "content": "|<EXTRA_TOKENS_44>|",
1575
+ "lstrip": false,
1576
+ "normalized": false,
1577
+ "rstrip": false,
1578
+ "single_word": false,
1579
+ "special": true
1580
+ },
1581
+ "151840": {
1582
+ "content": "|<EXTRA_TOKENS_45>|",
1583
+ "lstrip": false,
1584
+ "normalized": false,
1585
+ "rstrip": false,
1586
+ "single_word": false,
1587
+ "special": true
1588
+ },
1589
+ "151841": {
1590
+ "content": "|<EXTRA_TOKENS_46>|",
1591
+ "lstrip": false,
1592
+ "normalized": false,
1593
+ "rstrip": false,
1594
+ "single_word": false,
1595
+ "special": true
1596
+ },
1597
+ "151842": {
1598
+ "content": "|<EXTRA_TOKENS_47>|",
1599
+ "lstrip": false,
1600
+ "normalized": false,
1601
+ "rstrip": false,
1602
+ "single_word": false,
1603
+ "special": true
1604
+ },
1605
+ "151843": {
1606
+ "content": "|<EXTRA_TOKENS_48>|",
1607
+ "lstrip": false,
1608
+ "normalized": false,
1609
+ "rstrip": false,
1610
+ "single_word": false,
1611
+ "special": true
1612
+ },
1613
+ "151844": {
1614
+ "content": "|<EXTRA_TOKENS_49>|",
1615
+ "lstrip": false,
1616
+ "normalized": false,
1617
+ "rstrip": false,
1618
+ "single_word": false,
1619
+ "special": true
1620
+ },
1621
+ "151845": {
1622
+ "content": "|<EXTRA_TOKENS_50>|",
1623
+ "lstrip": false,
1624
+ "normalized": false,
1625
+ "rstrip": false,
1626
+ "single_word": false,
1627
+ "special": true
1628
+ },
1629
+ "151846": {
1630
+ "content": "|<EXTRA_TOKENS_51>|",
1631
+ "lstrip": false,
1632
+ "normalized": false,
1633
+ "rstrip": false,
1634
+ "single_word": false,
1635
+ "special": true
1636
+ },
1637
+ "151847": {
1638
+ "content": "|<EXTRA_TOKENS_52>|",
1639
+ "lstrip": false,
1640
+ "normalized": false,
1641
+ "rstrip": false,
1642
+ "single_word": false,
1643
+ "special": true
1644
+ },
1645
+ "151848": {
1646
+ "content": "|<EXTRA_TOKENS_53>|",
1647
+ "lstrip": false,
1648
+ "normalized": false,
1649
+ "rstrip": false,
1650
+ "single_word": false,
1651
+ "special": true
1652
+ },
1653
+ "151849": {
1654
+ "content": "|<EXTRA_TOKENS_54>|",
1655
+ "lstrip": false,
1656
+ "normalized": false,
1657
+ "rstrip": false,
1658
+ "single_word": false,
1659
+ "special": true
1660
+ },
1661
+ "151850": {
1662
+ "content": "|<EXTRA_TOKENS_55>|",
1663
+ "lstrip": false,
1664
+ "normalized": false,
1665
+ "rstrip": false,
1666
+ "single_word": false,
1667
+ "special": true
1668
+ },
1669
+ "151851": {
1670
+ "content": "|<EXTRA_TOKENS_56>|",
1671
+ "lstrip": false,
1672
+ "normalized": false,
1673
+ "rstrip": false,
1674
+ "single_word": false,
1675
+ "special": true
1676
+ },
1677
+ "151852": {
1678
+ "content": "|<EXTRA_TOKENS_57>|",
1679
+ "lstrip": false,
1680
+ "normalized": false,
1681
+ "rstrip": false,
1682
+ "single_word": false,
1683
+ "special": true
1684
+ },
1685
+ "151853": {
1686
+ "content": "|<EXTRA_TOKENS_58>|",
1687
+ "lstrip": false,
1688
+ "normalized": false,
1689
+ "rstrip": false,
1690
+ "single_word": false,
1691
+ "special": true
1692
+ },
1693
+ "151854": {
1694
+ "content": "|<EXTRA_TOKENS_59>|",
1695
+ "lstrip": false,
1696
+ "normalized": false,
1697
+ "rstrip": false,
1698
+ "single_word": false,
1699
+ "special": true
1700
+ },
1701
+ "151855": {
1702
+ "content": "|<EXTRA_TOKENS_60>|",
1703
+ "lstrip": false,
1704
+ "normalized": false,
1705
+ "rstrip": false,
1706
+ "single_word": false,
1707
+ "special": true
1708
+ },
1709
+ "151856": {
1710
+ "content": "|<EXTRA_TOKENS_61>|",
1711
+ "lstrip": false,
1712
+ "normalized": false,
1713
+ "rstrip": false,
1714
+ "single_word": false,
1715
+ "special": true
1716
+ },
1717
+ "151857": {
1718
+ "content": "|<EXTRA_TOKENS_62>|",
1719
+ "lstrip": false,
1720
+ "normalized": false,
1721
+ "rstrip": false,
1722
+ "single_word": false,
1723
+ "special": true
1724
+ },
1725
+ "151858": {
1726
+ "content": "|<EXTRA_TOKENS_63>|",
1727
+ "lstrip": false,
1728
+ "normalized": false,
1729
+ "rstrip": false,
1730
+ "single_word": false,
1731
+ "special": true
1732
+ },
1733
+ "151859": {
1734
+ "content": "|<EXTRA_TOKENS_64>|",
1735
+ "lstrip": false,
1736
+ "normalized": false,
1737
+ "rstrip": false,
1738
+ "single_word": false,
1739
+ "special": true
1740
+ },
1741
+ "151860": {
1742
+ "content": "|<EXTRA_TOKENS_65>|",
1743
+ "lstrip": false,
1744
+ "normalized": false,
1745
+ "rstrip": false,
1746
+ "single_word": false,
1747
+ "special": true
1748
+ },
1749
+ "151861": {
1750
+ "content": "|<EXTRA_TOKENS_66>|",
1751
+ "lstrip": false,
1752
+ "normalized": false,
1753
+ "rstrip": false,
1754
+ "single_word": false,
1755
+ "special": true
1756
+ },
1757
+ "151862": {
1758
+ "content": "|<EXTRA_TOKENS_67>|",
1759
+ "lstrip": false,
1760
+ "normalized": false,
1761
+ "rstrip": false,
1762
+ "single_word": false,
1763
+ "special": true
1764
+ },
1765
+ "151863": {
1766
+ "content": "|<EXTRA_TOKENS_68>|",
1767
+ "lstrip": false,
1768
+ "normalized": false,
1769
+ "rstrip": false,
1770
+ "single_word": false,
1771
+ "special": true
1772
+ },
1773
+ "151864": {
1774
+ "content": "|<EXTRA_TOKENS_69>|",
1775
+ "lstrip": false,
1776
+ "normalized": false,
1777
+ "rstrip": false,
1778
+ "single_word": false,
1779
+ "special": true
1780
+ },
1781
+ "151865": {
1782
+ "content": "|<EXTRA_TOKENS_70>|",
1783
+ "lstrip": false,
1784
+ "normalized": false,
1785
+ "rstrip": false,
1786
+ "single_word": false,
1787
+ "special": true
1788
+ },
1789
+ "151866": {
1790
+ "content": "|<EXTRA_TOKENS_71>|",
1791
+ "lstrip": false,
1792
+ "normalized": false,
1793
+ "rstrip": false,
1794
+ "single_word": false,
1795
+ "special": true
1796
+ },
1797
+ "151867": {
1798
+ "content": "|<EXTRA_TOKENS_72>|",
1799
+ "lstrip": false,
1800
+ "normalized": false,
1801
+ "rstrip": false,
1802
+ "single_word": false,
1803
+ "special": true
1804
+ },
1805
+ "151868": {
1806
+ "content": "|<EXTRA_TOKENS_73>|",
1807
+ "lstrip": false,
1808
+ "normalized": false,
1809
+ "rstrip": false,
1810
+ "single_word": false,
1811
+ "special": true
1812
+ },
1813
+ "151869": {
1814
+ "content": "|<EXTRA_TOKENS_74>|",
1815
+ "lstrip": false,
1816
+ "normalized": false,
1817
+ "rstrip": false,
1818
+ "single_word": false,
1819
+ "special": true
1820
+ },
1821
+ "151870": {
1822
+ "content": "|<EXTRA_TOKENS_75>|",
1823
+ "lstrip": false,
1824
+ "normalized": false,
1825
+ "rstrip": false,
1826
+ "single_word": false,
1827
+ "special": true
1828
+ },
1829
+ "151871": {
1830
+ "content": "|<EXTRA_TOKENS_76>|",
1831
+ "lstrip": false,
1832
+ "normalized": false,
1833
+ "rstrip": false,
1834
+ "single_word": false,
1835
+ "special": true
1836
+ },
1837
+ "151872": {
1838
+ "content": "|<EXTRA_TOKENS_77>|",
1839
+ "lstrip": false,
1840
+ "normalized": false,
1841
+ "rstrip": false,
1842
+ "single_word": false,
1843
+ "special": true
1844
+ },
1845
+ "151873": {
1846
+ "content": "|<EXTRA_TOKENS_78>|",
1847
+ "lstrip": false,
1848
+ "normalized": false,
1849
+ "rstrip": false,
1850
+ "single_word": false,
1851
+ "special": true
1852
+ },
1853
+ "151874": {
1854
+ "content": "|<EXTRA_TOKENS_79>|",
1855
+ "lstrip": false,
1856
+ "normalized": false,
1857
+ "rstrip": false,
1858
+ "single_word": false,
1859
+ "special": true
1860
+ },
1861
+ "151875": {
1862
+ "content": "|<EXTRA_TOKENS_80>|",
1863
+ "lstrip": false,
1864
+ "normalized": false,
1865
+ "rstrip": false,
1866
+ "single_word": false,
1867
+ "special": true
1868
+ },
1869
+ "151876": {
1870
+ "content": "|<EXTRA_TOKENS_81>|",
1871
+ "lstrip": false,
1872
+ "normalized": false,
1873
+ "rstrip": false,
1874
+ "single_word": false,
1875
+ "special": true
1876
+ },
1877
+ "151877": {
1878
+ "content": "|<EXTRA_TOKENS_82>|",
1879
+ "lstrip": false,
1880
+ "normalized": false,
1881
+ "rstrip": false,
1882
+ "single_word": false,
1883
+ "special": true
1884
+ },
1885
+ "151878": {
1886
+ "content": "|<EXTRA_TOKENS_83>|",
1887
+ "lstrip": false,
1888
+ "normalized": false,
1889
+ "rstrip": false,
1890
+ "single_word": false,
1891
+ "special": true
1892
+ },
1893
+ "151879": {
1894
+ "content": "|<EXTRA_TOKENS_84>|",
1895
+ "lstrip": false,
1896
+ "normalized": false,
1897
+ "rstrip": false,
1898
+ "single_word": false,
1899
+ "special": true
1900
+ },
1901
+ "151880": {
1902
+ "content": "|<EXTRA_TOKENS_85>|",
1903
+ "lstrip": false,
1904
+ "normalized": false,
1905
+ "rstrip": false,
1906
+ "single_word": false,
1907
+ "special": true
1908
+ },
1909
+ "151881": {
1910
+ "content": "|<EXTRA_TOKENS_86>|",
1911
+ "lstrip": false,
1912
+ "normalized": false,
1913
+ "rstrip": false,
1914
+ "single_word": false,
1915
+ "special": true
1916
+ },
1917
+ "151882": {
1918
+ "content": "|<EXTRA_TOKENS_87>|",
1919
+ "lstrip": false,
1920
+ "normalized": false,
1921
+ "rstrip": false,
1922
+ "single_word": false,
1923
+ "special": true
1924
+ },
1925
+ "151883": {
1926
+ "content": "|<EXTRA_TOKENS_88>|",
1927
+ "lstrip": false,
1928
+ "normalized": false,
1929
+ "rstrip": false,
1930
+ "single_word": false,
1931
+ "special": true
1932
+ },
1933
+ "151884": {
1934
+ "content": "|<EXTRA_TOKENS_89>|",
1935
+ "lstrip": false,
1936
+ "normalized": false,
1937
+ "rstrip": false,
1938
+ "single_word": false,
1939
+ "special": true
1940
+ },
1941
+ "151885": {
1942
+ "content": "|<EXTRA_TOKENS_90>|",
1943
+ "lstrip": false,
1944
+ "normalized": false,
1945
+ "rstrip": false,
1946
+ "single_word": false,
1947
+ "special": true
1948
+ },
1949
+ "151886": {
1950
+ "content": "|<EXTRA_TOKENS_91>|",
1951
+ "lstrip": false,
1952
+ "normalized": false,
1953
+ "rstrip": false,
1954
+ "single_word": false,
1955
+ "special": true
1956
+ },
1957
+ "151887": {
1958
+ "content": "|<EXTRA_TOKENS_92>|",
1959
+ "lstrip": false,
1960
+ "normalized": false,
1961
+ "rstrip": false,
1962
+ "single_word": false,
1963
+ "special": true
1964
+ },
1965
+ "151888": {
1966
+ "content": "|<EXTRA_TOKENS_93>|",
1967
+ "lstrip": false,
1968
+ "normalized": false,
1969
+ "rstrip": false,
1970
+ "single_word": false,
1971
+ "special": true
1972
+ },
1973
+ "151889": {
1974
+ "content": "|<EXTRA_TOKENS_94>|",
1975
+ "lstrip": false,
1976
+ "normalized": false,
1977
+ "rstrip": false,
1978
+ "single_word": false,
1979
+ "special": true
1980
+ },
1981
+ "151890": {
1982
+ "content": "|<EXTRA_TOKENS_95>|",
1983
+ "lstrip": false,
1984
+ "normalized": false,
1985
+ "rstrip": false,
1986
+ "single_word": false,
1987
+ "special": true
1988
+ },
1989
+ "151891": {
1990
+ "content": "|<EXTRA_TOKENS_96>|",
1991
+ "lstrip": false,
1992
+ "normalized": false,
1993
+ "rstrip": false,
1994
+ "single_word": false,
1995
+ "special": true
1996
+ },
1997
+ "151892": {
1998
+ "content": "|<EXTRA_TOKENS_97>|",
1999
+ "lstrip": false,
2000
+ "normalized": false,
2001
+ "rstrip": false,
2002
+ "single_word": false,
2003
+ "special": true
2004
+ },
2005
+ "151893": {
2006
+ "content": "|<EXTRA_TOKENS_98>|",
2007
+ "lstrip": false,
2008
+ "normalized": false,
2009
+ "rstrip": false,
2010
+ "single_word": false,
2011
+ "special": true
2012
+ },
2013
+ "151894": {
2014
+ "content": "|<EXTRA_TOKENS_99>|",
2015
+ "lstrip": false,
2016
+ "normalized": false,
2017
+ "rstrip": false,
2018
+ "single_word": false,
2019
+ "special": true
2020
+ },
2021
+ "151895": {
2022
+ "content": "|<EXTRA_TOKENS_100>|",
2023
+ "lstrip": false,
2024
+ "normalized": false,
2025
+ "rstrip": false,
2026
+ "single_word": false,
2027
+ "special": true
2028
+ },
2029
+ "151896": {
2030
+ "content": "|<EXTRA_TOKENS_101>|",
2031
+ "lstrip": false,
2032
+ "normalized": false,
2033
+ "rstrip": false,
2034
+ "single_word": false,
2035
+ "special": true
2036
+ },
2037
+ "151897": {
2038
+ "content": "|<EXTRA_TOKENS_102>|",
2039
+ "lstrip": false,
2040
+ "normalized": false,
2041
+ "rstrip": false,
2042
+ "single_word": false,
2043
+ "special": true
2044
+ },
2045
+ "151898": {
2046
+ "content": "|<EXTRA_TOKENS_103>|",
2047
+ "lstrip": false,
2048
+ "normalized": false,
2049
+ "rstrip": false,
2050
+ "single_word": false,
2051
+ "special": true
2052
+ },
2053
+ "151899": {
2054
+ "content": "|<EXTRA_TOKENS_104>|",
2055
+ "lstrip": false,
2056
+ "normalized": false,
2057
+ "rstrip": false,
2058
+ "single_word": false,
2059
+ "special": true
2060
+ },
2061
+ "151900": {
2062
+ "content": "|<EXTRA_TOKENS_105>|",
2063
+ "lstrip": false,
2064
+ "normalized": false,
2065
+ "rstrip": false,
2066
+ "single_word": false,
2067
+ "special": true
2068
+ },
2069
+ "151901": {
2070
+ "content": "|<EXTRA_TOKENS_106>|",
2071
+ "lstrip": false,
2072
+ "normalized": false,
2073
+ "rstrip": false,
2074
+ "single_word": false,
2075
+ "special": true
2076
+ },
2077
+ "151902": {
2078
+ "content": "|<EXTRA_TOKENS_107>|",
2079
+ "lstrip": false,
2080
+ "normalized": false,
2081
+ "rstrip": false,
2082
+ "single_word": false,
2083
+ "special": true
2084
+ },
2085
+ "151903": {
2086
+ "content": "|<EXTRA_TOKENS_108>|",
2087
+ "lstrip": false,
2088
+ "normalized": false,
2089
+ "rstrip": false,
2090
+ "single_word": false,
2091
+ "special": true
2092
+ },
2093
+ "151904": {
2094
+ "content": "|<EXTRA_TOKENS_109>|",
2095
+ "lstrip": false,
2096
+ "normalized": false,
2097
+ "rstrip": false,
2098
+ "single_word": false,
2099
+ "special": true
2100
+ },
2101
+ "151905": {
2102
+ "content": "|<EXTRA_TOKENS_110>|",
2103
+ "lstrip": false,
2104
+ "normalized": false,
2105
+ "rstrip": false,
2106
+ "single_word": false,
2107
+ "special": true
2108
+ },
2109
+ "151906": {
2110
+ "content": "|<EXTRA_TOKENS_111>|",
2111
+ "lstrip": false,
2112
+ "normalized": false,
2113
+ "rstrip": false,
2114
+ "single_word": false,
2115
+ "special": true
2116
+ },
2117
+ "151907": {
2118
+ "content": "|<EXTRA_TOKENS_112>|",
2119
+ "lstrip": false,
2120
+ "normalized": false,
2121
+ "rstrip": false,
2122
+ "single_word": false,
2123
+ "special": true
2124
+ },
2125
+ "151908": {
2126
+ "content": "|<EXTRA_TOKENS_113>|",
2127
+ "lstrip": false,
2128
+ "normalized": false,
2129
+ "rstrip": false,
2130
+ "single_word": false,
2131
+ "special": true
2132
+ },
2133
+ "151909": {
2134
+ "content": "|<EXTRA_TOKENS_114>|",
2135
+ "lstrip": false,
2136
+ "normalized": false,
2137
+ "rstrip": false,
2138
+ "single_word": false,
2139
+ "special": true
2140
+ },
2141
+ "151910": {
2142
+ "content": "|<EXTRA_TOKENS_115>|",
2143
+ "lstrip": false,
2144
+ "normalized": false,
2145
+ "rstrip": false,
2146
+ "single_word": false,
2147
+ "special": true
2148
+ },
2149
+ "151911": {
2150
+ "content": "|<EXTRA_TOKENS_116>|",
2151
+ "lstrip": false,
2152
+ "normalized": false,
2153
+ "rstrip": false,
2154
+ "single_word": false,
2155
+ "special": true
2156
+ },
2157
+ "151912": {
2158
+ "content": "|<EXTRA_TOKENS_117>|",
2159
+ "lstrip": false,
2160
+ "normalized": false,
2161
+ "rstrip": false,
2162
+ "single_word": false,
2163
+ "special": true
2164
+ },
2165
+ "151913": {
2166
+ "content": "|<EXTRA_TOKENS_118>|",
2167
+ "lstrip": false,
2168
+ "normalized": false,
2169
+ "rstrip": false,
2170
+ "single_word": false,
2171
+ "special": true
2172
+ },
2173
+ "151914": {
2174
+ "content": "|<EXTRA_TOKENS_119>|",
2175
+ "lstrip": false,
2176
+ "normalized": false,
2177
+ "rstrip": false,
2178
+ "single_word": false,
2179
+ "special": true
2180
+ },
2181
+ "151915": {
2182
+ "content": "|<EXTRA_TOKENS_120>|",
2183
+ "lstrip": false,
2184
+ "normalized": false,
2185
+ "rstrip": false,
2186
+ "single_word": false,
2187
+ "special": true
2188
+ },
2189
+ "151916": {
2190
+ "content": "|<EXTRA_TOKENS_121>|",
2191
+ "lstrip": false,
2192
+ "normalized": false,
2193
+ "rstrip": false,
2194
+ "single_word": false,
2195
+ "special": true
2196
+ },
2197
+ "151917": {
2198
+ "content": "|<EXTRA_TOKENS_122>|",
2199
+ "lstrip": false,
2200
+ "normalized": false,
2201
+ "rstrip": false,
2202
+ "single_word": false,
2203
+ "special": true
2204
+ },
2205
+ "151918": {
2206
+ "content": "|<EXTRA_TOKENS_123>|",
2207
+ "lstrip": false,
2208
+ "normalized": false,
2209
+ "rstrip": false,
2210
+ "single_word": false,
2211
+ "special": true
2212
+ },
2213
+ "151919": {
2214
+ "content": "|<EXTRA_TOKENS_124>|",
2215
+ "lstrip": false,
2216
+ "normalized": false,
2217
+ "rstrip": false,
2218
+ "single_word": false,
2219
+ "special": true
2220
+ },
2221
+ "151920": {
2222
+ "content": "|<EXTRA_TOKENS_125>|",
2223
+ "lstrip": false,
2224
+ "normalized": false,
2225
+ "rstrip": false,
2226
+ "single_word": false,
2227
+ "special": true
2228
+ },
2229
+ "151921": {
2230
+ "content": "|<EXTRA_TOKENS_126>|",
2231
+ "lstrip": false,
2232
+ "normalized": false,
2233
+ "rstrip": false,
2234
+ "single_word": false,
2235
+ "special": true
2236
+ },
2237
+ "151922": {
2238
+ "content": "|<EXTRA_TOKENS_127>|",
2239
+ "lstrip": false,
2240
+ "normalized": false,
2241
+ "rstrip": false,
2242
+ "single_word": false,
2243
+ "special": true
2244
+ },
2245
+ "151923": {
2246
+ "content": "|<EXTRA_TOKENS_128>|",
2247
+ "lstrip": false,
2248
+ "normalized": false,
2249
+ "rstrip": false,
2250
+ "single_word": false,
2251
+ "special": true
2252
+ },
2253
+ "151924": {
2254
+ "content": "|<EXTRA_TOKENS_129>|",
2255
+ "lstrip": false,
2256
+ "normalized": false,
2257
+ "rstrip": false,
2258
+ "single_word": false,
2259
+ "special": true
2260
+ },
2261
+ "151925": {
2262
+ "content": "|<EXTRA_TOKENS_130>|",
2263
+ "lstrip": false,
2264
+ "normalized": false,
2265
+ "rstrip": false,
2266
+ "single_word": false,
2267
+ "special": true
2268
+ },
2269
+ "151926": {
2270
+ "content": "|<EXTRA_TOKENS_131>|",
2271
+ "lstrip": false,
2272
+ "normalized": false,
2273
+ "rstrip": false,
2274
+ "single_word": false,
2275
+ "special": true
2276
+ },
2277
+ "151927": {
2278
+ "content": "|<EXTRA_TOKENS_132>|",
2279
+ "lstrip": false,
2280
+ "normalized": false,
2281
+ "rstrip": false,
2282
+ "single_word": false,
2283
+ "special": true
2284
+ },
2285
+ "151928": {
2286
+ "content": "|<EXTRA_TOKENS_133>|",
2287
+ "lstrip": false,
2288
+ "normalized": false,
2289
+ "rstrip": false,
2290
+ "single_word": false,
2291
+ "special": true
2292
+ },
2293
+ "151929": {
2294
+ "content": "|<EXTRA_TOKENS_134>|",
2295
+ "lstrip": false,
2296
+ "normalized": false,
2297
+ "rstrip": false,
2298
+ "single_word": false,
2299
+ "special": true
2300
+ },
2301
+ "151930": {
2302
+ "content": "|<EXTRA_TOKENS_135>|",
2303
+ "lstrip": false,
2304
+ "normalized": false,
2305
+ "rstrip": false,
2306
+ "single_word": false,
2307
+ "special": true
2308
+ },
2309
+ "151931": {
2310
+ "content": "|<EXTRA_TOKENS_136>|",
2311
+ "lstrip": false,
2312
+ "normalized": false,
2313
+ "rstrip": false,
2314
+ "single_word": false,
2315
+ "special": true
2316
+ },
2317
+ "151932": {
2318
+ "content": "|<EXTRA_TOKENS_137>|",
2319
+ "lstrip": false,
2320
+ "normalized": false,
2321
+ "rstrip": false,
2322
+ "single_word": false,
2323
+ "special": true
2324
+ },
2325
+ "151933": {
2326
+ "content": "|<EXTRA_TOKENS_138>|",
2327
+ "lstrip": false,
2328
+ "normalized": false,
2329
+ "rstrip": false,
2330
+ "single_word": false,
2331
+ "special": true
2332
+ },
2333
+ "151934": {
2334
+ "content": "|<EXTRA_TOKENS_139>|",
2335
+ "lstrip": false,
2336
+ "normalized": false,
2337
+ "rstrip": false,
2338
+ "single_word": false,
2339
+ "special": true
2340
+ },
2341
+ "151935": {
2342
+ "content": "|<EXTRA_TOKENS_140>|",
2343
+ "lstrip": false,
2344
+ "normalized": false,
2345
+ "rstrip": false,
2346
+ "single_word": false,
2347
+ "special": true
2348
+ },
2349
+ "151936": {
2350
+ "content": "|<EXTRA_TOKENS_141>|",
2351
+ "lstrip": false,
2352
+ "normalized": false,
2353
+ "rstrip": false,
2354
+ "single_word": false,
2355
+ "special": true
2356
+ },
2357
+ "151937": {
2358
+ "content": "|<EXTRA_TOKENS_142>|",
2359
+ "lstrip": false,
2360
+ "normalized": false,
2361
+ "rstrip": false,
2362
+ "single_word": false,
2363
+ "special": true
2364
+ },
2365
+ "151938": {
2366
+ "content": "|<EXTRA_TOKENS_143>|",
2367
+ "lstrip": false,
2368
+ "normalized": false,
2369
+ "rstrip": false,
2370
+ "single_word": false,
2371
+ "special": true
2372
+ },
2373
+ "151939": {
2374
+ "content": "|<EXTRA_TOKENS_144>|",
2375
+ "lstrip": false,
2376
+ "normalized": false,
2377
+ "rstrip": false,
2378
+ "single_word": false,
2379
+ "special": true
2380
+ },
2381
+ "151940": {
2382
+ "content": "|<EXTRA_TOKENS_145>|",
2383
+ "lstrip": false,
2384
+ "normalized": false,
2385
+ "rstrip": false,
2386
+ "single_word": false,
2387
+ "special": true
2388
+ },
2389
+ "151941": {
2390
+ "content": "|<EXTRA_TOKENS_146>|",
2391
+ "lstrip": false,
2392
+ "normalized": false,
2393
+ "rstrip": false,
2394
+ "single_word": false,
2395
+ "special": true
2396
+ },
2397
+ "151942": {
2398
+ "content": "|<EXTRA_TOKENS_147>|",
2399
+ "lstrip": false,
2400
+ "normalized": false,
2401
+ "rstrip": false,
2402
+ "single_word": false,
2403
+ "special": true
2404
+ },
2405
+ "151943": {
2406
+ "content": "|<EXTRA_TOKENS_148>|",
2407
+ "lstrip": false,
2408
+ "normalized": false,
2409
+ "rstrip": false,
2410
+ "single_word": false,
2411
+ "special": true
2412
+ },
2413
+ "151944": {
2414
+ "content": "|<EXTRA_TOKENS_149>|",
2415
+ "lstrip": false,
2416
+ "normalized": false,
2417
+ "rstrip": false,
2418
+ "single_word": false,
2419
+ "special": true
2420
+ },
2421
+ "151945": {
2422
+ "content": "|<EXTRA_TOKENS_150>|",
2423
+ "lstrip": false,
2424
+ "normalized": false,
2425
+ "rstrip": false,
2426
+ "single_word": false,
2427
+ "special": true
2428
+ },
2429
+ "151946": {
2430
+ "content": "|<EXTRA_TOKENS_151>|",
2431
+ "lstrip": false,
2432
+ "normalized": false,
2433
+ "rstrip": false,
2434
+ "single_word": false,
2435
+ "special": true
2436
+ },
2437
+ "151947": {
2438
+ "content": "|<EXTRA_TOKENS_152>|",
2439
+ "lstrip": false,
2440
+ "normalized": false,
2441
+ "rstrip": false,
2442
+ "single_word": false,
2443
+ "special": true
2444
+ },
2445
+ "151948": {
2446
+ "content": "|<EXTRA_TOKENS_153>|",
2447
+ "lstrip": false,
2448
+ "normalized": false,
2449
+ "rstrip": false,
2450
+ "single_word": false,
2451
+ "special": true
2452
+ },
2453
+ "151949": {
2454
+ "content": "|<EXTRA_TOKENS_154>|",
2455
+ "lstrip": false,
2456
+ "normalized": false,
2457
+ "rstrip": false,
2458
+ "single_word": false,
2459
+ "special": true
2460
+ },
2461
+ "151950": {
2462
+ "content": "|<EXTRA_TOKENS_155>|",
2463
+ "lstrip": false,
2464
+ "normalized": false,
2465
+ "rstrip": false,
2466
+ "single_word": false,
2467
+ "special": true
2468
+ },
2469
+ "151951": {
2470
+ "content": "|<EXTRA_TOKENS_156>|",
2471
+ "lstrip": false,
2472
+ "normalized": false,
2473
+ "rstrip": false,
2474
+ "single_word": false,
2475
+ "special": true
2476
+ },
2477
+ "151952": {
2478
+ "content": "|<EXTRA_TOKENS_157>|",
2479
+ "lstrip": false,
2480
+ "normalized": false,
2481
+ "rstrip": false,
2482
+ "single_word": false,
2483
+ "special": true
2484
+ },
2485
+ "151953": {
2486
+ "content": "|<EXTRA_TOKENS_158>|",
2487
+ "lstrip": false,
2488
+ "normalized": false,
2489
+ "rstrip": false,
2490
+ "single_word": false,
2491
+ "special": true
2492
+ },
2493
+ "151954": {
2494
+ "content": "|<EXTRA_TOKENS_159>|",
2495
+ "lstrip": false,
2496
+ "normalized": false,
2497
+ "rstrip": false,
2498
+ "single_word": false,
2499
+ "special": true
2500
+ },
2501
+ "151955": {
2502
+ "content": "|<EXTRA_TOKENS_160>|",
2503
+ "lstrip": false,
2504
+ "normalized": false,
2505
+ "rstrip": false,
2506
+ "single_word": false,
2507
+ "special": true
2508
+ },
2509
+ "151956": {
2510
+ "content": "|<EXTRA_TOKENS_161>|",
2511
+ "lstrip": false,
2512
+ "normalized": false,
2513
+ "rstrip": false,
2514
+ "single_word": false,
2515
+ "special": true
2516
+ },
2517
+ "151957": {
2518
+ "content": "|<EXTRA_TOKENS_162>|",
2519
+ "lstrip": false,
2520
+ "normalized": false,
2521
+ "rstrip": false,
2522
+ "single_word": false,
2523
+ "special": true
2524
+ },
2525
+ "151958": {
2526
+ "content": "|<EXTRA_TOKENS_163>|",
2527
+ "lstrip": false,
2528
+ "normalized": false,
2529
+ "rstrip": false,
2530
+ "single_word": false,
2531
+ "special": true
2532
+ },
2533
+ "151959": {
2534
+ "content": "|<EXTRA_TOKENS_164>|",
2535
+ "lstrip": false,
2536
+ "normalized": false,
2537
+ "rstrip": false,
2538
+ "single_word": false,
2539
+ "special": true
2540
+ },
2541
+ "151960": {
2542
+ "content": "|<EXTRA_TOKENS_165>|",
2543
+ "lstrip": false,
2544
+ "normalized": false,
2545
+ "rstrip": false,
2546
+ "single_word": false,
2547
+ "special": true
2548
+ },
2549
+ "151961": {
2550
+ "content": "|<EXTRA_TOKENS_166>|",
2551
+ "lstrip": false,
2552
+ "normalized": false,
2553
+ "rstrip": false,
2554
+ "single_word": false,
2555
+ "special": true
2556
+ },
2557
+ "151962": {
2558
+ "content": "|<EXTRA_TOKENS_167>|",
2559
+ "lstrip": false,
2560
+ "normalized": false,
2561
+ "rstrip": false,
2562
+ "single_word": false,
2563
+ "special": true
2564
+ },
2565
+ "151963": {
2566
+ "content": "|<EXTRA_TOKENS_168>|",
2567
+ "lstrip": false,
2568
+ "normalized": false,
2569
+ "rstrip": false,
2570
+ "single_word": false,
2571
+ "special": true
2572
+ },
2573
+ "151964": {
2574
+ "content": "|<EXTRA_TOKENS_169>|",
2575
+ "lstrip": false,
2576
+ "normalized": false,
2577
+ "rstrip": false,
2578
+ "single_word": false,
2579
+ "special": true
2580
+ },
2581
+ "151965": {
2582
+ "content": "|<EXTRA_TOKENS_170>|",
2583
+ "lstrip": false,
2584
+ "normalized": false,
2585
+ "rstrip": false,
2586
+ "single_word": false,
2587
+ "special": true
2588
+ },
2589
+ "151966": {
2590
+ "content": "|<EXTRA_TOKENS_171>|",
2591
+ "lstrip": false,
2592
+ "normalized": false,
2593
+ "rstrip": false,
2594
+ "single_word": false,
2595
+ "special": true
2596
+ },
2597
+ "151967": {
2598
+ "content": "|<EXTRA_TOKENS_172>|",
2599
+ "lstrip": false,
2600
+ "normalized": false,
2601
+ "rstrip": false,
2602
+ "single_word": false,
2603
+ "special": true
2604
+ },
2605
+ "151968": {
2606
+ "content": "|<EXTRA_TOKENS_173>|",
2607
+ "lstrip": false,
2608
+ "normalized": false,
2609
+ "rstrip": false,
2610
+ "single_word": false,
2611
+ "special": true
2612
+ },
2613
+ "151969": {
2614
+ "content": "|<EXTRA_TOKENS_174>|",
2615
+ "lstrip": false,
2616
+ "normalized": false,
2617
+ "rstrip": false,
2618
+ "single_word": false,
2619
+ "special": true
2620
+ },
2621
+ "151970": {
2622
+ "content": "|<EXTRA_TOKENS_175>|",
2623
+ "lstrip": false,
2624
+ "normalized": false,
2625
+ "rstrip": false,
2626
+ "single_word": false,
2627
+ "special": true
2628
+ },
2629
+ "151971": {
2630
+ "content": "|<EXTRA_TOKENS_176>|",
2631
+ "lstrip": false,
2632
+ "normalized": false,
2633
+ "rstrip": false,
2634
+ "single_word": false,
2635
+ "special": true
2636
+ },
2637
+ "151972": {
2638
+ "content": "|<EXTRA_TOKENS_177>|",
2639
+ "lstrip": false,
2640
+ "normalized": false,
2641
+ "rstrip": false,
2642
+ "single_word": false,
2643
+ "special": true
2644
+ },
2645
+ "151973": {
2646
+ "content": "|<EXTRA_TOKENS_178>|",
2647
+ "lstrip": false,
2648
+ "normalized": false,
2649
+ "rstrip": false,
2650
+ "single_word": false,
2651
+ "special": true
2652
+ },
2653
+ "151974": {
2654
+ "content": "|<EXTRA_TOKENS_179>|",
2655
+ "lstrip": false,
2656
+ "normalized": false,
2657
+ "rstrip": false,
2658
+ "single_word": false,
2659
+ "special": true
2660
+ },
2661
+ "151975": {
2662
+ "content": "|<EXTRA_TOKENS_180>|",
2663
+ "lstrip": false,
2664
+ "normalized": false,
2665
+ "rstrip": false,
2666
+ "single_word": false,
2667
+ "special": true
2668
+ },
2669
+ "151976": {
2670
+ "content": "|<EXTRA_TOKENS_181>|",
2671
+ "lstrip": false,
2672
+ "normalized": false,
2673
+ "rstrip": false,
2674
+ "single_word": false,
2675
+ "special": true
2676
+ },
2677
+ "151977": {
2678
+ "content": "|<EXTRA_TOKENS_182>|",
2679
+ "lstrip": false,
2680
+ "normalized": false,
2681
+ "rstrip": false,
2682
+ "single_word": false,
2683
+ "special": true
2684
+ },
2685
+ "151978": {
2686
+ "content": "|<EXTRA_TOKENS_183>|",
2687
+ "lstrip": false,
2688
+ "normalized": false,
2689
+ "rstrip": false,
2690
+ "single_word": false,
2691
+ "special": true
2692
+ },
2693
+ "151979": {
2694
+ "content": "|<EXTRA_TOKENS_184>|",
2695
+ "lstrip": false,
2696
+ "normalized": false,
2697
+ "rstrip": false,
2698
+ "single_word": false,
2699
+ "special": true
2700
+ },
2701
+ "151980": {
2702
+ "content": "|<EXTRA_TOKENS_185>|",
2703
+ "lstrip": false,
2704
+ "normalized": false,
2705
+ "rstrip": false,
2706
+ "single_word": false,
2707
+ "special": true
2708
+ },
2709
+ "151981": {
2710
+ "content": "|<EXTRA_TOKENS_186>|",
2711
+ "lstrip": false,
2712
+ "normalized": false,
2713
+ "rstrip": false,
2714
+ "single_word": false,
2715
+ "special": true
2716
+ },
2717
+ "151982": {
2718
+ "content": "|<EXTRA_TOKENS_187>|",
2719
+ "lstrip": false,
2720
+ "normalized": false,
2721
+ "rstrip": false,
2722
+ "single_word": false,
2723
+ "special": true
2724
+ },
2725
+ "151983": {
2726
+ "content": "|<EXTRA_TOKENS_188>|",
2727
+ "lstrip": false,
2728
+ "normalized": false,
2729
+ "rstrip": false,
2730
+ "single_word": false,
2731
+ "special": true
2732
+ },
2733
+ "151984": {
2734
+ "content": "|<EXTRA_TOKENS_189>|",
2735
+ "lstrip": false,
2736
+ "normalized": false,
2737
+ "rstrip": false,
2738
+ "single_word": false,
2739
+ "special": true
2740
+ },
2741
+ "151985": {
2742
+ "content": "|<EXTRA_TOKENS_190>|",
2743
+ "lstrip": false,
2744
+ "normalized": false,
2745
+ "rstrip": false,
2746
+ "single_word": false,
2747
+ "special": true
2748
+ },
2749
+ "151986": {
2750
+ "content": "|<EXTRA_TOKENS_191>|",
2751
+ "lstrip": false,
2752
+ "normalized": false,
2753
+ "rstrip": false,
2754
+ "single_word": false,
2755
+ "special": true
2756
+ },
2757
+ "151987": {
2758
+ "content": "|<EXTRA_TOKENS_192>|",
2759
+ "lstrip": false,
2760
+ "normalized": false,
2761
+ "rstrip": false,
2762
+ "single_word": false,
2763
+ "special": true
2764
+ },
2765
+ "151988": {
2766
+ "content": "|<EXTRA_TOKENS_193>|",
2767
+ "lstrip": false,
2768
+ "normalized": false,
2769
+ "rstrip": false,
2770
+ "single_word": false,
2771
+ "special": true
2772
+ },
2773
+ "151989": {
2774
+ "content": "|<EXTRA_TOKENS_194>|",
2775
+ "lstrip": false,
2776
+ "normalized": false,
2777
+ "rstrip": false,
2778
+ "single_word": false,
2779
+ "special": true
2780
+ },
2781
+ "151990": {
2782
+ "content": "|<EXTRA_TOKENS_195>|",
2783
+ "lstrip": false,
2784
+ "normalized": false,
2785
+ "rstrip": false,
2786
+ "single_word": false,
2787
+ "special": true
2788
+ },
2789
+ "151991": {
2790
+ "content": "|<EXTRA_TOKENS_196>|",
2791
+ "lstrip": false,
2792
+ "normalized": false,
2793
+ "rstrip": false,
2794
+ "single_word": false,
2795
+ "special": true
2796
+ },
2797
+ "151992": {
2798
+ "content": "|<EXTRA_TOKENS_197>|",
2799
+ "lstrip": false,
2800
+ "normalized": false,
2801
+ "rstrip": false,
2802
+ "single_word": false,
2803
+ "special": true
2804
+ },
2805
+ "151993": {
2806
+ "content": "|<EXTRA_TOKENS_198>|",
2807
+ "lstrip": false,
2808
+ "normalized": false,
2809
+ "rstrip": false,
2810
+ "single_word": false,
2811
+ "special": true
2812
+ },
2813
+ "151994": {
2814
+ "content": "|<EXTRA_TOKENS_199>|",
2815
+ "lstrip": false,
2816
+ "normalized": false,
2817
+ "rstrip": false,
2818
+ "single_word": false,
2819
+ "special": true
2820
+ },
2821
+ "151995": {
2822
+ "content": "|<EXTRA_TOKENS_200>|",
2823
+ "lstrip": false,
2824
+ "normalized": false,
2825
+ "rstrip": false,
2826
+ "single_word": false,
2827
+ "special": true
2828
+ },
2829
+ "151996": {
2830
+ "content": "|<EXTRA_TOKENS_201>|",
2831
+ "lstrip": false,
2832
+ "normalized": false,
2833
+ "rstrip": false,
2834
+ "single_word": false,
2835
+ "special": true
2836
+ },
2837
+ "151997": {
2838
+ "content": "|<EXTRA_TOKENS_202>|",
2839
+ "lstrip": false,
2840
+ "normalized": false,
2841
+ "rstrip": false,
2842
+ "single_word": false,
2843
+ "special": true
2844
+ },
2845
+ "151998": {
2846
+ "content": "|<EXTRA_TOKENS_203>|",
2847
+ "lstrip": false,
2848
+ "normalized": false,
2849
+ "rstrip": false,
2850
+ "single_word": false,
2851
+ "special": true
2852
+ },
2853
+ "151999": {
2854
+ "content": "|<EXTRA_TOKENS_204>|",
2855
+ "lstrip": false,
2856
+ "normalized": false,
2857
+ "rstrip": false,
2858
+ "single_word": false,
2859
+ "special": true
2860
+ },
2861
+ "152000": {
2862
+ "content": "|<EXTRA_TOKENS_205>|",
2863
+ "lstrip": false,
2864
+ "normalized": false,
2865
+ "rstrip": false,
2866
+ "single_word": false,
2867
+ "special": true
2868
+ },
2869
+ "152001": {
2870
+ "content": "|<EXTRA_TOKENS_206>|",
2871
+ "lstrip": false,
2872
+ "normalized": false,
2873
+ "rstrip": false,
2874
+ "single_word": false,
2875
+ "special": true
2876
+ },
2877
+ "152002": {
2878
+ "content": "|<EXTRA_TOKENS_207>|",
2879
+ "lstrip": false,
2880
+ "normalized": false,
2881
+ "rstrip": false,
2882
+ "single_word": false,
2883
+ "special": true
2884
+ },
2885
+ "152003": {
2886
+ "content": "|<EXTRA_TOKENS_208>|",
2887
+ "lstrip": false,
2888
+ "normalized": false,
2889
+ "rstrip": false,
2890
+ "single_word": false,
2891
+ "special": true
2892
+ },
2893
+ "152004": {
2894
+ "content": "|<EXTRA_TOKENS_209>|",
2895
+ "lstrip": false,
2896
+ "normalized": false,
2897
+ "rstrip": false,
2898
+ "single_word": false,
2899
+ "special": true
2900
+ },
2901
+ "152005": {
2902
+ "content": "|<EXTRA_TOKENS_210>|",
2903
+ "lstrip": false,
2904
+ "normalized": false,
2905
+ "rstrip": false,
2906
+ "single_word": false,
2907
+ "special": true
2908
+ },
2909
+ "152006": {
2910
+ "content": "|<EXTRA_TOKENS_211>|",
2911
+ "lstrip": false,
2912
+ "normalized": false,
2913
+ "rstrip": false,
2914
+ "single_word": false,
2915
+ "special": true
2916
+ },
2917
+ "152007": {
2918
+ "content": "|<EXTRA_TOKENS_212>|",
2919
+ "lstrip": false,
2920
+ "normalized": false,
2921
+ "rstrip": false,
2922
+ "single_word": false,
2923
+ "special": true
2924
+ },
2925
+ "152008": {
2926
+ "content": "|<EXTRA_TOKENS_213>|",
2927
+ "lstrip": false,
2928
+ "normalized": false,
2929
+ "rstrip": false,
2930
+ "single_word": false,
2931
+ "special": true
2932
+ },
2933
+ "152009": {
2934
+ "content": "|<EXTRA_TOKENS_214>|",
2935
+ "lstrip": false,
2936
+ "normalized": false,
2937
+ "rstrip": false,
2938
+ "single_word": false,
2939
+ "special": true
2940
+ },
2941
+ "152010": {
2942
+ "content": "|<EXTRA_TOKENS_215>|",
2943
+ "lstrip": false,
2944
+ "normalized": false,
2945
+ "rstrip": false,
2946
+ "single_word": false,
2947
+ "special": true
2948
+ },
2949
+ "152011": {
2950
+ "content": "|<EXTRA_TOKENS_216>|",
2951
+ "lstrip": false,
2952
+ "normalized": false,
2953
+ "rstrip": false,
2954
+ "single_word": false,
2955
+ "special": true
2956
+ },
2957
+ "152012": {
2958
+ "content": "|<EXTRA_TOKENS_217>|",
2959
+ "lstrip": false,
2960
+ "normalized": false,
2961
+ "rstrip": false,
2962
+ "single_word": false,
2963
+ "special": true
2964
+ },
2965
+ "152013": {
2966
+ "content": "|<EXTRA_TOKENS_218>|",
2967
+ "lstrip": false,
2968
+ "normalized": false,
2969
+ "rstrip": false,
2970
+ "single_word": false,
2971
+ "special": true
2972
+ },
2973
+ "152014": {
2974
+ "content": "|<EXTRA_TOKENS_219>|",
2975
+ "lstrip": false,
2976
+ "normalized": false,
2977
+ "rstrip": false,
2978
+ "single_word": false,
2979
+ "special": true
2980
+ },
2981
+ "152015": {
2982
+ "content": "|<EXTRA_TOKENS_220>|",
2983
+ "lstrip": false,
2984
+ "normalized": false,
2985
+ "rstrip": false,
2986
+ "single_word": false,
2987
+ "special": true
2988
+ },
2989
+ "152016": {
2990
+ "content": "|<EXTRA_TOKENS_221>|",
2991
+ "lstrip": false,
2992
+ "normalized": false,
2993
+ "rstrip": false,
2994
+ "single_word": false,
2995
+ "special": true
2996
+ },
2997
+ "152017": {
2998
+ "content": "|<EXTRA_TOKENS_222>|",
2999
+ "lstrip": false,
3000
+ "normalized": false,
3001
+ "rstrip": false,
3002
+ "single_word": false,
3003
+ "special": true
3004
+ },
3005
+ "152018": {
3006
+ "content": "|<EXTRA_TOKENS_223>|",
3007
+ "lstrip": false,
3008
+ "normalized": false,
3009
+ "rstrip": false,
3010
+ "single_word": false,
3011
+ "special": true
3012
+ },
3013
+ "152019": {
3014
+ "content": "|<EXTRA_TOKENS_224>|",
3015
+ "lstrip": false,
3016
+ "normalized": false,
3017
+ "rstrip": false,
3018
+ "single_word": false,
3019
+ "special": true
3020
+ },
3021
+ "152020": {
3022
+ "content": "|<EXTRA_TOKENS_225>|",
3023
+ "lstrip": false,
3024
+ "normalized": false,
3025
+ "rstrip": false,
3026
+ "single_word": false,
3027
+ "special": true
3028
+ },
3029
+ "152021": {
3030
+ "content": "|<EXTRA_TOKENS_226>|",
3031
+ "lstrip": false,
3032
+ "normalized": false,
3033
+ "rstrip": false,
3034
+ "single_word": false,
3035
+ "special": true
3036
+ },
3037
+ "152022": {
3038
+ "content": "|<EXTRA_TOKENS_227>|",
3039
+ "lstrip": false,
3040
+ "normalized": false,
3041
+ "rstrip": false,
3042
+ "single_word": false,
3043
+ "special": true
3044
+ },
3045
+ "152023": {
3046
+ "content": "|<EXTRA_TOKENS_228>|",
3047
+ "lstrip": false,
3048
+ "normalized": false,
3049
+ "rstrip": false,
3050
+ "single_word": false,
3051
+ "special": true
3052
+ },
3053
+ "152024": {
3054
+ "content": "|<EXTRA_TOKENS_229>|",
3055
+ "lstrip": false,
3056
+ "normalized": false,
3057
+ "rstrip": false,
3058
+ "single_word": false,
3059
+ "special": true
3060
+ },
3061
+ "152025": {
3062
+ "content": "|<EXTRA_TOKENS_230>|",
3063
+ "lstrip": false,
3064
+ "normalized": false,
3065
+ "rstrip": false,
3066
+ "single_word": false,
3067
+ "special": true
3068
+ },
3069
+ "152026": {
3070
+ "content": "|<EXTRA_TOKENS_231>|",
3071
+ "lstrip": false,
3072
+ "normalized": false,
3073
+ "rstrip": false,
3074
+ "single_word": false,
3075
+ "special": true
3076
+ },
3077
+ "152027": {
3078
+ "content": "|<EXTRA_TOKENS_232>|",
3079
+ "lstrip": false,
3080
+ "normalized": false,
3081
+ "rstrip": false,
3082
+ "single_word": false,
3083
+ "special": true
3084
+ },
3085
+ "152028": {
3086
+ "content": "|<EXTRA_TOKENS_233>|",
3087
+ "lstrip": false,
3088
+ "normalized": false,
3089
+ "rstrip": false,
3090
+ "single_word": false,
3091
+ "special": true
3092
+ },
3093
+ "152029": {
3094
+ "content": "|<EXTRA_TOKENS_234>|",
3095
+ "lstrip": false,
3096
+ "normalized": false,
3097
+ "rstrip": false,
3098
+ "single_word": false,
3099
+ "special": true
3100
+ },
3101
+ "152030": {
3102
+ "content": "|<EXTRA_TOKENS_235>|",
3103
+ "lstrip": false,
3104
+ "normalized": false,
3105
+ "rstrip": false,
3106
+ "single_word": false,
3107
+ "special": true
3108
+ },
3109
+ "152031": {
3110
+ "content": "|<EXTRA_TOKENS_236>|",
3111
+ "lstrip": false,
3112
+ "normalized": false,
3113
+ "rstrip": false,
3114
+ "single_word": false,
3115
+ "special": true
3116
+ },
3117
+ "152032": {
3118
+ "content": "|<EXTRA_TOKENS_237>|",
3119
+ "lstrip": false,
3120
+ "normalized": false,
3121
+ "rstrip": false,
3122
+ "single_word": false,
3123
+ "special": true
3124
+ },
3125
+ "152033": {
3126
+ "content": "|<EXTRA_TOKENS_238>|",
3127
+ "lstrip": false,
3128
+ "normalized": false,
3129
+ "rstrip": false,
3130
+ "single_word": false,
3131
+ "special": true
3132
+ },
3133
+ "152034": {
3134
+ "content": "|<EXTRA_TOKENS_239>|",
3135
+ "lstrip": false,
3136
+ "normalized": false,
3137
+ "rstrip": false,
3138
+ "single_word": false,
3139
+ "special": true
3140
+ },
3141
+ "152035": {
3142
+ "content": "|<EXTRA_TOKENS_240>|",
3143
+ "lstrip": false,
3144
+ "normalized": false,
3145
+ "rstrip": false,
3146
+ "single_word": false,
3147
+ "special": true
3148
+ },
3149
+ "152036": {
3150
+ "content": "|<EXTRA_TOKENS_241>|",
3151
+ "lstrip": false,
3152
+ "normalized": false,
3153
+ "rstrip": false,
3154
+ "single_word": false,
3155
+ "special": true
3156
+ },
3157
+ "152037": {
3158
+ "content": "|<EXTRA_TOKENS_242>|",
3159
+ "lstrip": false,
3160
+ "normalized": false,
3161
+ "rstrip": false,
3162
+ "single_word": false,
3163
+ "special": true
3164
+ },
3165
+ "152038": {
3166
+ "content": "|<EXTRA_TOKENS_243>|",
3167
+ "lstrip": false,
3168
+ "normalized": false,
3169
+ "rstrip": false,
3170
+ "single_word": false,
3171
+ "special": true
3172
+ },
3173
+ "152039": {
3174
+ "content": "|<EXTRA_TOKENS_244>|",
3175
+ "lstrip": false,
3176
+ "normalized": false,
3177
+ "rstrip": false,
3178
+ "single_word": false,
3179
+ "special": true
3180
+ },
3181
+ "152040": {
3182
+ "content": "|<EXTRA_TOKENS_245>|",
3183
+ "lstrip": false,
3184
+ "normalized": false,
3185
+ "rstrip": false,
3186
+ "single_word": false,
3187
+ "special": true
3188
+ },
3189
+ "152041": {
3190
+ "content": "|<EXTRA_TOKENS_246>|",
3191
+ "lstrip": false,
3192
+ "normalized": false,
3193
+ "rstrip": false,
3194
+ "single_word": false,
3195
+ "special": true
3196
+ },
3197
+ "152042": {
3198
+ "content": "|<EXTRA_TOKENS_247>|",
3199
+ "lstrip": false,
3200
+ "normalized": false,
3201
+ "rstrip": false,
3202
+ "single_word": false,
3203
+ "special": true
3204
+ },
3205
+ "152043": {
3206
+ "content": "|<EXTRA_TOKENS_248>|",
3207
+ "lstrip": false,
3208
+ "normalized": false,
3209
+ "rstrip": false,
3210
+ "single_word": false,
3211
+ "special": true
3212
+ },
3213
+ "152044": {
3214
+ "content": "|<EXTRA_TOKENS_249>|",
3215
+ "lstrip": false,
3216
+ "normalized": false,
3217
+ "rstrip": false,
3218
+ "single_word": false,
3219
+ "special": true
3220
+ },
3221
+ "152045": {
3222
+ "content": "|<EXTRA_TOKENS_250>|",
3223
+ "lstrip": false,
3224
+ "normalized": false,
3225
+ "rstrip": false,
3226
+ "single_word": false,
3227
+ "special": true
3228
+ },
3229
+ "152046": {
3230
+ "content": "|<EXTRA_TOKENS_251>|",
3231
+ "lstrip": false,
3232
+ "normalized": false,
3233
+ "rstrip": false,
3234
+ "single_word": false,
3235
+ "special": true
3236
+ },
3237
+ "152047": {
3238
+ "content": "|<EXTRA_TOKENS_252>|",
3239
+ "lstrip": false,
3240
+ "normalized": false,
3241
+ "rstrip": false,
3242
+ "single_word": false,
3243
+ "special": true
3244
+ },
3245
+ "152048": {
3246
+ "content": "|<EXTRA_TOKENS_253>|",
3247
+ "lstrip": false,
3248
+ "normalized": false,
3249
+ "rstrip": false,
3250
+ "single_word": false,
3251
+ "special": true
3252
+ },
3253
+ "152049": {
3254
+ "content": "|<EXTRA_TOKENS_254>|",
3255
+ "lstrip": false,
3256
+ "normalized": false,
3257
+ "rstrip": false,
3258
+ "single_word": false,
3259
+ "special": true
3260
+ },
3261
+ "152050": {
3262
+ "content": "|<EXTRA_TOKENS_255>|",
3263
+ "lstrip": false,
3264
+ "normalized": false,
3265
+ "rstrip": false,
3266
+ "single_word": false,
3267
+ "special": true
3268
+ },
3269
+ "152051": {
3270
+ "content": "|<EXTRA_TOKENS_256>|",
3271
+ "lstrip": false,
3272
+ "normalized": false,
3273
+ "rstrip": false,
3274
+ "single_word": false,
3275
+ "special": true
3276
+ },
3277
+ "152052": {
3278
+ "content": "|<EXTRA_TOKENS_257>|",
3279
+ "lstrip": false,
3280
+ "normalized": false,
3281
+ "rstrip": false,
3282
+ "single_word": false,
3283
+ "special": true
3284
+ },
3285
+ "152053": {
3286
+ "content": "|<EXTRA_TOKENS_258>|",
3287
+ "lstrip": false,
3288
+ "normalized": false,
3289
+ "rstrip": false,
3290
+ "single_word": false,
3291
+ "special": true
3292
+ },
3293
+ "152054": {
3294
+ "content": "|<EXTRA_TOKENS_259>|",
3295
+ "lstrip": false,
3296
+ "normalized": false,
3297
+ "rstrip": false,
3298
+ "single_word": false,
3299
+ "special": true
3300
+ },
3301
+ "152055": {
3302
+ "content": "|<EXTRA_TOKENS_260>|",
3303
+ "lstrip": false,
3304
+ "normalized": false,
3305
+ "rstrip": false,
3306
+ "single_word": false,
3307
+ "special": true
3308
+ },
3309
+ "152056": {
3310
+ "content": "|<EXTRA_TOKENS_261>|",
3311
+ "lstrip": false,
3312
+ "normalized": false,
3313
+ "rstrip": false,
3314
+ "single_word": false,
3315
+ "special": true
3316
+ },
3317
+ "152057": {
3318
+ "content": "|<EXTRA_TOKENS_262>|",
3319
+ "lstrip": false,
3320
+ "normalized": false,
3321
+ "rstrip": false,
3322
+ "single_word": false,
3323
+ "special": true
3324
+ },
3325
+ "152058": {
3326
+ "content": "|<EXTRA_TOKENS_263>|",
3327
+ "lstrip": false,
3328
+ "normalized": false,
3329
+ "rstrip": false,
3330
+ "single_word": false,
3331
+ "special": true
3332
+ },
3333
+ "152059": {
3334
+ "content": "|<EXTRA_TOKENS_264>|",
3335
+ "lstrip": false,
3336
+ "normalized": false,
3337
+ "rstrip": false,
3338
+ "single_word": false,
3339
+ "special": true
3340
+ },
3341
+ "152060": {
3342
+ "content": "|<EXTRA_TOKENS_265>|",
3343
+ "lstrip": false,
3344
+ "normalized": false,
3345
+ "rstrip": false,
3346
+ "single_word": false,
3347
+ "special": true
3348
+ },
3349
+ "152061": {
3350
+ "content": "|<EXTRA_TOKENS_266>|",
3351
+ "lstrip": false,
3352
+ "normalized": false,
3353
+ "rstrip": false,
3354
+ "single_word": false,
3355
+ "special": true
3356
+ },
3357
+ "152062": {
3358
+ "content": "|<EXTRA_TOKENS_267>|",
3359
+ "lstrip": false,
3360
+ "normalized": false,
3361
+ "rstrip": false,
3362
+ "single_word": false,
3363
+ "special": true
3364
+ },
3365
+ "152063": {
3366
+ "content": "|<EXTRA_TOKENS_268>|",
3367
+ "lstrip": false,
3368
+ "normalized": false,
3369
+ "rstrip": false,
3370
+ "single_word": false,
3371
+ "special": true
3372
+ },
3373
+ "152064": {
3374
+ "content": "<im_start>",
3375
+ "lstrip": false,
3376
+ "normalized": false,
3377
+ "rstrip": false,
3378
+ "single_word": false,
3379
+ "special": true
3380
+ },
3381
+ "152065": {
3382
+ "content": "<im_end>",
3383
+ "lstrip": false,
3384
+ "normalized": false,
3385
+ "rstrip": false,
3386
+ "single_word": false,
3387
+ "special": true
3388
+ },
3389
+ "152066": {
3390
+ "content": "<im_patch>",
3391
+ "lstrip": false,
3392
+ "normalized": false,
3393
+ "rstrip": false,
3394
+ "single_word": false,
3395
+ "special": true
3396
+ },
3397
+ "152067": {
3398
+ "content": "<im_col>",
3399
+ "lstrip": false,
3400
+ "normalized": false,
3401
+ "rstrip": false,
3402
+ "single_word": false,
3403
+ "special": true
3404
+ },
3405
+ "152068": {
3406
+ "content": "<|image|>",
3407
+ "lstrip": false,
3408
+ "normalized": false,
3409
+ "rstrip": false,
3410
+ "single_word": false,
3411
+ "special": true
3412
+ },
3413
+ "152069": {
3414
+ "content": "<im_low>",
3415
+ "lstrip": false,
3416
+ "normalized": false,
3417
+ "rstrip": false,
3418
+ "single_word": false,
3419
+ "special": true
3420
+ }
3421
+ },
3422
+ "additional_special_tokens": [
3423
+ "|<EXTRA_TOKENS_0>|",
3424
+ "|<EXTRA_TOKENS_1>|",
3425
+ "|<EXTRA_TOKENS_2>|",
3426
+ "|<EXTRA_TOKENS_3>|",
3427
+ "|<EXTRA_TOKENS_4>|",
3428
+ "|<EXTRA_TOKENS_5>|",
3429
+ "|<EXTRA_TOKENS_6>|",
3430
+ "|<EXTRA_TOKENS_7>|",
3431
+ "|<EXTRA_TOKENS_8>|",
3432
+ "|<EXTRA_TOKENS_9>|",
3433
+ "|<EXTRA_TOKENS_10>|",
3434
+ "|<EXTRA_TOKENS_11>|",
3435
+ "|<EXTRA_TOKENS_12>|",
3436
+ "|<EXTRA_TOKENS_13>|",
3437
+ "|<EXTRA_TOKENS_14>|",
3438
+ "|<EXTRA_TOKENS_15>|",
3439
+ "|<EXTRA_TOKENS_16>|",
3440
+ "|<EXTRA_TOKENS_17>|",
3441
+ "|<EXTRA_TOKENS_18>|",
3442
+ "|<EXTRA_TOKENS_19>|",
3443
+ "|<EXTRA_TOKENS_20>|",
3444
+ "|<EXTRA_TOKENS_21>|",
3445
+ "|<EXTRA_TOKENS_22>|",
3446
+ "|<EXTRA_TOKENS_23>|",
3447
+ "|<EXTRA_TOKENS_24>|",
3448
+ "|<EXTRA_TOKENS_25>|",
3449
+ "|<EXTRA_TOKENS_26>|",
3450
+ "|<EXTRA_TOKENS_27>|",
3451
+ "|<EXTRA_TOKENS_28>|",
3452
+ "|<EXTRA_TOKENS_29>|",
3453
+ "|<EXTRA_TOKENS_30>|",
3454
+ "|<EXTRA_TOKENS_31>|",
3455
+ "|<EXTRA_TOKENS_32>|",
3456
+ "|<EXTRA_TOKENS_33>|",
3457
+ "|<EXTRA_TOKENS_34>|",
3458
+ "|<EXTRA_TOKENS_35>|",
3459
+ "|<EXTRA_TOKENS_36>|",
3460
+ "|<EXTRA_TOKENS_37>|",
3461
+ "|<EXTRA_TOKENS_38>|",
3462
+ "|<EXTRA_TOKENS_39>|",
3463
+ "|<EXTRA_TOKENS_40>|",
3464
+ "|<EXTRA_TOKENS_41>|",
3465
+ "|<EXTRA_TOKENS_42>|",
3466
+ "|<EXTRA_TOKENS_43>|",
3467
+ "|<EXTRA_TOKENS_44>|",
3468
+ "|<EXTRA_TOKENS_45>|",
3469
+ "|<EXTRA_TOKENS_46>|",
3470
+ "|<EXTRA_TOKENS_47>|",
3471
+ "|<EXTRA_TOKENS_48>|",
3472
+ "|<EXTRA_TOKENS_49>|",
3473
+ "|<EXTRA_TOKENS_50>|",
3474
+ "|<EXTRA_TOKENS_51>|",
3475
+ "|<EXTRA_TOKENS_52>|",
3476
+ "|<EXTRA_TOKENS_53>|",
3477
+ "|<EXTRA_TOKENS_54>|",
3478
+ "|<EXTRA_TOKENS_55>|",
3479
+ "|<EXTRA_TOKENS_56>|",
3480
+ "|<EXTRA_TOKENS_57>|",
3481
+ "|<EXTRA_TOKENS_58>|",
3482
+ "|<EXTRA_TOKENS_59>|",
3483
+ "|<EXTRA_TOKENS_60>|",
3484
+ "|<EXTRA_TOKENS_61>|",
3485
+ "|<EXTRA_TOKENS_62>|",
3486
+ "|<EXTRA_TOKENS_63>|",
3487
+ "|<EXTRA_TOKENS_64>|",
3488
+ "|<EXTRA_TOKENS_65>|",
3489
+ "|<EXTRA_TOKENS_66>|",
3490
+ "|<EXTRA_TOKENS_67>|",
3491
+ "|<EXTRA_TOKENS_68>|",
3492
+ "|<EXTRA_TOKENS_69>|",
3493
+ "|<EXTRA_TOKENS_70>|",
3494
+ "|<EXTRA_TOKENS_71>|",
3495
+ "|<EXTRA_TOKENS_72>|",
3496
+ "|<EXTRA_TOKENS_73>|",
3497
+ "|<EXTRA_TOKENS_74>|",
3498
+ "|<EXTRA_TOKENS_75>|",
3499
+ "|<EXTRA_TOKENS_76>|",
3500
+ "|<EXTRA_TOKENS_77>|",
3501
+ "|<EXTRA_TOKENS_78>|",
3502
+ "|<EXTRA_TOKENS_79>|",
3503
+ "|<EXTRA_TOKENS_80>|",
3504
+ "|<EXTRA_TOKENS_81>|",
3505
+ "|<EXTRA_TOKENS_82>|",
3506
+ "|<EXTRA_TOKENS_83>|",
3507
+ "|<EXTRA_TOKENS_84>|",
3508
+ "|<EXTRA_TOKENS_85>|",
3509
+ "|<EXTRA_TOKENS_86>|",
3510
+ "|<EXTRA_TOKENS_87>|",
3511
+ "|<EXTRA_TOKENS_88>|",
3512
+ "|<EXTRA_TOKENS_89>|",
3513
+ "|<EXTRA_TOKENS_90>|",
3514
+ "|<EXTRA_TOKENS_91>|",
3515
+ "|<EXTRA_TOKENS_92>|",
3516
+ "|<EXTRA_TOKENS_93>|",
3517
+ "|<EXTRA_TOKENS_94>|",
3518
+ "|<EXTRA_TOKENS_95>|",
3519
+ "|<EXTRA_TOKENS_96>|",
3520
+ "|<EXTRA_TOKENS_97>|",
3521
+ "|<EXTRA_TOKENS_98>|",
3522
+ "|<EXTRA_TOKENS_99>|",
3523
+ "|<EXTRA_TOKENS_100>|",
3524
+ "|<EXTRA_TOKENS_101>|",
3525
+ "|<EXTRA_TOKENS_102>|",
3526
+ "|<EXTRA_TOKENS_103>|",
3527
+ "|<EXTRA_TOKENS_104>|",
3528
+ "|<EXTRA_TOKENS_105>|",
3529
+ "|<EXTRA_TOKENS_106>|",
3530
+ "|<EXTRA_TOKENS_107>|",
3531
+ "|<EXTRA_TOKENS_108>|",
3532
+ "|<EXTRA_TOKENS_109>|",
3533
+ "|<EXTRA_TOKENS_110>|",
3534
+ "|<EXTRA_TOKENS_111>|",
3535
+ "|<EXTRA_TOKENS_112>|",
3536
+ "|<EXTRA_TOKENS_113>|",
3537
+ "|<EXTRA_TOKENS_114>|",
3538
+ "|<EXTRA_TOKENS_115>|",
3539
+ "|<EXTRA_TOKENS_116>|",
3540
+ "|<EXTRA_TOKENS_117>|",
3541
+ "|<EXTRA_TOKENS_118>|",
3542
+ "|<EXTRA_TOKENS_119>|",
3543
+ "|<EXTRA_TOKENS_120>|",
3544
+ "|<EXTRA_TOKENS_121>|",
3545
+ "|<EXTRA_TOKENS_122>|",
3546
+ "|<EXTRA_TOKENS_123>|",
3547
+ "|<EXTRA_TOKENS_124>|",
3548
+ "|<EXTRA_TOKENS_125>|",
3549
+ "|<EXTRA_TOKENS_126>|",
3550
+ "|<EXTRA_TOKENS_127>|",
3551
+ "|<EXTRA_TOKENS_128>|",
3552
+ "|<EXTRA_TOKENS_129>|",
3553
+ "|<EXTRA_TOKENS_130>|",
3554
+ "|<EXTRA_TOKENS_131>|",
3555
+ "|<EXTRA_TOKENS_132>|",
3556
+ "|<EXTRA_TOKENS_133>|",
3557
+ "|<EXTRA_TOKENS_134>|",
3558
+ "|<EXTRA_TOKENS_135>|",
3559
+ "|<EXTRA_TOKENS_136>|",
3560
+ "|<EXTRA_TOKENS_137>|",
3561
+ "|<EXTRA_TOKENS_138>|",
3562
+ "|<EXTRA_TOKENS_139>|",
3563
+ "|<EXTRA_TOKENS_140>|",
3564
+ "|<EXTRA_TOKENS_141>|",
3565
+ "|<EXTRA_TOKENS_142>|",
3566
+ "|<EXTRA_TOKENS_143>|",
3567
+ "|<EXTRA_TOKENS_144>|",
3568
+ "|<EXTRA_TOKENS_145>|",
3569
+ "|<EXTRA_TOKENS_146>|",
3570
+ "|<EXTRA_TOKENS_147>|",
3571
+ "|<EXTRA_TOKENS_148>|",
3572
+ "|<EXTRA_TOKENS_149>|",
3573
+ "|<EXTRA_TOKENS_150>|",
3574
+ "|<EXTRA_TOKENS_151>|",
3575
+ "|<EXTRA_TOKENS_152>|",
3576
+ "|<EXTRA_TOKENS_153>|",
3577
+ "|<EXTRA_TOKENS_154>|",
3578
+ "|<EXTRA_TOKENS_155>|",
3579
+ "|<EXTRA_TOKENS_156>|",
3580
+ "|<EXTRA_TOKENS_157>|",
3581
+ "|<EXTRA_TOKENS_158>|",
3582
+ "|<EXTRA_TOKENS_159>|",
3583
+ "|<EXTRA_TOKENS_160>|",
3584
+ "|<EXTRA_TOKENS_161>|",
3585
+ "|<EXTRA_TOKENS_162>|",
3586
+ "|<EXTRA_TOKENS_163>|",
3587
+ "|<EXTRA_TOKENS_164>|",
3588
+ "|<EXTRA_TOKENS_165>|",
3589
+ "|<EXTRA_TOKENS_166>|",
3590
+ "|<EXTRA_TOKENS_167>|",
3591
+ "|<EXTRA_TOKENS_168>|",
3592
+ "|<EXTRA_TOKENS_169>|",
3593
+ "|<EXTRA_TOKENS_170>|",
3594
+ "|<EXTRA_TOKENS_171>|",
3595
+ "|<EXTRA_TOKENS_172>|",
3596
+ "|<EXTRA_TOKENS_173>|",
3597
+ "|<EXTRA_TOKENS_174>|",
3598
+ "|<EXTRA_TOKENS_175>|",
3599
+ "|<EXTRA_TOKENS_176>|",
3600
+ "|<EXTRA_TOKENS_177>|",
3601
+ "|<EXTRA_TOKENS_178>|",
3602
+ "|<EXTRA_TOKENS_179>|",
3603
+ "|<EXTRA_TOKENS_180>|",
3604
+ "|<EXTRA_TOKENS_181>|",
3605
+ "|<EXTRA_TOKENS_182>|",
3606
+ "|<EXTRA_TOKENS_183>|",
3607
+ "|<EXTRA_TOKENS_184>|",
3608
+ "|<EXTRA_TOKENS_185>|",
3609
+ "|<EXTRA_TOKENS_186>|",
3610
+ "|<EXTRA_TOKENS_187>|",
3611
+ "|<EXTRA_TOKENS_188>|",
3612
+ "|<EXTRA_TOKENS_189>|",
3613
+ "|<EXTRA_TOKENS_190>|",
3614
+ "|<EXTRA_TOKENS_191>|",
3615
+ "|<EXTRA_TOKENS_192>|",
3616
+ "|<EXTRA_TOKENS_193>|",
3617
+ "|<EXTRA_TOKENS_194>|",
3618
+ "|<EXTRA_TOKENS_195>|",
3619
+ "|<EXTRA_TOKENS_196>|",
3620
+ "|<EXTRA_TOKENS_197>|",
3621
+ "|<EXTRA_TOKENS_198>|",
3622
+ "|<EXTRA_TOKENS_199>|",
3623
+ "|<EXTRA_TOKENS_200>|",
3624
+ "|<EXTRA_TOKENS_201>|",
3625
+ "|<EXTRA_TOKENS_202>|",
3626
+ "|<EXTRA_TOKENS_203>|",
3627
+ "|<EXTRA_TOKENS_204>|",
3628
+ "|<EXTRA_TOKENS_205>|",
3629
+ "|<EXTRA_TOKENS_206>|",
3630
+ "|<EXTRA_TOKENS_207>|",
3631
+ "|<EXTRA_TOKENS_208>|",
3632
+ "|<EXTRA_TOKENS_209>|",
3633
+ "|<EXTRA_TOKENS_210>|",
3634
+ "|<EXTRA_TOKENS_211>|",
3635
+ "|<EXTRA_TOKENS_212>|",
3636
+ "|<EXTRA_TOKENS_213>|",
3637
+ "|<EXTRA_TOKENS_214>|",
3638
+ "|<EXTRA_TOKENS_215>|",
3639
+ "|<EXTRA_TOKENS_216>|",
3640
+ "|<EXTRA_TOKENS_217>|",
3641
+ "|<EXTRA_TOKENS_218>|",
3642
+ "|<EXTRA_TOKENS_219>|",
3643
+ "|<EXTRA_TOKENS_220>|",
3644
+ "|<EXTRA_TOKENS_221>|",
3645
+ "|<EXTRA_TOKENS_222>|",
3646
+ "|<EXTRA_TOKENS_223>|",
3647
+ "|<EXTRA_TOKENS_224>|",
3648
+ "|<EXTRA_TOKENS_225>|",
3649
+ "|<EXTRA_TOKENS_226>|",
3650
+ "|<EXTRA_TOKENS_227>|",
3651
+ "|<EXTRA_TOKENS_228>|",
3652
+ "|<EXTRA_TOKENS_229>|",
3653
+ "|<EXTRA_TOKENS_230>|",
3654
+ "|<EXTRA_TOKENS_231>|",
3655
+ "|<EXTRA_TOKENS_232>|",
3656
+ "|<EXTRA_TOKENS_233>|",
3657
+ "|<EXTRA_TOKENS_234>|",
3658
+ "|<EXTRA_TOKENS_235>|",
3659
+ "|<EXTRA_TOKENS_236>|",
3660
+ "|<EXTRA_TOKENS_237>|",
3661
+ "|<EXTRA_TOKENS_238>|",
3662
+ "|<EXTRA_TOKENS_239>|",
3663
+ "|<EXTRA_TOKENS_240>|",
3664
+ "|<EXTRA_TOKENS_241>|",
3665
+ "|<EXTRA_TOKENS_242>|",
3666
+ "|<EXTRA_TOKENS_243>|",
3667
+ "|<EXTRA_TOKENS_244>|",
3668
+ "|<EXTRA_TOKENS_245>|",
3669
+ "|<EXTRA_TOKENS_246>|",
3670
+ "|<EXTRA_TOKENS_247>|",
3671
+ "|<EXTRA_TOKENS_248>|",
3672
+ "|<EXTRA_TOKENS_249>|",
3673
+ "|<EXTRA_TOKENS_250>|",
3674
+ "|<EXTRA_TOKENS_251>|",
3675
+ "|<EXTRA_TOKENS_252>|",
3676
+ "|<EXTRA_TOKENS_253>|",
3677
+ "|<EXTRA_TOKENS_254>|",
3678
+ "|<EXTRA_TOKENS_255>|",
3679
+ "|<EXTRA_TOKENS_256>|",
3680
+ "|<EXTRA_TOKENS_257>|",
3681
+ "|<EXTRA_TOKENS_258>|",
3682
+ "|<EXTRA_TOKENS_259>|",
3683
+ "|<EXTRA_TOKENS_260>|",
3684
+ "|<EXTRA_TOKENS_261>|",
3685
+ "|<EXTRA_TOKENS_262>|",
3686
+ "|<EXTRA_TOKENS_263>|",
3687
+ "|<EXTRA_TOKENS_264>|",
3688
+ "|<EXTRA_TOKENS_265>|",
3689
+ "|<EXTRA_TOKENS_266>|",
3690
+ "|<EXTRA_TOKENS_267>|",
3691
+ "|<EXTRA_TOKENS_268>|",
3692
+ "<im_start>",
3693
+ "<im_end>",
3694
+ "<im_patch>",
3695
+ "<im_col>",
3696
+ "<|image|>",
3697
+ "<im_low>"
3698
+ ],
3699
+ "auto_map": {
3700
+ "AutoProcessor": "processing_molmo2.Molmo2Processor"
3701
+ },
3702
+ "bos_token": "<|endoftext|>",
3703
+ "clean_up_tokenization_spaces": false,
3704
+ "eos_token": "<|endoftext|>",
3705
+ "errors": "replace",
3706
+ "extra_special_tokens": {},
3707
+ "model_max_length": 131072,
3708
+ "pad_token": "<|endoftext|>",
3709
+ "processor_class": "Molmo2Processor",
3710
+ "split_special_tokens": false,
3711
+ "tokenizer_class": "Qwen2Tokenizer",
3712
+ "unk_token": null
3713
+ }
vocab.json ADDED
The diff for this file is too large to render. See raw diff
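
A minimal usage sketch (not part of the commit): given the tokenizer_config.json shown above, the assets in this upload could presumably be loaded as below. The repo id is a placeholder, and `trust_remote_code=True` is assumed because the `auto_map` entry points `AutoProcessor` at `processing_molmo2.Molmo2Processor` shipped with the repo.

```python
# Hypothetical loading example for the uploaded tokenizer/processor files.
# Assumptions: repo_id is a placeholder for this repository's id, and the
# custom processor class referenced in "auto_map" requires trust_remote_code.
from transformers import AutoProcessor, AutoTokenizer

repo_id = "<this-repo-id>"  # placeholder, not a real identifier

# AutoProcessor should resolve to Molmo2Processor via the "auto_map" entry above.
processor = AutoProcessor.from_pretrained(repo_id, trust_remote_code=True)

# The underlying tokenizer is declared as a Qwen2Tokenizer with the extra special
# tokens added (|<EXTRA_TOKENS_*>|, <im_start>, <im_end>, <im_patch>, <im_col>,
# <|image|>, <im_low>) and model_max_length of 131072.
tokenizer = AutoTokenizer.from_pretrained(repo_id, trust_remote_code=True)

# Per this config, the image placeholder token maps to id 152068.
print(tokenizer.convert_tokens_to_ids("<|image|>"))
```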