{
"metadata": {
"total_size": 16238835616
},
"weight_map": {
"lm_head.weight": "model-00004-of-00004.safetensors",
"model.transformer.blocks.0.attn_norm.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.0.ff_norm.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.0.mlp.ff_out.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.0.mlp.ff_proj.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.0.self_attn.att_proj.bias": "model-00001-of-00004.safetensors",
"model.transformer.blocks.0.self_attn.att_proj.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.0.self_attn.attn_out.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.1.attn_norm.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.1.ff_norm.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.1.mlp.ff_out.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.1.mlp.ff_proj.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.1.self_attn.att_proj.bias": "model-00001-of-00004.safetensors",
"model.transformer.blocks.1.self_attn.att_proj.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.1.self_attn.attn_out.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.10.attn_norm.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.10.ff_norm.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.10.mlp.ff_out.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.10.mlp.ff_proj.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.10.self_attn.att_proj.bias": "model-00002-of-00004.safetensors",
"model.transformer.blocks.10.self_attn.att_proj.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.10.self_attn.attn_out.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.11.attn_norm.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.11.ff_norm.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.11.mlp.ff_out.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.11.mlp.ff_proj.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.11.self_attn.att_proj.bias": "model-00002-of-00004.safetensors",
"model.transformer.blocks.11.self_attn.att_proj.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.11.self_attn.attn_out.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.12.attn_norm.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.12.ff_norm.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.12.mlp.ff_out.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.12.mlp.ff_proj.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.12.self_attn.att_proj.bias": "model-00002-of-00004.safetensors",
"model.transformer.blocks.12.self_attn.att_proj.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.12.self_attn.attn_out.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.13.attn_norm.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.13.ff_norm.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.13.mlp.ff_out.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.13.mlp.ff_proj.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.13.self_attn.att_proj.bias": "model-00002-of-00004.safetensors",
"model.transformer.blocks.13.self_attn.att_proj.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.13.self_attn.attn_out.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.14.attn_norm.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.14.ff_norm.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.14.mlp.ff_out.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.14.mlp.ff_proj.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.14.self_attn.att_proj.bias": "model-00002-of-00004.safetensors",
"model.transformer.blocks.14.self_attn.att_proj.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.14.self_attn.attn_out.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.15.attn_norm.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.15.ff_norm.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.15.mlp.ff_out.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.15.mlp.ff_proj.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.15.self_attn.att_proj.bias": "model-00002-of-00004.safetensors",
"model.transformer.blocks.15.self_attn.att_proj.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.15.self_attn.attn_out.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.16.attn_norm.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.16.ff_norm.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.16.mlp.ff_out.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.16.mlp.ff_proj.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.16.self_attn.att_proj.bias": "model-00002-of-00004.safetensors",
"model.transformer.blocks.16.self_attn.att_proj.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.16.self_attn.attn_out.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.17.attn_norm.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.17.ff_norm.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.17.mlp.ff_out.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.17.mlp.ff_proj.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.17.self_attn.att_proj.bias": "model-00002-of-00004.safetensors",
"model.transformer.blocks.17.self_attn.att_proj.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.17.self_attn.attn_out.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.18.attn_norm.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.18.ff_norm.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.18.mlp.ff_out.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.18.mlp.ff_proj.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.18.self_attn.att_proj.bias": "model-00002-of-00004.safetensors",
"model.transformer.blocks.18.self_attn.att_proj.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.18.self_attn.attn_out.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.19.attn_norm.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.19.ff_norm.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.19.mlp.ff_out.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.19.mlp.ff_proj.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.19.self_attn.att_proj.bias": "model-00003-of-00004.safetensors",
"model.transformer.blocks.19.self_attn.att_proj.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.19.self_attn.attn_out.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.2.attn_norm.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.2.ff_norm.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.2.mlp.ff_out.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.2.mlp.ff_proj.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.2.self_attn.att_proj.bias": "model-00001-of-00004.safetensors",
"model.transformer.blocks.2.self_attn.att_proj.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.2.self_attn.attn_out.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.20.attn_norm.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.20.ff_norm.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.20.mlp.ff_out.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.20.mlp.ff_proj.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.20.self_attn.att_proj.bias": "model-00003-of-00004.safetensors",
"model.transformer.blocks.20.self_attn.att_proj.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.20.self_attn.attn_out.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.21.attn_norm.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.21.ff_norm.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.21.mlp.ff_out.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.21.mlp.ff_proj.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.21.self_attn.att_proj.bias": "model-00003-of-00004.safetensors",
"model.transformer.blocks.21.self_attn.att_proj.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.21.self_attn.attn_out.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.22.attn_norm.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.22.ff_norm.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.22.mlp.ff_out.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.22.mlp.ff_proj.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.22.self_attn.att_proj.bias": "model-00003-of-00004.safetensors",
"model.transformer.blocks.22.self_attn.att_proj.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.22.self_attn.attn_out.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.23.attn_norm.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.23.ff_norm.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.23.mlp.ff_out.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.23.mlp.ff_proj.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.23.self_attn.att_proj.bias": "model-00003-of-00004.safetensors",
"model.transformer.blocks.23.self_attn.att_proj.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.23.self_attn.attn_out.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.24.attn_norm.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.24.ff_norm.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.24.mlp.ff_out.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.24.mlp.ff_proj.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.24.self_attn.att_proj.bias": "model-00003-of-00004.safetensors",
"model.transformer.blocks.24.self_attn.att_proj.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.24.self_attn.attn_out.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.25.attn_norm.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.25.ff_norm.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.25.mlp.ff_out.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.25.mlp.ff_proj.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.25.self_attn.att_proj.bias": "model-00003-of-00004.safetensors",
"model.transformer.blocks.25.self_attn.att_proj.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.25.self_attn.attn_out.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.26.attn_norm.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.26.ff_norm.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.26.mlp.ff_out.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.26.mlp.ff_proj.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.26.self_attn.att_proj.bias": "model-00003-of-00004.safetensors",
"model.transformer.blocks.26.self_attn.att_proj.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.26.self_attn.attn_out.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.27.attn_norm.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.27.ff_norm.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.27.mlp.ff_out.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.27.mlp.ff_proj.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.27.self_attn.att_proj.bias": "model-00003-of-00004.safetensors",
"model.transformer.blocks.27.self_attn.att_proj.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.27.self_attn.attn_out.weight": "model-00003-of-00004.safetensors",
"model.transformer.blocks.3.attn_norm.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.3.ff_norm.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.3.mlp.ff_out.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.3.mlp.ff_proj.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.3.self_attn.att_proj.bias": "model-00001-of-00004.safetensors",
"model.transformer.blocks.3.self_attn.att_proj.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.3.self_attn.attn_out.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.4.attn_norm.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.4.ff_norm.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.4.mlp.ff_out.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.4.mlp.ff_proj.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.4.self_attn.att_proj.bias": "model-00001-of-00004.safetensors",
"model.transformer.blocks.4.self_attn.att_proj.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.4.self_attn.attn_out.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.5.attn_norm.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.5.ff_norm.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.5.mlp.ff_out.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.5.mlp.ff_proj.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.5.self_attn.att_proj.bias": "model-00001-of-00004.safetensors",
"model.transformer.blocks.5.self_attn.att_proj.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.5.self_attn.attn_out.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.6.attn_norm.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.6.ff_norm.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.6.mlp.ff_out.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.6.mlp.ff_proj.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.6.self_attn.att_proj.bias": "model-00001-of-00004.safetensors",
"model.transformer.blocks.6.self_attn.att_proj.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.6.self_attn.attn_out.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.7.attn_norm.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.7.ff_norm.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.7.mlp.ff_out.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.7.mlp.ff_proj.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.7.self_attn.att_proj.bias": "model-00001-of-00004.safetensors",
"model.transformer.blocks.7.self_attn.att_proj.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.7.self_attn.attn_out.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.8.attn_norm.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.8.ff_norm.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.8.mlp.ff_out.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.8.mlp.ff_proj.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.8.self_attn.att_proj.bias": "model-00001-of-00004.safetensors",
"model.transformer.blocks.8.self_attn.att_proj.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.8.self_attn.attn_out.weight": "model-00001-of-00004.safetensors",
"model.transformer.blocks.9.attn_norm.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.9.ff_norm.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.9.mlp.ff_out.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.9.mlp.ff_proj.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.9.self_attn.att_proj.bias": "model-00002-of-00004.safetensors",
"model.transformer.blocks.9.self_attn.att_proj.weight": "model-00002-of-00004.safetensors",
"model.transformer.blocks.9.self_attn.attn_out.weight": "model-00002-of-00004.safetensors",
"model.transformer.ln_f.weight": "model-00003-of-00004.safetensors",
"model.transformer.wte.embedding": "model-00001-of-00004.safetensors",
"model.transformer.wte.new_embedding": "model-00001-of-00004.safetensors",
"model.vision_backbone.image_pooling_2d.wk.bias": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_pooling_2d.wk.weight": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_pooling_2d.wo.bias": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_pooling_2d.wo.weight": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_pooling_2d.wq.bias": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_pooling_2d.wq.weight": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_pooling_2d.wv.bias": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_pooling_2d.wv.weight": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_projector.w1.weight": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_projector.w2.weight": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_projector.w3.weight": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.patch_embedding.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.patch_embedding.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.positional_embedding": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.0.attention.wk.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.0.attention.wk.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.0.attention.wo.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.0.attention.wo.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.0.attention.wq.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.0.attention.wq.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.0.attention.wv.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.0.attention.wv.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.0.attention_norm.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.0.attention_norm.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.0.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.0.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.0.feed_forward.w2.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.0.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.0.ffn_norm.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.0.ffn_norm.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.1.attention.wk.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.1.attention.wk.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.1.attention.wo.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.1.attention.wo.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.1.attention.wq.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.1.attention.wq.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.1.attention.wv.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.1.attention.wv.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.1.attention_norm.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.1.attention_norm.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.1.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.1.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.1.feed_forward.w2.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.1.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.1.ffn_norm.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.1.ffn_norm.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.10.attention.wk.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.10.attention.wk.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.10.attention.wo.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.10.attention.wo.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.10.attention.wq.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.10.attention.wq.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.10.attention.wv.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.10.attention.wv.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.10.attention_norm.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.10.attention_norm.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.10.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.10.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.10.feed_forward.w2.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.10.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.10.ffn_norm.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.10.ffn_norm.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.11.attention.wk.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.11.attention.wk.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.11.attention.wo.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.11.attention.wo.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.11.attention.wq.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.11.attention.wq.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.11.attention.wv.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.11.attention.wv.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.11.attention_norm.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.11.attention_norm.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.11.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.11.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.11.feed_forward.w2.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.11.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.11.ffn_norm.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.11.ffn_norm.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.12.attention.wk.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.12.attention.wk.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.12.attention.wo.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.12.attention.wo.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.12.attention.wq.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.12.attention.wq.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.12.attention.wv.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.12.attention.wv.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.12.attention_norm.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.12.attention_norm.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.12.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.12.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.12.feed_forward.w2.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.12.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.12.ffn_norm.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.12.ffn_norm.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.13.attention.wk.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.13.attention.wk.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.13.attention.wo.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.13.attention.wo.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.13.attention.wq.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.13.attention.wq.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.13.attention.wv.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.13.attention.wv.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.13.attention_norm.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.13.attention_norm.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.13.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.13.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.13.feed_forward.w2.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.13.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.13.ffn_norm.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.13.ffn_norm.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.14.attention.wk.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.14.attention.wk.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.14.attention.wo.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.14.attention.wo.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.14.attention.wq.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.14.attention.wq.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.14.attention.wv.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.14.attention.wv.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.14.attention_norm.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.14.attention_norm.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.14.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.14.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.14.feed_forward.w2.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.14.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.14.ffn_norm.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.14.ffn_norm.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.15.attention.wk.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.15.attention.wk.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.15.attention.wo.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.15.attention.wo.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.15.attention.wq.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.15.attention.wq.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.15.attention.wv.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.15.attention.wv.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.15.attention_norm.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.15.attention_norm.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.15.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.15.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.15.feed_forward.w2.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.15.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.15.ffn_norm.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.15.ffn_norm.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.16.attention.wk.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.16.attention.wk.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.16.attention.wo.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.16.attention.wo.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.16.attention.wq.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.16.attention.wq.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.16.attention.wv.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.16.attention.wv.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.16.attention_norm.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.16.attention_norm.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.16.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.16.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.16.feed_forward.w2.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.16.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.16.ffn_norm.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.16.ffn_norm.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.17.attention.wk.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.17.attention.wk.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.17.attention.wo.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.17.attention.wo.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.17.attention.wq.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.17.attention.wq.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.17.attention.wv.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.17.attention.wv.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.17.attention_norm.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.17.attention_norm.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.17.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.17.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.17.feed_forward.w2.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.17.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.17.ffn_norm.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.17.ffn_norm.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.18.attention.wk.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.18.attention.wk.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.18.attention.wo.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.18.attention.wo.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.18.attention.wq.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.18.attention.wq.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.18.attention.wv.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.18.attention.wv.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.18.attention_norm.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.18.attention_norm.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.18.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.18.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.18.feed_forward.w2.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.18.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.18.ffn_norm.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.18.ffn_norm.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.19.attention.wk.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.19.attention.wk.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.19.attention.wo.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.19.attention.wo.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.19.attention.wq.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.19.attention.wq.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.19.attention.wv.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.19.attention.wv.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.19.attention_norm.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.19.attention_norm.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.19.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.19.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.19.feed_forward.w2.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.19.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.19.ffn_norm.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.19.ffn_norm.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.2.attention.wk.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.2.attention.wk.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.2.attention.wo.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.2.attention.wo.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.2.attention.wq.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.2.attention.wq.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.2.attention.wv.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.2.attention.wv.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.2.attention_norm.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.2.attention_norm.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.2.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.2.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.2.feed_forward.w2.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.2.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.2.ffn_norm.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.2.ffn_norm.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.20.attention.wk.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.20.attention.wk.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.20.attention.wo.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.20.attention.wo.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.20.attention.wq.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.20.attention.wq.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.20.attention.wv.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.20.attention.wv.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.20.attention_norm.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.20.attention_norm.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.20.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.20.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.20.feed_forward.w2.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.20.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.20.ffn_norm.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.20.ffn_norm.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.21.attention.wk.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.21.attention.wk.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.21.attention.wo.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.21.attention.wo.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.21.attention.wq.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.21.attention.wq.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.21.attention.wv.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.21.attention.wv.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.21.attention_norm.bias": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.21.attention_norm.weight": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.21.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.21.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.21.feed_forward.w2.bias": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.21.feed_forward.w2.weight": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.21.ffn_norm.bias": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.21.ffn_norm.weight": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.22.attention.wk.bias": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.22.attention.wk.weight": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.22.attention.wo.bias": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.22.attention.wo.weight": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.22.attention.wq.bias": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.22.attention.wq.weight": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.22.attention.wv.bias": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.22.attention.wv.weight": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.22.attention_norm.bias": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.22.attention_norm.weight": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.22.feed_forward.w1.bias": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.22.feed_forward.w1.weight": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.22.feed_forward.w2.bias": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.22.feed_forward.w2.weight": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.22.ffn_norm.bias": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.22.ffn_norm.weight": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.23.attention.wk.bias": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.23.attention.wk.weight": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.23.attention.wo.bias": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.23.attention.wo.weight": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.23.attention.wq.bias": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.23.attention.wq.weight": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.23.attention.wv.bias": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.23.attention.wv.weight": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.23.attention_norm.bias": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.23.attention_norm.weight": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.23.feed_forward.w1.bias": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.23.feed_forward.w1.weight": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.23.feed_forward.w2.bias": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.23.feed_forward.w2.weight": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.23.ffn_norm.bias": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.23.ffn_norm.weight": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.24.attention.wk.bias": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.24.attention.wk.weight": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.24.attention.wo.bias": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.24.attention.wo.weight": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.24.attention.wq.bias": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.24.attention.wq.weight": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.24.attention.wv.bias": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.24.attention.wv.weight": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.24.attention_norm.bias": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.24.attention_norm.weight": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.24.feed_forward.w1.bias": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.24.feed_forward.w1.weight": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.24.feed_forward.w2.bias": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.24.feed_forward.w2.weight": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.24.ffn_norm.bias": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.24.ffn_norm.weight": "model-00004-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.3.attention.wk.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.3.attention.wk.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.3.attention.wo.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.3.attention.wo.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.3.attention.wq.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.3.attention.wq.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.3.attention.wv.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.3.attention.wv.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.3.attention_norm.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.3.attention_norm.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.3.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.3.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.3.feed_forward.w2.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.3.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.3.ffn_norm.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.3.ffn_norm.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.4.attention.wk.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.4.attention.wk.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.4.attention.wo.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.4.attention.wo.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.4.attention.wq.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.4.attention.wq.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.4.attention.wv.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.4.attention.wv.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.4.attention_norm.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.4.attention_norm.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.4.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.4.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.4.feed_forward.w2.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.4.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.4.ffn_norm.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.4.ffn_norm.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.5.attention.wk.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.5.attention.wk.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.5.attention.wo.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.5.attention.wo.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.5.attention.wq.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.5.attention.wq.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.5.attention.wv.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.5.attention.wv.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.5.attention_norm.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.5.attention_norm.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.5.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.5.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.5.feed_forward.w2.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.5.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.5.ffn_norm.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.5.ffn_norm.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.6.attention.wk.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.6.attention.wk.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.6.attention.wo.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.6.attention.wo.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.6.attention.wq.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.6.attention.wq.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.6.attention.wv.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.6.attention.wv.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.6.attention_norm.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.6.attention_norm.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.6.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.6.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.6.feed_forward.w2.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.6.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.6.ffn_norm.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.6.ffn_norm.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.7.attention.wk.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.7.attention.wk.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.7.attention.wo.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.7.attention.wo.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.7.attention.wq.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.7.attention.wq.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.7.attention.wv.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.7.attention.wv.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.7.attention_norm.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.7.attention_norm.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.7.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.7.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.7.feed_forward.w2.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.7.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.7.ffn_norm.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.7.ffn_norm.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.8.attention.wk.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.8.attention.wk.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.8.attention.wo.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.8.attention.wo.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.8.attention.wq.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.8.attention.wq.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.8.attention.wv.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.8.attention.wv.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.8.attention_norm.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.8.attention_norm.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.8.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.8.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.8.feed_forward.w2.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.8.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.8.ffn_norm.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.8.ffn_norm.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.9.attention.wk.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.9.attention.wk.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.9.attention.wo.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.9.attention.wo.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.9.attention.wq.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.9.attention.wq.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.9.attention.wv.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.9.attention.wv.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.9.attention_norm.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.9.attention_norm.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.9.feed_forward.w1.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.9.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.9.feed_forward.w2.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.9.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.9.ffn_norm.bias": "model-00003-of-00004.safetensors",
"model.vision_backbone.image_vit.transformer.resblocks.9.ffn_norm.weight": "model-00003-of-00004.safetensors"
}
}