# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
import math
from functools import partial

import torch
import torch.nn as nn

from networks.timm_vit import VisionTransformer, _cfg
from timm.models.registry import register_model
from timm.models.layers import trunc_normal_
__all__ = [
    'deit_tiny_patch16_224', 'deit_small_patch16_224', 'deit_base_patch16_224',
    'deit_tiny_distilled_patch16_224', 'deit_small_distilled_patch16_224',
    'deit_base_distilled_patch16_224', 'deit_base_patch16_384',
    'deit_base_distilled_patch16_384',
]

class DistilledVisionTransformer(VisionTransformer):
    """DeiT backbone with an extra distillation token and a second classifier head.

    The token sequence per image is [CLS, DIST, patch_1, ..., patch_N], so the
    positional embedding holds num_patches + 2 entries.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.dist_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim))
        num_patches = self.patch_embed.num_patches
        # +2 accounts for the [CLS] token and the distillation token.
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 2, self.embed_dim))
        self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if self.num_classes > 0 else nn.Identity()

        trunc_normal_(self.dist_token, std=.02)
        trunc_normal_(self.pos_embed, std=.02)
        self.head_dist.apply(self._init_weights)
    def forward_features(self, x):
        # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
        # with slight modifications to add the dist_token
        B = x.shape[0]
        x = self.patch_embed(x)

        cls_tokens = self.cls_token.expand(B, -1, -1)  # stole cls_tokens impl from Phil Wang, thanks
        dist_token = self.dist_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, dist_token, x), dim=1)

        x = x + self.pos_embed
        x = self.pos_drop(x)

        for blk in self.blocks:
            x = blk(x)

        x = self.norm(x)
        return x[:, 0], x[:, 1]  # final [CLS] token, final distillation token
    def forward(self, x):
        x, x_dist = self.forward_features(x)
        x = self.head(x)
        x_dist = self.head_dist(x_dist)
        if self.training:
            return x, x_dist
        else:
            # during inference, return the average of both classifier predictions
            return (x + x_dist) / 2
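    # Usage sketch (hypothetical shapes, assuming the default 1000-class head):
    #   model.train(); cls_logits, dist_logits = model(imgs)  # two heads for the DeiT loss
    #   model.eval();  logits = model(imgs)                   # averaged predictions, B x 1000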
    def interpolate_pos_encoding(self, x, pos_embed):
        """Interpolate the learnable positional encoding to match the number of patches.

        x: B x (1 + 1 + N patches) x dim_embedding
        pos_embed: 1 x (1 + 1 + N patches) x dim_embedding
        return: interpolated positional embedding
        """
        npatch = x.shape[1] - 2  # number of patches in the input (H // patch_size * W // patch_size)
        N = pos_embed.shape[1] - 2  # number of patches the embedding was trained for (a perfect square)
        if npatch == N:
            return pos_embed

        class_emb, distil_token, pos_embed = pos_embed[:, 0], pos_embed[:, 1], pos_embed[:, 2:]  # split off the [CLS] and distillation embeddings
        dim = x.shape[-1]  # embedding dimension
        pos_embed = nn.functional.interpolate(
            pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),  # 1 x dim x sqrt(N) x sqrt(N)
            scale_factor=math.sqrt(npatch / N) + 1e-5,  # small epsilon so the rounded output size lands on the target grid
            recompute_scale_factor=True,
            mode='bicubic'
        )
        pos_embed = pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
        pos_embed = torch.cat((class_emb.unsqueeze(0), distil_token.unsqueeze(0), pos_embed), dim=1)
        return pos_embed
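    # Worked example (hypothetical sizes): a model pretrained at 224 x 224 with
    # patch_size 16 has N = 196 positions on a 14 x 14 grid. Evaluating at
    # 320 x 320 gives npatch = 400, so the 14 x 14 grid of embeddings is
    # bicubically resized to 20 x 20 and flattened back to 400 positions;
    # the [CLS] and distillation embeddings are passed through unchanged.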
    def get_tokens(
            self,
            x,
            layers: list,
            patch_tokens: bool = False,
            norm: bool = True,
            input_tokens: bool = False,
            post_pe: bool = False
    ):
        """Return intermediate tokens from the requested blocks.

        layers: block indices to collect from (None collects every block).
        patch_tokens: if True, also return the patch tokens (the distillation
            token is always excluded); otherwise return [CLS] tokens only.
        norm: apply the final LayerNorm to each collected set of tokens.
        input_tokens: additionally collect the tokens before positional encoding.
        post_pe: additionally collect the tokens right after positional encoding.
        """
        list_tokens: list = []

        B = x.shape[0]
        x = self.patch_embed(x)

        cls_tokens = self.cls_token.expand(B, -1, -1)
        dist_token = self.dist_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, dist_token, x), dim=1)

        if input_tokens:
            list_tokens.append(x)

        pos_embed = self.interpolate_pos_encoding(x, self.pos_embed)
        x = x + pos_embed

        if post_pe:
            list_tokens.append(x)

        x = self.pos_drop(x)

        for i, blk in enumerate(self.blocks):
            x = blk(x)  # B x (2 + # patches) x dim
            if layers is None or i in layers:
                list_tokens.append(self.norm(x) if norm else x)

        tokens = torch.stack(list_tokens, dim=1)  # B x n_layers x (2 + # patches) x dim

        if not patch_tokens:
            return tokens[:, :, 0, :]  # index [CLS] tokens only, B x n_layers x dim
        else:
            return torch.cat((tokens[:, :, 0, :].unsqueeze(dim=2), tokens[:, :, 2:, :]), dim=2)  # exclude the distillation token
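    # Example (hypothetical block indices for a 12-block model):
    #   cls_feats = model.get_tokens(imgs, layers=[8, 9, 10, 11])        # B x 4 x dim
    #   dense = model.get_tokens(imgs, layers=[11], patch_tokens=True)   # B x 1 x (1 + N) x dim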

@register_model
def deit_tiny_patch16_224(pretrained=False, **kwargs):
    model = VisionTransformer(
        patch_size=16, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        checkpoint = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth",
            map_location="cpu", check_hash=True
        )
        model.load_state_dict(checkpoint["model"])
    return model

@register_model
def deit_small_patch16_224(pretrained=False, **kwargs):
    model = VisionTransformer(
        patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        checkpoint = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth",
            map_location="cpu", check_hash=True
        )
        model.load_state_dict(checkpoint["model"])
    return model

@register_model
def deit_base_patch16_224(pretrained=False, **kwargs):
    model = VisionTransformer(
        patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        checkpoint = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth",
            map_location="cpu", check_hash=True
        )
        model.load_state_dict(checkpoint["model"])
    return model

@register_model
def deit_tiny_distilled_patch16_224(pretrained=False, **kwargs):
    model = DistilledVisionTransformer(
        patch_size=16, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        checkpoint = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/deit/deit_tiny_distilled_patch16_224-b40b3cf7.pth",
            map_location="cpu", check_hash=True
        )
        model.load_state_dict(checkpoint["model"])
    return model

@register_model
def deit_small_distilled_patch16_224(pretrained=False, **kwargs):
    model = DistilledVisionTransformer(
        patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        checkpoint = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/deit/deit_small_distilled_patch16_224-649709d9.pth",
            map_location="cpu", check_hash=True
        )
        model.load_state_dict(checkpoint["model"])
    return model

@register_model
def deit_base_distilled_patch16_224(pretrained=False, **kwargs):
    model = DistilledVisionTransformer(
        patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        checkpoint = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_224-df68dfff.pth",
            map_location="cpu", check_hash=True
        )
        model.load_state_dict(checkpoint["model"])
    return model

@register_model
def deit_base_patch16_384(pretrained=False, **kwargs):
    model = VisionTransformer(
        img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        checkpoint = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/deit/deit_base_patch16_384-8de9b5d1.pth",
            map_location="cpu", check_hash=True
        )
        model.load_state_dict(checkpoint["model"])
    return model

@register_model
def deit_base_distilled_patch16_384(pretrained=False, **kwargs):
    model = DistilledVisionTransformer(
        img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        checkpoint = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_384-d0272ac0.pth",
            map_location="cpu", check_hash=True
        )
        model.load_state_dict(checkpoint["model"])
    return model
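
if __name__ == "__main__":
    # Minimal smoke test: a sketch, assuming `networks.timm_vit` is importable
    # and the default head has 1000 classes. Weights are left random so no
    # checkpoint download is needed.
    model = deit_tiny_distilled_patch16_224(pretrained=False)
    model.eval()
    dummy = torch.randn(2, 3, 224, 224)
    with torch.no_grad():
        logits = model(dummy)  # averaged classifier/distillation predictions
    print(logits.shape)  # expected: torch.Size([2, 1000])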