import timm
import numpy as np
import torch.nn as nn

from ._base import EncoderMixin


def _make_divisible(x, divisible_by=8):
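    # Round `x` up to the nearest multiple of `divisible_by`, the channel
    # rounding conventionally paired with MobileNet width multipliers.
    # Worked example (assumed for illustration, not asserted anywhere here):
    # _make_divisible(16 * 0.75) == 16, since ceil(12 / 8) * 8 == 16.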
    return int(np.ceil(x * 1.0 / divisible_by) * divisible_by)


class MobileNetV3Encoder(nn.Module, EncoderMixin):
    def __init__(self, model_name, width_mult, depth=5, **kwargs):
        super().__init__()
        if "large" not in model_name and "small" not in model_name:
            raise ValueError("MobileNetV3 wrong model name {}".format(model_name))

        self._mode = "small" if "small" in model_name else "large"
        self._depth = depth
        self._out_channels = self._get_channels(self._mode, width_mult)
        self._in_channels = 3

        # note: the "minimal" model variants replace hard-swish with ReLU
        self.model = timm.create_model(
            model_name=model_name,
            scriptable=True,  # torch.jit scriptable
            exportable=True,  # onnx export
            features_only=True,
        )
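        # Weights are randomly initialized here; the encoder factory that
        # consumes this module is expected to load pretrained weights through
        # `load_state_dict` below (an assumption about the surrounding code).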

    def _get_channels(self, mode, width_mult):
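        # Base channel counts at the five feature taps of the timm backbone,
        # scaled by the width multiplier and rounded up to a multiple of 8.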
        if mode == "small":
            channels = [16, 16, 24, 48, 576]
        else:
            channels = [16, 24, 40, 112, 960]
        channels = [3] + [_make_divisible(x * width_mult) for x in channels]
        return tuple(channels)
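
    # For reference (assumed arithmetic, easy to verify by hand):
    #   _get_channels("small", 1.0)  -> (3, 16, 16, 24, 48, 576)
    #   _get_channels("large", 0.75) -> (3, 16, 24, 32, 88, 720)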

    def get_stages(self):
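        # Slicing an nn.Sequential (e.g. self.model.blocks[2:4]) returns a new
        # nn.Sequential, so every stage below is a single callable module.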
        if self._mode == "small":
            return [
                nn.Identity(),
                nn.Sequential(self.model.conv_stem, self.model.bn1, self.model.act1),
                self.model.blocks[0],
                self.model.blocks[1],
                self.model.blocks[2:4],
                self.model.blocks[4:],
            ]
        elif self._mode == "large":
            return [
                nn.Identity(),
                nn.Sequential(
                    self.model.conv_stem,
                    self.model.bn1,
                    self.model.act1,
                    self.model.blocks[0],
                ),
                self.model.blocks[1],
                self.model.blocks[2],
                self.model.blocks[3:5],
                self.model.blocks[5:],
            ]
        else:
            raise ValueError(
                "MobileNetV3 mode should be small or large, got {}".format(self._mode)
            )

    def forward(self, x):
        stages = self.get_stages()

        features = []
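        # features[0] is the raw input (identity stage); feature i has
        # spatial stride 2**i for i >= 1.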
        for i in range(self._depth + 1):
            x = stages[i](x)
            features.append(x)

        return features

    def load_state_dict(self, state_dict, **kwargs):
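        # Drop classification-head tensors: the features-only backbone created
        # in __init__ has no conv_head or classifier modules to receive them.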
        state_dict.pop("conv_head.weight", None)
        state_dict.pop("conv_head.bias", None)
        state_dict.pop("classifier.weight", None)
        state_dict.pop("classifier.bias", None)
        self.model.load_state_dict(state_dict, **kwargs)


mobilenetv3_weights = {
    "tf_mobilenetv3_large_075": {
        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_075-150ee8b0.pth"  # noqa
    },
    "tf_mobilenetv3_large_100": {
        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_100-427764d5.pth"  # noqa
    },
    "tf_mobilenetv3_large_minimal_100": {
        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_minimal_100-8596ae28.pth"  # noqa
    },
    "tf_mobilenetv3_small_075": {
        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_075-da427f52.pth"  # noqa
    },
    "tf_mobilenetv3_small_100": {
        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_100-37f49e2b.pth"  # noqa
    },
    "tf_mobilenetv3_small_minimal_100": {
        "imagenet": "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_minimal_100-922a7843.pth"  # noqa
    },
}

pretrained_settings = {}
for model_name, sources in mobilenetv3_weights.items():
    pretrained_settings[model_name] = {}
    for source_name, source_url in sources.items():
        pretrained_settings[model_name][source_name] = {
            "url": source_url,
            "input_range": [0, 1],
            "mean": [0.485, 0.456, 0.406],
            "std": [0.229, 0.224, 0.225],
            "input_space": "RGB",
        }


timm_mobilenetv3_encoders = {
    "timm-mobilenetv3_large_075": {
        "encoder": MobileNetV3Encoder,
        "pretrained_settings": pretrained_settings["tf_mobilenetv3_large_075"],
        "params": {"model_name": "tf_mobilenetv3_large_075", "width_mult": 0.75},
    },
    "timm-mobilenetv3_large_100": {
        "encoder": MobileNetV3Encoder,
        "pretrained_settings": pretrained_settings["tf_mobilenetv3_large_100"],
        "params": {"model_name": "tf_mobilenetv3_large_100", "width_mult": 1.0},
    },
    "timm-mobilenetv3_large_minimal_100": {
        "encoder": MobileNetV3Encoder,
        "pretrained_settings": pretrained_settings["tf_mobilenetv3_large_minimal_100"],
        "params": {"model_name": "tf_mobilenetv3_large_minimal_100", "width_mult": 1.0},
    },
    "timm-mobilenetv3_small_075": {
        "encoder": MobileNetV3Encoder,
        "pretrained_settings": pretrained_settings["tf_mobilenetv3_small_075"],
        "params": {"model_name": "tf_mobilenetv3_small_075", "width_mult": 0.75},
    },
    "timm-mobilenetv3_small_100": {
        "encoder": MobileNetV3Encoder,
        "pretrained_settings": pretrained_settings["tf_mobilenetv3_small_100"],
        "params": {"model_name": "tf_mobilenetv3_small_100", "width_mult": 1.0},
    },
    "timm-mobilenetv3_small_minimal_100": {
        "encoder": MobileNetV3Encoder,
        "pretrained_settings": pretrained_settings["tf_mobilenetv3_small_minimal_100"],
        "params": {"model_name": "tf_mobilenetv3_small_minimal_100", "width_mult": 1.0},
    },
}
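

# Minimal smoke-test sketch (assumptions: run as a module inside its package so
# the relative `._base` import resolves, and randomly initialized weights are
# acceptable; nothing below loads the pretrained URLs defined above):
if __name__ == "__main__":
    import torch

    params = timm_mobilenetv3_encoders["timm-mobilenetv3_small_100"]["params"]
    encoder = MobileNetV3Encoder(depth=5, **params)
    with torch.no_grad():
        features = encoder(torch.randn(1, 3, 224, 224))
    # Expect six maps: the input plus five stages, strides 1, 2, 4, 8, 16, 32.
    for feature in features:
        print(tuple(feature.shape))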