pepitolechevalier committed on
Commit 596eb71 · verified · 1 Parent(s): a85f5e3

Upload 3 files

Files changed (3)
  1. app.py +153 -0
  2. pim_module.py +566 -0
  3. requirements.txt +8 -0
app.py ADDED
@@ -0,0 +1,153 @@
# app.py
from fastapi import FastAPI, UploadFile, File
from fastapi.responses import JSONResponse
import uvicorn
import torch
from PIL import Image
import torchvision.transforms as transforms
import timm
from pim_module import PluginMoodel
import cv2
import copy
import numpy as np
import numpy.matlib

app = FastAPI()

# === Classes
classes_list = [
    "Ferrage_et_accessoires_ANTI_FAUSSE_MANOEUVRE",
    "Ferrage_et_accessoires_Busettes",
    "Ferrage_et_accessoires_Butees",
    "Ferrage_et_accessoires_Chariots",
    "Ferrage_et_accessoires_Charniere",
    "Ferrage_et_accessoires_Compas_limiteur",
    "Ferrage_et_accessoires_Renvois_d'angle",
    "Joints_et_consommables_Equerres_aluminium_moulees",
    "Joints_et_consommables_Joints_a_clipser",
    "Joints_et_consommables_Joints_a_coller",
    "Joints_et_consommables_Joints_a_glisser",
    "Joints_et_consommables_Joints_EPDM",
    "Joints_et_consommables_Joints_PVC_aluminium",
    "Joints_et_consommables_Silicone_pour_vitrage_alu",
    "Joints_et_consommables_Visserie_inox_alu",
    "Poignee_carre_7_mm",
    "Poignee_carre_8_mm",
    "Poignee_cremone",
    "Poignee_cuvette",
    "Poignee_de_tirage",
    "Poignee_pour_Levant_Coulissant",
    "Serrure_Cremone_multipoints",
    "Serrure_Cuvette",
    "Serrure_Gaches",
    "Serrure_Pene_Crochet",
    "Serrure_pour_Porte",
    "Serrure_Tringles",
]

data_size = 384
fpn_size = 1536
num_classes = 27
num_selects = {'layer1': 256, 'layer2': 128, 'layer3': 64, 'layer4': 32}

module_id_mapper, features, grads = {}, {}, {}

def forward_hook(module, inp_hs, out_hs):
    layer_id = len(features) + 1
    module_id_mapper[module] = layer_id
    features[layer_id] = {"in": inp_hs, "out": out_hs}

def backward_hook(module, inp_grad, out_grad):
    layer_id = module_id_mapper[module]
    grads[layer_id] = {"in": inp_grad, "out": out_grad}

def build_model(path: str):
    backbone = timm.create_model('swin_large_patch4_window12_384_in22k', pretrained=True)
    model = PluginMoodel(
        backbone=backbone,
        return_nodes=None,
        img_size=data_size,
        use_fpn=True,
        fpn_size=fpn_size,
        proj_type="Linear",
        upsample_type="Conv",
        use_selection=True,
        num_classes=num_classes,
        num_selects=num_selects,
        use_combiner=True,
        comb_proj_size=None
    )
    ckpt = torch.load(path, map_location="cpu")
    model.load_state_dict(ckpt["model"], strict=False)
    model.eval()

    # Register hooks on the backbone stages and FPN projections; they populate
    # the global features/grads dicts for inspection (the /predict route itself
    # only uses the returned logits).
    for layer in [0, 1, 2, 3]:
        model.backbone.layers[layer].register_forward_hook(forward_hook)
        model.backbone.layers[layer].register_full_backward_hook(backward_hook)

    for i in range(1, 5):
        getattr(model.fpn_down, f'Proj_layer{i}').register_forward_hook(forward_hook)
        getattr(model.fpn_down, f'Proj_layer{i}').register_full_backward_hook(backward_hook)
        getattr(model.fpn_up, f'Proj_layer{i}').register_forward_hook(forward_hook)
        getattr(model.fpn_up, f'Proj_layer{i}').register_full_backward_hook(backward_hook)

    return model

class ImgLoader:
    def __init__(self, img_size):
        self.transform = transforms.Compose([
            transforms.Resize((510, 510), Image.BILINEAR),
            transforms.CenterCrop((img_size, img_size)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])

    def load(self, path):
        ori_img = cv2.imread(path)
        img = copy.deepcopy(ori_img[:, :, ::-1])  # BGR -> RGB
        img = Image.fromarray(img)
        return self.transform(img).unsqueeze(0)

def cal_backward(out):
    """Fuse the per-layer logits and return the top-5 'class: confidence' strings."""
    target_layer_names = ['layer1', 'layer2', 'layer3', 'layer4',
                          'FPN1_layer1', 'FPN1_layer2', 'FPN1_layer3', 'FPN1_layer4', 'comb_outs']
    sum_out = None
    for name in target_layer_names:
        tmp_out = out[name].mean(1) if name != "comb_outs" else out[name]
        tmp_out = torch.softmax(tmp_out, dim=-1)
        sum_out = tmp_out if sum_out is None else sum_out + tmp_out

    with torch.no_grad():
        smax = torch.softmax(sum_out, dim=-1)
        A = np.transpose(np.matlib.repmat(smax[0], num_classes, 1)) - np.eye(num_classes)
        _, _, V = np.linalg.svd(A, full_matrices=True)
        V = V[num_classes - 1, :]
        if V[0] < 0:
            V = -V
        V = np.log(V)
        V = V - min(V)
        V = V / sum(V)

        top5 = np.argsort(-V)[:5]
        accs = -np.sort(-V)[:5]
        return [f"{classes_list[int(cls)]}: {acc*100:.2f}%" for cls, acc in zip(top5, accs)]

# === Load the model at startup
model = build_model("weights.pt")
img_loader = ImgLoader(data_size)

@app.post("/predict")
async def predict(file: UploadFile = File(...)):
    global features, grads, module_id_mapper
    features, grads, module_id_mapper = {}, {}, {}

    file_path = f"/tmp/{file.filename}"
    with open(file_path, "wb") as buffer:
        buffer.write(await file.read())

    img_tensor = img_loader.load(file_path)
    out = model(img_tensor)
    result = cal_backward(out)
    return JSONResponse(content=result)
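Editor's note: app.py imports uvicorn but never starts the server itself, so it relies on an external launcher (e.g. the hosting platform running `uvicorn app:app`). A minimal self-contained entry point could look like the sketch below; the host and port values are illustrative assumptions, not part of this commit (7860 is merely the port Hugging Face Spaces conventionally expects).

# Hypothetical entry point, to be appended to app.py (not in the uploaded file):
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)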
pim_module.py ADDED
@@ -0,0 +1,566 @@
import torch
import torch.nn as nn
import torchvision.models as models
import torch.nn.functional as F
from torchvision.models.feature_extraction import get_graph_node_names
from torchvision.models.feature_extraction import create_feature_extractor
from typing import Union
import copy

class GCNCombiner(nn.Module):

    def __init__(self,
                 total_num_selects: int,
                 num_classes: int,
                 inputs: Union[dict, None] = None,
                 proj_size: Union[int, None] = None,
                 fpn_size: Union[int, None] = None):
        """
        If building the backbone without an FPN, set fpn_size to None; in that case
        'inputs' and 'proj_size' MUST be given, so that the input dimension of the
        graph convolutional network is constrained.
        """
        super(GCNCombiner, self).__init__()

        assert inputs is not None or fpn_size is not None, \
            "To build the GCN combiner, you must provide a feature dimension."

        ### auto-proj
        self.fpn_size = fpn_size
        if fpn_size is None:
            for name in inputs:
                fs_size = inputs[name].size()  # check tensor rank, not the name string
                if len(fs_size) == 4:
                    in_size = fs_size[1]
                elif len(fs_size) == 3:
                    in_size = fs_size[2]
                else:
                    raise ValueError("The output dimension of the previous layer must be 3 or 4.")
                m = nn.Sequential(
                    nn.Linear(in_size, proj_size),
                    nn.ReLU(),
                    nn.Linear(proj_size, proj_size)
                )
                self.add_module("proj_"+name, m)
            self.proj_size = proj_size
        else:
            self.proj_size = fpn_size

        ### build one-layer structure (with adaptive module)
        num_joints = total_num_selects // 64

        self.param_pool0 = nn.Linear(total_num_selects, num_joints)

        A = torch.eye(num_joints) / 100 + 1 / 100
        self.adj1 = nn.Parameter(copy.deepcopy(A))
        self.conv1 = nn.Conv1d(self.proj_size, self.proj_size, 1)
        self.batch_norm1 = nn.BatchNorm1d(self.proj_size)

        self.conv_q1 = nn.Conv1d(self.proj_size, self.proj_size//4, 1)
        self.conv_k1 = nn.Conv1d(self.proj_size, self.proj_size//4, 1)
        self.alpha1 = nn.Parameter(torch.zeros(1))

        ### merge information
        self.param_pool1 = nn.Linear(num_joints, 1)

        ### class prediction
        self.dropout = nn.Dropout(p=0.1)
        self.classifier = nn.Linear(self.proj_size, num_classes)

        self.tanh = nn.Tanh()

    def forward(self, x):
        hs = []
        names = []
        for name in x:
            if "FPN1_" in name:
                continue
            if self.fpn_size is None:
                _tmp = getattr(self, "proj_"+name)(x[name])
            else:
                _tmp = x[name]
            hs.append(_tmp)
            names.append([name, _tmp.size()])

        hs = torch.cat(hs, dim=1).transpose(1, 2).contiguous()  # B, S', C --> B, C, S
        hs = self.param_pool0(hs)
        ### adaptive adjacency
        q1 = self.conv_q1(hs).mean(1)
        k1 = self.conv_k1(hs).mean(1)
        A1 = self.tanh(q1.unsqueeze(-1) - k1.unsqueeze(1))
        A1 = self.adj1 + A1 * self.alpha1
        ### graph convolution
        hs = self.conv1(hs)
        hs = torch.matmul(hs, A1)
        hs = self.batch_norm1(hs)
        ### predict
        hs = self.param_pool1(hs)
        hs = self.dropout(hs)
        hs = hs.flatten(1)
        hs = self.classifier(hs)

        return hs

class WeaklySelector(nn.Module):

    def __init__(self, inputs: dict, num_classes: int, num_select: dict, fpn_size: Union[int, None] = None):
        """
        inputs: dictionary of torch.Tensors coming from the backbone,
            [Tensor1 (hidden feature 1), Tensor2 (hidden feature 2), ...]
        Note that if len(feature.size()) equals 3, the dimension order must be [B, S, C]
        (S is the spatial domain), and if it equals 4, the order must be [B, C, H, W].
        """
        super(WeaklySelector, self).__init__()

        self.num_select = num_select

        self.fpn_size = fpn_size
        ### build classifier
        if self.fpn_size is None:
            self.num_classes = num_classes
            for name in inputs:
                fs_size = inputs[name].size()
                if len(fs_size) == 3:
                    in_size = fs_size[2]
                elif len(fs_size) == 4:
                    in_size = fs_size[1]
                m = nn.Linear(in_size, num_classes)
                self.add_module("classifier_l_"+name, m)

        self.thresholds = {}
        for name in inputs:
            self.thresholds[name] = []

    # def select(self, logits, l_name):
    #     """
    #     logits: [B, S, num_classes]
    #     """
    #     probs = torch.softmax(logits, dim=-1)
    #     scores, _ = torch.max(probs, dim=-1)
    #     _, ids = torch.sort(scores, -1, descending=True)
    #     sn = self.num_select[l_name]
    #     s_ids = ids[:, :sn]
    #     not_s_ids = ids[:, sn:]
    #     return s_ids.unsqueeze(-1), not_s_ids.unsqueeze(-1)

    def forward(self, x, logits=None):
        """
        x :
            dictionary containing the feature maps of the chosen layers.
            Each size must be [B, HxW, C] ([B, S, C]) or [B, C, H, W];
            [B, C, H, W] is transposed to [B, HxW, C] automatically.
        """
        if self.fpn_size is None:
            logits = {}
        selections = {}
        for name in x:
            if "FPN1_" in name:
                continue
            if len(x[name].size()) == 4:
                B, C, H, W = x[name].size()
                x[name] = x[name].view(B, C, H*W).permute(0, 2, 1).contiguous()
            C = x[name].size(-1)
            if self.fpn_size is None:
                logits[name] = getattr(self, "classifier_l_"+name)(x[name])

            probs = torch.softmax(logits[name], dim=-1)
            sum_probs = torch.softmax(logits[name].mean(1), dim=-1)
            selections[name] = []
            preds_1 = []
            preds_0 = []
            num_select = self.num_select[name]
            for bi in range(logits[name].size(0)):
                _, max_ids = torch.max(sum_probs[bi], dim=-1)
                confs, ranks = torch.sort(probs[bi, :, max_ids], descending=True)
                sf = x[name][bi][ranks[:num_select]]
                nf = x[name][bi][ranks[num_select:]]  # features that were not selected
                selections[name].append(sf)  # [num_selected, C]
                preds_1.append(logits[name][bi][ranks[:num_select]])
                preds_0.append(logits[name][bi][ranks[num_select:]])

                if bi >= len(self.thresholds[name]):
                    self.thresholds[name].append(confs[num_select])  # for initialization
                else:
                    self.thresholds[name][bi] = confs[num_select]

            selections[name] = torch.stack(selections[name])
            preds_1 = torch.stack(preds_1)
            preds_0 = torch.stack(preds_0)

            logits["select_"+name] = preds_1
            logits["drop_"+name] = preds_0

        return selections


class FPN(nn.Module):

    def __init__(self, inputs: dict, fpn_size: int, proj_type: str, upsample_type: str):
        """
        inputs : dictionary of torch.Tensors from the backbone output
        fpn_size: integer, FPN projection dimension
        proj_type:
            one of ["Conv", "Linear"]
        upsample_type:
            one of ["Bilinear", "Conv"]
            For convolutional networks (e.g. ResNet, EfficientNet), 'Bilinear' is
            recommended; for Swin Transformer, "Conv". ("Fc" for ViT is mentioned
            upstream but is not implemented here.)
        """
        super(FPN, self).__init__()
        assert proj_type in ["Conv", "Linear"], \
            "FPN projection type '{}' is not supported yet; please choose 'Conv' or 'Linear'.".format(proj_type)
        assert upsample_type in ["Bilinear", "Conv"], \
            "FPN upsample type '{}' is not supported yet; please choose 'Bilinear' or 'Conv'.".format(upsample_type)

        self.fpn_size = fpn_size
        self.upsample_type = upsample_type
        inp_names = [name for name in inputs]

        for i, node_name in enumerate(inputs):
            ### projection module
            if proj_type == "Conv":
                m = nn.Sequential(
                    nn.Conv2d(inputs[node_name].size(1), inputs[node_name].size(1), 1),
                    nn.ReLU(),
                    nn.Conv2d(inputs[node_name].size(1), fpn_size, 1)
                )
            elif proj_type == "Linear":
                m = nn.Sequential(
                    nn.Linear(inputs[node_name].size(-1), inputs[node_name].size(-1)),
                    nn.ReLU(),
                    nn.Linear(inputs[node_name].size(-1), fpn_size),
                )
            self.add_module("Proj_"+node_name, m)

            ### upsample module
            if upsample_type == "Conv" and i != 0:
                assert len(inputs[node_name].size()) == 3  # B, S, C
                in_dim = inputs[node_name].size(1)
                out_dim = inputs[inp_names[i-1]].size(1)
                m = nn.Conv1d(in_dim, out_dim, 1)  # upsample in the spatial domain
                self.add_module("Up_"+node_name, m)

        if upsample_type == "Bilinear":
            self.upsample = nn.Upsample(scale_factor=2, mode='bilinear')

    def upsample_add(self, x0: torch.Tensor, x1: torch.Tensor, x1_name: str):
        """
        return Upsample(x1) + x0
        """
        if self.upsample_type == "Bilinear":
            if x1.size(-1) != x0.size(-1):
                x1 = self.upsample(x1)
        else:
            x1 = getattr(self, "Up_"+x1_name)(x1)
        return x1 + x0

    def forward(self, x):
        """
        x : dictionary
            {
                "node_name1": feature1,
                "node_name2": feature2, ...
            }
        """
        ### project to the same dimension
        hs = []
        for i, name in enumerate(x):
            if "FPN1_" in name:
                continue
            x[name] = getattr(self, "Proj_"+name)(x[name])
            hs.append(name)

        x["FPN1_" + "layer4"] = x["layer4"]

        for i in range(len(hs)-1, 0, -1):
            x1_name = hs[i]
            x0_name = hs[i-1]
            x[x0_name] = self.upsample_add(x[x0_name],
                                           x[x1_name],
                                           x1_name)
            x["FPN1_" + x0_name] = x[x0_name]

        return x


class FPN_UP(nn.Module):

    def __init__(self,
                 inputs: dict,
                 fpn_size: int):
        super(FPN_UP, self).__init__()

        inp_names = [name for name in inputs]

        for i, node_name in enumerate(inputs):
            ### projection module
            m = nn.Sequential(
                nn.Linear(fpn_size, fpn_size),
                nn.ReLU(),
                nn.Linear(fpn_size, fpn_size),
            )
            self.add_module("Proj_"+node_name, m)

            ### downsample module
            if i != (len(inputs) - 1):
                assert len(inputs[node_name].size()) == 3  # B, S, C
                in_dim = inputs[node_name].size(1)
                out_dim = inputs[inp_names[i+1]].size(1)
                m = nn.Conv1d(in_dim, out_dim, 1)  # downsample in the spatial domain
                self.add_module("Down_"+node_name, m)
                # e.g. for Swin-L at 384x384:
                #   Down_layer1 2304 576
                #   Down_layer2 576 144
                #   Down_layer3 144 144

    def downsample_add(self, x0: torch.Tensor, x1: torch.Tensor, x0_name: str):
        """
        return Downsample(x0) + x1
        """
        x0 = getattr(self, "Down_" + x0_name)(x0)
        return x1 + x0

    def forward(self, x):
        """
        x : dictionary
            {
                "node_name1": feature1,
                "node_name2": feature2, ...
            }
        """
        ### project to the same dimension
        hs = []
        for i, name in enumerate(x):
            if "FPN1_" in name:
                continue
            x[name] = getattr(self, "Proj_"+name)(x[name])
            hs.append(name)

        for i in range(0, len(hs) - 1):
            x0_name = hs[i]
            x1_name = hs[i+1]
            x[x1_name] = self.downsample_add(x[x0_name],
                                             x[x1_name],
                                             x0_name)
        return x


class PluginMoodel(nn.Module):

    def __init__(self,
                 backbone: torch.nn.Module,
                 return_nodes: Union[dict, None],
                 img_size: int,
                 use_fpn: bool,
                 fpn_size: Union[int, None],
                 proj_type: str,
                 upsample_type: str,
                 use_selection: bool,
                 num_classes: int,
                 num_selects: dict,
                 use_combiner: bool,
                 comb_proj_size: Union[int, None]
                 ):
        """
        * backbone:
            torch.nn.Module (pretraining on ImageNet or IG-3.5B-17k, provided by FAIR, is recommended)
        * return_nodes:
            e.g.
            return_nodes = {
                # node_name: user-specified key for the output dict
                'layer1.2.relu_2': 'layer1',
                'layer2.3.relu_2': 'layer2',
                'layer3.5.relu_2': 'layer3',
                'layer4.2.relu_2': 'layer4',
            } # see the example at https://pytorch.org/vision/main/feature_extraction.html
            !!! if using 'Swin-Transformer', please set return_nodes to None
            !!! and please set use_fpn to True
        * feat_sizes:
            tuple or list containing the feature-map size of each layer,
            ((C, H, W)), e.g. ((1024, 14, 14), (2048, 7, 7))
        * use_fpn:
            boolean, whether to use a feature pyramid network
        * fpn_size:
            integer, feature pyramid network projection dimension
        * num_selects:
            num_selects = {
                # keys match the user-specified names in return_nodes
                "layer1": 2048,
                "layer2": 512,
                "layer3": 128,
                "layer4": 32,
            }
        Note: after the selector module (WeaklySelector), the feature-map size is [B, S', C],
        stored in the 'logits' or 'selections' dictionary (S' is the number of selections,
        which can differ between layers).
        """
        super(PluginMoodel, self).__init__()

        ### = = = = = Backbone = = = = =
        self.return_nodes = return_nodes
        if return_nodes is not None:
            self.backbone = create_feature_extractor(backbone, return_nodes=return_nodes)
        else:
            self.backbone = backbone

        ### get hidden feature sizes
        rand_in = torch.randn(1, 3, img_size, img_size)
        outs = self.backbone(rand_in)

        ### just the original backbone
        if not use_fpn and (not use_selection and not use_combiner):
            for name in outs:
                fs_size = outs[name].size()
                if len(fs_size) == 3:
                    out_size = fs_size[-1]
                elif len(fs_size) == 4:
                    out_size = fs_size[1]
                else:
                    raise ValueError("The output dimension of the previous layer must be 3 or 4.")
            self.classifier = nn.Linear(out_size, num_classes)

        ### = = = = = FPN = = = = =
        self.use_fpn = use_fpn
        if self.use_fpn:
            self.fpn_down = FPN(outs, fpn_size, proj_type, upsample_type)
            self.build_fpn_classifier_down(outs, fpn_size, num_classes)
            self.fpn_up = FPN_UP(outs, fpn_size)
            self.build_fpn_classifier_up(outs, fpn_size, num_classes)

        self.fpn_size = fpn_size

        ### = = = = = Selector = = = = =
        self.use_selection = use_selection
        if self.use_selection:
            w_fpn_size = self.fpn_size if self.use_fpn else None  # if not using FPN, build the classifiers inside the weakly selector
            self.selector = WeaklySelector(outs, num_classes, num_selects, w_fpn_size)

        ### = = = = = Combiner = = = = =
        self.use_combiner = use_combiner
        if self.use_combiner:
            assert self.use_selection, "Please use the selection module before the combiner"
            if self.use_fpn:
                gcn_inputs, gcn_proj_size = None, None
            else:
                gcn_inputs, gcn_proj_size = outs, comb_proj_size  # redundant, to fix in the future
            total_num_selects = sum([num_selects[name] for name in num_selects])
            self.combiner = GCNCombiner(total_num_selects, num_classes, gcn_inputs, gcn_proj_size, self.fpn_size)

    def build_fpn_classifier_up(self, inputs: dict, fpn_size: int, num_classes: int):
        """
        The results of our experiments show that a linear classifier in this case may cause some problems.
        """
        for name in inputs:
            m = nn.Sequential(
                nn.Conv1d(fpn_size, fpn_size, 1),
                nn.BatchNorm1d(fpn_size),
                nn.ReLU(),
                nn.Conv1d(fpn_size, num_classes, 1)
            )
            self.add_module("fpn_classifier_up_"+name, m)

    def build_fpn_classifier_down(self, inputs: dict, fpn_size: int, num_classes: int):
        """
        The results of our experiments show that a linear classifier in this case may cause some problems.
        """
        for name in inputs:
            m = nn.Sequential(
                nn.Conv1d(fpn_size, fpn_size, 1),
                nn.BatchNorm1d(fpn_size),
                nn.ReLU(),
                nn.Conv1d(fpn_size, num_classes, 1)
            )
            self.add_module("fpn_classifier_down_" + name, m)

    def forward_backbone(self, x):
        return self.backbone(x)

    def fpn_predict_down(self, x: dict, logits: dict):
        """
        x: [B, C, H, W] or [B, S, C]
           [B, C, H, W] --> [B, H*W, C]
        """
        for name in x:
            if "FPN1_" not in name:
                continue
            ### predict on each feature point
            if len(x[name].size()) == 4:
                B, C, H, W = x[name].size()
                logit = x[name].view(B, C, H*W)
            elif len(x[name].size()) == 3:
                logit = x[name].transpose(1, 2).contiguous()
            model_name = name.replace("FPN1_", "")
            logits[name] = getattr(self, "fpn_classifier_down_" + model_name)(logit)
            logits[name] = logits[name].transpose(1, 2).contiguous()  # B, num_classes, S --> B, S, num_classes

    def fpn_predict_up(self, x: dict, logits: dict):
        """
        x: [B, C, H, W] or [B, S, C]
           [B, C, H, W] --> [B, H*W, C]
        """
        for name in x:
            if "FPN1_" in name:
                continue
            ### predict on each feature point
            if len(x[name].size()) == 4:
                B, C, H, W = x[name].size()
                logit = x[name].view(B, C, H*W)
            elif len(x[name].size()) == 3:
                logit = x[name].transpose(1, 2).contiguous()
            model_name = name.replace("FPN1_", "")
            logits[name] = getattr(self, "fpn_classifier_up_" + model_name)(logit)
            logits[name] = logits[name].transpose(1, 2).contiguous()  # B, num_classes, S --> B, S, num_classes

    def forward(self, x: torch.Tensor):

        logits = {}

        x = self.forward_backbone(x)

        if self.use_fpn:
            x = self.fpn_down(x)
            self.fpn_predict_down(x, logits)
            x = self.fpn_up(x)
            self.fpn_predict_up(x, logits)

        if self.use_selection:
            selects = self.selector(x, logits)

        if self.use_combiner:
            comb_outs = self.combiner(selects)
            logits['comb_outs'] = comb_outs
            return logits

        if self.use_selection or self.use_fpn:
            return logits

        ### original backbone (only predict on the final selected layer)
        for name in x:
            hs = x[name]

        if len(hs.size()) == 4:
            hs = F.adaptive_avg_pool2d(hs, (1, 1))
            hs = hs.flatten(1)
        else:
            hs = hs.mean(1)
        out = self.classifier(hs)
        logits['ori_out'] = out

        return logits
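Editor's note: since the FPN_UP branch asserts 3-D [B, S, C] features (Swin-style), a CNN backbone can only exercise the selector/combiner path without the FPN. The following minimal sketch, not part of this commit, drives PluginMoodel with torchvision's resnet50 and the return_nodes example from the docstring above; the num_selects values and comb_proj_size=256 are illustrative assumptions, and with random weights the run is only a shape check.

# standalone_check.py — hypothetical usage sketch.
import torch
import torchvision.models as models
from pim_module import PluginMoodel

backbone = models.resnet50()  # random weights are enough for a shape check
model = PluginMoodel(
    backbone=backbone,
    return_nodes={
        'layer1.2.relu_2': 'layer1',
        'layer2.3.relu_2': 'layer2',
        'layer3.5.relu_2': 'layer3',
        'layer4.2.relu_2': 'layer4',
    },
    img_size=224,
    use_fpn=False,            # FPN_UP assumes [B, S, C] inputs, so skip the FPN for a CNN
    fpn_size=None,
    proj_type="Conv",         # unused when use_fpn is False
    upsample_type="Bilinear", # unused when use_fpn is False
    use_selection=True,
    num_classes=27,
    num_selects={'layer1': 2048, 'layer2': 512, 'layer3': 128, 'layer4': 32},
    use_combiner=True,
    comb_proj_size=256,       # required because fpn_size is None
)
model.eval()
with torch.no_grad():
    out = model(torch.randn(1, 3, 224, 224))
print(out['comb_outs'].shape)  # expected: torch.Size([1, 27])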
requirements.txt ADDED
@@ -0,0 +1,8 @@
fastapi
uvicorn
torch
torchvision
timm
opencv-python-headless
pillow
numpy
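Editor's note: once the app is running, the /predict endpoint can be exercised with a small multipart request. A sketch in Python follows; it assumes the server is reachable on localhost:7860 (adjust to wherever the app is served) and that the requests package is installed, which is not listed in requirements.txt.

# request_example.py — hypothetical client, not part of this commit.
import requests

with open("sample.jpg", "rb") as f:
    resp = requests.post(
        "http://localhost:7860/predict",
        files={"file": ("sample.jpg", f, "image/jpeg")},
    )
resp.raise_for_status()
print(resp.json())  # a list of five 'class_name: confidence%' strings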