# app.py
from fastapi import FastAPI, UploadFile, File
from fastapi.responses import JSONResponse
import uvicorn
import torch
from PIL import Image
import torchvision.transforms as transforms
import timm
from pim_module import PluginMoodel
import cv2
import copy
import numpy as np
import os

app = FastAPI()

# === Classes
classes_list = [
    "Ferrage_et_accessoires_ANTI_FAUSSE_MANOEUVRE",
    "Ferrage_et_accessoires_Busettes",
    "Ferrage_et_accessoires_Butees",
    "Ferrage_et_accessoires_Chariots",
    "Ferrage_et_accessoires_Charniere",
    "Ferrage_et_accessoires_Compas_limiteur",
    "Ferrage_et_accessoires_Renvois_d'angle",
    "Joints_et_consommables_Equerres_aluminium_moulees",
    "Joints_et_consommables_Joints_a_clipser",
    "Joints_et_consommables_Joints_a_coller",
    "Joints_et_consommables_Joints_a_glisser",
    "Joints_et_consommables_Joints_EPDM",
    "Joints_et_consommables_Joints_PVC_aluminium",
    "Joints_et_consommables_Silicone_pour_vitrage_alu",
    "Joints_et_consommables_Visserie_inox_alu",
    "Poignee_carre_7_mm",
    "Poignee_carre_8_mm",
    "Poignee_cremone",
    "Poignee_cuvette",
    "Poignee_de_tirage",
    "Poignee_pour_Levant_Coulissant",
    "Serrure_Cremone_multipoints",
    "Serrure_Cuvette",
    "Serrure_Gaches",
    "Serrure_Pene_Crochet",
    "Serrure_pour_Porte",
    "Serrure_Tringles",
]

# === Model hyperparameters
data_size = 384
fpn_size = 1536
num_classes = 27
num_selects = {'layer1': 256, 'layer2': 128, 'layer3': 64, 'layer4': 32}

# Buffers filled by the forward/backward hooks registered in build_model().
module_id_mapper, features, grads = {}, {}, {}


def forward_hook(module, inp_hs, out_hs):
    """Record the input/output activations of each hooked module."""
    layer_id = len(features) + 1
    module_id_mapper[module] = layer_id
    features[layer_id] = {"in": inp_hs, "out": out_hs}


def backward_hook(module, inp_grad, out_grad):
    """Record the input/output gradients of each hooked module."""
    layer_id = module_id_mapper[module]
    grads[layer_id] = {"in": inp_grad, "out": out_grad}


def build_model(path: str):
    """Build the Swin-L backbone with the PIM plugin and load the fine-tuned checkpoint."""
    backbone = timm.create_model('swin_large_patch4_window12_384_in22k', pretrained=True)
    model = PluginMoodel(
        backbone=backbone,
        return_nodes=None,
        img_size=data_size,
        use_fpn=True,
        fpn_size=fpn_size,
        proj_type="Linear",
        upsample_type="Conv",
        use_selection=True,
        num_classes=num_classes,
        num_selects=num_selects,
        use_combiner=True,
        comb_proj_size=None
    )
    ckpt = torch.load(path, map_location="cpu")
    model.load_state_dict(ckpt["model"], strict=False)
    model.eval()

    # Hook the four Swin stages and the FPN projection layers so that
    # activations (and gradients, if a backward pass is ever run) are captured.
    for layer in [0, 1, 2, 3]:
        model.backbone.layers[layer].register_forward_hook(forward_hook)
        model.backbone.layers[layer].register_full_backward_hook(backward_hook)
    for i in range(1, 5):
        getattr(model.fpn_down, f'Proj_layer{i}').register_forward_hook(forward_hook)
        getattr(model.fpn_down, f'Proj_layer{i}').register_full_backward_hook(backward_hook)
        getattr(model.fpn_up, f'Proj_layer{i}').register_forward_hook(forward_hook)
        getattr(model.fpn_up, f'Proj_layer{i}').register_full_backward_hook(backward_hook)
    return model


class ImgLoader:
    """Load an image from disk and turn it into a normalized input batch for the model."""

    def __init__(self, img_size):
        self.transform = transforms.Compose([
            transforms.Resize((510, 510), Image.BILINEAR),
            transforms.CenterCrop((img_size, img_size)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])

    def load(self, path):
        ori_img = cv2.imread(path)                # BGR, HxWxC
        img = copy.deepcopy(ori_img[:, :, ::-1])  # convert BGR -> RGB
        img = Image.fromarray(img)
        return self.transform(img).unsqueeze(0)   # add batch dimension


def cal_backward(out):
    """Fuse the per-layer predictions into one score vector and return the top-5 classes."""
    target_layer_names = ['layer1', 'layer2', 'layer3', 'layer4',
                          'FPN1_layer1', 'FPN1_layer2', 'FPN1_layer3', 'FPN1_layer4',
                          'comb_outs']

    # Average each layer's logits over its tokens, softmax them, and
    # accumulate the softmaxed scores across all target layers.
    sum_out = None
    for name in target_layer_names:
        tmp_out = out[name].mean(1) if name != "comb_outs" else out[name]
        tmp_out = torch.softmax(tmp_out, dim=-1)
        sum_out = tmp_out if sum_out is None else sum_out + tmp_out

    with torch.no_grad():
        smax = torch.softmax(sum_out, dim=-1)
        # Build A = repmat(smax)^T - I, take the right singular vector spanning its
        # null space, then log-transform and normalize it into a score vector.
        A = np.transpose(np.tile(smax[0].cpu().numpy(), (num_classes, 1))) - np.eye(num_classes)
        _, _, V = np.linalg.svd(A, full_matrices=True)
        V = V[num_classes - 1, :]
        if V[0] < 0:
            V = -V
        V = np.log(V)
        V = V - min(V)
        V = V / sum(V)

        top5 = np.argsort(-V)[:5]
        accs = -np.sort(-V)[:5]
        return [f"{classes_list[int(cls)]}: {acc*100:.2f}%" for cls, acc in zip(top5, accs)]


# === Load the model at startup
model = build_model("weights.pt")
img_loader = ImgLoader(data_size)


@app.post("/predict")
async def predict(file: UploadFile = File(...)):
    # Reset the hook buffers so each request starts from a clean state.
    global features, grads, module_id_mapper
    features, grads, module_id_mapper = {}, {}, {}

    # Persist the upload to a temporary file so it can be read with OpenCV;
    # basename() strips any path components from the client-supplied filename.
    file_path = os.path.join("/tmp", os.path.basename(file.filename))
    with open(file_path, "wb") as buffer:
        buffer.write(await file.read())

    img_tensor = img_loader.load(file_path)
    out = model(img_tensor)
    result = cal_backward(out)
    return JSONResponse(content=result)
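

# uvicorn is imported above but the original file never starts the server; a minimal
# entrypoint is sketched below. The host and port are assumptions, not values taken
# from the original deployment.
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)

# Example request against a local run (assumed filename and port):
#   curl -X POST -F "file=@part.jpg" http://localhost:8000/predict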