import gradio as gr
import timm
import torch
import torch.nn as nn

class Net2D(nn.Module):
    """EfficientNet-B6 backbone with a 5-way classification head."""

    def __init__(self, weights):
        super().__init__()
        # Backbone only: no classifier head and no pooling, so it returns feature maps.
        self.backbone = timm.create_model(
            "tf_efficientnet_b6_ns", pretrained=False, global_pool="", num_classes=0
        )
        self.pool_layer = nn.AdaptiveAvgPool2d(1)
        self.dropout = nn.Dropout(0.5)
        # EfficientNet-B6 exposes 2304 feature channels; the head predicts 5 severity grades.
        self.linear = nn.Linear(2304, 5)
        self.load_state_dict(weights)

    def forward(self, x):
        x = self.backbone(x)
        x = self.pool_layer(x).view(x.size(0), -1)  # global average pool, then flatten
        x = self.dropout(x)
        x = self.linear(x)
        # Squeeze the class dimension for single-logit heads; with 5 classes this returns x unchanged.
        return x[:, 0] if x.size(1) == 1 else x

def rescale(x):
    # Map 8-bit pixel values from [0, 255] to [-1, 1].
    x = x / 255.0
    x = x - 0.5
    x = x * 2.0
    return x

weights = torch.load("model0.ckpt", map_location=torch.device("cpu"))["state_dict"]
# Strip the "model." prefix left by the training wrapper so keys match Net2D's attributes.
weights = {k.replace("model.", ""): v for k, v in weights.items()}
model = Net2D(weights).eval()

def predict(input_image):
    # input_image arrives from gr.Image as an (H, W, 3) uint8 NumPy array.
    img = torch.from_numpy(input_image)
    img = img.permute(2, 0, 1)  # HWC -> CHW
    img = img.unsqueeze(0)      # add batch dimension
    img = rescale(img)
    with torch.no_grad():
        grade = torch.softmax(model(img.float()), dim=1)[0]
    cats = ["None", "Mild", "Moderate", "Severe", "Proliferative"]
    # gr.Label expects a dict mapping class names to confidences.
    return {cat: value.item() for cat, value in zip(cats, grade)}

# Gradio 3.x: `shape` resizes uploaded images to 512x512 before they reach predict.
image = gr.Image(shape=(512, 512), image_mode="RGB")
label = gr.Label(label="Grade")

demo = gr.Interface(
    fn=predict,
    inputs=image,
    outputs=label,
    examples=["examples/0.png", "examples/1.png", "examples/2.png", "examples/3.png", "examples/4.png"],
)

if __name__ == "__main__":
    demo.launch(debug=True)
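
For a quick local check without launching the UI, `predict` can be called directly on one of the bundled example images. A minimal sketch, assuming `model0.ckpt` and `examples/0.png` sit next to the script (both paths are taken from the code above); the printed dict is exactly what `gr.Label` would display:

import numpy as np
from PIL import Image

# Load an example image and resize it to the 512x512 the Interface would deliver.
sample = Image.open("examples/0.png").convert("RGB").resize((512, 512))
scores = predict(np.asarray(sample))
print(scores)  # dict mapping "None"/"Mild"/"Moderate"/"Severe"/"Proliferative" to probabilities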