import gradio as gr
from torchvision import transforms
import torch
import torch.nn as nn

# Use the GPU if one is available, otherwise fall back to CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)

class ResidualBlock(nn.Module):
    """Two 3x3 convolutions with instance norm and a skip connection."""
    def __init__(self, channels):
        super(ResidualBlock, self).__init__()
        self.block = nn.Sequential(
            nn.Conv2d(channels, channels, kernel_size=3, stride=1, padding=1, bias=False),
            nn.InstanceNorm2d(channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(channels, channels, kernel_size=3, stride=1, padding=1, bias=False),
            nn.InstanceNorm2d(channels)
        )

    def forward(self, x):
        return x + self.block(x)

class StrongGenerator(nn.Module):
    """CycleGAN-style generator: initial 7x7 conv, two downsampling stages,
    a stack of residual blocks, two upsampling stages, and a 7x7 output conv."""
    def __init__(self, num_residual_blocks=6):
        super(StrongGenerator, self).__init__()
        # Initial convolution block
        model = [
            nn.Conv2d(3, 64, kernel_size=7, stride=1, padding=3, bias=False),
            nn.InstanceNorm2d(64),
            nn.ReLU(inplace=True)
        ]
        # Downsampling: 64 -> 128 -> 256 channels, halving the spatial size each time
        in_channels = 64
        for _ in range(2):
            out_channels = in_channels * 2
            model += [
                nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=2, padding=1, bias=False),
                nn.InstanceNorm2d(out_channels),
                nn.ReLU(inplace=True)
            ]
            in_channels = out_channels
        # Residual blocks at the bottleneck
        for _ in range(num_residual_blocks):
            model += [ResidualBlock(in_channels)]
        # Upsampling: 256 -> 128 -> 64 channels, doubling the spatial size each time
        for _ in range(2):
            out_channels = in_channels // 2
            model += [
                nn.ConvTranspose2d(in_channels, out_channels, kernel_size=3, stride=2, padding=1, output_padding=1, bias=False),
                nn.InstanceNorm2d(out_channels),
                nn.ReLU(inplace=True)
            ]
            in_channels = out_channels
        # Output layer maps back to 3 channels; Tanh keeps values in [-1, 1]
        model += [
            nn.Conv2d(in_channels, 3, kernel_size=7, stride=1, padding=3),
            nn.Tanh()
        ]
        self.model = nn.Sequential(*model)

    def forward(self, x):
        return self.model(x)

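# Illustrative shape check (not part of the original app): the two stride-2
# downsamples are undone by the two stride-2 transposed convolutions, so a
# 1x3x256x256 input should come back as 1x3x256x256. Uncomment to verify locally:
#
#   with torch.no_grad():
#       dummy = torch.randn(1, 3, 256, 256)
#       print(StrongGenerator()(dummy).shape)  # expected: torch.Size([1, 3, 256, 256])
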
# Load the trained generator weights and switch to inference mode
generator = StrongGenerator().to(device)
generator.load_state_dict(torch.load("./generator_epoch_10.pth", map_location=device))
generator.eval()

def restore_image(mosaic_image):
    # Preprocess: resize to 256x256, convert to a tensor, and normalize to [-1, 1]
    # (matching the Tanh output range of the generator)
    transform_in = transforms.Compose([
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    # Force RGB in case the upload carries an alpha channel, then add a batch dimension
    input_tensor = transform_in(mosaic_image.convert("RGB")).unsqueeze(0).to(device)
    with torch.no_grad():
        restored_tensor = generator(input_tensor)
    # Denormalize from [-1, 1] back to [0, 1] and convert to a PIL image
    restored_tensor = restored_tensor.squeeze(0).cpu()
    restored_tensor = (restored_tensor * 0.5 + 0.5).clamp(0, 1)
    restored_image = transforms.ToPILImage()(restored_tensor)
    return restored_image

# Gradio UI: a single image-to-image interface
iface = gr.Interface(
    fn=restore_image,
    inputs=gr.Image(type="pil"),
    outputs="image",
    title="HorrorMovieStyleGAN",
    description="After you upload an image, the model will try to make it look like a horror movie."
)
iface.launch()
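
# Optional local sanity check (a sketch, not part of the Space): run the generator on a
# file from disk without the Gradio UI. "input.jpg" and "restored.jpg" are hypothetical
# example paths. Use this instead of iface.launch(), since launch() blocks.
#
#   from PIL import Image
#   result = restore_image(Image.open("input.jpg"))
#   result.save("restored.jpg")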