Spaces: Build error

TalhaUsuf committed · 42b38a5 · 1 parent: ee384f2

added gfp gan

Files changed:
- app.py +66 -6
- models/__pycache__/gfpgan.cpython-37.pyc +0 -0
- models/gfpgan.py +19 -0
- requirements.txt +7 -6
app.py CHANGED

@@ -7,15 +7,20 @@ import torchvision.transforms as transforms
 import PIL
 from PIL import Image
 from PIL import ImageFile
+from pathlib import Path
+import shutil
 import math
 import os
 import torch.nn.functional as F
 from rich.panel import Panel
 from rich.columns import Columns
 from rich.console import Console
+from models.gfpgan import gfpgan_predict
 
 os.environ["CUDA_VISIBLE_DEVICES"] = "1"
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+
+
 model1 = my_model(en_feature_num=48,
                   en_inter_num=32,
                   de_feature_num=64,
@@ -33,7 +38,10 @@ def default_toTensor(img):
     composed_transform = transforms.Compose(t_list)
     return composed_transform(img)
 
-def
+def predict(img):
+
+
+
     in_img = transforms.ToTensor()(img).to(device).unsqueeze(0)
     b, c, h, w = in_img.size()
     # pad image such that the resolution is a multiple of 32
@@ -75,7 +83,32 @@ def img_pad(x, w_pad, h_pad, w_odd_pad, h_odd_pad):
     return y
 
 
-
+
+
+def predict_gfpgan(img):
+
+    with Console().status("[red] using [green] GFP-GAN v1.4", spinner="aesthetic"):
+        # if image already exists with this name then delete it
+        if Path("input_image_gfpgan.jpg").exists():
+            os.remove("input_image_gfpgan.jpg")
+        # save incoming PIL image to disk
+        img.save("input_image_gfpgan.jpg")
+
+        out = gfpgan_predict(img)
+        Console().print(out)
+
+    return img
+
+
+
+
+
+
+
+
+
+
+title = "image enhancement"
 description = " The model was trained to remove the moire patterns from images! Max input image resolution is \
 up to 4K resolution\
 <br /> \
@@ -85,6 +118,10 @@ It takes time to perform inference \
 article = "Reference:\n Towards Efficient and Scale-Robust Ultra-High-Definition Image Demoiréing. CVMI Lab, Nov. 21, 2022. Accessed: Nov. 21, 2022. [Online]. Available: https://github.com/CVMI-Lab/UHDM"
 
 
+# ==========================================================================
+# example images
+# ==========================================================================
+
 files = [
     '0001_01.jpg',
     '0002_01.jpg',
@@ -676,14 +713,37 @@ files = [os.path.join("n000129", k) for k in files]
 
 Console().print(Columns ([Panel.fit(f"{k}", style="red on black") for k in files]))
 
-
+
+# # --------------------------------------------------------------------------
+# # making interfaces for models
+# # --------------------------------------------------------------------------
+
+iface1 = gr.Interface(fn=predict,
                       inputs=gr.Image(type="pil"),
                       outputs=gr.Image(type="pil"),
                       examples=files,
                       title = title,
                       # description = description,
-                      article = article
+                      article = article,
+                      allow_flagging="auto"
                       )
 
-
-
+gfpgan = gr.Interface(fn=predict_gfpgan,
+                      inputs=gr.Image(type="pil"),
+                      outputs=gr.Image(type="pil"),
+                      examples=files,
+                      title = "GFP-GAN v 1.4",
+                      # description = description,
+                      article = "Practical face restoration algorithm for old photos",
+                      allow_flagging="auto"
+                      )
+
+
+
+
+# setting queues for all models
+iface1.queue(concurrency_count=3)
+gfpgan.queue(concurrency_count=3)
+
+# launching the interface in parallel model
+gr.Parallel(iface1, gfpgan).launch()
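A note on the new app.py code: predict_gfpgan saves the incoming PIL image to disk but then passes the PIL object itself, not the saved path, to gfpgan_predict (whose docstring asks for a file path), and it returns the untouched input img rather than the model output out. Replicate predict calls for image models generally return a URL to the generated file, so displaying the restored face would also need a download step. A minimal sketch of that step, assuming out is a URL string; the helper name and the requests dependency are illustrative additions, not part of the commit:

import io

import requests
from PIL import Image


def fetch_output_image(url: str) -> Image.Image:
    # Hypothetical helper (not in the commit): download the image behind the
    # URL returned by the Replicate call and decode it in memory, so that
    # predict_gfpgan could end with `return fetch_output_image(out)` instead
    # of echoing its input.
    resp = requests.get(url, timeout=60)
    resp.raise_for_status()
    return Image.open(io.BytesIO(resp.content)).convert("RGB")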
models/__pycache__/gfpgan.cpython-37.pyc ADDED

Binary file (658 Bytes).
models/gfpgan.py ADDED

@@ -0,0 +1,19 @@
+import replicate
+
+
+model = replicate.models.get("tencentarc/gfpgan")
+version = model.versions.get("9283608cc6b7be6b65a8e44983db012355fde4132009bf99d976b2f0896856a3")
+
+
+def gfpgan_predict(input_image : str):
+    '''
+    takes path to image as input and returns enhanced image
+
+    Parameters
+    ----------
+    input_image : str
+        path to the image file
+    '''
+    output = version.predict(img=input_image, version='v1.4', scale=1.0)
+
+    return output
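The module resolves the model and version at module level, so the Replicate client needs credentials as soon as models.gfpgan is imported; the client reads them from the REPLICATE_API_TOKEN environment variable. A usage sketch under that assumption (the token value and file name below are placeholders):

import os

# Placeholder: a real REPLICATE_API_TOKEN must be set before the import,
# since the model/version lookup above runs as a side effect of importing.
os.environ.setdefault("REPLICATE_API_TOKEN", "r8_xxxxxxxx")

from models.gfpgan import gfpgan_predict

# Per the docstring, the function expects a path to an image file.
result = gfpgan_predict("input_image_gfpgan.jpg")
print(result)  # typically a URL pointing at the enhanced image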
requirements.txt CHANGED

@@ -1,6 +1,7 @@
-
-
-
-
-
-
+gradio==3.11.0
+opencv_python_headless==4.6.0.66
+Pillow==9.3.0
+replicate==0.4.0
+rich==12.6.0
+torch==1.11.0
+torchvision==0.12.0
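An optional sanity check, not part of the commit, that the pinned stack imports cleanly and reports whether the CUDA build of torch can see a GPU (app.py falls back to CPU when it cannot):

import gradio
import torch
import torchvision

# Report the installed versions and whether app.py's "cuda:0" branch would run.
print("gradio", gradio.__version__)
print("torch", torch.__version__, "cuda available:", torch.cuda.is_available())
print("torchvision", torchvision.__version__)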