Create app.py #13
by twebb · opened

- app.py +4 -153
- requirements.txt +1 -1
app.py CHANGED

```diff
@@ -1,156 +1,7 @@
-# install
-
-
-import glob
 import gradio as gr
-import os
-import random
-
-import subprocess
-
-if os.getenv('SYSTEM') == 'spaces':
-    subprocess.run('pip install pyembree'.split())
-    subprocess.run('pip install rembg'.split())
-    subprocess.run(
-        'pip install torch==1.11.0+cu113 torchvision==0.12.0+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html'.split())
-    subprocess.run(
-        'pip install git+https://github.com/YuliangXiu/kaolin.git'.split())
-    subprocess.run('pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/py38_cu113_pyt1110/download.html'.split())
-    subprocess.run(
-        'pip install git+https://github.com/Project-Splinter/human_det.git'.split())
-    subprocess.run(
-        'pip install git+https://github.com/YuliangXiu/neural_voxelization_layer.git'.split())
-
-from apps.infer import generate_model
-
-# running
-
-description = '''
-# ICON Clothed Human Digitization
-### ICON: Implicit Clothed humans Obtained from Normals (CVPR 2022)
-
-<table>
-<th>
-<ul>
-<li><strong>Homepage</strong> <a href="http://icon.is.tue.mpg.de">icon.is.tue.mpg.de</a></li>
-<li><strong>Code</strong> <a href="https://github.com/YuliangXiu/ICON">YuliangXiu/ICON</a>
-<li><strong>Paper</strong> <a href="https://arxiv.org/abs/2112.09127">arXiv</a>, <a href="https://readpaper.com/paper/4569785684533977089">ReadPaper</a>
-<li><strong>Chatroom</strong> <a href="https://discord.gg/Vqa7KBGRyk">Discord</a>
-</ul>
-<a href="https://twitter.com/yuliangxiu"><img alt="Twitter Follow" src="https://img.shields.io/twitter/follow/yuliangxiu?style=social"></a>
-<iframe src="https://ghbtns.com/github-btn.html?user=yuliangxiu&repo=ICON&type=star&count=true&v=2&size=small" frameborder="0" scrolling="0" width="100" height="20"></iframe>
-<a href="https://youtu.be/hZd6AYin2DE"><img alt="YouTube Video Views" src="https://img.shields.io/youtube/views/hZd6AYin2DE?style=social"></a>
-</th>
-<th>
-<iframe width="560" height="315" src="https://www.youtube.com/embed/hZd6AYin2DE" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
-</th>
-</table>
-
-<h4> The reconstruction + refinement + video take about 80~200 seconds for single image. <span style="color:red"> If ERROR, try "Submit Image" again.</span></h4>
-
-<details>
-
-<summary>More</summary>
-
-#### Citation
-```
-@inproceedings{xiu2022icon,
-  title     = {{ICON}: {I}mplicit {C}lothed humans {O}btained from {N}ormals},
-  author    = {Xiu, Yuliang and Yang, Jinlong and Tzionas, Dimitrios and Black, Michael J.},
-  booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
-  month     = {June},
-  year      = {2022},
-  pages     = {13296-13306}
-}
-```
-
-#### Acknowledgments:
-
-- [StyleGAN-Human, ECCV 2022](https://stylegan-human.github.io/)
-- [nagolinc/styleGanHuman_and_PIFu](https://huggingface.co/spaces/nagolinc/styleGanHuman_and_PIFu)
-- [radames/PIFu-Clothed-Human-Digitization](https://huggingface.co/spaces/radames/PIFu-Clothed-Human-Digitization)
-
-#### Image Credits
-
-* [Pinterest](https://www.pinterest.com/search/pins/?q=parkour&rs=sitelinks_searchbox)
-
-#### Related works
-
-* [ICON @ MPI](https://icon.is.tue.mpg.de/)
-* [MonoPort @ USC](https://xiuyuliang.cn/monoport)
-* [Phorhum @ Google](https://phorhum.github.io/)
-* [PIFuHD @ Meta](https://shunsukesaito.github.io/PIFuHD/)
-* [PaMIR @ Tsinghua](http://www.liuyebin.com/pamir/pamir.html)
-
-</details>
-'''
-
-
-def generate_image(seed, psi):
-    iface = gr.Interface.load("spaces/hysts/StyleGAN-Human")
-    img = iface(seed, psi)
-    return img
-
-
-random.seed(2022)
-model_types = ['icon-filter', 'pifu', 'pamir']
-examples = [[item, random.choice(model_types)] for item in glob.glob('examples/*.png')]
-
-with gr.Blocks() as demo:
-    gr.Markdown(description)
-
-    out_lst = []
-    with gr.Row():
-        with gr.Column():
-            with gr.Row():
-                with gr.Column():
-                    seed = gr.inputs.Slider(
-                        0, 100, step=1, default=0, label='Seed (For Image Generation)')
-                    psi = gr.inputs.Slider(
-                        0, 2, step=0.05, default=0.7, label='Truncation psi (For Image Generation)')
-                    radio_choice = gr.Radio(
-                        model_types, label='Method (For Reconstruction)', value='icon-filter')
-                inp = gr.Image(type="filepath", label="Input Image")
-            with gr.Row():
-                btn_sample = gr.Button("Sample Image")
-                btn_submit = gr.Button("Submit Image")
-
-            gr.Examples(examples=examples,
-                        inputs=[inp, radio_choice],
-                        cache_examples=True,
-                        fn=generate_model,
-                        outputs=out_lst)
-
-            out_vid = gr.Video(label="Image + Normal + Recon + Refined Recon")
-            out_vid_download = gr.File(
-                label="Download Video, welcome share on Twitter with #ICON")
-
-        with gr.Column():
-            overlap_inp = gr.Image(
-                type="filepath", label="Image Normal Overlap")
-            out_smpl = gr.Model3D(
-                clear_color=[0.0, 0.0, 0.0, 0.0], label="SMPL")
-            out_smpl_download = gr.File(label="Download SMPL mesh")
-            out_smpl_npy_download = gr.File(label="Download SMPL params")
-            out_recon = gr.Model3D(
-                clear_color=[0.0, 0.0, 0.0, 0.0], label="Recon")
-            out_recon_download = gr.File(label="Download clothed human mesh")
-            out_final = gr.Model3D(
-                clear_color=[0.0, 0.0, 0.0, 0.0], label="Refined Recon")
-            out_final_download = gr.File(
-                label="Download refined clothed human mesh")
-
-    out_lst = [out_smpl, out_smpl_download, out_smpl_npy_download, out_recon, out_recon_download,
-               out_final, out_final_download, out_vid, out_vid_download, overlap_inp]
-
-    btn_submit.click(fn=generate_model, inputs=[
-                     inp, radio_choice], outputs=out_lst)
-    btn_sample.click(fn=generate_image, inputs=[seed, psi], outputs=inp)
-
-if __name__ == "__main__":
 
-
-
-    # auth_message="Register at icon.is.tue.mpg.de to get HuggingFace username and password.")
+def greet(name):
+    return "Hello " + name + "!!"
 
-
+iface = gr.Interface(fn=greet, inputs="text", outputs="text")
+iface.launch()
```
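The removed file relied on a common Spaces idiom: when the `SYSTEM` environment variable equals `'spaces'`, it installed heavy, CUDA-specific wheels at startup via `subprocess` rather than listing them in requirements.txt. Below is a minimal sketch of that pattern; note that `check=True` and the `sys.executable -m pip` invocation are hardening choices of this sketch, not part of the original code.

```python
import os
import subprocess
import sys

# Sketch of the startup-install idiom from the removed app.py. On Hugging Face
# Spaces the SYSTEM env var is set to 'spaces', so environment-specific wheels
# can be installed at launch instead of through requirements.txt.
if os.getenv('SYSTEM') == 'spaces':
    # sys.executable -m pip targets the interpreter actually running the app;
    # check=True makes a failed install raise instead of failing silently.
    subprocess.run(
        [sys.executable, '-m', 'pip', 'install', 'pyembree', 'rembg'],
        check=True)
```

The original called `subprocess.run(...)` without `check=True`, so a failed wheel download would only surface later as an `ImportError`.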
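The replacement app is the stock Gradio hello-world. The commented-out `auth_message` line in the removed code hints at how the interface could have been gated behind a login; here is a hypothetical sketch, assuming credentials arrive via `USER` and `PASSWORD` environment variables (names not confirmed by the diff; the message text is taken from the removed comment):

```python
import os
import gradio as gr

def greet(name):
    return "Hello " + name + "!!"

iface = gr.Interface(fn=greet, inputs="text", outputs="text")

# launch() accepts a (username, password) tuple via `auth` and a login-screen
# prompt via `auth_message`.
iface.launch(
    auth=(os.environ['USER'], os.environ['PASSWORD']),  # assumed env var names
    auth_message='Register at icon.is.tue.mpg.de to get HuggingFace username and password.')
```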
requirements.txt CHANGED

```diff
@@ -7,6 +7,7 @@ yacs>=0.1.8
 scikit-image==0.19.1
 termcolor
 tqdm
+numpy==1.22.4
 trimesh==3.9.35
 flatten_dict==0.4.2
 jpeg4py
@@ -15,7 +16,6 @@ rtree==0.9.7
 pytorch_lightning==1.2.5
 PyMCubes
 chumpy
-rembg>=2.0.3
 opencv-python
 opencv_contrib_python
 scikit-learn
```
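The dependency change pins `numpy` to 1.22.4 and drops `rembg` from requirements.txt (the removed app.py had been installing `rembg` at startup anyway). As a quick sanity check after a rebuild, a sketch like the following can confirm the pins actually resolved in the Space's environment:

```python
# Print the resolved versions of a few pinned packages so the environment
# can be compared against requirements.txt.
from importlib.metadata import version

for pkg in ('numpy', 'trimesh', 'scikit-image', 'pytorch-lightning'):
    print(pkg, version(pkg))
```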