Spaces: Running on T4

Commit: e283fdf
Parent(s): 82f2717

update

Files changed:
- .gitignore             +3 -1
- app_3d.py              +21 -0
- app_texnet.py          +107 -5
- examples/uv_normal.png +0 -3
- model.py               +1 -1
.gitignore CHANGED
@@ -1,2 +1,4 @@
 __pycache__
-data
+data
+examples
+.gradio
app_3d.py ADDED
@@ -0,0 +1,21 @@
+import gradio as gr
+import os
+
+def load_mesh(mesh_file_name):
+    return mesh_file_name
+
+demo = gr.Interface(
+    fn=load_mesh,
+    inputs=gr.Model3D(),
+    outputs=gr.Model3D(
+        clear_color=(255.0, 0.0, 0.0, 0.0), label="3D Model", display_mode="wireframe"),
+    examples=[
+        [os.path.join(os.path.dirname(__file__), "examples/bunny/mesh.obj")],
+        [os.path.join(os.path.dirname(__file__), "examples/monkey/mesh.obj")],
+        [os.path.join(os.path.dirname(__file__), "examples/Bunny.obj")],
+    ],
+    cache_examples=True
+)
+
+if __name__ == "__main__":
+    demo.launch()
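A side note on the viewer added above: Gradio's Model3D examples use clear_color channels in the [0, 1] range, so (255.0, 0.0, 0.0, 0.0) may not render the intended red background. A minimal sketch of the same echo-viewer pattern with normalized channels (illustrative only, not part of the commit):

import gradio as gr

# Echo the uploaded mesh straight back into a wireframe viewer.
# clear_color channels normalized to [0, 1].
viewer = gr.Interface(
    fn=lambda mesh_path: mesh_path,
    inputs=gr.Model3D(),
    outputs=gr.Model3D(
        clear_color=(1.0, 0.0, 0.0, 0.0),
        label="3D Model",
        display_mode="wireframe",
    ),
)

if __name__ == "__main__":
    viewer.launch()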
app_texnet.py CHANGED
@@ -1,6 +1,11 @@
 #!/usr/bin/env python
 
+import os
+import shutil
+import tempfile
 import gradio as gr
+from PIL import Image
+import numpy as np
 
 from settings import (
     DEFAULT_IMAGE_RESOLUTION,
@@ -11,6 +16,77 @@ from settings import (
 )
 from utils import randomize_seed_fn
 
+# ---- helper to build a quick textured copy of the mesh ---------------
+def apply_texture(src_mesh: str, texture: str, tag: str) -> str:
+    """
+    Writes a copy of `src_mesh` and a tiny .mtl that points to `texture`.
+    Returns the new OBJ path for viewing.
+    """
+    tmp_dir = tempfile.mkdtemp()
+    mesh_copy = os.path.join(tmp_dir, f"{tag}.obj")
+    mtl_name = f"{tag}.mtl"
+
+    # copy geometry
+    shutil.copy(src_mesh, mesh_copy)
+
+    # write minimal MTL
+    with open(os.path.join(tmp_dir, mtl_name), "w") as f:
+        f.write(f"newmtl material_0\nmap_Kd {os.path.basename(texture)}\n")
+
+    # ensure texture lives next to OBJ
+    shutil.copy(texture, os.path.join(tmp_dir, os.path.basename(texture)))
+
+    # patch OBJ to reference our new MTL
+    with open(mesh_copy, "r+") as f:
+        lines = f.readlines()
+        if not lines[0].startswith("mtllib"):
+            lines.insert(0, f"mtllib {mtl_name}\n")
+        f.seek(0); f.writelines(lines)
+
+    return mesh_copy
+
+def image_to_temp_path(img_like, tag):
+    """
+    Convert various image-like objects (str, PIL.Image, np.ndarray, list, tuple)
+    to a temp PNG path. Returns the path to the saved image file.
+    """
+    # handle tuple or list input
+    if isinstance(img_like, (list, tuple)):
+        if len(img_like) == 0:
+            raise ValueError("Empty image list/tuple.")
+        img_like = img_like[0]
+
+    # if it's already a file path
+    if isinstance(img_like, str):
+        return img_like
+
+    # if it's a PIL Image
+    if isinstance(img_like, Image.Image):
+        temp_path = os.path.join(tempfile.mkdtemp(), f"{tag}.png")
+        img_like.save(temp_path)
+        return temp_path
+
+    # if it's a numpy array
+    if isinstance(img_like, np.ndarray):
+        temp_path = os.path.join(tempfile.mkdtemp(), f"{tag}.png")
+        img_like = Image.fromarray(img_like)
+        img_like.save(temp_path)
+        return temp_path
+
+    raise ValueError(f"Expected PIL.Image, str, list, or tuple — got {type(img_like)}")
+
+def show_mesh(which, mesh, inp, coarse, fine):
+    """Switch the displayed texture based on dropdown change."""
+    tex_map = {
+        "Input": image_to_temp_path(inp, "input"),
+        "Coarse": coarse[0] if isinstance(coarse, tuple) else coarse,
+        "Fine": fine[0] if isinstance(fine, tuple) else fine,
+    }
+    texture_path = tex_map[which]
+    return apply_texture(mesh, texture_path, which.lower())
+# ----------------------------------------------------------------------
+
 
 def create_demo(process):
     with gr.Blocks() as demo:
@@ -39,7 +115,14 @@ def create_demo(process):
                     value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
                 )
             with gr.Column():
-
+                result_coarse = gr.Gallery(label="Output Coarse", show_label=True, columns=2, object_fit="scale-down")
+                result_fine = gr.Gallery(label="Output Fine", show_label=True, columns=2, object_fit="scale-down")
+                # mesh_viewer = gr.Model3D(label="Textured Mesh", clear_color=[0, 0, 0, 0], value="examples/monkey/mesh.obj")
+
+                # radio buttons let the user toggle which texture to view
+                # texture_choice = gr.Radio(["Input", "Coarse", "Fine"], label="Preview texture", value="Input")
+                # mesh_path_state = gr.State("examples/bunny/mesh.obj")
+
         inputs = [
             image,
             prompt,
@@ -51,6 +134,8 @@ def create_demo(process):
             guidance_scale,
             seed,
         ]
+
+        # first call → run diffusion / texture network
         prompt.submit(
             fn=randomize_seed_fn,
             inputs=[seed, randomize_seed],
@@ -60,19 +145,25 @@ def create_demo(process):
         ).then(
             fn=process,
             inputs=inputs,
-            outputs=
+            outputs=[result_coarse, result_fine],
             api_name="canny",
             concurrency_id="main",
         )
+        # .then(
+        #     fn=show_mesh,
+        #     inputs=[texture_choice, mesh_path_state, image, result_coarse, result_fine],
+        #     outputs=mesh_viewer,
+        #     queue=False,
+        #     api_name=False,
+        # )
 
-        # TODO: setup several example images
         gr.Examples(
             fn=process,
             inputs=inputs,
-            outputs=
+            outputs=[result_coarse, result_fine],
            examples=[
                 [
-                    "examples/uv_normal.png",
+                    "examples/bunny/uv_normal.png",  # /dgxusers/Users/jyang/project/ObjectReal/data/control/preprocess/bunny/uv_normal/fused.png
                     "feather",
                     a_prompt.value,
                     n_prompt.value,
@@ -82,6 +173,17 @@ def create_demo(process):
                     guidance_scale.value,
                     seed.value,
                 ],
+                [
+                    "examples/monkey/uv_normal.png",  # /dgxusers/Users/jyang/project/ObjectReal/data/control/preprocess/monkey/uv_normal/fused.png
+                    "wood",
+                    a_prompt.value,
+                    n_prompt.value,
+                    num_samples.value,
+                    image_resolution.value,
+                    num_steps.value,
+                    guidance_scale.value,
+                    seed.value,
+                ],
             ],
         )
     return demo
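Since apply_texture and image_to_temp_path are plain functions, they can be sanity-checked outside the Blocks UI. A minimal sketch, assuming the bunny mesh shipped with the Space and a checkerboard texture generated on the fly (paths are illustrative):

import numpy as np

from app_texnet import apply_texture, image_to_temp_path

# Synthesize a small checkerboard and let the helper write it to a temp PNG.
checker = ((np.indices((64, 64)).sum(axis=0) % 2) * 255).astype(np.uint8)
tex_path = image_to_temp_path(checker, "checker")

# Copy the mesh into a temp dir with a minimal MTL pointing at the texture.
obj_path = apply_texture("examples/bunny/mesh.obj", tex_path, "checker")
print(obj_path)  # e.g. /tmp/tmpXXXXXXXX/checker.obj, viewable in gr.Model3D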
examples/uv_normal.png DELETED (Git LFS pointer, 3 lines removed)
model.py CHANGED
@@ -220,7 +220,7 @@ class Model:
         # restore the original pipe
         self.pipe = self.pipe_backup
 
-        return [
+        return [*results_coarse], [*results_fine]
 
     # @spaces.GPU #[uncomment to use ZeroGPU]
     @torch.inference_mode()
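For context on the model.py change: returning two lists lets one callback feed both galleries, since Gradio spreads a tuple of return values across the components in outputs. A toy sketch of the same wiring (fake_process and its random images are placeholders, not the Space's model):

import gradio as gr
import numpy as np

def fake_process(seed):
    # Stand-in for Model.process: one list of images per gallery.
    rng = np.random.default_rng(int(seed))
    coarse = [rng.integers(0, 256, (64, 64, 3), dtype=np.uint8) for _ in range(2)]
    fine = [rng.integers(0, 256, (64, 64, 3), dtype=np.uint8) for _ in range(2)]
    return coarse, fine

with gr.Blocks() as demo:
    seed = gr.Number(value=0, label="Seed", precision=0)
    run = gr.Button("Run")
    result_coarse = gr.Gallery(label="Output Coarse")
    result_fine = gr.Gallery(label="Output Fine")
    # The tuple from fake_process maps onto [result_coarse, result_fine].
    run.click(fake_process, inputs=seed, outputs=[result_coarse, result_fine])

if __name__ == "__main__":
    demo.launch()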