Commit 40ae5e2 · update
Parent(s): cdd5e3e
app.py CHANGED
@@ -187,7 +187,7 @@ else:
         # print(f"Before text_to_detailed: {torch.cuda.memory_allocated() / 1024**3} GB")
         return k3d_wrapper.get_detailed_prompt(prompt, seed)
 
-    @spaces.GPU
+    @spaces.GPU
     def text_to_image(prompt, seed=None, strength=1.0,lora_scale=1.0, num_inference_steps=18, redux_hparam=None, init_image=None, **kwargs):
         # subprocess.run("rm -rf /data-nvme/zerogpu-offload/*", env={}, shell=True)
         # print(f"Before text_to_image: {torch.cuda.memory_allocated() / 1024**3} GB")
@@ -210,7 +210,7 @@ else:
             **kwargs)
         return result[-1]
 
-    @spaces.GPU
+    @spaces.GPU
     def image2mesh_preprocess_(input_image_, seed, use_mv_rgb=True):
         global preprocessed_input_image
 
@@ -225,7 +225,7 @@ else:
         return reference_save_path, caption
 
 
-    @spaces.GPU
+    @spaces.GPU
     def image2mesh_main_(reference_3d_bundle_image, caption, seed, strength1=0.5, strength2=0.95, enable_redux=True, use_controlnet=True, if_video=True):
         subprocess.run(['nvidia-smi'])
         global mesh_cache
@@ -252,7 +252,7 @@ else:
         return gen_save_path, recon_mesh_path, mesh_cache
         # return gen_save_path, recon_mesh_path
 
-    @spaces.GPU
+    @spaces.GPU
     def bundle_image_to_mesh(
             gen_3d_bundle_image,
             camera_radius=3.5,
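Each of the GPU-bound entry points above is (re)decorated with @spaces.GPU, the ZeroGPU decorator from Hugging Face's spaces package, which requests a GPU allocation only for the duration of the decorated call. A minimal sketch of the pattern, using a hypothetical run_on_gpu function; the optional duration argument is an assumption, not part of this commit:

    import spaces
    import torch

    @spaces.GPU(duration=120)  # assumed duration in seconds; the commit uses the bare @spaces.GPU form
    def run_on_gpu(prompt: str) -> torch.Tensor:
        # CUDA is only guaranteed to be attached while the decorated call runs
        return torch.randn(4, device="cuda")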
@@ -429,10 +429,11 @@ with gr.Blocks(css="""
     # Modify the Examples section to display horizontally
     gr.Examples(
         examples=[
-            ["A
+            ["A cat"],
+            ["A person wearing a virtual reality headset, sitting position, bent legs, clasped hands."],
             ["A battle mech in a mix of red, blue, and black color, with a cannon on the head."],
             ["骷髅头, 邪恶的"],
-
+
         ],
         inputs=[prompt],
         label="Example Prompts",
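The example list gains two prompts, "A cat" and the virtual-reality-headset prompt, alongside the existing ones ("骷髅头, 邪恶的" is Chinese for "skull, evil"). A self-contained sketch of the resulting component; the prompt textbox is an assumption, since the diff only references it by name:

    import gradio as gr

    with gr.Blocks() as demo:
        prompt = gr.Textbox(label="Prompt")  # assumed host component for the examples
        gr.Examples(
            examples=[
                ["A cat"],
                ["A person wearing a virtual reality headset, sitting position, bent legs, clasped hands."],
                ["A battle mech in a mix of red, blue, and black color, with a cannon on the head."],
                ["骷髅头, 邪恶的"],  # "skull, evil"
            ],
            inputs=[prompt],
            label="Example Prompts",
        )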
@@ -440,7 +441,7 @@ with gr.Blocks(css="""
     )
 
     with gr.Accordion("Advanced Parameters", open=False):
-        seed1 = gr.Number(value=
+        seed1 = gr.Number(value=666, label="Seed")
 
     btn_one_click_generate = gr.Button("One-click Generation", elem_id="one-click-generate-btn", elem_classes=["orange-button"])
 
@@ -452,7 +453,7 @@ with gr.Blocks(css="""
 
     with gr.Accordion("Advanced Parameters", open=False):
         with gr.Row():
-            img_gen_seed = gr.Number(value=
+            img_gen_seed = gr.Number(value=666, label="Image Generation Seed")
             num_inference_steps = gr.Slider(minimum=1, maximum=50, value=18, step=1, label="Inference Steps")
         with gr.Row():
             strength = gr.Slider(minimum=0.0, maximum=1.0, value=1.0, step=0.05, label="Strength")
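Both seed widgets are pinned to a default of 666, making generation reproducible out of the box. A sketch of how such a UI seed is typically consumed downstream; the generator helper is an assumption, as the commit only changes the widget defaults:

    import torch

    def seeded_generator(seed=None, device="cuda"):
        # A fixed seed (e.g. the UI default of 666) makes sampling reproducible;
        # None falls back to nondeterministic seeding.
        gen = torch.Generator(device=device)
        if seed is not None:
            gen.manual_seed(int(seed))
        return gen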