Update app.py
app.py CHANGED
```diff
@@ -16,13 +16,6 @@ from huggingface_hub import hf_hub_download
 from safetensors.torch import load_file
 
 import os
-import base64
-from io import BytesIO
-import json
-import time  # Added for history update delay
-
-from gradio_client import Client, handle_file
-import tempfile
 from PIL import Image
 import os
 import gradio as gr
```
```diff
@@ -43,69 +36,65 @@ pipe.load_lora_weights(
     weight_name="镜头转换.safetensors", adapter_name="angles"
 )
 
-pipe.load_lora_weights(
-    "lovis93/next-scene-qwen-image-lora-2509",
-    weight_name="next-scene_lora-v2-3000.safetensors", adapter_name="next-scene"
-)
-pipe.set_adapters(["angles"
-pipe.fuse_lora(adapter_names=["angles"], lora_scale=1.)
-pipe.fuse_lora(adapter_names=["next-scene"], lora_scale=1.)
+# pipe.load_lora_weights(
+#     "lovis93/next-scene-qwen-image-lora-2509",
+#     weight_name="next-scene_lora-v2-3000.safetensors", adapter_name="next-scene"
+# )
+pipe.set_adapters(["angles"], adapter_weights=[1.])
+pipe.fuse_lora(adapter_names=["angles"], lora_scale=1.25)
+# pipe.fuse_lora(adapter_names=["next-scene"], lora_scale=1.)
 pipe.unload_lora_weights()
 
 
-
+
 pipe.transformer.__class__ = QwenImageTransformer2DModel
 pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
 
-# # --- Ahead-of-time compilation ---
 optimize_pipeline_(pipe, image=[Image.new("RGB", (1024, 1024)), Image.new("RGB", (1024, 1024))], prompt="prompt")
 
-
+
 MAX_SEED = np.iinfo(np.int32).max
 
-
-def build_camera_prompt(rotate_deg, move_lr, move_forward, topdown, wideangle, closeup):
+def build_camera_prompt(rotate_deg, move_forward, vertical_tilt, wideangle):
     prompt_parts = []
 
     # Rotation
     if rotate_deg != 0:
         direction = "left" if rotate_deg > 0 else "right"
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        if direction == "left":
+            prompt_parts.append(f"将镜头向左旋转{abs(rotate_deg)}度 Rotate the camera {abs(rotate_deg)} degrees to the left.")
+        else:
+            prompt_parts.append(f"将镜头向右旋转{abs(rotate_deg)}度 Rotate the camera {abs(rotate_deg)} degrees to the right.")
+
+
+    # Move forward / close-up
+    if move_forward >= 5:
+        prompt_parts.append("将镜头转为特写镜头 Turn the camera to a close-up.")
+    elif move_forward >= 1:
+        prompt_parts.append("将镜头向前移动 Move the camera forward.")
+
+    # Vertical tilt
+    if vertical_tilt <= -1:
+        prompt_parts.append("将相机转向鸟瞰视角 Turn the camera to a bird's-eye view.")
+    elif vertical_tilt >= 1:
+        prompt_parts.append("将相机切换到仰视视角 Turn the camera to a worm's-eye view.")
+
+    # Lens option
     if wideangle:
-        prompt_parts.append("Turn the camera to a wide-angle lens.")
-    if closeup:
-        prompt_parts.append("Turn the camera to a close-up lens.")
+        prompt_parts.append(" 将镜头转为广角镜头 Turn the camera to a wide-angle lens.")
 
     final_prompt = " ".join(prompt_parts).strip()
-    return final_prompt if final_prompt else "
+    return final_prompt if final_prompt else ""
 
 
-
-@spaces.GPU(duration=300)
+@spaces.GPU
 def infer_camera_edit(
     image,
     prev_output,
     rotate_deg,
-    move_lr,
     move_forward,
-    topdown,
+    vertical_tilt,
     wideangle,
-    closeup,
     seed,
     randomize_seed,
     true_guidance_scale,
```
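This hunk drops the `next-scene` adapter entirely (its load and fuse calls are now commented out) and fuses only the `angles` LoRA, at a slightly stronger scale (1.25 instead of 1.0). Fusing folds the adapter deltas into the base weights, which is why `unload_lora_weights()` can follow immediately: it discards the adapter modules while the fused effect stays. A minimal sketch of the pattern; the repo id below is a placeholder, since only the weight name is visible in this hunk:

```python
# Sketch of the fuse-then-unload pattern, assuming a placeholder repo id.
pipe.load_lora_weights(
    "some-user/camera-angles-lora",   # placeholder, not the Space's real repo id
    weight_name="镜头转换.safetensors",
    adapter_name="angles",
)
pipe.set_adapters(["angles"], adapter_weights=[1.])
pipe.fuse_lora(adapter_names=["angles"], lora_scale=1.25)  # bake deltas into base weights at 1.25x
pipe.unload_lora_weights()  # drop adapter modules; the fused weights remain
```

The rewritten `build_camera_prompt` also switches to bilingual (Chinese plus English) instructions, presumably matching the trigger phrases the `angles` LoRA was trained on. A quick check of the control-to-prompt mapping, calling the function exactly as defined in the hunk:

```python
# Values match the new slider configs: rotate_deg in {-90, -45, 0, 45, 90},
# move_forward in {0, 5, 10}, vertical_tilt in {-1, 0, 1}.
print(build_camera_prompt(45, 0, 0, False))
# 将镜头向左旋转45度 Rotate the camera 45 degrees to the left.
print(build_camera_prompt(0, 5, 0, False))
# 将镜头转为特写镜头 Turn the camera to a close-up.
print(build_camera_prompt(0, 0, -1, True))
# 将相机转向鸟瞰视角 Turn the camera to a bird's-eye view.  将镜头转为广角镜头 Turn the camera to a wide-angle lens.
```

Note the sign convention: positive `rotate_deg` rotates the camera to the left.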
```diff
@@ -113,14 +102,14 @@ def infer_camera_edit(
     height,
     width,
 ):
-    prompt = build_camera_prompt(rotate_deg, move_lr, move_forward, topdown, wideangle, closeup)
+    prompt = build_camera_prompt(rotate_deg, move_forward, vertical_tilt, wideangle)
     print(f"Generated Prompt: {prompt}")
 
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
     generator = torch.Generator(device=device).manual_seed(seed)
 
-    #
+    # Choose input image (prefer uploaded, else last output)
     pil_images = []
     if image is not None:
         if isinstance(image, Image.Image):
```
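`infer_camera_edit` now builds its prompt from the four remaining controls, and the `@spaces.GPU` decorator drops the explicit 300-second duration. The seed plumbing is unchanged: re-roll when `randomize_seed` is set, then pin the seed to a device-bound generator so a given seed reproduces the same edit. A standalone sketch of that pattern (`resolve_seed` is a hypothetical helper name):

```python
import random

import numpy as np
import torch

MAX_SEED = np.iinfo(np.int32).max  # same bound the app uses

def resolve_seed(seed: int, randomize_seed: bool, device: str = "cpu"):
    # Optionally draw a fresh seed, then bind it to a generator on the
    # target device; returning the seed lets the UI display what was used.
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device=device).manual_seed(seed)
    return generator, seed
```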
```diff
@@ -147,30 +136,38 @@ def infer_camera_edit(
     return result, seed, prompt
 
 
-# ---
-css =
-
-
+# --- UI ---
+css = "#col-container { max-width: 800px; margin: 0 auto; }"
+
+is_reset = gr.State(value=False)
+
+def reset_all():
+    return [0, 0, 0, 0, False, True]
+
+def end_reset():
+    return False
+
 
 with gr.Blocks(css=css) as demo:
     with gr.Column(elem_id="col-container"):
         gr.Markdown("## 🎬 Qwen Image Edit — Camera Angle Control")
-        gr.Markdown(
+        gr.Markdown(
+            ""
+        )
 
         with gr.Row():
             with gr.Column():
                 image = gr.Image(label="Input Image", type="pil", sources=["upload"])
                 prev_output = gr.State(value=None)
+                is_reset = gr.State(value=False)
 
-                with gr.
-                rotate_deg = gr.Slider(
-
-
-                move_lr = gr.Slider(label="Move Right–Left", minimum=-10, maximum=10, step=1, value=0)
-                move_forward = gr.Slider(label="Move Forward/Backward", minimum=-10, maximum=10, step=1, value=0)
-                topdown = gr.Checkbox(label="Top-Down View", value=False)
+                with gr.Group():
+                    rotate_deg = gr.Slider(label="Rotate Left–Right (°)", minimum=-90, maximum=90, step=45, value=0)
+                    move_forward = gr.Slider(label="Move Forward → Close-Up", minimum=0, maximum=10, step=5, value=0)
+                    vertical_tilt = gr.Slider(label="Vertical Angle (Bird ↔ Worm)", minimum=-1, maximum=1, step=1, value=0)
                 wideangle = gr.Checkbox(label="Wide-Angle Lens", value=False)
-
+                with gr.Row():
+                    reset_btn = gr.Button("reset settings")
 
                 with gr.Accordion("Advanced Settings", open=False):
                     seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
```
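The UI collapses six controls into four (rotation, forward movement, vertical tilt, wide-angle) and adds a reset path built around an `is_reset` flag: `reset_all` raises the flag while zeroing the controls, and `end_reset` lowers it afterwards. As committed, `reset_all` returns six values (the arity of the old six-control panel) while the handlers in the next hunk wire only five output components, so the sixth value has no slot. A sketch with matched arity, assuming the five outputs this commit actually wires:

```python
# One return value per wired output component:
# [rotate_deg, move_forward, vertical_tilt, wideangle, is_reset]
def reset_all():
    # Zero the three sliders, clear the checkbox, raise the reset flag.
    return [0, 0, 0, False, True]

def end_reset():
    # Lower the flag once the reset round-trip has finished.
    return False
```

(The module-level `is_reset = gr.State(value=False)` is superseded by the one re-created inside the `gr.Blocks` context.)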
```diff
@@ -180,44 +177,57 @@ with gr.Blocks(css=css) as demo:
                     height = gr.Slider(label="Height", minimum=256, maximum=2048, step=8, value=1024)
                     width = gr.Slider(label="Width", minimum=256, maximum=2048, step=8, value=1024)
 
-
-
-                run_btn = gr.Button("Generate", variant="primary")
+
+                run_btn = gr.Button("Generate", variant="primary", visible=False)
 
             with gr.Column():
                 result = gr.Image(label="Output Image")
-                prompt_preview = gr.Textbox(label="
-                gr.Markdown("_Each change applies a fresh camera instruction to the last output image._")
+                prompt_preview = gr.Textbox(label="Processed Prompt", interactive=False)
+                #gr.Markdown("_Each change applies a fresh camera instruction to the last output image._")
 
-    # Define inputs & outputs
     inputs = [
-        image, prev_output, rotate_deg,
-
+        image, prev_output, rotate_deg, move_forward,
+        vertical_tilt, wideangle,
         seed, randomize_seed, true_guidance_scale, num_inference_steps, height, width
     ]
     outputs = [result, seed, prompt_preview]
 
-
-        return [0, 0, 0, False, False, False]
-
+    # Reset behavior
    reset_btn.click(
        fn=reset_all,
        inputs=None,
-        outputs=[rotate_deg,
+        outputs=[rotate_deg, move_forward, vertical_tilt, wideangle, is_reset],
        queue=False
-    )
+    ).then(fn=end_reset, inputs=None, outputs=[is_reset], queue=False)
 
-
-
-
-
-
+    # Manual generation
+    run_event = run_btn.click(fn=infer_camera_edit, inputs=inputs, outputs=outputs)
+
+    # Image upload resets
+    image.change(
+        fn=reset_all,
+        inputs=None,
+        outputs=[rotate_deg, move_forward, vertical_tilt, wideangle, is_reset],
+        queue=False
+    ).then(fn=end_reset, inputs=None, outputs=[is_reset], queue=False)
+
+    # Live updates
+    def maybe_infer(is_reset, *args):
+        if is_reset:
+            return gr.update(), gr.update(), gr.update()
+        else:
+            return infer_camera_edit(*args)
+
+    control_inputs = [
+        image, prev_output, rotate_deg, move_forward,
+        vertical_tilt, wideangle,
+        seed, randomize_seed, true_guidance_scale, num_inference_steps, height, width
+    ]
+    control_inputs_with_flag = [is_reset] + control_inputs
 
-
-
-    control.change(fn=infer_camera_edit, inputs=inputs, outputs=outputs, show_progress="minimal")
+    for control in [rotate_deg, move_forward, vertical_tilt, wideangle]:
+        control.change(fn=maybe_infer, inputs=control_inputs_with_flag, outputs=outputs, show_progress="minimal")
 
-
-    run_event.then(lambda img, *_: img, inputs=outputs, outputs=[prev_output])
+    run_event.then(lambda img, *_: img, inputs=[result], outputs=[prev_output])
 
-demo.launch()
+demo.launch()
```
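The event wiring is the core behavioral change: the Generate button is hidden and every control edit triggers inference live, except during resets. `maybe_infer` checks the `is_reset` flag first, because programmatically resetting a slider fires its `.change()` event just like a user edit; while the flag is up it returns no-op updates instead of generating. A self-contained toy version of the same gating pattern (hypothetical component names; the real Space calls `infer_camera_edit`):

```python
import gradio as gr

def maybe_echo(is_reset, value):
    # Skip work while a programmatic reset is sweeping the controls.
    if is_reset:
        return gr.update()
    return f"value = {value}"

with gr.Blocks() as demo:
    is_reset = gr.State(value=False)
    slider = gr.Slider(minimum=0, maximum=10, step=1, value=0)
    out = gr.Textbox()
    reset_btn = gr.Button("reset")

    # Raise the flag and reset the control, then lower the flag afterwards.
    reset_btn.click(
        fn=lambda: (0, True), inputs=None, outputs=[slider, is_reset], queue=False
    ).then(fn=lambda: False, inputs=None, outputs=[is_reset], queue=False)

    # Live updates, gated by the flag just like maybe_infer above.
    slider.change(fn=maybe_echo, inputs=[is_reset, slider], outputs=[out], show_progress="minimal")

demo.launch()
```

The final `run_event.then(...)` chain copies each finished image into `prev_output`, so the next camera instruction edits the latest output rather than the original upload. The old version passed `inputs=outputs`, feeding the seed and prompt in as extra arguments that the lambda discarded via `*_`; the new `inputs=[result]` passes only the image.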