Update app.py (#10)
Commit: 26e3c5ad3848523f6ccfde585e7db94fe8d8b19b

app.py (CHANGED)
@@ -44,13 +44,9 @@ pipe.load_lora_weights(
     adapter_name="angles"
 )
 
-
-# "lovis93/next-scene-qwen-image-lora-2509",
-# weight_name="next-scene_lora-v2-3000.safetensors", adapter_name="next-scene"
-# )
+
 pipe.set_adapters(["angles"], adapter_weights=[1.])
 pipe.fuse_lora(adapter_names=["angles"], lora_scale=1.25)
-# pipe.fuse_lora(adapter_names=["next-scene"], lora_scale=1.)
 pipe.unload_lora_weights()
 
 pipe.transformer.__class__ = QwenImageTransformer2DModel
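Note on the hunk above: the commented-out references to the "next-scene" LoRA are dropped, leaving only the "angles" adapter and the usual diffusers fuse-then-unload pattern. A minimal sketch of that pattern, assuming illustrative repo ids (the base model and LoRA repositories below are not taken from this commit):

import torch
from diffusers import DiffusionPipeline

# Assumed repo ids, for illustration only.
pipe = DiffusionPipeline.from_pretrained("Qwen/Qwen-Image-Edit-2509", torch_dtype=torch.bfloat16)
pipe.load_lora_weights(
    "dx8152/Qwen-Edit-2509-Multiple-angles",  # assumed LoRA repo id
    adapter_name="angles",
)
pipe.set_adapters(["angles"], adapter_weights=[1.])
# Bake the adapter into the base weights at the chosen scale, then drop the
# separate LoRA layers so inference runs on the fused weights only.
pipe.fuse_lora(adapter_names=["angles"], lora_scale=1.25)
pipe.unload_lora_weights()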
@@ -187,7 +183,6 @@ def infer_camera_edit(
     height: Optional[int] = None,
     width: Optional[int] = None,
     prev_output: Optional[Image.Image] = None,
-    progress: gr.Progress = gr.Progress(track_tqdm=True)
 ) -> Tuple[Image.Image, int, str]:
     """
     Edit the camera angles/view of an image with Qwen Image Edit 2509 and dx8152's Qwen-Edit-2509-Multiple-angles LoRA.
@@ -231,9 +226,6 @@ def infer_camera_edit(
         prev_output (PIL.Image.Image | None, optional):
             Previous output image to use as input when no new image is uploaded.
             Defaults to None.
-        progress (gr.Progress, optional):
-            Gradio progress tracker, automatically provided by Gradio in the UI.
-            Defaults to a progress tracker with tqdm support.
 
     Returns:
         Tuple[PIL.Image.Image, int, str]:
@@ -241,6 +233,8 @@ def infer_camera_edit(
         - The actual seed used for generation.
         - The constructed camera prompt string.
     """
+    progress = gr.Progress(track_tqdm=True)
+
     prompt = build_camera_prompt(rotate_deg, move_forward, vertical_tilt, wideangle)
     print(f"Generated Prompt: {prompt}")
 
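Note on this hunk and the two signature/docstring hunks above: the gr.Progress tracker is no longer a keyword parameter with a default; it is created inside the function body instead, presumably so it does not show up in the signature exposed by the gr.api endpoints registered below. A minimal sketch of the resulting shape, with the parameter list abbreviated to the names visible in this diff (defaults are placeholders):

import gradio as gr

def infer_camera_edit(rotate_deg=0, move_forward=0, vertical_tilt=0, wideangle=False,
                      height=None, width=None, prev_output=None):
    # Created inside the body rather than injected through a default parameter,
    # so the tracker is not part of the function's public signature.
    progress = gr.Progress(track_tqdm=True)
    ...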
@@ -632,7 +626,7 @@ with gr.Blocks(theme=gr.themes.Citrus(), css=css) as demo:
 
     run_event.then(lambda img, *_: img, inputs=[result], outputs=[prev_output])
 
-
-
+    gr.api(infer_camera_edit, api_name="infer_edit_camera_angles")
+    gr.api(create_video_between_images, api_name="create_video_between_images")
 
 demo.launch(mcp_server=True, show_api=True)
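The two gr.api calls register infer_camera_edit and create_video_between_images as named API endpoints, and demo.launch(mcp_server=True, show_api=True) also exposes them over MCP. A hedged sketch of inspecting and calling the endpoints with gradio_client (the Space id is a placeholder; arguments follow infer_camera_edit's documented signature):

from gradio_client import Client

client = Client("OWNER/SPACE-NAME")  # placeholder Space id, not taken from this commit

# Prints the registered endpoints and their parameters, which should include
# /infer_edit_camera_angles and /create_video_between_images.
client.view_api()

# Example call shape (fill in arguments per the printed signature):
# result = client.predict(..., api_name="/infer_edit_camera_angles")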