import random
import tempfile

import gradio as gr
import numpy as np
import spaces
import torch
from gradio_client import Client, handle_file
from PIL import Image

from optimization import optimize_pipeline_
from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline
from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3
from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel

# --- Model Loading ---
dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"

pipe = QwenImageEditPlusPipeline.from_pretrained(
    "Qwen/Qwen-Image-Edit-2509",
    transformer=QwenImageTransformer2DModel.from_pretrained(
        "linoyts/Qwen-Image-Edit-Rapid-AIO",
        subfolder="transformer",
        torch_dtype=dtype,
        device_map="cuda",
    ),
    torch_dtype=dtype,
).to(device)

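# Load dx8152's Multiple-Angles LoRA, fuse it into the transformer at 1.25x strength,
# then unload the adapter so inference runs on the fused weights with no LoRA overhead.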
pipe.load_lora_weights(
    "dx8152/Qwen-Edit-2509-Multiple-angles",
    weight_name="镜头转换.safetensors",
    adapter_name="angles",
)

# pipe.load_lora_weights(
#         "lovis93/next-scene-qwen-image-lora-2509", 
#         weight_name="next-scene_lora-v2-3000.safetensors", adapter_name="next-scene"
#     )
pipe.set_adapters(["angles"], adapter_weights=[1.])
pipe.fuse_lora(adapter_names=["angles"], lora_scale=1.25)
# pipe.fuse_lora(adapter_names=["next-scene"], lora_scale=1.)
pipe.unload_lora_weights()



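# Restore the custom transformer class and swap in the FA3 (FlashAttention-3) attention processor.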
pipe.transformer.__class__ = QwenImageTransformer2DModel
pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())

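# Warm up / compile the pipeline once with dummy inputs so the first real request does not pay the
# startup cost; the exact behavior depends on optimize_pipeline_ in optimization.py.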
optimize_pipeline_(pipe, image=[Image.new("RGB", (1024, 1024)), Image.new("RGB", (1024, 1024))], prompt="prompt")


MAX_SEED = np.iinfo(np.int32).max

def _generate_video_segment(input_image_path: str, output_image_path: str, prompt: str, request: gr.Request) -> str:
    """Generates a single video segment using the external service."""
    # Forward the incoming request's x-ip-token so the downstream Space attributes quota to the caller.
    x_ip_token = request.headers['x-ip-token']
    video_client = Client("multimodalart/wan-2-2-first-last-frame", headers={"x-ip-token": x_ip_token})
    result = video_client.predict(
        start_image_pil=handle_file(input_image_path),
        end_image_pil=handle_file(output_image_path),
        prompt=prompt, api_name="/generate_video",
    )
    return result[0]["video"]

def build_camera_prompt(rotate_deg, move_forward, vertical_tilt, wideangle):
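    """Map the UI camera controls to the Multiple-Angles LoRA's bilingual (Chinese + English) trigger phrases.

    Example: build_camera_prompt(45, 0, 0, False)
        -> "将镜头向左旋转45度 Rotate the camera 45 degrees to the left."
    """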
    prompt_parts = []

    # Rotation
    if rotate_deg != 0:
        direction = "left" if rotate_deg > 0 else "right"
        if direction == "left":
            prompt_parts.append(f"将镜头向左旋转{abs(rotate_deg)}度 Rotate the camera {abs(rotate_deg)} degrees to the left.")
        else:
            prompt_parts.append(f"将镜头向右旋转{abs(rotate_deg)}度 Rotate the camera {abs(rotate_deg)} degrees to the right.")


    # Move forward / close-up
    if move_forward > 5:
        prompt_parts.append("将镜头转为特写镜头 Turn the camera to a close-up.")
    elif move_forward >= 1:
        prompt_parts.append("将镜头向前移动 Move the camera forward.")

    # Vertical tilt
    if vertical_tilt <= -1:
        prompt_parts.append("将相机转向鸟瞰视角 Turn the camera to a bird's-eye view.")
    elif vertical_tilt >= 1:
        prompt_parts.append("将相机切换到仰视视角 Turn the camera to a worm's-eye view.")

    # Lens option
    if wideangle:
        prompt_parts.append("将镜头转为广角镜头 Turn the camera to a wide-angle lens.")

    final_prompt = " ".join(prompt_parts).strip()
    return final_prompt if final_prompt else "no camera movement"


@spaces.GPU
def infer_camera_edit(
    image,
    rotate_deg,
    move_forward,
    vertical_tilt,
    wideangle,
    seed,
    randomize_seed,
    true_guidance_scale,
    num_inference_steps,
    height,
    width,
    prev_output = None,
    progress=gr.Progress(track_tqdm=True)
):
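    """Build a camera-movement prompt from the UI controls and run one Qwen-Image-Edit pass."""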
    prompt = build_camera_prompt(rotate_deg, move_forward, vertical_tilt, wideangle)
    print(f"Generated Prompt: {prompt}")

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device=device).manual_seed(seed)

    # Choose input image (prefer uploaded, else last output)
    pil_images = []
    if image is not None:
        if isinstance(image, Image.Image):
            pil_images.append(image.convert("RGB"))
        elif hasattr(image, "name"):
            pil_images.append(Image.open(image.name).convert("RGB"))
    elif prev_output is not None:
        # prev_output is a gr.Image value and may arrive as a numpy array rather than a PIL image.
        prev_pil = prev_output if isinstance(prev_output, Image.Image) else Image.fromarray(np.asarray(prev_output).astype("uint8"))
        pil_images.append(prev_pil.convert("RGB"))

    if len(pil_images) == 0:
        raise gr.Error("Please upload an image first.")

    if prompt == "no camera movement":
        return image, seed, prompt
    result = pipe(
        image=pil_images,
        prompt=prompt,
        height=height if height != 0 else None,
        width=width if width != 0 else None,
        num_inference_steps=num_inference_steps,
        generator=generator,
        true_cfg_scale=true_guidance_scale,
        num_images_per_prompt=1,
    ).images[0]

    return result, seed, prompt

def create_video_between_images(input_image, output_image, prompt: str, request: gr.Request) -> str:
    """Create a video between the input and output images."""
    if input_image is None or output_image is None:
        raise gr.Error("Both an input image and an output image are required to create a video.")

    try:

        with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as tmp:
            input_image.save(tmp.name)
            input_image_path = tmp.name

        output_pil = Image.fromarray(output_image.astype('uint8'))
        with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as tmp:
            output_pil.save(tmp.name)
            output_image_path = tmp.name

        video_path = _generate_video_segment(
            input_image_path,
            output_image_path,
            prompt if prompt else "camera movement transition",
            request
        )
        return video_path
    except Exception as e:
        raise gr.Error(f"Video generation failed: {e}")


# --- UI ---
css = '''#col-container { max-width: 800px; margin: 0 auto; }
.dark .progress-text{color: white !important}
#examples{max-width: 800px; margin: 0 auto; }'''

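# The hidden is_reset checkbox flags that a reset is in progress so the live-update
# callback (maybe_infer) can skip regeneration while the controls snap back to their defaults.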
def reset_all():
    # Defaults for [rotate_deg, move_forward, vertical_tilt, wideangle], plus is_reset=True.
    return [0, 0, 0, False, True]

def end_reset():
    return False

def update_dimensions_on_upload(image):
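    """Scale the upload so its longer side is 1024 px, preserving aspect ratio, rounded down to multiples of 8."""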
    if image is None:
        return 1024, 1024
    
    original_width, original_height = image.size
    
    if original_width > original_height:
        new_width = 1024
        aspect_ratio = original_height / original_width
        new_height = int(new_width * aspect_ratio)
    else:
        new_height = 1024
        aspect_ratio = original_width / original_height
        new_width = int(new_height * aspect_ratio)
        
    # Ensure dimensions are multiples of 8
    new_width = (new_width // 8) * 8
    new_height = (new_height // 8) * 8
    
    return new_width, new_height


with gr.Blocks(theme=gr.themes.Citrus(), css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("## 🎬 Qwen Image Edit — Camera Angle Control")
        gr.Markdown("""
            Qwen Image Edit 2509 with camera angle controls ✨
            Uses [dx8152's Qwen-Edit-2509-Multiple-angles LoRA](https://huggingface.co/dx8152/Qwen-Edit-2509-Multiple-angles) and [Phr00t/Qwen-Image-Edit-Rapid-AIO](https://huggingface.co/Phr00t/Qwen-Image-Edit-Rapid-AIO/tree/main) for 4-step inference 💨
            """
        )

        with gr.Row():
            with gr.Column():
                image = gr.Image(label="Input Image", type="pil")
                prev_output = gr.Image(value=None, visible=False)
                is_reset = gr.Checkbox(value=False, visible=False)

                with gr.Tab("Camera Controls"):
                    rotate_deg = gr.Slider(label="Rotate Left / Right (degrees °)", minimum=-90, maximum=90, step=45, value=0)
                    move_forward = gr.Slider(label="Move Forward → Close-up", minimum=0, maximum=10, step=5, value=0)
                    vertical_tilt = gr.Slider(label="Vertical Angle (Bird's-eye ↔ Worm's-eye)", minimum=-1, maximum=1, step=1, value=0)
                    wideangle = gr.Checkbox(label="Wide-angle Lens", value=False)
                with gr.Row():
                    reset_btn = gr.Button("Reset")
                    run_btn = gr.Button("Generate", variant="primary")

                with gr.Accordion("Advanced Settings", open=False):
                    seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
                    randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
                    true_guidance_scale = gr.Slider(label="Guidance Scale", minimum=1.0, maximum=10.0, step=0.1, value=1.0)
                    num_inference_steps = gr.Slider(label="Inference Steps", minimum=1, maximum=40, step=1, value=4)
                    height = gr.Slider(label="Height", minimum=256, maximum=2048, step=8, value=1024)
                    width = gr.Slider(label="Width", minimum=256, maximum=2048, step=8, value=1024)

            with gr.Column():
                result = gr.Image(label="Output Image", interactive=False)
                prompt_preview = gr.Textbox(label="Processed Prompt", interactive=False)
                create_video_button = gr.Button("🎥 Create Video Between Images", variant="secondary", visible=False)
                with gr.Group(visible=False) as video_group:
                    video_output = gr.Video(label="Generated Video", show_download_button=True, autoplay=True)
                    
    inputs = [
        image, rotate_deg, move_forward, vertical_tilt, wideangle,
        seed, randomize_seed, true_guidance_scale, num_inference_steps, height, width, prev_output
    ]
    outputs = [result, seed, prompt_preview]

    # Reset behavior
    reset_btn.click(
        fn=reset_all,
        inputs=None,
        outputs=[rotate_deg, move_forward, vertical_tilt, wideangle, is_reset],
        queue=False
    ).then(fn=end_reset, inputs=None, outputs=[is_reset], queue=False)

    # Manual generation with video button visibility control
    def infer_and_show_video_button(*args):
        result_img, result_seed, result_prompt = infer_camera_edit(*args)
        # Show video button if we have both input and output images
        show_button = args[0] is not None and result_img is not None
        return result_img, result_seed, result_prompt, gr.update(visible=show_button)
    
    run_event = run_btn.click(
        fn=infer_and_show_video_button, 
        inputs=inputs, 
        outputs=outputs + [create_video_button]
    )

    # Video creation
    create_video_button.click(
        fn=lambda: gr.update(visible=True), 
        outputs=[video_group],
        api_name=False
    ).then(
        fn=create_video_between_images,
        inputs=[image, result, prompt_preview],
        outputs=[video_output],
        api_name=False
    )

    # Examples
    gr.Examples(
        examples=[
            ["american_gothic.jpg", 0, 0, 0, False, 0, True, 1.0, 4, 1024, 768],
            ["tool_of_the_sea.png", 90, 0, 0, False, 0, True, 1.0, 4, 568, 1024],
            ["monkey.jpg", -90, 0, 0, False, 0, True, 1.0, 4, 704, 1024],
            ["metropolis.jpg", 0, 0, -1, False, 0, True, 1.0, 4, 816, 1024],
            ["disaster_girl.jpg", -45, 0, 1, False, 0, True, 1.0, 4, 768, 1024],
            ["grumpy.png", 90, 0, 1, False, 0, True, 1.0, 4, 576, 1024]
        ],
        inputs=[image, rotate_deg, move_forward, vertical_tilt, wideangle,
                seed, randomize_seed, true_guidance_scale, num_inference_steps, height, width],
        outputs=outputs,
        fn=infer_camera_edit,
        cache_examples="lazy",
        elem_id="examples"
    )
    
    # Image upload triggers dimension update and control reset
    image.upload(
        fn=update_dimensions_on_upload,
        inputs=[image],
        outputs=[width, height]
    ).then(
        fn=reset_all,
        inputs=None,
        outputs=[rotate_deg, move_forward, vertical_tilt, wideangle, is_reset],
        queue=False
    ).then(
        fn=end_reset, 
        inputs=None, 
        outputs=[is_reset], 
        queue=False
    )


    # Live updates
    def maybe_infer(is_reset, progress=gr.Progress(track_tqdm=True), *args):
        if is_reset:
            return gr.update(), gr.update(), gr.update(), gr.update()
        else:
            result_img, result_seed, result_prompt = infer_camera_edit(*args)
            # Show video button if we have both input and output
            show_button = args[0] is not None and result_img is not None
            return result_img, result_seed, result_prompt, gr.update(visible=show_button)

    control_inputs = [
        image, rotate_deg, move_forward,
        vertical_tilt, wideangle,
        seed, randomize_seed, true_guidance_scale, num_inference_steps, height, width, prev_output
    ]
    control_inputs_with_flag = [is_reset] + control_inputs

    for control in [rotate_deg, move_forward, vertical_tilt]:
        control.release(fn=maybe_infer, inputs=control_inputs_with_flag, outputs=outputs + [create_video_button])
    
    wideangle.input(fn=maybe_infer, inputs=control_inputs_with_flag, outputs=outputs + [create_video_button])
    
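    # Cache the latest manual result in the hidden prev_output image so later runs can chain from it.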
    run_event.then(lambda img, *_: img, inputs=[result], outputs=[prev_output])

demo.launch()