zhangjiewu committed
Commit dc178ef · 1 Parent(s): 3369cd1

init commit
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.png filter=lfs diff=lfs merge=lfs -text
app.py ADDED
@@ -0,0 +1,260 @@
+ import time
+ import gradio as gr
+ import torch as th
+ import torch
+ import numpy as np
+ import tempfile
+ from diffusers import AutoencoderKLWan
+ from diffusers.utils import export_to_video, load_image
+ from diffusers.schedulers import UniPCMultistepScheduler
+ from transformers import CLIPVisionModel
+ from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
+ from chronoedit_diffusers.transformer_chronoedit import ChronoEditTransformer3DModel
+ from PIL import Image
+ from huggingface_hub import hf_hub_download
+ from prompt_enhancer import enhance_prompt
+ from transformers import (
+     Qwen2_5_VLForConditionalGeneration,
+     AutoProcessor,
+ )
+
+
+ th.enable_grad(False)
+ th.backends.cuda.preferred_linalg_library(backend="magma")
+
+
+ start = time.time()
+
+ model_id = "nvidia/ChronoEdit-14B-Diffusers"
+ image_encoder = CLIPVisionModel.from_pretrained(
+     model_id,
+     subfolder="image_encoder",
+     torch_dtype=torch.float32
+ )
+ print("✓ Loaded image encoder")
+
+ vae = AutoencoderKLWan.from_pretrained(
+     model_id,
+     subfolder="vae",
+     torch_dtype=torch.bfloat16
+ )
+ print("✓ Loaded VAE")
+
+ transformer = ChronoEditTransformer3DModel.from_pretrained(
+     model_id,
+     subfolder="transformer",
+     torch_dtype=torch.bfloat16
+ )
+ print("✓ Loaded transformer")
+
+ pipe = ChronoEditPipeline.from_pretrained(
+     model_id,
+     image_encoder=image_encoder,
+     transformer=transformer,
+     vae=vae,
+     torch_dtype=torch.bfloat16
+ )
+ print("✓ Created pipeline")
+
+ lora_path = hf_hub_download(repo_id=model_id, filename="lora/chronoedit_distill_lora.safetensors")
+ # Load LoRA if specified
+ if lora_path:
+     print(f"Loading LoRA weights from {lora_path}...")
+     pipe.load_lora_weights(lora_path)
+
+     pipe.fuse_lora(lora_scale=1.0)
+     print("✓ Fused LoRA with scale 1.0")
+ # Setup scheduler
+ pipe.scheduler = UniPCMultistepScheduler.from_config(
+     pipe.scheduler.config,
+     flow_shift=2.0
+ )
+ print("✓ Configured scheduler (flow_shift=2.0)")
+
+ # Move to device
+ pipe.to("cuda:0")
+ print("✓ Models loaded and moved to cuda:0")
+
+ end = time.time()
+ print(f"Model loaded in {end - start:.2f}s.")
+
+ start = time.time()
+ prompt_enhancer_model = "Qwen/Qwen2.5-VL-7B-Instruct"
+ prompt_model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
+     prompt_enhancer_model,
+     torch_dtype=torch.bfloat16,
+     attn_implementation="flash_attention_2",
+     device_map="cuda:1",
+ )
+ processor = AutoProcessor.from_pretrained(prompt_enhancer_model)
+ end = time.time()
+ print(f"Prompt enhancer loaded in {end - start:.2f}s.")
+
+
+ def calculate_dimensions(image, mod_value):
+     """
+     Calculate output dimensions based on resolution settings.
+
+     Args:
+         image: PIL Image
+         mod_value: Modulo value for dimension alignment
+
+     Returns:
+         Tuple of (width, height)
+     """
+
+     # Target output area (720 x 1280)
+     target_area = 720 * 1280
+
+     # Calculate dimensions maintaining aspect ratio
+     aspect_ratio = image.height / image.width
+     calculated_height = round(np.sqrt(target_area * aspect_ratio)) // mod_value * mod_value
+     calculated_width = round(np.sqrt(target_area / aspect_ratio)) // mod_value * mod_value
+
+     return calculated_width, calculated_height
+
+
+ def run_inference(
+     image_path: str,
+     prompt: str,
+     enable_temporal_reasoning: bool,
+     num_inference_steps: int = 8,
+     guidance_scale: float = 1.0,
+     shift: float = 2.0,
+     num_temporal_reasoning_steps: int = 8,
+ ):
+     pipe.to("cuda:0")
+
+     # Rewriter (optional)
+     final_prompt = prompt
+
+     # Enhance prompt with CoT reasoning
+     cot_prompt = enhance_prompt(
+         image_path,
+         prompt,
+         prompt_model,
+         processor,
+     )
+
+     # Print enhanced CoT prompt
+     print("\n" + "=" * 80)
+     print("Enhanced CoT Prompt:")
+     print("=" * 80)
+     print(cot_prompt)
+     print("=" * 80 + "\n")
+     final_prompt = cot_prompt
+
+     # Inference
+     print(f"Loading input image: {image_path}")
+     image = load_image(image_path)
+     mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
+     width, height = calculate_dimensions(
+         image,
+         mod_value
+     )
+     print(f"Output dimensions: {width}x{height}")
+     image = image.resize((width, height))
+     num_frames = 29 if enable_temporal_reasoning else 5
+     with th.no_grad():
+         start = time.time()
+         output = pipe(
+             image=image,
+             prompt=final_prompt,
+             height=height,
+             width=width,
+             num_frames=num_frames,
+             num_inference_steps=num_inference_steps,
+             guidance_scale=guidance_scale,
+             enable_temporal_reasoning=enable_temporal_reasoning,
+             num_temporal_reasoning_steps=num_temporal_reasoning_steps,
+             offload_model=True,
+         ).frames[0]
+
+         end = time.time()
+
+     video_tmp = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False)
+     output_path_video = video_tmp.name
+     video_tmp.close()
+     image_tmp = tempfile.NamedTemporaryFile(suffix=".png", delete=False)
+     output_path_image = image_tmp.name
+     image_tmp.close()
+     export_to_video(output, output_path_video, fps=10)
+     Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8")).save(output_path_image)
+
+     log_text = (
+         f"Final prompt: {final_prompt}\n"
+         f"Guidance: {guidance_scale}, Shift: {shift}, Steps: {num_inference_steps}\n"
+         f"Inference: {end - start:.2f}s\n"
+     )
+     if enable_temporal_reasoning:
+         log_text += f"Temporal reasoning: {enable_temporal_reasoning}, Steps: {num_temporal_reasoning_steps}\n"
+     return output_path_image, output_path_video #, log_text
+
+
+ def build_ui() -> gr.Blocks:
+     with gr.Blocks(title="ChronoEdit", theme=gr.themes.Soft()) as demo:
+
+         gr.Markdown("""
+         # 🚀 ChronoEdit Demo
+         [[Project Page]](https://research.nvidia.com/labs/toronto-ai/chronoedit/) |
+         [[Code]](https://github.com/nv-tlabs/ChronoEdit) |
+         [[Technical Report]](https://arxiv.org/abs/2510.04290)
+         """)
+
+         with gr.Row():
+             image = gr.Image(type="filepath", label="Input Image")
+             output_image = gr.Image(label="Generated Image")
+         with gr.Row():
+             with gr.Column(scale=1):
+                 prompt = gr.Textbox(label="Prompt", lines=4, value="")
+                 enable_temporal_reasoning = gr.Checkbox(label="Enable temporal reasoning", value=False)
+                 run_btn = gr.Button("Start Generation", variant="primary")
+             with gr.Column(scale=1):
+                 output_video = gr.Video(label="Temporal Reasoning Visualization", visible=False)
+
+         # with gr.Row():
+         #     num_inference_steps = gr.Slider(minimum=4, maximum=75, step=1, value=50, label="Num Inference Steps")
+         #     guidance_scale = gr.Slider(minimum=1.0, maximum=10.0, step=0.5, value=1.0, label="Guidance Scale")
+         #     shift = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=5.0, label="Shift")
+         #     num_temporal_reasoning_steps = gr.Slider(minimum=0, maximum=50, step=1, value=50, label="Number of temporal reasoning steps")
+
+         # log_text = gr.Markdown("Logs will appear here.")
+
+         def _on_run(image_path, prompt, enable_temporal_reasoning):
+             image_out_path, video_out_path = run_inference(
+                 image_path=image_path,
+                 prompt=prompt,
+                 enable_temporal_reasoning=enable_temporal_reasoning,
+             )
+             video_update = gr.update(visible=enable_temporal_reasoning, value=(video_out_path if enable_temporal_reasoning else None))
+             return image_out_path, video_update
+
+         run_btn.click(
+             _on_run,
+             inputs=[image, prompt, enable_temporal_reasoning],
+             outputs=[output_image, output_video] #, log_text],
+         )
+
+         gr.Examples(
+             examples=[
+                 [
+                     "examples/1.png",
+                     "The user wants to change the scene so that the girl in the traditional-style painting, wearing her ornate floral robe and headdress, is now playing a guitar. Her graceful appearance remains unchanged\u2014smooth black hair tied neatly, soft facial features with a calm, focused expression\u2014but her pose shifts: both hands are engaged with the guitar. One hand rests on the neck of the instrument, fingers pressing the strings with delicate precision, while the other hand strums near the sound hole. The guitar is positioned naturally across her lap, blending with the elegance of her posture. The traditional painting style is preserved, but the addition of the guitar introduces a modern contrast, giving the scene a harmonious fusion of classical refinement and contemporary music.",
+                     False,
+                 ],
+                 [
+                     "examples/1.png",
+                     "The user wants to change the scene so that the girl in the traditional-style painting, wearing her ornate floral robe and headdress, is now playing a guitar. Her graceful appearance remains unchanged\u2014smooth black hair tied neatly, soft facial features with a calm, focused expression\u2014but her pose shifts: both hands are engaged with the guitar. One hand rests on the neck of the instrument, fingers pressing the strings with delicate precision, while the other hand strums near the sound hole. The guitar is positioned naturally across her lap, blending with the elegance of her posture. The traditional painting style is preserved, but the addition of the guitar introduces a modern contrast, giving the scene a harmonious fusion of classical refinement and contemporary music.",
+                     True,
+                 ],
+             ],
+             inputs=[image, prompt, enable_temporal_reasoning], outputs=[output_image, output_video], fn=_on_run, cache_examples=True
+         )
+
+     return demo
+
+
+ # CUDA_VISIBLE_DEVICES=0,1 PYTHONPATH=$(pwd) python scripts/app.py
+ if __name__ == "__main__":
+     demo = build_ui()
+     demo.launch(server_name="0.0.0.0", server_port=7869)
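For reference, the resolution logic in `calculate_dimensions` can be exercised on its own. The sketch below assumes `mod_value = 16` (spatial VAE factor 8 times spatial patch size 2); in `app.py` the value is read from the loaded pipeline, so treat 16 as an assumption rather than a guarantee.

```python
# Minimal sketch of the resolution alignment done in app.py's calculate_dimensions().
import math

def align_resolution(img_width: int, img_height: int, mod_value: int = 16,
                     target_area: int = 720 * 1280):
    # Keep the input aspect ratio, roughly match the target pixel area, and
    # round both sides down to a multiple of mod_value.
    aspect_ratio = img_height / img_width
    height = round(math.sqrt(target_area * aspect_ratio)) // mod_value * mod_value
    width = round(math.sqrt(target_area / aspect_ratio)) // mod_value * mod_value
    return width, height

# Example: a 1024x768 input maps to (1104, 816), both divisible by 16.
print(align_resolution(1024, 768))
```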
chronoedit_diffusers/pipeline_chronoedit.py ADDED
@@ -0,0 +1,764 @@
1
+ # SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import html
17
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
18
+
19
+ import PIL
20
+ import regex as re
21
+ import torch
22
+ from transformers import AutoTokenizer, CLIPImageProcessor, CLIPVisionModel, UMT5EncoderModel
23
+
24
+ from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback
25
+ from diffusers.image_processor import PipelineImageInput
26
+ from diffusers.loaders import WanLoraLoaderMixin
27
+ from diffusers.models import AutoencoderKLWan, WanTransformer3DModel
28
+ from diffusers.schedulers import FlowMatchEulerDiscreteScheduler
29
+ from diffusers.utils import is_ftfy_available, is_torch_xla_available, logging, replace_example_docstring
30
+ from diffusers.utils.torch_utils import randn_tensor
31
+ from diffusers.video_processor import VideoProcessor
32
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
33
+ from diffusers.pipelines.wan.pipeline_output import WanPipelineOutput
34
+ from chronoedit_diffusers.transformer_chronoedit import ChronoEditTransformer3DModel
35
+
36
+
37
+ if is_torch_xla_available():
38
+ import torch_xla.core.xla_model as xm
39
+
40
+ XLA_AVAILABLE = True
41
+ else:
42
+ XLA_AVAILABLE = False
43
+
44
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
45
+
46
+ if is_ftfy_available():
47
+ import ftfy
48
+
49
+ EXAMPLE_DOC_STRING = """
50
+ Examples:
51
+ ```python
52
+ >>> import torch
53
+ >>> import numpy as np
54
+ >>> from diffusers import AutoencoderKLWan, WanImageToVideoPipeline
55
+ >>> from diffusers.utils import export_to_video, load_image
56
+ >>> from transformers import CLIPVisionModel
57
+
58
+ >>> # Available models: Wan-AI/Wan2.1-I2V-14B-480P-Diffusers, Wan-AI/Wan2.1-I2V-14B-720P-Diffusers
59
+ >>> model_id = "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers"
60
+ >>> image_encoder = CLIPVisionModel.from_pretrained(
61
+ ... model_id, subfolder="image_encoder", torch_dtype=torch.float32
62
+ ... )
63
+ >>> vae = AutoencoderKLWan.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.float32)
64
+ >>> pipe = WanImageToVideoPipeline.from_pretrained(
65
+ ... model_id, vae=vae, image_encoder=image_encoder, torch_dtype=torch.bfloat16
66
+ ... )
67
+ >>> pipe.to("cuda")
68
+
69
+ >>> image = load_image(
70
+ ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/astronaut.jpg"
71
+ ... )
72
+ >>> max_area = 480 * 832
73
+ >>> aspect_ratio = image.height / image.width
74
+ >>> mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
75
+ >>> height = round(np.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
76
+ >>> width = round(np.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
77
+ >>> image = image.resize((width, height))
78
+ >>> prompt = (
79
+ ... "An astronaut hatching from an egg, on the surface of the moon, the darkness and depth of space realised in "
80
+ ... "the background. High quality, ultrarealistic detail and breath-taking movie-like camera shot."
81
+ ... )
82
+ >>> negative_prompt = "Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards"
83
+
84
+ >>> output = pipe(
85
+ ... image=image,
86
+ ... prompt=prompt,
87
+ ... negative_prompt=negative_prompt,
88
+ ... height=height,
89
+ ... width=width,
90
+ ... num_frames=81,
91
+ ... guidance_scale=5.0,
92
+ ... ).frames[0]
93
+ >>> export_to_video(output, "output.mp4", fps=16)
94
+ ```
95
+ """
96
+
97
+
98
+ def basic_clean(text):
99
+ text = ftfy.fix_text(text)
100
+ text = html.unescape(html.unescape(text))
101
+ return text.strip()
102
+
103
+
104
+ def whitespace_clean(text):
105
+ text = re.sub(r"\s+", " ", text)
106
+ text = text.strip()
107
+ return text
108
+
109
+
110
+ def prompt_clean(text):
111
+ text = whitespace_clean(basic_clean(text))
112
+ return text
113
+
114
+
115
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
116
+ def retrieve_latents(
117
+ encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
118
+ ):
119
+ if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
120
+ return encoder_output.latent_dist.sample(generator)
121
+ elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
122
+ return encoder_output.latent_dist.mode()
123
+ elif hasattr(encoder_output, "latents"):
124
+ return encoder_output.latents
125
+ else:
126
+ raise AttributeError("Could not access latents of provided encoder_output")
127
+
128
+
129
+ class ChronoEditPipeline(DiffusionPipeline, WanLoraLoaderMixin):
130
+ r"""
131
+ Pipeline for image editing and image-to-video generation with ChronoEdit (built on Wan).
132
+
133
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
134
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
135
+
136
+ Args:
137
+ tokenizer ([`T5Tokenizer`]):
138
+ Tokenizer from [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5Tokenizer),
139
+ specifically the [google/umt5-xxl](https://huggingface.co/google/umt5-xxl) variant.
140
+ text_encoder ([`T5EncoderModel`]):
141
+ [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel), specifically
142
+ the [google/umt5-xxl](https://huggingface.co/google/umt5-xxl) variant.
143
+ image_encoder ([`CLIPVisionModel`]):
144
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPVisionModel), specifically
145
+ the
146
+ [clip-vit-huge-patch14](https://github.com/mlfoundations/open_clip/blob/main/docs/PRETRAINED.md#vit-h14-xlm-roberta-large)
147
+ variant.
148
+ transformer ([`WanTransformer3DModel`]):
149
+ Conditional Transformer to denoise the input latents.
150
+ scheduler ([`UniPCMultistepScheduler`]):
151
+ A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
152
+ vae ([`AutoencoderKLWan`]):
153
+ Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations.
154
+ """
155
+
156
+ model_cpu_offload_seq = "text_encoder->image_encoder->transformer->vae"
157
+ _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
158
+
159
+ def __init__(
160
+ self,
161
+ tokenizer: AutoTokenizer,
162
+ text_encoder: UMT5EncoderModel,
163
+ image_encoder: CLIPVisionModel,
164
+ image_processor: CLIPImageProcessor,
165
+ transformer: ChronoEditTransformer3DModel,
166
+ vae: AutoencoderKLWan,
167
+ scheduler: FlowMatchEulerDiscreteScheduler,
168
+ ):
169
+ super().__init__()
170
+
171
+ self.register_modules(
172
+ vae=vae,
173
+ text_encoder=text_encoder,
174
+ tokenizer=tokenizer,
175
+ image_encoder=image_encoder,
176
+ transformer=transformer,
177
+ scheduler=scheduler,
178
+ image_processor=image_processor,
179
+ )
180
+
181
+ self.vae_scale_factor_temporal = 2 ** sum(self.vae.temperal_downsample) if getattr(self, "vae", None) else 4
182
+ self.vae_scale_factor_spatial = 2 ** len(self.vae.temperal_downsample) if getattr(self, "vae", None) else 8
183
+ self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial)
184
+ self.image_processor = image_processor
185
+
186
+ def _get_t5_prompt_embeds(
187
+ self,
188
+ prompt: Union[str, List[str]] = None,
189
+ num_videos_per_prompt: int = 1,
190
+ max_sequence_length: int = 512,
191
+ device: Optional[torch.device] = None,
192
+ dtype: Optional[torch.dtype] = None,
193
+ ):
194
+ device = device or self._execution_device
195
+ dtype = dtype or self.text_encoder.dtype
196
+
197
+ prompt = [prompt] if isinstance(prompt, str) else prompt
198
+ prompt = [prompt_clean(u) for u in prompt]
199
+ batch_size = len(prompt)
200
+
201
+ text_inputs = self.tokenizer(
202
+ prompt,
203
+ padding="max_length",
204
+ max_length=max_sequence_length,
205
+ truncation=True,
206
+ add_special_tokens=True,
207
+ return_attention_mask=True,
208
+ return_tensors="pt",
209
+ )
210
+ text_input_ids, mask = text_inputs.input_ids, text_inputs.attention_mask
211
+ seq_lens = mask.gt(0).sum(dim=1).long()
212
+
213
+ prompt_embeds = self.text_encoder(text_input_ids.to(device), mask.to(device)).last_hidden_state
214
+ prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
215
+ prompt_embeds = [u[:v] for u, v in zip(prompt_embeds, seq_lens)]
216
+ prompt_embeds = torch.stack(
217
+ [torch.cat([u, u.new_zeros(max_sequence_length - u.size(0), u.size(1))]) for u in prompt_embeds], dim=0
218
+ )
219
+
220
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
221
+ _, seq_len, _ = prompt_embeds.shape
222
+ prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1)
223
+ prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1)
224
+
225
+ return prompt_embeds
226
+
227
+ def encode_image(
228
+ self,
229
+ image: PipelineImageInput,
230
+ device: Optional[torch.device] = None,
231
+ ):
232
+ device = device or self._execution_device
233
+ image = self.image_processor(images=image, return_tensors="pt").to(device)
234
+ image_embeds = self.image_encoder(**image, output_hidden_states=True)
235
+ return image_embeds.hidden_states[-2]
236
+
237
+ # Copied from diffusers.pipelines.wan.pipeline_wan.WanPipeline.encode_prompt
238
+ def encode_prompt(
239
+ self,
240
+ prompt: Union[str, List[str]],
241
+ negative_prompt: Optional[Union[str, List[str]]] = None,
242
+ do_classifier_free_guidance: bool = True,
243
+ num_videos_per_prompt: int = 1,
244
+ prompt_embeds: Optional[torch.Tensor] = None,
245
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
246
+ max_sequence_length: int = 226,
247
+ device: Optional[torch.device] = None,
248
+ dtype: Optional[torch.dtype] = None,
249
+ ):
250
+ r"""
251
+ Encodes the prompt into text encoder hidden states.
252
+
253
+ Args:
254
+ prompt (`str` or `List[str]`, *optional*):
255
+ prompt to be encoded
256
+ negative_prompt (`str` or `List[str]`, *optional*):
257
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
258
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
259
+ less than `1`).
260
+ do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
261
+ Whether to use classifier free guidance or not.
262
+ num_videos_per_prompt (`int`, *optional*, defaults to 1):
263
+ Number of videos that should be generated per prompt. torch device to place the resulting embeddings on
264
+ prompt_embeds (`torch.Tensor`, *optional*):
265
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
266
+ provided, text embeddings will be generated from `prompt` input argument.
267
+ negative_prompt_embeds (`torch.Tensor`, *optional*):
268
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
269
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
270
+ argument.
271
+ device: (`torch.device`, *optional*):
272
+ torch device
273
+ dtype: (`torch.dtype`, *optional*):
274
+ torch dtype
275
+ """
276
+ device = device or self._execution_device
277
+
278
+ prompt = [prompt] if isinstance(prompt, str) else prompt
279
+ if prompt is not None:
280
+ batch_size = len(prompt)
281
+ else:
282
+ batch_size = prompt_embeds.shape[0]
283
+
284
+ if prompt_embeds is None:
285
+ prompt_embeds = self._get_t5_prompt_embeds(
286
+ prompt=prompt,
287
+ num_videos_per_prompt=num_videos_per_prompt,
288
+ max_sequence_length=max_sequence_length,
289
+ device=device,
290
+ dtype=dtype,
291
+ )
292
+
293
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
294
+ negative_prompt = negative_prompt or ""
295
+ negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
296
+
297
+ if prompt is not None and type(prompt) is not type(negative_prompt):
298
+ raise TypeError(
299
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
300
+ f" {type(prompt)}."
301
+ )
302
+ elif batch_size != len(negative_prompt):
303
+ raise ValueError(
304
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
305
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
306
+ " the batch size of `prompt`."
307
+ )
308
+
309
+ negative_prompt_embeds = self._get_t5_prompt_embeds(
310
+ prompt=negative_prompt,
311
+ num_videos_per_prompt=num_videos_per_prompt,
312
+ max_sequence_length=max_sequence_length,
313
+ device=device,
314
+ dtype=dtype,
315
+ )
316
+
317
+ return prompt_embeds, negative_prompt_embeds
318
+
319
+ def check_inputs(
320
+ self,
321
+ prompt,
322
+ negative_prompt,
323
+ image,
324
+ height,
325
+ width,
326
+ prompt_embeds=None,
327
+ negative_prompt_embeds=None,
328
+ image_embeds=None,
329
+ callback_on_step_end_tensor_inputs=None,
330
+ ):
331
+ if image is not None and image_embeds is not None:
332
+ raise ValueError(
333
+ f"Cannot forward both `image`: {image} and `image_embeds`: {image_embeds}. Please make sure to"
334
+ " only forward one of the two."
335
+ )
336
+ if image is None and image_embeds is None:
337
+ raise ValueError(
338
+ "Provide either `image` or `prompt_embeds`. Cannot leave both `image` and `image_embeds` undefined."
339
+ )
340
+ if image is not None and not isinstance(image, torch.Tensor) and not isinstance(image, PIL.Image.Image):
341
+ raise ValueError(f"`image` has to be of type `torch.Tensor` or `PIL.Image.Image` but is {type(image)}")
342
+ if height % 16 != 0 or width % 16 != 0:
343
+ raise ValueError(f"`height` and `width` have to be divisible by 16 but are {height} and {width}.")
344
+
345
+ if callback_on_step_end_tensor_inputs is not None and not all(
346
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
347
+ ):
348
+ raise ValueError(
349
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
350
+ )
351
+
352
+ if prompt is not None and prompt_embeds is not None:
353
+ raise ValueError(
354
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
355
+ " only forward one of the two."
356
+ )
357
+ elif negative_prompt is not None and negative_prompt_embeds is not None:
358
+ raise ValueError(
359
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to"
360
+ " only forward one of the two."
361
+ )
362
+ elif prompt is None and prompt_embeds is None:
363
+ raise ValueError(
364
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
365
+ )
366
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
367
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
368
+ elif negative_prompt is not None and (
369
+ not isinstance(negative_prompt, str) and not isinstance(negative_prompt, list)
370
+ ):
371
+ raise ValueError(f"`negative_prompt` has to be of type `str` or `list` but is {type(negative_prompt)}")
372
+
373
+ def prepare_latents(
374
+ self,
375
+ image: PipelineImageInput,
376
+ batch_size: int,
377
+ num_channels_latents: int = 16,
378
+ height: int = 480,
379
+ width: int = 832,
380
+ num_frames: int = 81,
381
+ dtype: Optional[torch.dtype] = None,
382
+ device: Optional[torch.device] = None,
383
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
384
+ latents: Optional[torch.Tensor] = None,
385
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
386
+ num_latent_frames = (num_frames - 1) // self.vae_scale_factor_temporal + 1
387
+ latent_height = height // self.vae_scale_factor_spatial
388
+ latent_width = width // self.vae_scale_factor_spatial
389
+
390
+ shape = (batch_size, num_channels_latents, num_latent_frames, latent_height, latent_width)
391
+ if isinstance(generator, list) and len(generator) != batch_size:
392
+ raise ValueError(
393
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
394
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
395
+ )
396
+
397
+ if latents is None:
398
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
399
+ else:
400
+ latents = latents.to(device=device, dtype=dtype)
401
+
402
+ image = image.unsqueeze(2)
403
+ video_condition = torch.cat(
404
+ [image, image.new_zeros(image.shape[0], image.shape[1], num_frames - 1, height, width)], dim=2
405
+ )
406
+ video_condition = video_condition.to(device=device, dtype=dtype)
407
+
408
+ latents_mean = (
409
+ torch.tensor(self.vae.config.latents_mean)
410
+ .view(1, self.vae.config.z_dim, 1, 1, 1)
411
+ .to(latents.device, latents.dtype)
412
+ )
413
+ latents_std = 1.0 / torch.tensor(self.vae.config.latents_std).view(1, self.vae.config.z_dim, 1, 1, 1).to(
414
+ latents.device, latents.dtype
415
+ )
416
+
417
+ if isinstance(generator, list):
418
+ latent_condition = [
419
+ retrieve_latents(self.vae.encode(video_condition), sample_mode="argmax") for _ in generator
420
+ ]
421
+ latent_condition = torch.cat(latent_condition)
422
+ else:
423
+ latent_condition = retrieve_latents(self.vae.encode(video_condition), sample_mode="argmax")
424
+ latent_condition = latent_condition.repeat(batch_size, 1, 1, 1, 1)
425
+
426
+ latent_condition = (latent_condition - latents_mean) * latents_std
427
+
428
+ mask_lat_size = torch.ones(batch_size, 1, num_frames, latent_height, latent_width)
429
+ mask_lat_size[:, :, list(range(1, num_frames))] = 0
430
+ first_frame_mask = mask_lat_size[:, :, 0:1]
431
+ first_frame_mask = torch.repeat_interleave(first_frame_mask, dim=2, repeats=self.vae_scale_factor_temporal)
432
+ mask_lat_size = torch.concat([first_frame_mask, mask_lat_size[:, :, 1:, :]], dim=2)
433
+ mask_lat_size = mask_lat_size.view(batch_size, -1, self.vae_scale_factor_temporal, latent_height, latent_width)
434
+ mask_lat_size = mask_lat_size.transpose(1, 2)
435
+ mask_lat_size = mask_lat_size.to(latent_condition.device)
436
+
437
+ return latents, torch.concat([mask_lat_size, latent_condition], dim=1)
438
+
439
+ @property
440
+ def guidance_scale(self):
441
+ return self._guidance_scale
442
+
443
+ @property
444
+ def do_classifier_free_guidance(self):
445
+ return self._guidance_scale > 1
446
+
447
+ @property
448
+ def num_timesteps(self):
449
+ return self._num_timesteps
450
+
451
+ @property
452
+ def current_timestep(self):
453
+ return self._current_timestep
454
+
455
+ @property
456
+ def interrupt(self):
457
+ return self._interrupt
458
+
459
+ @property
460
+ def attention_kwargs(self):
461
+ return self._attention_kwargs
462
+
463
+ @torch.no_grad()
464
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
465
+ def __call__(
466
+ self,
467
+ image: PipelineImageInput,
468
+ prompt: Union[str, List[str]] = None,
469
+ negative_prompt: Union[str, List[str]] = None,
470
+ height: int = 480,
471
+ width: int = 832,
472
+ num_frames: int = 81,
473
+ num_inference_steps: int = 50,
474
+ guidance_scale: float = 5.0,
475
+ num_videos_per_prompt: Optional[int] = 1,
476
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
477
+ latents: Optional[torch.Tensor] = None,
478
+ prompt_embeds: Optional[torch.Tensor] = None,
479
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
480
+ image_embeds: Optional[torch.Tensor] = None,
481
+ output_type: Optional[str] = "np",
482
+ return_dict: bool = True,
483
+ attention_kwargs: Optional[Dict[str, Any]] = None,
484
+ callback_on_step_end: Optional[
485
+ Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
486
+ ] = None,
487
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
488
+ max_sequence_length: int = 512,
489
+ enable_temporal_reasoning: bool = False,
490
+ num_temporal_reasoning_steps: int = 0,
491
+ offload_model: bool=False
492
+ ):
493
+ r"""
494
+ The call function to the pipeline for generation.
495
+
496
+ Args:
497
+ image (`PipelineImageInput`):
498
+ The input image to condition the generation on. Must be an image, a list of images or a `torch.Tensor`.
499
+ prompt (`str` or `List[str]`, *optional*):
500
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
501
+ instead.
502
+ negative_prompt (`str` or `List[str]`, *optional*):
503
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
504
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
505
+ less than `1`).
506
+ height (`int`, defaults to `480`):
507
+ The height of the generated video.
508
+ width (`int`, defaults to `832`):
509
+ The width of the generated video.
510
+ num_frames (`int`, defaults to `81`):
511
+ The number of frames in the generated video.
512
+ num_inference_steps (`int`, defaults to `50`):
513
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
514
+ expense of slower inference.
515
+ guidance_scale (`float`, defaults to `5.0`):
516
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
517
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
518
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
519
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
520
+ usually at the expense of lower image quality.
521
+ num_videos_per_prompt (`int`, *optional*, defaults to 1):
522
+ The number of images to generate per prompt.
523
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
524
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
525
+ generation deterministic.
526
+ latents (`torch.Tensor`, *optional*):
527
+ Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
528
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
529
+ tensor is generated by sampling using the supplied random `generator`.
530
+ prompt_embeds (`torch.Tensor`, *optional*):
531
+ Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
532
+ provided, text embeddings are generated from the `prompt` input argument.
533
+ negative_prompt_embeds (`torch.Tensor`, *optional*):
534
+ Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
535
+ provided, text embeddings are generated from the `negative_prompt` input argument.
536
+ image_embeds (`torch.Tensor`, *optional*):
537
+ Pre-generated image embeddings. Can be used to easily tweak image inputs (weighting). If not provided,
538
+ image embeddings are generated from the `image` input argument.
539
+ output_type (`str`, *optional*, defaults to `"pil"`):
540
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
541
+ return_dict (`bool`, *optional*, defaults to `True`):
542
+ Whether or not to return a [`WanPipelineOutput`] instead of a plain tuple.
543
+ attention_kwargs (`dict`, *optional*):
544
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
545
+ `self.processor` in
546
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
547
+ callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*):
548
+ A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of
549
+ each denoising step during the inference. with the following arguments: `callback_on_step_end(self:
550
+ DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a
551
+ list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
552
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
553
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
554
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
555
+ `._callback_tensor_inputs` attribute of your pipeline class.
556
+ max_sequence_length (`int`, *optional*, defaults to `512`):
557
+ The maximum sequence length of the prompt.
558
+ shift (`float`, *optional*, defaults to `5.0`):
559
+ The shift of the flow.
560
+ autocast_dtype (`torch.dtype`, *optional*, defaults to `torch.bfloat16`):
561
+ The dtype to use for the torch.amp.autocast.
562
+ Examples:
563
+
564
+ Returns:
565
+ [`~WanPipelineOutput`] or `tuple`:
566
+ If `return_dict` is `True`, [`WanPipelineOutput`] is returned, otherwise a `tuple` is returned where
567
+ the first element is a list with the generated video frames.
569
+ """
570
+
571
+ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
572
+ callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs
573
+
574
+ # 1. Check inputs. Raise error if not correct
575
+ self.check_inputs(
576
+ prompt,
577
+ negative_prompt,
578
+ image,
579
+ height,
580
+ width,
581
+ prompt_embeds,
582
+ negative_prompt_embeds,
583
+ image_embeds,
584
+ callback_on_step_end_tensor_inputs,
585
+ )
586
+
587
+ if num_frames % self.vae_scale_factor_temporal != 1:
588
+ logger.warning(
589
+ f"`num_frames - 1` has to be divisible by {self.vae_scale_factor_temporal}. Rounding to the nearest number."
590
+ )
591
+ num_frames = num_frames // self.vae_scale_factor_temporal * self.vae_scale_factor_temporal + 1
592
+ num_frames = max(num_frames, 1)
593
+
594
+ self._guidance_scale = guidance_scale
595
+ self._attention_kwargs = attention_kwargs
596
+ self._current_timestep = None
597
+ self._interrupt = False
598
+
599
+ device = self._execution_device
600
+
601
+ # 2. Define call parameters
602
+ if prompt is not None and isinstance(prompt, str):
603
+ batch_size = 1
604
+ elif prompt is not None and isinstance(prompt, list):
605
+ batch_size = len(prompt)
606
+ else:
607
+ batch_size = prompt_embeds.shape[0]
608
+
609
+ # 3. Encode input prompt
610
+ prompt_embeds, negative_prompt_embeds = self.encode_prompt(
611
+ prompt=prompt,
612
+ negative_prompt=negative_prompt,
613
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
614
+ num_videos_per_prompt=num_videos_per_prompt,
615
+ prompt_embeds=prompt_embeds,
616
+ negative_prompt_embeds=negative_prompt_embeds,
617
+ max_sequence_length=max_sequence_length,
618
+ device=device,
619
+ )
620
+ if offload_model:
621
+ self.text_encoder.cpu()
622
+ # Encode image embedding
623
+ transformer_dtype = self.transformer.dtype
624
+ prompt_embeds = prompt_embeds.to(transformer_dtype)
625
+ if negative_prompt_embeds is not None:
626
+ negative_prompt_embeds = negative_prompt_embeds.to(transformer_dtype)
627
+
628
+ if image_embeds is None:
629
+ image_embeds = self.encode_image(image, device)
630
+ image_embeds = image_embeds.repeat(batch_size, 1, 1)
631
+ image_embeds = image_embeds.to(transformer_dtype)
632
+
633
+ if offload_model:
634
+ self.image_encoder.cpu()
635
+
636
+
637
+ # 4. Prepare timesteps
638
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
639
+ timesteps = self.scheduler.timesteps
640
+
641
+ # 5. Prepare latent variables
642
+ num_channels_latents = self.vae.config.z_dim
643
+ image = self.video_processor.preprocess(image, height=height, width=width).to(device, dtype=torch.bfloat16)
644
+ latents, condition = self.prepare_latents(
645
+ image,
646
+ batch_size * num_videos_per_prompt,
647
+ num_channels_latents,
648
+ height,
649
+ width,
650
+ num_frames,
651
+ torch.bfloat16,
652
+ device,
653
+ generator,
654
+ latents,
655
+ )
656
+
657
+ # 6. Denoising loop
658
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
659
+ self._num_timesteps = len(timesteps)
660
+
661
+ if offload_model:
662
+ torch.cuda.empty_cache()
663
+
664
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
665
+ for i, t in enumerate(timesteps):
666
+
667
+ if self.interrupt:
668
+ continue
669
+
670
+ if enable_temporal_reasoning and i == num_temporal_reasoning_steps:
671
+ latents = latents[:, :, [0, -1]]
672
+ condition = condition[:, :, [0, -1]]
673
+
674
+ for j in range(len(self.scheduler.model_outputs)):
675
+ if self.scheduler.model_outputs[j] is not None:
676
+ if latents.shape[-3] != self.scheduler.model_outputs[j].shape[-3]:
677
+ self.scheduler.model_outputs[j] = self.scheduler.model_outputs[j][:,:,[0, -1]]
678
+ if self.scheduler.last_sample is not None:
679
+ self.scheduler.last_sample = self.scheduler.last_sample[:, :, [0, -1]]
680
+
681
+ self._current_timestep = t
682
+ latent_model_input = torch.cat([latents, condition], dim=1).to(transformer_dtype)
683
+ timestep = t.expand(latents.shape[0])
684
+
685
+ noise_pred = self.transformer(
686
+ hidden_states=latent_model_input,
687
+ timestep=timestep,
688
+ encoder_hidden_states=prompt_embeds,
689
+ encoder_hidden_states_image=image_embeds,
690
+ attention_kwargs=attention_kwargs,
691
+ return_dict=False,
692
+ )[0]
693
+
694
+ if offload_model:
695
+ torch.cuda.empty_cache()
696
+
697
+ if self.do_classifier_free_guidance:
698
+ noise_uncond = self.transformer(
699
+ hidden_states=latent_model_input,
700
+ timestep=timestep,
701
+ encoder_hidden_states=negative_prompt_embeds,
702
+ encoder_hidden_states_image=image_embeds,
703
+ attention_kwargs=attention_kwargs,
704
+ return_dict=False,
705
+ )[0]
706
+ noise_pred = noise_uncond + guidance_scale * (noise_pred - noise_uncond)
707
+
708
+ # compute the previous noisy sample x_t -> x_t-1
709
+ latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
710
+
711
+ if callback_on_step_end is not None:
712
+ callback_kwargs = {}
713
+ for k in callback_on_step_end_tensor_inputs:
714
+ callback_kwargs[k] = locals()[k]
715
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
716
+
717
+ latents = callback_outputs.pop("latents", latents)
718
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
719
+ negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
720
+
721
+ # call the callback, if provided
722
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
723
+ progress_bar.update()
724
+
725
+ if XLA_AVAILABLE:
726
+ xm.mark_step()
727
+
728
+ if offload_model:
729
+ self.transformer.cpu()
730
+ torch.cuda.empty_cache()
731
+
732
+ self._current_timestep = None
733
+
734
+ if not output_type == "latent":
735
+ latents = latents.to(self.vae.dtype)
736
+ latents_mean = (
737
+ torch.tensor(self.vae.config.latents_mean)
738
+ .view(1, self.vae.config.z_dim, 1, 1, 1)
739
+ .to(latents.device, latents.dtype)
740
+ )
741
+ latents_std = 1.0 / torch.tensor(self.vae.config.latents_std).view(1, self.vae.config.z_dim, 1, 1, 1).to(
742
+ latents.device, latents.dtype
743
+ )
744
+ latents = latents / latents_std + latents_mean
745
+
746
+ if enable_temporal_reasoning and num_temporal_reasoning_steps > 0:
747
+ video_edit = self.vae.decode(latents[:, :, [0, -1]], return_dict=False)[0]
748
+ video_reason = self.vae.decode(latents[:, :, :-1], return_dict=False)[0]
749
+ video = torch.cat([video_reason, video_edit[:, :, 1:]], dim=2)
750
+ else:
751
+ video = self.vae.decode(latents, return_dict=False)[0]
752
+
753
+ # video = self.vae.decode(latents, return_dict=False)[0]
754
+ video = self.video_processor.postprocess_video(video, output_type=output_type)
755
+ else:
756
+ video = latents
757
+
758
+ # Offload all models
759
+ self.maybe_free_model_hooks()
760
+
761
+ if not return_dict:
762
+ return (video,)
763
+
764
+ return WanPipelineOutput(frames=video)
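The frame counts chosen in `app.py` (5 for a plain edit, 29 with temporal reasoning) line up with the latent-frame arithmetic in `prepare_latents`. A minimal sketch, assuming the temporal VAE factor of 4 that the pipeline falls back to (the actual value is derived from `vae.temperal_downsample` at runtime):

```python
# Sketch of the latent time-axis bookkeeping used by ChronoEditPipeline.prepare_latents.
def num_latent_frames(num_frames: int, vae_scale_factor_temporal: int = 4) -> int:
    # Same formula as prepare_latents: one latent frame for the first frame,
    # then one per vae_scale_factor_temporal input frames.
    return (num_frames - 1) // vae_scale_factor_temporal + 1

# 5 input frames -> 2 latent frames (plain edit), 29 -> 8 (temporal reasoning),
# matching the "num_frames must be 2 or temporal_skip_len" assertion in the
# transformer's rotary position embedding (temporal_skip_len defaults to 8).
for frames in (5, 29):
    print(frames, "->", num_latent_frames(frames))
```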
chronoedit_diffusers/transformer_chronoedit.py ADDED
@@ -0,0 +1,476 @@
1
+ # SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import math
17
+ from typing import Any, Dict, Optional, Tuple, Union
18
+
19
+ import torch
20
+ import torch.nn as nn
21
+ import torch.nn.functional as F
22
+
23
+ from diffusers.configuration_utils import ConfigMixin, register_to_config
24
+ from diffusers.loaders import FromOriginalModelMixin, PeftAdapterMixin
25
+ from diffusers.utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers
26
+ from diffusers.models.attention import FeedForward
27
+ from diffusers.models.attention_processor import Attention
28
+ from diffusers.models.cache_utils import CacheMixin
29
+ from diffusers.models.embeddings import PixArtAlphaTextProjection, TimestepEmbedding, Timesteps, get_1d_rotary_pos_embed
30
+ from diffusers.models.modeling_outputs import Transformer2DModelOutput
31
+ from diffusers.models.modeling_utils import ModelMixin
32
+ from diffusers.models.normalization import FP32LayerNorm
33
+
34
+
35
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
36
+
37
+
38
+ class ChronoEditAttnProcessor2_0:
39
+ def __init__(self):
40
+ if not hasattr(F, "scaled_dot_product_attention"):
41
+ raise ImportError("ChronoEditAttnProcessor2_0 requires PyTorch 2.0. To use it, please upgrade PyTorch to 2.0.")
42
+
43
+ def __call__(
44
+ self,
45
+ attn: Attention,
46
+ hidden_states: torch.Tensor,
47
+ encoder_hidden_states: Optional[torch.Tensor] = None,
48
+ attention_mask: Optional[torch.Tensor] = None,
49
+ rotary_emb: Optional[torch.Tensor] = None,
50
+ ) -> torch.Tensor:
51
+ encoder_hidden_states_img = None
52
+ if attn.add_k_proj is not None:
53
+ encoder_hidden_states_img = encoder_hidden_states[:, :257]
54
+ encoder_hidden_states = encoder_hidden_states[:, 257:]
55
+ if encoder_hidden_states is None:
56
+ encoder_hidden_states = hidden_states
57
+
58
+ query = attn.to_q(hidden_states)
59
+ key = attn.to_k(encoder_hidden_states)
60
+ value = attn.to_v(encoder_hidden_states)
61
+
62
+ if attn.norm_q is not None:
63
+ query = attn.norm_q(query)
64
+ if attn.norm_k is not None:
65
+ key = attn.norm_k(key)
66
+
67
+ query = query.unflatten(2, (attn.heads, -1)).transpose(1, 2)
68
+ key = key.unflatten(2, (attn.heads, -1)).transpose(1, 2)
69
+ value = value.unflatten(2, (attn.heads, -1)).transpose(1, 2)
70
+
71
+ if rotary_emb is not None:
72
+
73
+ def apply_rotary_emb(hidden_states: torch.Tensor, freqs: torch.Tensor):
74
+ x_rotated = torch.view_as_complex(hidden_states.to(torch.float64).unflatten(3, (-1, 2)))
75
+ x_out = torch.view_as_real(x_rotated * freqs).flatten(3, 4)
76
+ return x_out.type_as(hidden_states)
77
+
78
+ query = apply_rotary_emb(query, rotary_emb)
79
+ key = apply_rotary_emb(key, rotary_emb)
80
+
81
+ # I2V task
82
+ hidden_states_img = None
83
+ if encoder_hidden_states_img is not None:
84
+ key_img = attn.add_k_proj(encoder_hidden_states_img)
85
+ key_img = attn.norm_added_k(key_img)
86
+ value_img = attn.add_v_proj(encoder_hidden_states_img)
87
+
88
+ key_img = key_img.unflatten(2, (attn.heads, -1)).transpose(1, 2)
89
+ value_img = value_img.unflatten(2, (attn.heads, -1)).transpose(1, 2)
90
+
91
+ hidden_states_img = F.scaled_dot_product_attention(
92
+ query, key_img, value_img, attn_mask=None, dropout_p=0.0, is_causal=False
93
+ )
94
+ hidden_states_img = hidden_states_img.transpose(1, 2).flatten(2, 3)
95
+ hidden_states_img = hidden_states_img.type_as(query)
96
+
97
+ hidden_states = F.scaled_dot_product_attention(
98
+ query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
99
+ )
100
+ hidden_states = hidden_states.transpose(1, 2).flatten(2, 3)
101
+ hidden_states = hidden_states.type_as(query)
102
+
103
+ if hidden_states_img is not None:
104
+ hidden_states = hidden_states + hidden_states_img
105
+
106
+ hidden_states = attn.to_out[0](hidden_states)
107
+ hidden_states = attn.to_out[1](hidden_states)
108
+ return hidden_states
109
+
110
+
111
+ class ChronoEditImageEmbedding(torch.nn.Module):
112
+ def __init__(self, in_features: int, out_features: int):
113
+ super().__init__()
114
+
115
+ self.norm1 = FP32LayerNorm(in_features)
116
+ self.ff = FeedForward(in_features, out_features, mult=1, activation_fn="gelu")
117
+ self.norm2 = FP32LayerNorm(out_features)
118
+
119
+ def forward(self, encoder_hidden_states_image: torch.Tensor) -> torch.Tensor:
120
+ hidden_states = self.norm1(encoder_hidden_states_image)
121
+ hidden_states = self.ff(hidden_states)
122
+ hidden_states = self.norm2(hidden_states)
123
+ return hidden_states
124
+
125
+
126
+ class ChronoEditTimeTextImageEmbedding(nn.Module):
127
+ def __init__(
128
+ self,
129
+ dim: int,
130
+ time_freq_dim: int,
131
+ time_proj_dim: int,
132
+ text_embed_dim: int,
133
+ image_embed_dim: Optional[int] = None,
134
+ ):
135
+ super().__init__()
136
+
137
+ self.timesteps_proj = Timesteps(num_channels=time_freq_dim, flip_sin_to_cos=True, downscale_freq_shift=0)
138
+ self.time_embedder = TimestepEmbedding(in_channels=time_freq_dim, time_embed_dim=dim)
139
+ self.act_fn = nn.SiLU()
140
+ self.time_proj = nn.Linear(dim, time_proj_dim)
141
+ self.text_embedder = PixArtAlphaTextProjection(text_embed_dim, dim, act_fn="gelu_tanh")
142
+
143
+ self.image_embedder = None
144
+ if image_embed_dim is not None:
145
+ self.image_embedder = ChronoEditImageEmbedding(image_embed_dim, dim)
146
+
147
+ def forward(
148
+ self,
149
+ timestep: torch.Tensor,
150
+ encoder_hidden_states: torch.Tensor,
151
+ encoder_hidden_states_image: Optional[torch.Tensor] = None,
152
+ ):
153
+ timestep = self.timesteps_proj(timestep)
154
+
155
+ time_embedder_dtype = next(iter(self.time_embedder.parameters())).dtype
156
+ if timestep.dtype != time_embedder_dtype and time_embedder_dtype != torch.int8:
157
+ timestep = timestep.to(time_embedder_dtype)
158
+ temb = self.time_embedder(timestep).type_as(encoder_hidden_states)
159
+ timestep_proj = self.time_proj(self.act_fn(temb))
160
+
161
+ encoder_hidden_states = self.text_embedder(encoder_hidden_states)
162
+ if encoder_hidden_states_image is not None:
163
+ encoder_hidden_states_image = self.image_embedder(encoder_hidden_states_image)
164
+
165
+ return temb, timestep_proj, encoder_hidden_states, encoder_hidden_states_image
166
+
167
+
168
+ class ChronoEditRotaryPosEmbed(nn.Module):
169
+ def __init__(
170
+ self, attention_head_dim: int, patch_size: Tuple[int, int, int], max_seq_len: int, theta: float = 10000.0, temporal_skip_len: int = 8
171
+ ):
172
+ super().__init__()
173
+
174
+ self.attention_head_dim = attention_head_dim
175
+ self.patch_size = patch_size
176
+ self.max_seq_len = max_seq_len
177
+ self.temporal_skip_len = temporal_skip_len
178
+
179
+ h_dim = w_dim = 2 * (attention_head_dim // 6)
180
+ t_dim = attention_head_dim - h_dim - w_dim
181
+
182
+ freqs = []
183
+ for dim in [t_dim, h_dim, w_dim]:
184
+ freq = get_1d_rotary_pos_embed(
185
+ dim, max_seq_len, theta, use_real=False, repeat_interleave_real=False, freqs_dtype=torch.float64
186
+ )
187
+ freqs.append(freq)
188
+ self.freqs = torch.cat(freqs, dim=1)
189
+
190
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
191
+ batch_size, num_channels, num_frames, height, width = hidden_states.shape
192
+ p_t, p_h, p_w = self.patch_size
193
+ ppf, pph, ppw = num_frames // p_t, height // p_h, width // p_w
194
+
195
+ self.freqs = self.freqs.to(hidden_states.device)
196
+ freqs = self.freqs.split_with_sizes(
197
+ [
198
+ self.attention_head_dim // 2 - 2 * (self.attention_head_dim // 6),
199
+ self.attention_head_dim // 6,
200
+ self.attention_head_dim // 6,
201
+ ],
202
+ dim=1,
203
+ )
204
+
205
+ assert num_frames == 2 or num_frames == self.temporal_skip_len, f"num_frames must be 2 or {self.temporal_skip_len}, but got {num_frames}"
206
+ if num_frames == 2:
207
+ freqs_f = freqs[0][:self.temporal_skip_len][[0, -1]].view(ppf, 1, 1, -1).expand(ppf, pph, ppw, -1)
208
+ else:
209
+ freqs_f = freqs[0][:ppf].view(ppf, 1, 1, -1).expand(ppf, pph, ppw, -1)
210
+ freqs_h = freqs[1][:pph].view(1, pph, 1, -1).expand(ppf, pph, ppw, -1)
211
+ freqs_w = freqs[2][:ppw].view(1, 1, ppw, -1).expand(ppf, pph, ppw, -1)
212
+ freqs = torch.cat([freqs_f, freqs_h, freqs_w], dim=-1).reshape(1, 1, ppf * pph * ppw, -1)
213
+ return freqs
214
+
215
+
216
+ class ChronoEditTransformerBlock(nn.Module):
217
+ def __init__(
218
+ self,
219
+ dim: int,
220
+ ffn_dim: int,
221
+ num_heads: int,
222
+ qk_norm: str = "rms_norm_across_heads",
223
+ cross_attn_norm: bool = False,
224
+ eps: float = 1e-6,
225
+ added_kv_proj_dim: Optional[int] = None,
226
+ ):
227
+ super().__init__()
228
+
229
+ # 1. Self-attention
230
+ self.norm1 = FP32LayerNorm(dim, eps, elementwise_affine=False)
231
+ self.attn1 = Attention(
232
+ query_dim=dim,
233
+ heads=num_heads,
234
+ kv_heads=num_heads,
235
+ dim_head=dim // num_heads,
236
+ qk_norm=qk_norm,
237
+ eps=eps,
238
+ bias=True,
239
+ cross_attention_dim=None,
240
+ out_bias=True,
241
+ processor=ChronoEditAttnProcessor2_0(),
242
+ )
243
+
244
+ # 2. Cross-attention
245
+ self.attn2 = Attention(
246
+ query_dim=dim,
247
+ heads=num_heads,
248
+ kv_heads=num_heads,
249
+ dim_head=dim // num_heads,
250
+ qk_norm=qk_norm,
251
+ eps=eps,
252
+ bias=True,
253
+ cross_attention_dim=None,
254
+ out_bias=True,
255
+ added_kv_proj_dim=added_kv_proj_dim,
256
+ added_proj_bias=True,
257
+ processor=ChronoEditAttnProcessor2_0(),
258
+ )
259
+ self.norm2 = FP32LayerNorm(dim, eps, elementwise_affine=True) if cross_attn_norm else nn.Identity()
260
+
261
+ # 3. Feed-forward
262
+ self.ffn = FeedForward(dim, inner_dim=ffn_dim, activation_fn="gelu-approximate")
263
+ self.norm3 = FP32LayerNorm(dim, eps, elementwise_affine=False)
264
+
265
+ self.scale_shift_table = nn.Parameter(torch.randn(1, 6, dim) / dim**0.5)
266
+
267
+ def forward(
268
+ self,
269
+ hidden_states: torch.Tensor,
270
+ encoder_hidden_states: torch.Tensor,
271
+ temb: torch.Tensor,
272
+ rotary_emb: torch.Tensor,
273
+ ) -> torch.Tensor:
274
+ shift_msa, scale_msa, gate_msa, c_shift_msa, c_scale_msa, c_gate_msa = (
275
+ self.scale_shift_table + temb.float()
276
+ ).chunk(6, dim=1)
277
+
278
+ # 1. Self-attention
279
+ norm_hidden_states = (self.norm1(hidden_states.float()) * (1 + scale_msa) + shift_msa).type_as(hidden_states)
280
+ attn_output = self.attn1(hidden_states=norm_hidden_states, rotary_emb=rotary_emb)
281
+ hidden_states = (hidden_states.float() + attn_output * gate_msa).type_as(hidden_states)
282
+
283
+ # 2. Cross-attention
284
+ norm_hidden_states = self.norm2(hidden_states.float()).type_as(hidden_states)
285
+ attn_output = self.attn2(hidden_states=norm_hidden_states, encoder_hidden_states=encoder_hidden_states)
286
+ hidden_states = hidden_states + attn_output
287
+
288
+ # 3. Feed-forward
289
+ norm_hidden_states = (self.norm3(hidden_states.float()) * (1 + c_scale_msa) + c_shift_msa).type_as(
290
+ hidden_states
291
+ )
292
+ ff_output = self.ffn(norm_hidden_states)
293
+ hidden_states = (hidden_states.float() + ff_output.float() * c_gate_msa).type_as(hidden_states)
294
+
295
+ return hidden_states
296
+
297
+
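Conditioning enters each block through a learned (1, 6, dim) table that is added to the projected timestep embedding and chunked into shift/scale/gate triples for the self-attention and feed-forward paths. A shape-only sketch of that chunking (random values, not taken from the model):

import torch

dim, batch = 5120, 2   # 5120 = 40 heads x 128 head dim in the default config
scale_shift_table = torch.randn(1, 6, dim) / dim**0.5
timestep_proj = torch.randn(batch, 6, dim)   # shape produced by `timestep_proj.unflatten(1, (6, -1))` in the model

shift_msa, scale_msa, gate_msa, c_shift, c_scale, c_gate = (
    scale_shift_table + timestep_proj.float()
).chunk(6, dim=1)
print(shift_msa.shape)   # torch.Size([2, 1, 5120]); each piece broadcasts over the token axis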
298
+ class ChronoEditTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin, CacheMixin):
299
+ r"""
300
+ A Transformer model for video-like data used in the ChronoEdit model.
301
+
302
+ Args:
303
+ patch_size (`Tuple[int]`, defaults to `(1, 2, 2)`):
304
+ 3D patch dimensions for video embedding (t_patch, h_patch, w_patch).
305
+ num_attention_heads (`int`, defaults to `40`):
306
+ The number of attention heads to use.
307
+ attention_head_dim (`int`, defaults to `128`):
308
+ The number of channels in each head.
309
+ in_channels (`int`, defaults to `16`):
310
+ The number of channels in the input.
311
+ out_channels (`int`, defaults to `16`):
312
+ The number of channels in the output.
313
+ text_dim (`int`, defaults to `4096`):
314
+ Input dimension for text embeddings.
315
+ freq_dim (`int`, defaults to `256`):
316
+ Dimension for sinusoidal time embeddings.
317
+ ffn_dim (`int`, defaults to `13824`):
318
+ Intermediate dimension in feed-forward network.
319
+ num_layers (`int`, defaults to `40`):
320
+ The number of layers of transformer blocks to use.
321
+ rope_max_seq_len (`int`, defaults to `1024`):
+ Maximum sequence length used to precompute the rotary position embedding frequencies.
+ rope_temporal_skip_len (`int`, defaults to `8`):
+ Temporal window assumed by the rotary embedding; when only two latent frames are passed, the first and last positions of this window are used.
323
+ cross_attn_norm (`bool`, defaults to `True`):
324
+ Enable cross-attention normalization.
325
+ qk_norm (`str`, *optional*, defaults to `"rms_norm_across_heads"`):
326
+ Type of query/key normalization to apply in the attention layers.
327
+ eps (`float`, defaults to `1e-6`):
328
+ Epsilon value for normalization layers.
329
+ image_dim (`int`, *optional*, defaults to `None`):
330
+ Input dimension for the CLIP image embeddings (1280 for the I2V model); when set, the image features are projected and prepended to the text context.
331
+ added_kv_proj_dim (`int`, *optional*, defaults to `None`):
332
+ The number of channels to use for the added key and value projections. If `None`, no projection is used.
333
+ """
334
+
335
+ _supports_gradient_checkpointing = True
336
+ _skip_layerwise_casting_patterns = ["patch_embedding", "condition_embedder", "norm"]
337
+ _no_split_modules = ["ChronoEditTransformerBlock"]
338
+ _keep_in_fp32_modules = ["time_embedder", "scale_shift_table", "norm1", "norm2", "norm3"]
339
+ _keys_to_ignore_on_load_unexpected = ["norm_added_q"]
340
+
341
+ @register_to_config
342
+ def __init__(
343
+ self,
344
+ patch_size: Tuple[int] = (1, 2, 2),
345
+ num_attention_heads: int = 40,
346
+ attention_head_dim: int = 128,
347
+ in_channels: int = 16,
348
+ out_channels: int = 16,
349
+ text_dim: int = 4096,
350
+ freq_dim: int = 256,
351
+ ffn_dim: int = 13824,
352
+ num_layers: int = 40,
353
+ cross_attn_norm: bool = True,
354
+ qk_norm: Optional[str] = "rms_norm_across_heads",
355
+ eps: float = 1e-6,
356
+ image_dim: Optional[int] = None,
357
+ added_kv_proj_dim: Optional[int] = None,
358
+ rope_max_seq_len: int = 1024,
359
+ rope_temporal_skip_len: int = 8,
360
+ ) -> None:
361
+ super().__init__()
362
+
363
+ inner_dim = num_attention_heads * attention_head_dim
364
+ out_channels = out_channels or in_channels
365
+
366
+ # 1. Patch & position embedding
367
+ self.rope = ChronoEditRotaryPosEmbed(attention_head_dim, patch_size, rope_max_seq_len, temporal_skip_len=rope_temporal_skip_len)
368
+ self.patch_embedding = nn.Conv3d(in_channels, inner_dim, kernel_size=patch_size, stride=patch_size)
369
+
370
+ # 2. Condition embeddings
371
+ # image_embedding_dim=1280 for I2V model
372
+ self.condition_embedder = ChronoEditTimeTextImageEmbedding(
373
+ dim=inner_dim,
374
+ time_freq_dim=freq_dim,
375
+ time_proj_dim=inner_dim * 6,
376
+ text_embed_dim=text_dim,
377
+ image_embed_dim=image_dim,
378
+ )
379
+
380
+ # 3. Transformer blocks
381
+ self.blocks = nn.ModuleList(
382
+ [
383
+ ChronoEditTransformerBlock(
384
+ inner_dim, ffn_dim, num_attention_heads, qk_norm, cross_attn_norm, eps, added_kv_proj_dim
385
+ )
386
+ for _ in range(num_layers)
387
+ ]
388
+ )
389
+
390
+ # 4. Output norm & projection
391
+ self.norm_out = FP32LayerNorm(inner_dim, eps, elementwise_affine=False)
392
+ self.proj_out = nn.Linear(inner_dim, out_channels * math.prod(patch_size))
393
+ self.scale_shift_table = nn.Parameter(torch.randn(1, 2, inner_dim) / inner_dim**0.5)
394
+
395
+ self.gradient_checkpointing = False
396
+
397
+ def forward(
398
+ self,
399
+ hidden_states: torch.Tensor,
400
+ timestep: torch.LongTensor,
401
+ encoder_hidden_states: torch.Tensor,
402
+ encoder_hidden_states_image: Optional[torch.Tensor] = None,
403
+ return_dict: bool = True,
404
+ attention_kwargs: Optional[Dict[str, Any]] = None,
405
+ ) -> Union[torch.Tensor, Dict[str, torch.Tensor]]:
406
+ if attention_kwargs is not None:
407
+ attention_kwargs = attention_kwargs.copy()
408
+ lora_scale = attention_kwargs.pop("scale", 1.0)
409
+ else:
410
+ lora_scale = 1.0
411
+
412
+ if USE_PEFT_BACKEND:
413
+ # weight the lora layers by setting `lora_scale` for each PEFT layer
414
+ scale_lora_layers(self, lora_scale)
415
+ else:
416
+ if attention_kwargs is not None and attention_kwargs.get("scale", None) is not None:
417
+ logger.warning(
418
+ "Passing `scale` via `attention_kwargs` when not using the PEFT backend is ineffective."
419
+ )
420
+
421
+ batch_size, num_channels, num_frames, height, width = hidden_states.shape
422
+ p_t, p_h, p_w = self.config.patch_size
423
+ post_patch_num_frames = num_frames // p_t
424
+ post_patch_height = height // p_h
425
+ post_patch_width = width // p_w
426
+
427
+ rotary_emb = self.rope(hidden_states)
428
+
429
+ hidden_states = self.patch_embedding(hidden_states)
430
+ hidden_states = hidden_states.flatten(2).transpose(1, 2)
431
+
432
+ temb, timestep_proj, encoder_hidden_states, encoder_hidden_states_image = self.condition_embedder(
433
+ timestep, encoder_hidden_states, encoder_hidden_states_image
434
+ )
435
+ timestep_proj = timestep_proj.unflatten(1, (6, -1))
436
+
437
+ if encoder_hidden_states_image is not None:
438
+ encoder_hidden_states = torch.concat([encoder_hidden_states_image, encoder_hidden_states], dim=1)
439
+
440
+ # 4. Transformer blocks
441
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
442
+ for block in self.blocks:
443
+ hidden_states = self._gradient_checkpointing_func(
444
+ block, hidden_states, encoder_hidden_states, timestep_proj, rotary_emb
445
+ )
446
+ else:
447
+ for block in self.blocks:
448
+ hidden_states = block(hidden_states, encoder_hidden_states, timestep_proj, rotary_emb)
449
+
450
+ # 5. Output norm, projection & unpatchify
451
+ shift, scale = (self.scale_shift_table + temb.unsqueeze(1)).chunk(2, dim=1)
452
+
453
+ # Move the shift and scale tensors to the same device as hidden_states.
454
+ # When using multi-GPU inference via accelerate these will be on the
455
+ # first device rather than the last device, which hidden_states ends up
456
+ # on.
457
+ shift = shift.to(hidden_states.device)
458
+ scale = scale.to(hidden_states.device)
459
+
460
+ hidden_states = (self.norm_out(hidden_states.float()) * (1 + scale) + shift).type_as(hidden_states)
461
+ hidden_states = self.proj_out(hidden_states)
462
+
463
+ hidden_states = hidden_states.reshape(
464
+ batch_size, post_patch_num_frames, post_patch_height, post_patch_width, p_t, p_h, p_w, -1
465
+ )
466
+ hidden_states = hidden_states.permute(0, 7, 1, 4, 2, 5, 3, 6)
467
+ output = hidden_states.flatten(6, 7).flatten(4, 5).flatten(2, 3)
468
+
469
+ if USE_PEFT_BACKEND:
470
+ # remove `lora_scale` from each PEFT layer
471
+ unscale_lora_layers(self, lora_scale)
472
+
473
+ if not return_dict:
474
+ return (output,)
475
+
476
+ return Transformer2DModelOutput(sample=output)
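The reshape/permute/flatten sequence at the end of forward() is the inverse of the Conv3d patch embedding: it folds each token's out_channels * prod(patch_size) values back into a (batch, channels, frames, height, width) latent. A standalone sketch with toy sizes (not part of the committed file):

import torch

batch, out_channels = 1, 16
p_t, p_h, p_w = 1, 2, 2                  # default patch_size
frames, height, width = 2, 8, 8          # latent sizes divisible by the patch dims
ppf, pph, ppw = frames // p_t, height // p_h, width // p_w

tokens = torch.randn(batch, ppf * pph * ppw, out_channels * p_t * p_h * p_w)  # shape of the proj_out output
x = tokens.reshape(batch, ppf, pph, ppw, p_t, p_h, p_w, -1)
x = x.permute(0, 7, 1, 4, 2, 5, 3, 6)
latent = x.flatten(6, 7).flatten(4, 5).flatten(2, 3)
print(latent.shape)                      # torch.Size([1, 16, 2, 8, 8])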
prompt_enhancer.py ADDED
@@ -0,0 +1,289 @@
1
+ # SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import argparse
17
+ import torch
18
+ from PIL import Image
19
+ from transformers import (
20
+ Qwen2_5_VLForConditionalGeneration,
21
+ AutoProcessor,
22
+ Qwen3VLMoeForConditionalGeneration,
23
+ )
24
+ from qwen_vl_utils import process_vision_info
25
+
26
+
27
+ def parse_args():
28
+ parser = argparse.ArgumentParser(
29
+ description="Enhance a prompt with CoT reasoning given an input image and prompt"
30
+ )
31
+ parser.add_argument(
32
+ "--input-image",
33
+ type=str,
34
+ default="./assets/images/input.jpg",
35
+ help="Path to the input image (default: ./assets/images/input.jpg)"
36
+ )
37
+ parser.add_argument(
38
+ "--input-prompt",
39
+ type=str,
40
+ required=True,
41
+ help="Input prompt to enhance with CoT reasoning"
42
+ )
43
+ parser.add_argument(
44
+ "--model",
45
+ type=str,
46
+ default="Qwen/Qwen3-VL-30B-A3B-Instruct",
47
+ choices=[
48
+ "Qwen/Qwen2.5-VL-7B-Instruct",
49
+ "Qwen/Qwen3-VL-30B-A3B-Instruct",
50
+ ],
51
+ help="Model to use for prompt enhancement"
52
+ )
53
+ parser.add_argument(
54
+ "--max-resolution",
55
+ type=int,
56
+ default=1080,
57
+ help="Maximum resolution for the shortest edge (default: 1080)"
58
+ )
59
+ return parser.parse_args()
60
+
61
+
62
+ def pick_attn_implementation(prefer_flash: bool = True) -> str:
63
+ """
64
+ Decide the best attn_implementation based on environment.
65
+
66
+ Returns one of: "flash_attention_2", "sdpa", "eager".
67
+ """
68
+ # Try FlashAttention v2 first (needs SM80+ and the wheel to import)
69
+ if prefer_flash:
70
+ try:
71
+ import flash_attn # noqa: F401
72
+ if torch.cuda.is_available():
73
+ major, minor = torch.cuda.get_device_capability()
74
+ # FlashAttn requires Ampere (SM80) or newer
75
+ if (major, minor) >= (8, 0):
76
+ return "flash_attention_2"
77
+ except Exception:
78
+ pass
79
+ try:
80
+ # torch >= 2.0 always exposes F.scaled_dot_product_attention, so SDPA is safe to use
+ if hasattr(torch.nn.functional, "scaled_dot_product_attention"):
81
+ return "sdpa"
82
+ except Exception:
83
+ pass
84
+
85
+ # Fallback: eager (always works, slower)
86
+ return "eager"
87
+
+
+ def load_model(model_name):
88
+ """Load the vision-language model and processor."""
89
+ print(f"Loading model: {model_name}")
90
+
91
+ attn_impl = pick_attn_implementation(prefer_flash=True)
92
+
93
+ if model_name == "Qwen/Qwen2.5-VL-7B-Instruct":
94
+ model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
95
+ model_name,
96
+ dtype=torch.bfloat16,
97
+ attn_implementation=attn_impl,
98
+ device_map="auto",
99
+ )
100
+ processor = AutoProcessor.from_pretrained(model_name)
101
+
102
+ elif model_name == "Qwen/Qwen3-VL-30B-A3B-Instruct":
103
+ model = Qwen3VLMoeForConditionalGeneration.from_pretrained(
104
+ model_name,
105
+ dtype=torch.bfloat16,
106
+ attn_implementation=attn_impl,
107
+ device_map="auto"
108
+ )
109
+ processor = AutoProcessor.from_pretrained(model_name)
110
+
111
+ else:
112
+ raise ValueError(f"Unsupported model: {model_name}")
113
+
114
+ return model, processor
115
+
116
+
117
+ def resize_if_needed(image, max_resolution=1080):
118
+ """Resize image so that the shortest edge is at most max_resolution pixels."""
119
+ width, height = image.size
120
+ if min(width, height) > max_resolution:
121
+ scaling_factor = max_resolution / float(min(width, height))
122
+ new_size = (int(width * scaling_factor), int(height * scaling_factor))
123
+ print(f"Resizing image from {image.size} to {new_size}")
124
+ return image.resize(new_size, Image.LANCZOS)
125
+ return image
126
+
127
+
128
+ def _run_model_inference(messages, model, processor):
129
+ """
130
+ Helper function to run model inference.
131
+
132
+ Args:
133
+ messages: Chat messages for the model
134
+ model: The loaded VL model
135
+ processor: The model's processor
136
+
137
+ Returns:
138
+ str: Generated text
139
+ """
140
+ if isinstance(model, Qwen2_5_VLForConditionalGeneration):
141
+ text = processor.apply_chat_template(
142
+ messages, tokenize=False, add_generation_prompt=True
143
+ )
144
+ images, videos = process_vision_info(messages)
145
+ inputs = processor(
146
+ text=[text],
147
+ images=images,
148
+ videos=videos,
149
+ padding=True,
150
+ return_tensors="pt",
151
+ )
152
+ inputs = inputs.to(model.device).to(model.dtype)
153
+ generated_ids = model.generate(**inputs, max_new_tokens=512)
154
+
155
+ elif isinstance(model, Qwen3VLMoeForConditionalGeneration):
156
+ inputs = processor.apply_chat_template(
157
+ messages,
158
+ tokenize=True,
159
+ add_generation_prompt=True,
160
+ return_dict=True,
161
+ return_tensors="pt"
162
+ )
163
+ inputs = inputs.to(model.device).to(model.dtype)
164
+ generated_ids = model.generate(**inputs, max_new_tokens=512)
165
+
166
+ else:
167
+ raise ValueError("Unsupported model type")
168
+
169
+ # Decode the generated text
170
+ generated_ids_trimmed = [
171
+ out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
172
+ ]
173
+ output_text = processor.batch_decode(
174
+ generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
175
+ )
176
+
177
+ return output_text[0]
178
+
179
+
180
+ def enhance_prompt(input_image_path, input_prompt, model, processor, max_resolution=1080):
181
+ """
182
+ Enhance a prompt with Chain-of-Thought reasoning given an input image and prompt.
183
+
184
+ Args:
185
+ input_image_path: Path to the input image
186
+ input_prompt: The input editing instruction prompt
187
+ model: The loaded VL model
188
+ processor: The model's processor
189
+ max_resolution: Maximum resolution for image resizing
190
+
191
+ Returns:
192
+ str: Enhanced CoT prompt
193
+ """
194
+ # Load and resize image
195
+ print(f"Loading image: {input_image_path}")
196
+ input_image = Image.open(input_image_path).convert("RGB")
197
+ input_image = resize_if_needed(input_image, max_resolution)
198
+
199
+ cot_prompt = f"""You are a professional edit instruction rewriter and prompt engineer. Your task is to generate a precise, concise, and visually achievable chain-of-thought reasoning based on the user-provided instruction and the image to be edited.
200
+
201
+ You have the following information:
202
+ 1. The user provides an image (the original image to be edited)
203
+ 2. question text: {input_prompt}
204
+
205
+ Your task is NOT to output the final answer or the edited image. Instead, you must:
206
+ - Generate a "thinking" or chain-of-thought process that explains how you reason about the editing task.
207
+ - First identify the task type, then provide reasoning/analysis that leads to how the image should be edited.
208
+ - Always describe pose and appearance in detail.
209
+ - Match the original visual style or genre (anime, CG art, cinematic, poster). If not explicit, choose a stylistically appropriate one based on the image.
210
+ - Incorporate motion and camera direction when relevant (e.g., walking, turning, dolly in/out, pan), implying natural human/character motion and interactions.
211
+ - Maintain quoted phrases or titles exactly (e.g., character names, series names). Do not translate or alter the original language of text.
212
+
213
+ ## Task Type Handling Rules:
214
+
215
+ **1. Standard Editing Tasks (e.g., Add, Delete, Replace, Action Change):**
216
+ - For replacement tasks, specify what to replace and key visual features of the new element.
217
+ - For text editing tasks, specify text position, color, and layout concisely.
218
+ - If the user wants to "extract" something, this means they want to remove the background and only keep the specified object isolated. We should add "while removing the background" to the reasoning.
219
+ - Explicitly note what must stay unchanged: appearances (hairstyle, clothing, expression, skin tone/race, age), posture, pose, visual style/genre, spatial layout, and shot composition (e.g., medium shot, close-up, side view).
220
+
221
+ **2. Character Consistency Editing Tasks (e.g., Scenario Change):**
222
+ - For tasks that place an object/character (e.g., human, robot, animal) in a completely new scenario, preserve the object's core identity (appearance, materials, key features) but adapt its pose, interaction, and context to fit naturally in the new environment.
223
+ - Reason about how the object should interact with the new scenario (e.g., pose changes, hand positions, orientation, facial direction).
224
+ - The background and context should transform completely to match the new scenario while maintaining visual coherence.
225
+ - Describe both what stays the same (core appearance) and what must change (pose, interaction, setting) to make the scene look realistic and natural.
226
+
227
+ The length of outputs should be **around 80 - 100 words** to fully describe the transformation. Always start with "The user wants to ..."
228
+
229
+ Example Output 1 (Standard Editing Task):
230
+ The user wants to make the knight kneel on his right knee while keeping the rest of the pose intact.
231
+ The knight should lower his stance so his right leg bends to the ground in a kneeling position, with the left leg bent upright to support balance.
232
+ The shield with the NVIDIA logo should still be held up firmly in his left hand, angled forward in a defensive posture, while the right hand continues gripping the weapon.
233
+ The armor reflections, proportions, and medieval style should remain consistent, emphasizing a powerful and respectful kneeling stance.
234
+
235
+ Example Output 2 (Character Consistency Editing Task):
236
+ The user wants to change the image by modifying the scene so that the woman is drinking coffee in a cozy coffee shop.
237
+ The elegant anime-style woman keeps her same graceful expression, long flowing dark hair adorned with golden ornaments, and detailed traditional outfit with red and gold floral patterns.
238
+ She is now seated at a wooden café table, holding a steaming cup of coffee near her lips with one hand, while soft sunlight filters through the window, highlighting her refined features.
239
+ The background transforms into a warmly lit café interior with subtle reflections, bookshelves, and gentle ambience, maintaining the delicate, painterly aesthetic.
240
+ """
241
+
242
+ # Create messages for CoT generation
243
+ messages = [
244
+ {
245
+ "role": "system",
246
+ "content": [
247
+ {"type": "text", "text": cot_prompt},
248
+ ],
249
+ },
250
+ {
251
+ "role": "user",
252
+ "content": [
253
+ {"type": "image", "image": input_image},
254
+ ],
255
+ }
256
+ ]
257
+
258
+ # Generate CoT reasoning
259
+ print("Generating Chain-of-Thought enhanced prompt...")
260
+ cot_prompt_output = _run_model_inference(messages, model, processor)
261
+
262
+ return cot_prompt_output
263
+
264
+
265
+ def main():
266
+ args = parse_args()
267
+
268
+ # Load model
269
+ model, processor = load_model(args.model)
270
+
271
+ # Enhance prompt with CoT reasoning
272
+ cot_prompt = enhance_prompt(
273
+ args.input_image,
274
+ args.input_prompt,
275
+ model,
276
+ processor,
277
+ args.max_resolution
278
+ )
279
+
280
+ # Print enhanced CoT prompt
281
+ print("\n" + "="*80)
282
+ print("Enhanced CoT Prompt:")
283
+ print("="*80)
284
+ print(cot_prompt)
285
+ print("="*80 + "\n")
286
+
287
+
288
+ if __name__ == "__main__":
289
+ main()
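Besides the CLI entry point above, the enhancer can be driven programmatically. A minimal usage sketch (the image path and instruction are placeholders; the model name is one of the two supported choices):

from prompt_enhancer import load_model, enhance_prompt

model, processor = load_model("Qwen/Qwen2.5-VL-7B-Instruct")
enhanced = enhance_prompt(
    input_image_path="./assets/images/input.jpg",             # placeholder path
    input_prompt="Make the knight kneel on his right knee.",  # placeholder instruction
    model=model,
    processor=processor,
    max_resolution=1080,
)
print(enhanced)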
requirements.txt ADDED
@@ -0,0 +1,26 @@
1
+ torch==2.7.1
2
+ torchvision==0.22.1
3
+
4
+ einops==0.8.1
5
+ typing-extensions==4.14.1
6
+
7
+ diffusers==0.35.2
8
+ peft==0.17.1
9
+ accelerate==1.8.1
10
+ transformers==4.57.1
11
+ sentencepiece==0.2.0
12
+ tokenizers==0.22
13
+ xfuser==0.4.4
14
+ regex==2024.11.6
15
+ ftfy==6.3.1
16
+ numpy==1.26.4
17
+ pillow==11.1.0
18
+ qwen-vl-utils==0.0.14
19
+ gradio==5.49.1
20
+
21
+ # Progress bars
22
+ tqdm==4.67.1
23
+
24
+ # Video export utilities (if using diffusers export_to_video)
25
+ imageio==2.37.0
26
+ imageio-ffmpeg==0.6.0