Serefor committed on
Commit d6cfa67 · verified · 1 Parent(s): e1afb2d

Update app.py

Files changed (1)
  1. app.py +84 -468
app.py CHANGED
@@ -11,476 +11,92 @@ import tempfile
11
  from PIL import Image
12
  from huggingface_hub import hf_hub_download
13
  import shutil
14
-
15
- from inference import (
16
- create_ltx_video_pipeline,
17
- create_latent_upsampler,
18
- load_image_to_tensor_with_resize_and_crop,
19
- seed_everething,
20
- get_device,
21
- calculate_padding,
22
- load_media_file
23
- )
24
- from ltx_video.pipelines.pipeline_ltx_video import ConditioningItem, LTXMultiScalePipeline, LTXVideoPipeline
25
- from ltx_video.utils.skip_layer_strategy import SkipLayerStrategy
26
-
27
- config_file_path = "configs/ltxv-13b-0.9.7-distilled.yaml"
28
- with open(config_file_path, "r") as file:
29
- PIPELINE_CONFIG_YAML = yaml.safe_load(file)
30
-
31
- LTX_REPO = "Lightricks/LTX-Video"
32
- MAX_IMAGE_SIZE = PIPELINE_CONFIG_YAML.get("max_resolution", 1280)
33
- MAX_NUM_FRAMES = 257
34
-
35
- FPS = 30.0
36
-
37
- # --- Global variables for loaded models ---
38
- pipeline_instance = None
39
- latent_upsampler_instance = None
40
- models_dir = "downloaded_models_gradio_cpu_init"
41
- Path(models_dir).mkdir(parents=True, exist_ok=True)
42
-
43
- print("Downloading models (if not present)...")
44
- distilled_model_actual_path = hf_hub_download(
45
- repo_id=LTX_REPO,
46
- filename=PIPELINE_CONFIG_YAML["checkpoint_path"],
47
- local_dir=models_dir,
48
- local_dir_use_symlinks=False
49
- )
50
- PIPELINE_CONFIG_YAML["checkpoint_path"] = distilled_model_actual_path
51
- print(f"Distilled model path: {distilled_model_actual_path}")
52
-
53
- SPATIAL_UPSCALER_FILENAME = PIPELINE_CONFIG_YAML["spatial_upscaler_model_path"]
54
- spatial_upscaler_actual_path = hf_hub_download(
55
- repo_id=LTX_REPO,
56
- filename=SPATIAL_UPSCALER_FILENAME,
57
- local_dir=models_dir,
58
- local_dir_use_symlinks=False
59
- )
60
- PIPELINE_CONFIG_YAML["spatial_upscaler_model_path"] = spatial_upscaler_actual_path
61
- print(f"Spatial upscaler model path: {spatial_upscaler_actual_path}")
62
-
63
- print("Creating LTX Video pipeline on CPU...")
64
- pipeline_instance = create_ltx_video_pipeline(
65
- ckpt_path=PIPELINE_CONFIG_YAML["checkpoint_path"],
66
- precision=PIPELINE_CONFIG_YAML["precision"],
67
- text_encoder_model_name_or_path=PIPELINE_CONFIG_YAML["text_encoder_model_name_or_path"],
68
- sampler=PIPELINE_CONFIG_YAML["sampler"],
69
- device="cpu",
70
- enhance_prompt=False,
71
- prompt_enhancer_image_caption_model_name_or_path=PIPELINE_CONFIG_YAML["prompt_enhancer_image_caption_model_name_or_path"],
72
- prompt_enhancer_llm_model_name_or_path=PIPELINE_CONFIG_YAML["prompt_enhancer_llm_model_name_or_path"],
73
- )
74
- print("LTX Video pipeline created on CPU.")
75
-
76
- if PIPELINE_CONFIG_YAML.get("spatial_upscaler_model_path"):
77
- print("Creating latent upsampler on CPU...")
78
- latent_upsampler_instance = create_latent_upsampler(
79
- PIPELINE_CONFIG_YAML["spatial_upscaler_model_path"],
80
- device="cpu"
81
- )
82
- print("Latent upsampler created on CPU.")
83
-
84
- target_inference_device = "cuda"
85
- print(f"Target inference device: {target_inference_device}")
86
- pipeline_instance.to(target_inference_device)
87
- if latent_upsampler_instance:
88
- latent_upsampler_instance.to(target_inference_device)
89
-
90
-
91
- # --- Helper function for dimension calculation ---
92
- MIN_DIM_SLIDER = 256 # As defined in the sliders minimum attribute
93
- TARGET_FIXED_SIDE = 768 # Desired fixed side length as per requirement
94
-
95
- def calculate_new_dimensions(orig_w, orig_h):
96
- """
97
- Calculates new dimensions for height and width sliders based on original media dimensions.
98
- Ensures one side is TARGET_FIXED_SIDE, the other is scaled proportionally,
99
- both are multiples of 32, and within [MIN_DIM_SLIDER, MAX_IMAGE_SIZE].
100
- """
101
- if orig_w == 0 or orig_h == 0:
102
- # Default to TARGET_FIXED_SIDE square if original dimensions are invalid
103
- return int(TARGET_FIXED_SIDE), int(TARGET_FIXED_SIDE)
104
-
105
- if orig_w >= orig_h: # Landscape or square
106
- new_h = TARGET_FIXED_SIDE
107
- aspect_ratio = orig_w / orig_h
108
- new_w_ideal = new_h * aspect_ratio
109
-
110
- # Round to nearest multiple of 32
111
- new_w = round(new_w_ideal / 32) * 32
112
-
113
- # Clamp to [MIN_DIM_SLIDER, MAX_IMAGE_SIZE]
114
- new_w = max(MIN_DIM_SLIDER, min(new_w, MAX_IMAGE_SIZE))
115
- # Ensure new_h is also clamped (TARGET_FIXED_SIDE should be within these bounds if configured correctly)
116
- new_h = max(MIN_DIM_SLIDER, min(new_h, MAX_IMAGE_SIZE))
117
- else: # Portrait
118
- new_w = TARGET_FIXED_SIDE
119
- aspect_ratio = orig_h / orig_w # Use H/W ratio for portrait scaling
120
- new_h_ideal = new_w * aspect_ratio
121
-
122
- # Round to nearest multiple of 32
123
- new_h = round(new_h_ideal / 32) * 32
124
-
125
- # Clamp to [MIN_DIM_SLIDER, MAX_IMAGE_SIZE]
126
- new_h = max(MIN_DIM_SLIDER, min(new_h, MAX_IMAGE_SIZE))
127
- # Ensure new_w is also clamped
128
- new_w = max(MIN_DIM_SLIDER, min(new_w, MAX_IMAGE_SIZE))
129
-
130
- return int(new_h), int(new_w)
131
-
132
- def get_duration(prompt, negative_prompt, input_image_filepath, input_video_filepath,
133
- height_ui, width_ui, mode,
134
- duration_ui, # Removed ui_steps
135
- ui_frames_to_use,
136
- seed_ui, randomize_seed, ui_guidance_scale, improve_texture_flag,
137
- progress):
138
- if duration_ui > 7:
139
- return 75
140
- else:
141
- return 60
142
-
143
- @spaces.GPU(duration=get_duration)
144
- def generate(prompt, negative_prompt, input_image_filepath, input_video_filepath,
145
- height_ui, width_ui, mode,
146
- duration_ui,
147
- ui_frames_to_use,
148
- seed_ui, randomize_seed, ui_guidance_scale, improve_texture_flag,
149
- progress=gr.Progress(track_tqdm=True)):
150
-
151
- if randomize_seed:
152
- seed_ui = random.randint(0, 2**32 - 1)
153
- seed_everething(int(seed_ui))
154
-
155
- target_frames_ideal = duration_ui * FPS
156
- target_frames_rounded = round(target_frames_ideal)
157
- if target_frames_rounded < 1:
158
- target_frames_rounded = 1
159
-
160
- n_val = round((float(target_frames_rounded) - 1.0) / 8.0)
161
- actual_num_frames = int(n_val * 8 + 1)
162
-
163
- actual_num_frames = max(9, actual_num_frames)
164
- actual_num_frames = min(MAX_NUM_FRAMES, actual_num_frames)
165
-
166
- actual_height = int(height_ui)
167
- actual_width = int(width_ui)
168
-
169
- height_padded = ((actual_height - 1) // 32 + 1) * 32
170
- width_padded = ((actual_width - 1) // 32 + 1) * 32
171
- num_frames_padded = ((actual_num_frames - 2) // 8 + 1) * 8 + 1
172
- if num_frames_padded != actual_num_frames:
173
- print(f"Warning: actual_num_frames ({actual_num_frames}) and num_frames_padded ({num_frames_padded}) differ. Using num_frames_padded for pipeline.")
174
-
175
- padding_values = calculate_padding(actual_height, actual_width, height_padded, width_padded)
176
-
177
- call_kwargs = {
178
- "prompt": prompt,
179
- "negative_prompt": negative_prompt,
180
- "height": height_padded,
181
- "width": width_padded,
182
- "num_frames": num_frames_padded,
183
- "frame_rate": int(FPS),
184
- "generator": torch.Generator(device=target_inference_device).manual_seed(int(seed_ui)),
185
- "output_type": "pt",
186
- "conditioning_items": None,
187
- "media_items": None,
188
- "decode_timestep": PIPELINE_CONFIG_YAML["decode_timestep"],
189
- "decode_noise_scale": PIPELINE_CONFIG_YAML["decode_noise_scale"],
190
- "stochastic_sampling": PIPELINE_CONFIG_YAML["stochastic_sampling"],
191
- "image_cond_noise_scale": 0.15,
192
- "is_video": True,
193
- "vae_per_channel_normalize": True,
194
- "mixed_precision": (PIPELINE_CONFIG_YAML["precision"] == "mixed_precision"),
195
- "offload_to_cpu": False,
196
- "enhance_prompt": False,
197
- }
198
-
199
- stg_mode_str = PIPELINE_CONFIG_YAML.get("stg_mode", "attention_values")
200
- if stg_mode_str.lower() in ["stg_av", "attention_values"]:
201
- call_kwargs["skip_layer_strategy"] = SkipLayerStrategy.AttentionValues
202
- elif stg_mode_str.lower() in ["stg_as", "attention_skip"]:
203
- call_kwargs["skip_layer_strategy"] = SkipLayerStrategy.AttentionSkip
204
- elif stg_mode_str.lower() in ["stg_r", "residual"]:
205
- call_kwargs["skip_layer_strategy"] = SkipLayerStrategy.Residual
206
- elif stg_mode_str.lower() in ["stg_t", "transformer_block"]:
207
- call_kwargs["skip_layer_strategy"] = SkipLayerStrategy.TransformerBlock
208
- else:
209
- raise ValueError(f"Invalid stg_mode: {stg_mode_str}")
210
-
211
- if mode == "image-to-video" and input_image_filepath:
212
- try:
213
- media_tensor = load_image_to_tensor_with_resize_and_crop(
214
- input_image_filepath, actual_height, actual_width
215
- )
216
- media_tensor = torch.nn.functional.pad(media_tensor, padding_values)
217
- call_kwargs["conditioning_items"] = [ConditioningItem(media_tensor.to(target_inference_device), 0, 1.0)]
218
- except Exception as e:
219
- print(f"Error loading image {input_image_filepath}: {e}")
220
- raise gr.Error(f"Could not load image: {e}")
221
- elif mode == "video-to-video" and input_video_filepath:
222
- try:
223
- call_kwargs["media_items"] = load_media_file(
224
- media_path=input_video_filepath,
225
- height=actual_height,
226
- width=actual_width,
227
- max_frames=int(ui_frames_to_use),
228
- padding=padding_values
229
- ).to(target_inference_device)
230
- except Exception as e:
231
- print(f"Error loading video {input_video_filepath}: {e}")
232
- raise gr.Error(f"Could not load video: {e}")
233
-
234
- print(f"Moving models to {target_inference_device} for inference (if not already there)...")
235
-
236
- active_latent_upsampler = None
237
- if improve_texture_flag and latent_upsampler_instance:
238
- active_latent_upsampler = latent_upsampler_instance
239
-
240
- result_images_tensor = None
241
- if improve_texture_flag:
242
- if not active_latent_upsampler:
243
- raise gr.Error("Spatial upscaler model not loaded or improve_texture not selected, cannot use multi-scale.")
244
-
245
- multi_scale_pipeline_obj = LTXMultiScalePipeline(pipeline_instance, active_latent_upsampler)
246
-
247
- first_pass_args = PIPELINE_CONFIG_YAML.get("first_pass", {}).copy()
248
- first_pass_args["guidance_scale"] = float(ui_guidance_scale) # UI overrides YAML
249
- # num_inference_steps will be derived from len(timesteps) in the pipeline
250
- first_pass_args.pop("num_inference_steps", None)
251
-
252
-
253
- second_pass_args = PIPELINE_CONFIG_YAML.get("second_pass", {}).copy()
254
- second_pass_args["guidance_scale"] = float(ui_guidance_scale) # UI overrides YAML
255
- # num_inference_steps will be derived from len(timesteps) in the pipeline
256
- second_pass_args.pop("num_inference_steps", None)
257
-
258
- multi_scale_call_kwargs = call_kwargs.copy()
259
- multi_scale_call_kwargs.update({
260
- "downscale_factor": PIPELINE_CONFIG_YAML["downscale_factor"],
261
- "first_pass": first_pass_args,
262
- "second_pass": second_pass_args,
263
- })
264
-
265
- print(f"Calling multi-scale pipeline (eff. HxW: {actual_height}x{actual_width}, Frames: {actual_num_frames} -> Padded: {num_frames_padded}) on {target_inference_device}")
266
- result_images_tensor = multi_scale_pipeline_obj(**multi_scale_call_kwargs).images
267
- else:
268
- single_pass_call_kwargs = call_kwargs.copy()
269
- first_pass_config_from_yaml = PIPELINE_CONFIG_YAML.get("first_pass", {})
270
-
271
- single_pass_call_kwargs["timesteps"] = first_pass_config_from_yaml.get("timesteps")
272
- single_pass_call_kwargs["guidance_scale"] = float(ui_guidance_scale) # UI overrides YAML
273
- single_pass_call_kwargs["stg_scale"] = first_pass_config_from_yaml.get("stg_scale")
274
- single_pass_call_kwargs["rescaling_scale"] = first_pass_config_from_yaml.get("rescaling_scale")
275
- single_pass_call_kwargs["skip_block_list"] = first_pass_config_from_yaml.get("skip_block_list")
276
-
277
- # Remove keys that might conflict or are not used in single pass / handled by above
278
- single_pass_call_kwargs.pop("num_inference_steps", None)
279
- single_pass_call_kwargs.pop("first_pass", None)
280
- single_pass_call_kwargs.pop("second_pass", None)
281
- single_pass_call_kwargs.pop("downscale_factor", None)
282
-
283
- print(f"Calling base pipeline (padded HxW: {height_padded}x{width_padded}, Frames: {actual_num_frames} -> Padded: {num_frames_padded}) on {target_inference_device}")
284
- result_images_tensor = pipeline_instance(**single_pass_call_kwargs).images
285
-
286
- if result_images_tensor is None:
287
- raise gr.Error("Generation failed.")
288
-
289
- pad_left, pad_right, pad_top, pad_bottom = padding_values
290
- slice_h_end = -pad_bottom if pad_bottom > 0 else None
291
- slice_w_end = -pad_right if pad_right > 0 else None
292
-
293
- result_images_tensor = result_images_tensor[
294
- :, :, :actual_num_frames, pad_top:slice_h_end, pad_left:slice_w_end
295
- ]
296
-
297
- video_np = result_images_tensor[0].permute(1, 2, 3, 0).cpu().float().numpy()
298
-
299
- video_np = np.clip(video_np, 0, 1)
300
- video_np = (video_np * 255).astype(np.uint8)
301
-
302
- temp_dir = tempfile.mkdtemp()
303
- timestamp = random.randint(10000,99999)
304
- output_video_path = os.path.join(temp_dir, f"output_{timestamp}.mp4")
305
-
306
- try:
307
- with imageio.get_writer(output_video_path, fps=call_kwargs["frame_rate"], macro_block_size=1) as video_writer:
308
- for frame_idx in range(video_np.shape[0]):
309
- progress(frame_idx / video_np.shape[0], desc="Saving video")
310
- video_writer.append_data(video_np[frame_idx])
311
- except Exception as e:
312
- print(f"Error saving video with macro_block_size=1: {e}")
313
- try:
314
- with imageio.get_writer(output_video_path, fps=call_kwargs["frame_rate"], format='FFMPEG', codec='libx264', quality=8) as video_writer:
315
- for frame_idx in range(video_np.shape[0]):
316
- progress(frame_idx / video_np.shape[0], desc="Saving video (fallback ffmpeg)")
317
- video_writer.append_data(video_np[frame_idx])
318
- except Exception as e2:
319
- print(f"Fallback video saving error: {e2}")
320
- raise gr.Error(f"Failed to save video: {e2}")
321
-
322
- return output_video_path, seed_ui
323
-
324
- def update_task_image():
325
- return "image-to-video"
326
-
327
- def update_task_text():
328
- return "text-to-video"
329
-
330
- def update_task_video():
331
- return "video-to-video"
332
-
333
- # --- Gradio UI Definition ---
334
- css="""
335
- #col-container {
336
- margin: 0 auto;
337
- max-width: 900px;
338
- }
339
- """
340
-
341
- with gr.Blocks(css=css) as demo:
342
- gr.Markdown("# LTX Video 0.9.7 Distilled")
343
- gr.Markdown("Fast high quality video generation. [Model](https://huggingface.co/Lightricks/LTX-Video/blob/main/ltxv-13b-0.9.7-distilled.safetensors) [GitHub](https://github.com/Lightricks/LTX-Video) [Diffusers](https://huggingface.co/Lightricks/LTX-Video-0.9.7-distilled#diffusers-🧨)")
344
-
345
- with gr.Row():
346
- with gr.Column():
347
- with gr.Tab("image-to-video") as image_tab:
348
- video_i_hidden = gr.Textbox(label="video_i", visible=False, value=None)
349
- image_i2v = gr.Image(label="Input Image", type="filepath", sources=["upload", "webcam", "clipboard"])
350
- i2v_prompt = gr.Textbox(label="Prompt", value="The creature from the image starts to move", lines=3)
351
- i2v_button = gr.Button("Generate Image-to-Video", variant="primary")
352
- with gr.Tab("text-to-video") as text_tab:
353
- image_n_hidden = gr.Textbox(label="image_n", visible=False, value=None)
354
- video_n_hidden = gr.Textbox(label="video_n", visible=False, value=None)
355
- t2v_prompt = gr.Textbox(label="Prompt", value="A majestic dragon flying over a medieval castle", lines=3)
356
- t2v_button = gr.Button("Generate Text-to-Video", variant="primary")
357
- with gr.Tab("video-to-video", visible=False) as video_tab:
358
- image_v_hidden = gr.Textbox(label="image_v", visible=False, value=None)
359
- video_v2v = gr.Video(label="Input Video", sources=["upload", "webcam"]) # type defaults to filepath
360
- frames_to_use = gr.Slider(label="Frames to use from input video", minimum=9, maximum=MAX_NUM_FRAMES, value=9, step=8, info="Number of initial frames to use for conditioning/transformation. Must be N*8+1.")
361
- v2v_prompt = gr.Textbox(label="Prompt", value="Change the style to cinematic anime", lines=3)
362
- v2v_button = gr.Button("Generate Video-to-Video", variant="primary")
363
-
364
- duration_input = gr.Slider(
365
- label="Video Duration (seconds)",
366
- minimum=0.3,
367
- maximum=8.5,
368
- value=2,
369
- step=0.1,
370
- info=f"Target video duration (0.3s to 8.5s)"
371
- )
372
- improve_texture = gr.Checkbox(label="Improve Texture (multi-scale)", value=True, info="Uses a two-pass generation for better quality, but is slower. Recommended for final output.")
373
-
374
- with gr.Column():
375
- output_video = gr.Video(label="Generated Video", interactive=False)
376
- # gr.DeepLinkButton()
377
-
378
- with gr.Accordion("Advanced settings", open=False):
379
- mode = gr.Dropdown(["text-to-video", "image-to-video", "video-to-video"], label="task", value="image-to-video", visible=False)
380
- negative_prompt_input = gr.Textbox(label="Negative Prompt", value="worst quality, inconsistent motion, blurry, jittery, distorted", lines=2)
381
- with gr.Row():
382
- seed_input = gr.Number(label="Seed", value=42, precision=0, minimum=0, maximum=2**32-1)
383
- randomize_seed_input = gr.Checkbox(label="Randomize Seed", value=True)
384
- with gr.Row():
385
- guidance_scale_input = gr.Slider(label="Guidance Scale (CFG)", minimum=1.0, maximum=10.0, value=PIPELINE_CONFIG_YAML.get("first_pass", {}).get("guidance_scale", 1.0), step=0.1, info="Controls how much the prompt influences the output. Higher values = stronger influence.")
386
- with gr.Row():
387
- height_input = gr.Slider(label="Height", value=512, step=32, minimum=MIN_DIM_SLIDER, maximum=MAX_IMAGE_SIZE, info="Must be divisible by 32.")
388
- width_input = gr.Slider(label="Width", value=704, step=32, minimum=MIN_DIM_SLIDER, maximum=MAX_IMAGE_SIZE, info="Must be divisible by 32.")
389
-
390
-
391
- # --- Event handlers for updating dimensions on upload ---
392
- def handle_image_upload_for_dims(image_filepath, current_h, current_w):
393
- if not image_filepath: # Image cleared or no image initially
394
- # Keep current slider values if image is cleared or no input
395
- return gr.update(value=current_h), gr.update(value=current_w)
396
- try:
397
- img = Image.open(image_filepath)
398
- orig_w, orig_h = img.size
399
- new_h, new_w = calculate_new_dimensions(orig_w, orig_h)
400
- return gr.update(value=new_h), gr.update(value=new_w)
401
- except Exception as e:
402
- print(f"Error processing image for dimension update: {e}")
403
- # Keep current slider values on error
404
- return gr.update(value=current_h), gr.update(value=current_w)
405
-
406
- def handle_video_upload_for_dims(video_filepath, current_h, current_w):
407
- if not video_filepath: # Video cleared or no video initially
408
- return gr.update(value=current_h), gr.update(value=current_w)
409
- try:
410
- # Ensure video_filepath is a string for os.path.exists and imageio
411
- video_filepath_str = str(video_filepath)
412
- if not os.path.exists(video_filepath_str):
413
- print(f"Video file path does not exist for dimension update: {video_filepath_str}")
414
- return gr.update(value=current_h), gr.update(value=current_w)
415
-
416
- orig_w, orig_h = -1, -1
417
- with imageio.get_reader(video_filepath_str) as reader:
418
- meta = reader.get_meta_data()
419
- if 'size' in meta:
420
- orig_w, orig_h = meta['size']
421
- else:
422
- # Fallback: read first frame if 'size' not in metadata
423
- try:
424
- first_frame = reader.get_data(0)
425
- # Shape is (h, w, c) for frames
426
- orig_h, orig_w = first_frame.shape[0], first_frame.shape[1]
427
- except Exception as e_frame:
428
- print(f"Could not get video size from metadata or first frame: {e_frame}")
429
- return gr.update(value=current_h), gr.update(value=current_w)
430
-
431
- if orig_w == -1 or orig_h == -1: # If dimensions couldn't be determined
432
- print(f"Could not determine dimensions for video: {video_filepath_str}")
433
- return gr.update(value=current_h), gr.update(value=current_w)
434
-
435
- new_h, new_w = calculate_new_dimensions(orig_w, orig_h)
436
- return gr.update(value=new_h), gr.update(value=new_w)
437
- except Exception as e:
438
- # Log type of video_filepath for debugging if it's not a path-like string
439
- print(f"Error processing video for dimension update: {e} (Path: {video_filepath}, Type: {type(video_filepath)})")
440
- return gr.update(value=current_h), gr.update(value=current_w)
441
-
442
-
443
- image_i2v.upload(
444
- fn=handle_image_upload_for_dims,
445
- inputs=[image_i2v, height_input, width_input],
446
- outputs=[height_input, width_input]
447
- )
448
- video_v2v.upload(
449
- fn=handle_video_upload_for_dims,
450
- inputs=[video_v2v, height_input, width_input],
451
- outputs=[height_input, width_input]
452
  )
453
-
454
- image_tab.select(
455
- fn=update_task_image,
456
- outputs=[mode]
457
  )
458
- text_tab.select(
459
- fn=update_task_text,
460
- outputs=[mode]
461
  )
462
-
463
- t2v_inputs = [t2v_prompt, negative_prompt_input, image_n_hidden, video_n_hidden,
464
- height_input, width_input, mode,
465
- duration_input, frames_to_use,
466
- seed_input, randomize_seed_input, guidance_scale_input, improve_texture]
467
-
468
- i2v_inputs = [i2v_prompt, negative_prompt_input, image_i2v, video_i_hidden,
469
- height_input, width_input, mode,
470
- duration_input, frames_to_use,
471
- seed_input, randomize_seed_input, guidance_scale_input, improve_texture]
472
-
473
- v2v_inputs = [v2v_prompt, negative_prompt_input, image_v_hidden, video_v2v,
474
- height_input, width_input, mode,
475
- duration_input, frames_to_use,
476
- seed_input, randomize_seed_input, guidance_scale_input, improve_texture]
477
 
478
- t2v_button.click(fn=generate, inputs=t2v_inputs, outputs=[output_video, seed_input], api_name="text_to_video")
479
- i2v_button.click(fn=generate, inputs=i2v_inputs, outputs=[output_video, seed_input], api_name="image_to_video")
480
- v2v_button.click(fn=generate, inputs=v2v_inputs, outputs=[output_video, seed_input], api_name="video_to_video")
 
481
 
482
- if __name__ == "__main__":
483
- if os.path.exists(models_dir) and os.path.isdir(models_dir):
484
- print(f"Model directory: {Path(models_dir).resolve()}")
485
-
486
- demo.queue().launch(debug=True, share=False, mcp_server=True)
 
11
  from PIL import Image
12
  from huggingface_hub import hf_hub_download
13
  import shutil
14
+ from diffusers import LTXPipeline, LTXImageToVideoPipeline  # or LTXConditionPipeline, depending on the diffusers version
+ from diffusers.utils import export_to_video
15
+
16
+ # -------------------------
17
+ # 📦 Download and load the model
18
+ # -------------------------
19
+ MODEL_ID = "Lightricks/LTX-Video"
20
+ CHECKPOINT_FILE = "ltxv-2b-0.9.6-distilled.safetensors"
21
+
22
+ local_ckpt = hf_hub_download(repo_id=MODEL_ID, filename=CHECKPOINT_FILE, cache_dir="./models")  # distilled single-file checkpoint, kept locally; the pipelines below load the diffusers-format weights
+ pipe = LTXPipeline.from_pretrained(
+ MODEL_ID, revision="main", torch_dtype=torch.bfloat16
+ ).to("cuda")
+ # image-to-video variant sharing the same components, so the weights are not loaded twice
+ pipe_i2v = LTXImageToVideoPipeline.from_pipe(pipe)
26
+
27
+ # -------------------------
28
+ # 🔧 Generation functions
29
+ # -------------------------
30
+ def txt2vid(prompt, height, width, num_frames, steps, seed=None):
31
+ seed = seed or random.randint(0, 2**32 - 1)
32
+ generator = torch.Generator(device="cuda").manual_seed(seed)
33
+ out = pipe(
34
+ prompt=prompt,
35
+ height=height,
36
+ width=width,
37
+ num_frames=num_frames,
38
+ num_inference_steps=steps,
39
+ generator=generator
40
  )
41
+ vid = out.frames[0]
42
+ tmp = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4")
43
+ export_to_video(vid, tmp.name, fps=25)
44
+ return tmp.name
45
+
46
+ def img2vid(image, prompt, height, width, num_frames, steps, seed=None):
47
+ img = Image.fromarray(image)  # Gradio passes the upload as a numpy array
+ seed = seed or random.randint(0, 2**32 - 1)
+ generator = torch.Generator(device="cuda").manual_seed(seed)
+ out = pipe_i2v(
+ image=img,
+ prompt=prompt,
+ height=height,
+ width=width,
+ num_frames=num_frames,
+ num_inference_steps=steps,
+ generator=generator
+ )
64
+ vid = out.frames[0]
65
+ tmp = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4")
66
+ export_to_video(vid, tmp.name, fps=25)
67
+ return tmp.name
68
 
69
+ # -------------------------
70
+ # 🎨 Gradio interface
71
+ # -------------------------
72
+ css = """body { background-color:#111; color:#eee } .gradio-container { max-width:800px; }"""
73
 
74
+ with gr.Blocks(css=css) as demo:
75
+ gr.Markdown("# LTX‑Video 2B Distilled (Gratuito)")
76
+
77
+ with gr.Tab("Text → Video"):
78
+ t_prompt = gr.Textbox(label="Prompt", value="A serene landscape at sunrise")
79
+ t_h = gr.Slider(128, 720, value=512, step=32, label="Height")
80
+ t_w = gr.Slider(128, 1280, value=768, step=32, label="Width")
81
+ t_f = gr.Slider(9, 257, value=65, step=8, label="Num Frames")
82
+ t_s = gr.Slider(4, 16, value=8, step=1, label="Steps")
83
+ t_seed = gr.Number(label="Seed (opcional)", value=0)
84
+ t_btn = gr.Button("Generate")
85
+ t_out = gr.Video()
86
+ t_btn.click(fn=txt2vid, inputs=[t_prompt, t_h, t_w, t_f, t_s, t_seed], outputs=t_out)
87
+
88
+ with gr.Tab("Image → Video"):
89
+ i_img = gr.Image(type="numpy")
90
+ i_prompt = gr.Textbox(label="Prompt", value="A cute fox in the snow")
91
+ i_h = gr.Slider(128, 720, value=512, step=32, label="Height")
92
+ i_w = gr.Slider(128, 1280, value=768, step=32, label="Width")
93
+ i_f = gr.Slider(9, 257, value=65, step=8, label="Num Frames")
94
+ i_s = gr.Slider(4, 16, value=8, step=1, label="Steps")
95
+ i_seed = gr.Number(label="Seed (opcional)", value=0)
96
+ i_btn = gr.Button("Generate")
97
+ i_out = gr.Video()
98
+ i_btn.click(fn=img2vid, inputs=[i_img, i_prompt, i_h, i_w, i_f, i_s, i_seed], outputs=i_out)
99
+
100
+ gr.Markdown("**Modelo:** ltxv‑2b‑0.9.6‑distilled • resolución múltiplo de 32, frames múltiplo de 8+1 :contentReference[oaicite:1]{index=1}")
101
+
102
+ demo.launch()
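
The size note in the Markdown above matches the constraints the previous version of app.py enforced in its slider and rounding logic: spatial dimensions snapped to multiples of 32 (within the 256-1280 slider bounds) and frame counts of the form 8·n + 1 (at most 257). A minimal sketch of that rounding, using a hypothetical helper name (snap_dims) that exists in neither version of app.py:

MIN_DIM, MAX_DIM = 256, 1280       # slider bounds from the previous app.py
MAX_NUM_FRAMES = 257

def snap_dims(height, width, num_frames):
    # Snap spatial dims to multiples of 32 and the frame count to 8*n + 1,
    # mirroring the rounding and clamping the previous app.py applied before inference.
    height = min(MAX_DIM, max(MIN_DIM, round(height / 32) * 32))
    width = min(MAX_DIM, max(MIN_DIM, round(width / 32) * 32))
    num_frames = min(MAX_NUM_FRAMES, max(9, round((num_frames - 1) / 8) * 8 + 1))
    return height, width, num_frames

# Example: snap_dims(500, 770, 64) -> (512, 768, 65)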