import gradio as gr
import torch
from diffusers import AutoencoderKLWan, WanPipeline
from diffusers.utils import export_to_video
import spaces


def load_pipeline():
    """Load the Wan2.1-T2V-1.3B pipeline once at startup."""
    model_id = "Wan-AI/Wan2.1-T2V-1.3B-Diffusers"
    print("Loading model. This may take several minutes...")
    # The Wan VAE is kept in float32 for numerical stability; the rest of the
    # pipeline runs in bfloat16 to reduce memory use.
    vae = AutoencoderKLWan.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.float32)
    pipe = WanPipeline.from_pretrained(model_id, vae=vae, torch_dtype=torch.bfloat16)
    # On ZeroGPU Spaces, moving the pipeline to CUDA at module level is supported;
    # the GPU itself is attached when a @spaces.GPU-decorated function runs.
    pipe.to("cuda")
    print("Model loaded successfully.")
    return pipe


# Load once at import time so every request reuses the same pipeline.
PIPELINE = load_pipeline()


@spaces.GPU(duration=120)  # 81-frame generation usually needs more than the default 60 s ZeroGPU slot
def generate_video(prompt, negative_prompt=""):
    """Run text-to-video inference and return the path of the rendered MP4."""
    # 832x480 with 81 frames matches the model's standard 480p settings;
    # at 15 fps this yields roughly 5.4 seconds of video.
    output = PIPELINE(
        prompt=prompt,
        negative_prompt=negative_prompt,
        height=480,
        width=832,
        num_frames=81,
        guidance_scale=5.0,
    ).frames[0]

    video_path = "output.mp4"
    export_to_video(output, video_path, fps=15)
    return video_path
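

# A minimal local sanity check (hypothetical prompt, not part of the app flow);
# the gr.Interface below invokes generate_video() in exactly the same way:
#
#     path = generate_video("A cat walking through tall grass, photorealistic style")
#     print(f"Wrote {path}")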


iface = gr.Interface(
    fn=generate_video,
    inputs=[
        gr.Textbox(label="Prompt", placeholder="Enter your video prompt here"),
        gr.Textbox(label="Negative Prompt", placeholder="Optional negative prompt", value=""),
    ],
    outputs=gr.Video(label="Generated Video"),
    title="Wan2.1-T2V-1.3B Video Generator",
    description="Generate 480p videos using the Wan2.1-T2V-1.3B diffusers pipeline with ZeroGPU support.",
)


if __name__ == "__main__":
    iface.launch()