import gradio as gr
import torch
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
from diffusers.utils import export_to_video
# Initialize the diffusion pipeline
pipe = DiffusionPipeline.from_pretrained(
    "heboya8/text2video-test",
    torch_dtype=torch.float16,
    variant="fp16"
)

# Swap in the multistep DPM-Solver scheduler for faster sampling
# (otherwise the DPMSolverMultistepScheduler import above goes unused)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)

# Optimize for GPU memory
pipe.enable_model_cpu_offload()  # offload idle submodules to CPU
pipe.enable_vae_slicing()        # decode frames in slices to lower peak VRAM

def generate_video(prompt):
    try:
        # Generate video frames (steps/frames are set very low here for quick
        # smoke testing; raise num_inference_steps and num_frames for real output)
        video_frames = pipe(
            prompt,
            num_inference_steps=1,
            num_frames=1
        ).frames
        # Export frames to a video file
        # (note: on recent diffusers releases the pipeline output is batched,
        # in which case .frames[0] should be passed instead)
        video_path = export_to_video(video_frames, output_video_path="output_video.mp4")
        return video_path
    except Exception as e:
        return f"Error generating video: {str(e)}"

# Create Gradio interface
interface = gr.Interface(
    fn=generate_video,
    inputs=gr.Textbox(
        label="Enter your prompt",
        placeholder="e.g., a flower in a garden"
    ),
    outputs=gr.Video(label="Generated Video"),
    title="Text-to-Video Generator",
    description="Enter a text prompt to generate a video using the diffusion model."
)
# Launch the app
interface.launch()
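
# Optional (assumption, not part of the original app): video generation can be
# slow, so enabling Gradio's request queue before launching helps avoid request
# timeouts when hosted on Spaces:
#   interface.queue().launch()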