Update app.py
app.py CHANGED
@@ -122,7 +122,8 @@ def process_video_for_canny(video, width, height):
 
     return canny_video
 
-def process_input_video(reference_video, width, height):
+def process_input_video(reference_video, width, height,
+                        progress=gr.Progress(track_tqdm=True)):
     """
     Process the input video for canny edges and return both processed video and preview.
     """
@@ -173,7 +174,7 @@ def generate_video(
     seed=0,
     randomize_seed=False,
     control_type="canny",
-    progress=gr.Progress()
+    progress=gr.Progress(track_tqdm=True)
 ):
     try:
         # Initialize models if needed
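This change is the heart of the commit: with track_tqdm=True, Gradio mirrors any tqdm progress bar created inside the function to the UI, and diffusers pipelines already report their denoising steps through tqdm, so the manual progress(...) checkpoints removed in the hunks below become redundant. A minimal sketch of the pattern, using an illustrative function that is not part of this Space:

import gradio as gr
from tqdm import tqdm

def slow_task(n_steps, progress=gr.Progress(track_tqdm=True)):
    # Gradio injects a Progress tracker through the default argument;
    # with track_tqdm=True it also mirrors this tqdm loop to the UI,
    # so no explicit progress(...) calls are needed.
    total = 0
    for i in tqdm(range(int(n_steps)), desc="Working..."):
        total += i
    return total

demo = gr.Interface(fn=slow_task, inputs=gr.Number(value=100), outputs="number")
demo.launch()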
@@ -195,7 +196,6 @@ def generate_video(
         temporal_compression = pipeline.vae_temporal_compression_ratio
         num_frames = ((num_frames - 1) // temporal_compression) * temporal_compression + 1
 
-        progress(0.1, desc="Preparing processed video...")
 
         # Use pre-processed video frames if available (for canny), otherwise process on-demand
         print("######## control_video ", control_video)
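For reference, the num_frames line kept in the context above snaps the requested frame count down to the nearest value of the form k * ratio + 1, the frame counts the VAE's temporal compression can map cleanly. A quick worked example, assuming a compression ratio of 8 (an assumption for illustration; the real value comes from pipeline.vae_temporal_compression_ratio):

temporal_compression = 8  # assumed ratio, for illustration only
for requested in (49, 50, 60, 97):
    snapped = ((requested - 1) // temporal_compression) * temporal_compression + 1
    print(requested, "->", snapped)
# 49 -> 49, 50 -> 49, 60 -> 57, 97 -> 97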
@@ -213,7 +213,6 @@ def generate_video(
         processed_video = read_video(processed_video)
         print(type(processed_video))
 
-        progress(0.2, desc="Preparing generation parameters...")
 
         # Calculate downscaled dimensions
         downscale_factor = 2 / 3
@@ -223,7 +222,6 @@ def generate_video(
             downscaled_height, downscaled_width, pipeline.vae_temporal_compression_ratio
         )
 
-        progress(0.3, desc="Generating video at lower resolution...")
 
         # 1. Generate video at smaller resolution
         latents = pipeline(
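The downscale_factor = 2 / 3 above feeds the Space's three-stage flow: generate at reduced resolution (stage 1), upscale 2x (stage 2, next hunk), then denoise (stage 3). The dimension arithmetic works out as in this sketch; round_to_multiple is a hypothetical stand-in for the Space's rounding helper (which, as the context shows, also takes pipeline.vae_temporal_compression_ratio), and the multiple-of-32 constraint is an assumption, not something the diff confirms:

downscale_factor = 2 / 3

def round_to_multiple(x, multiple=32):
    # Hypothetical stand-in for the VAE-rounding helper called above.
    return max(multiple, round(x / multiple) * multiple)

height, width = 512, 768
downscaled_height = round_to_multiple(height * downscale_factor)
downscaled_width = round_to_multiple(width * downscale_factor)
upscaled_height, upscaled_width = downscaled_height * 2, downscaled_width * 2
print(downscaled_height, downscaled_width)  # 352 512
print(upscaled_height, upscaled_width)      # 704 1024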
@@ -241,7 +239,6 @@ def generate_video(
             output_type="latent",
         ).frames
 
-        progress(0.6, desc="Upscaling video...")
 
         # 2. Upscale generated video
         upscaled_height, upscaled_width = downscaled_height * 2, downscaled_width * 2
@@ -250,7 +247,6 @@ def generate_video(
             output_type="latent"
         ).frames
 
-        progress(0.8, desc="Final denoising and processing...")
 
         # 3. Denoise the upscaled video
         final_video_frames_np = pipeline(
@@ -270,7 +266,6 @@ def generate_video(
             output_type="np",
         ).frames[0]
 
-        progress(0.9, desc="Finalizing output...")
 
 
         # Export to temporary file
@@ -281,8 +276,7 @@ def generate_video(
                 progress((frame_idx + 1) / len(video_uint8_frames), desc="Encoding video frames...")
                 writer.append_data(frame_data)
 
-
-        progress(1.0, desc="Complete!")
+
 
         return output_filename, seed
 
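Note that the per-frame progress(...) call inside the encoding loop survives the cleanup: frame encoding runs outside any tqdm loop, so it still needs manual reporting. A minimal sketch of that pattern with imageio, where the function name, fps, and output path are illustrative rather than taken from the Space:

import imageio
import gradio as gr

def encode_frames(video_uint8_frames, progress=gr.Progress(track_tqdm=True)):
    # video_uint8_frames: list of HxWx3 uint8 numpy arrays
    output_filename = "output.mp4"  # illustrative path
    with imageio.get_writer(output_filename, fps=24) as writer:
        for frame_idx, frame_data in enumerate(video_uint8_frames):
            progress((frame_idx + 1) / len(video_uint8_frames), desc="Encoding video frames...")
            writer.append_data(frame_data)
    return output_filename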