kiwhansong committed
Commit 5531e62 · 1 parent: 5eea811
Files changed (1)
  1. app.py +5 -6
app.py CHANGED
@@ -55,8 +55,8 @@ gif_paths = []
 for idx, video, path in zip(
     range(len(video_list)), video_list, metadata["video_paths"]
 ):
-    indices = torch.linspace(0, video.size(0) - 1, 8, dtype=torch.long)
-    gif_paths.append(export_to_gif(video[indices], fps=4))
+    indices = torch.linspace(0, video.size(0) - 1, 16, dtype=torch.long)
+    gif_paths.append(export_to_gif(video[indices], fps=8))


 # pylint: disable-next=no-value-for-parameter
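
The first hunk only changes how the preview GIFs are subsampled: 16 evenly spaced frames written at 8 fps instead of 8 frames at 4 fps, so each GIF still spans roughly 2 seconds of video but plays back twice as smoothly. A minimal sketch of that sampling logic, with a PIL-based stand-in for the app's `export_to_gif` helper (whose real signature may differ):

```python
import torch
from PIL import Image


def export_to_gif(frames: torch.Tensor, path: str = "preview.gif", fps: int = 8) -> str:
    # Stand-in for the app's helper: `frames` is (T, C, H, W) with values in [0, 1].
    images = [
        Image.fromarray((f.permute(1, 2, 0) * 255).byte().numpy()) for f in frames
    ]
    images[0].save(
        path, save_all=True, append_images=images[1:], duration=int(1000 / fps), loop=0
    )
    return path


video = torch.rand(57, 3, 64, 64)  # dummy clip standing in for one entry of video_list
# 16 evenly spaced frame indices instead of 8 -> same ~2 s preview, double the frame rate
indices = torch.linspace(0, video.size(0) - 1, 16, dtype=torch.long)
gif_path = export_to_gif(video[indices], fps=8)
```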
@@ -115,7 +115,6 @@ def single_image_to_long_video(
     return export_to_video(gen_video[0].detach().cpu(), fps=fps)


-@spaces.GPU(duration=100)
 @torch.autocast("cuda")
 @torch.no_grad()
 def any_images_to_short_video(
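
For context, `@spaces.GPU` is the Hugging Face ZeroGPU decorator: on a ZeroGPU Space it allocates a GPU for each call to the wrapped function and releases it afterwards, with `duration` capping the allocation in seconds. A minimal illustration of the pattern this hunk removes from `any_images_to_short_video` (assuming a ZeroGPU Space; this is not the app's remaining configuration):

```python
import spaces  # Hugging Face ZeroGPU helper, available on ZeroGPU Spaces
import torch


@spaces.GPU(duration=100)  # request a GPU slice for up to ~100 s per call
def describe_gpu() -> str:
    # Inside the decorated call, CUDA is available even though the host process is CPU-only.
    return torch.cuda.get_device_name(0)
```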
@@ -187,7 +186,7 @@ with gr.Blocks(theme=gr.themes.Base(primary_hue="teal")) as demo:
     gr.Markdown(
         """
         ## Demo 1: Any Number of Images → Short 2-second Video
-        > #### **TL;DR:** _Diffusion Forcing Transformer is a flexible model that can generate videos given variable number of context frames._
+        > #### _Diffusion Forcing Transformer is a flexible model that can generate videos given variable number of context frames._
         """
     )

@@ -313,7 +312,7 @@ with gr.Blocks(theme=gr.themes.Base(primary_hue="teal")) as demo:
     gr.Markdown(
         """
         ## Demo 2: Single Image → Long 20-second Video
-        > #### **TL;DR:** _Diffusion Forcing Transformer, with History Guidance, can stably generate long videos, via sliding window rollouts and interpolation._
+        > #### _Diffusion Forcing Transformer, with History Guidance, can generate long videos via sliding window rollouts and temporal super-resolution._
         """
     )

@@ -404,7 +403,7 @@ with gr.Blocks(theme=gr.themes.Base(primary_hue="teal")) as demo:
     gr.Markdown(
         """
         ## Demo 3: Single Image → Extremely Long Video
-        > #### **TL;DR:** _TODO._
+        > #### _TODO._
         """
     )

409