nezihtopaloglu committed on
Commit
49f5320
·
verified ·
1 Parent(s): 3c95b66

Trying to solve ZeroGPU runtime issues

Browse files
Files changed (1) hide show
  1. app.py +7 -0
app.py CHANGED
@@ -8,9 +8,14 @@ import moviepy.editor as mp
8
  import numpy as np
9
  import os
10
  from PIL import Image, ImageDraw, ImageFont
 
 
 
11
 
 
12
 
13
 
 
14
  def generate_script(topic):
15
  """Uses an open-source LLM to generate an engaging script of 8-10 minutes."""
16
  llm = pipeline("text-generation", model="agentica-org/DeepScaleR-1.5B-Preview")
@@ -67,12 +72,14 @@ def estimate_chunk_durations(text, words_per_second=2.5, min_sec=5, max_sec=7):
67
  chunks.append(" ".join(current_chunk))
68
  return chunks
69
 
 
70
  def generate_speech(text):
71
  tts = TTS("tts_models/en/ljspeech/glow-tts")
72
  wav_path = "speech.wav"
73
  tts.tts_to_file(text=text, file_path=wav_path)
74
  return wav_path
75
 
 
76
  def generate_images(chunks, image_size=(640, 480), use_diffusion=True, num_steps=40):
77
  image_paths = []
78
  if use_diffusion:
 
8
  import numpy as np
9
  import os
10
  from PIL import Image, ImageDraw, ImageFont
11
+ import shlex
12
+ import subprocess
13
+ import spaces
14
 
15
+ subprocess.run(shlex.split('pip install wheel/torchmcubes-0.1.0-cp310-cp310-linux_x86_64.whl'))
16
 
17
 
18
+ @spaces.GPU
19
  def generate_script(topic):
20
  """Uses an open-source LLM to generate an engaging script of 8-10 minutes."""
21
  llm = pipeline("text-generation", model="agentica-org/DeepScaleR-1.5B-Preview")
 
72
  chunks.append(" ".join(current_chunk))
73
  return chunks
74
 
75
+ @spaces.GPU
76
  def generate_speech(text):
77
  tts = TTS("tts_models/en/ljspeech/glow-tts")
78
  wav_path = "speech.wav"
79
  tts.tts_to_file(text=text, file_path=wav_path)
80
  return wav_path
81
 
82
+ @spaces.GPU
83
  def generate_images(chunks, image_size=(640, 480), use_diffusion=True, num_steps=40):
84
  image_paths = []
85
  if use_diffusion: