Rasmus Lellep committed
Commit: 86125cc
Parent: d9297af

gradio changes

Files changed (4)
  1. .DS_Store +0 -0
  2. app.py +1 -6
  3. app_local.py +0 -1
  4. requirements.txt +0 -1
.DS_Store CHANGED
Binary files a/.DS_Store and b/.DS_Store differ
 
app.py CHANGED
@@ -50,8 +50,6 @@ os.chmod("ffmpeg", st.st_mode | stat.S_IEXEC)
  # This will trigger downloading model
  print("Downloading Coqui XTTS V2")
 
- #model_name = "tts_models/multilingual/multi-dataset/xtts_v2"
- #ModelManager().download_model(model_name)
  model_folder = "model/"
  config_file = "config.json"
  if not os.path.exists(model_folder + config_file):
@@ -87,7 +85,6 @@ model.load_checkpoint(
      eval=True,
      use_deepspeed=False,
  )
- #model.cuda()
 
  # This is for debugging purposes only
  DEVICE_ASSERT_DETECTED = 0
@@ -683,19 +680,17 @@ with gr.Blocks(analytics_enabled=False) as demo:
          "hu",
          "hi"
      ],
-     max_choices=1,
+     multiselect=False,
      value="et",
  )
  ref_gr = gr.Audio(
      label="Reference Audio",
-     info="Click on the ✎ button to upload your own target speaker audio",
      type="filepath",
      value="examples/LJ001-0030.wav",
  )
  mic_gr = gr.Audio(
      source="microphone",
-     info="Use your microphone to record audio",
      type="filepath",
      label="Use Microphone for Reference",
  )
  use_mic_gr = gr.Checkbox(
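
For reference, a minimal sketch of how the affected Gradio components read after this change (names and values are taken from the diff; the surrounding gr.Blocks layout is omitted, and the Gradio 3.x-style source="microphone" argument is kept as in the file):

import gradio as gr

# Single-choice language selector: multiselect=False replaces max_choices=1,
# which only applies to multi-select dropdowns.
language_gr = gr.Dropdown(
    choices=["et", "hu", "hi"],  # abbreviated; the full list in app.py has more languages
    multiselect=False,
    value="et",
)

# Reference speaker clip; the info= argument was dropped, presumably because
# gr.Audio does not accept it.
ref_gr = gr.Audio(
    label="Reference Audio",
    type="filepath",
    value="examples/LJ001-0030.wav",
)

# Optional microphone recording used as the reference instead of a file.
mic_gr = gr.Audio(
    source="microphone",
    type="filepath",
    label="Use Microphone for Reference",
)
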
app_local.py CHANGED
@@ -21,7 +21,6 @@ def load_model():
      return XTTS_MODEL
 
  model = load_model()
- #model.cuda()
 
  def predict(sentence, language, reference_clip):
      if not reference_clip or not reference_clip.split('.')[-1] in ['mp3', 'wav']:
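
With the commented-out model.cuda() line removed, the local app keeps the model on CPU. If GPU use is wanted again later, a hypothetical guard like the following (not part of this commit, reusing load_model() from app_local.py) would keep the script runnable on CPU-only machines:

import torch

model = load_model()
# Hypothetical alternative to an unconditional model.cuda():
# move the model to the GPU only when one is actually available.
if torch.cuda.is_available():
    model.cuda()
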
requirements.txt CHANGED
@@ -27,7 +27,6 @@ pandas>=1.4,<2.0
  # deps for training
  matplotlib>=3.7.0
  # coqui stack
- trainer>=0.0.36
  coqui-tts-trainer>=0.2.0,<0.3.0
  # config management
  coqpit-config>=0.1.1,<0.2.0
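
The old trainer>=0.0.36 pin is dropped, leaving the already-listed coqui-tts-trainer as the single trainer dependency. A quick import check, assuming coqui-tts-trainer provides the same trainer module that the Coqui TTS stack imports:

# Assumed sanity check after swapping the packages; adjust if the module layout differs.
from trainer import Trainer, TrainerArgs

print(Trainer, TrainerArgs)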