import spaces
import gradio as gr
import edge_tts
import asyncio
import tempfile
import os
import re # Import the regular expression module
# Get all available voices
async def get_voices():
    voices = await edge_tts.list_voices()
    return {f"{v['ShortName']} - {v['Locale']} ({v['Gender']})": v['ShortName'] for v in voices}
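# For reference, the dictionary built above maps display names to the bare ShortName that
# edge_tts expects, using the same "ShortName - Locale (Gender)" format as the hard-coded
# voice strings below, e.g. "vi-VN-HoaiMyNeural - vi-VN (Female)" -> "vi-VN-HoaiMyNeural".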
# Text-to-speech function for a single paragraph
async def paragraph_to_speech(text, voice, rate, pitch):
    # Hard-coded voice presets selectable with a prefix (1M, 1F, 2M, ...) at the start of a segment.
    voice1 = "vi-VN-NamMinhNeural - vi-VN (Male)"  # good for reading
    voice1F = "en-US-EmmaMultilingualNeural - en-US (Female)"
    voice2 = "ko-KR-HyunsuMultilingualNeural - ko-KR (Male)"
    voice2F = "de-DE-SeraphinaMultilingualNeural - de-DE (Female)"
    voice3 = "pt-BR-ThalitaMultilingualNeural - pt-BR (Female)"  # child
    voice_it_M = "it-IT-GiuseppeMultilingualNeural - it-IT (Male)"
    voice_de_M = "de-DE-FlorianMultilingualNeural - de-DE (Male)"
    voice_fr_M = "fr-FR-RemyMultilingualNeural - fr-FR (Male)"
    voice_fr_F = "fr-FR-VivienneMultilingualNeural - fr-FR (Female)"
    voice_en_US_M = "en-US-BrianMultilingualNeural - en-US (Male)"
    voice_en_US_F2 = "en-US-AvaMultilingualNeural - en-US (Female)"
    default_voice = "vi-VN-HoaiMyNeural - vi-VN (Female)"  # fallback when no voice is selected

    if not text.strip():
        return None

    # Optional segment prefix: a voice code followed by an optional pitch modifier in Hz,
    # e.g. "1M-15 ..." selects voice1 and lowers the pitch by 15 Hz.
    prefix_pattern = re.compile(r"^(1F|1M|2F|2M|1C|3M|3F)([-]?\d*)")
    match = prefix_pattern.match(text)
    voice_short_name = (voice or default_voice).split(" - ")[0]
    extracted_pitch = None
    text2 = text
    if match:
        prefix = match.group(1)
        pitch_mod_str = match.group(2)
        text2 = text[len(match.group(0)):]  # Remove the prefix and optional pitch modifier
        print(f">>>Processing text: '{text}'")
        if prefix == "1F":
            voice_short_name = voice1F.split(" - ")[0]
        elif prefix == "1M":
            voice_short_name = voice1.split(" - ")[0]
        elif prefix == "2F":
            voice_short_name = voice2F.split(" - ")[0]
        elif prefix == "2M":
            voice_short_name = voice2.split(" - ")[0]
        elif prefix == "1C":
            voice_short_name = voice3.split(" - ")[0]
        elif prefix == "3M":
            voice_short_name = voice_it_M.split(" - ")[0]  # Using Italian male for 3M
        elif prefix == "3F":
            voice_short_name = voice_fr_F.split(" - ")[0]  # Using French female for 3F
        if pitch_mod_str:
            try:
                extracted_pitch = int(pitch_mod_str)
            except ValueError:
                print(f"Warning: Invalid pitch modifier '{pitch_mod_str}'")

    # Sliders may deliver floats; edge_tts expects integer strings like "+5%" and "-10Hz".
    rate_str = f"{int(rate):+d}%"
    current_pitch = int(pitch)
    if extracted_pitch is not None:
        current_pitch += extracted_pitch
        print(f"Applying pitch modification: {extracted_pitch}Hz, new pitch: {current_pitch}Hz")
    elif voice_short_name == voice3.split(" - ")[0]:
        current_pitch = 70  # Default pitch boost for the child voice
    pitch_str = f"{current_pitch:+d}Hz"

    try:
        communicate = edge_tts.Communicate(text2, voice_short_name, rate=rate_str, pitch=pitch_str)
        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
            tmp_path = tmp_file.name
            await communicate.save(tmp_path)
        return tmp_path
    except Exception as e:
        print(f"Edge TTS error processing '{text2}': {e}")
        return None
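# Illustration only: a minimal sketch of how the segment markup above is interpreted. The helper
# name is made up for this example and nothing in the app calls it; it simply applies the same
# pattern as paragraph_to_speech and returns the pieces that function would act on.
def _split_prefix(segment):
    """Return (voice_code, pitch_modifier, remaining_text); the first two may be None."""
    m = re.match(r"^(1F|1M|2F|2M|1C|3M|3F)([-]?\d*)", segment)
    if not m:
        return None, None, segment
    pitch_mod = m.group(2)
    return m.group(1), (int(pitch_mod) if pitch_mod not in ("", "-") else None), segment[len(m.group(0)):]

# Example: _split_prefix("1M-15 Xin chao") -> ("1M", -15, " Xin chao"), i.e. the 1M voice 15 Hz lower.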
# Main text-to-speech function that processes paragraphs
async def text_to_speech(text, voice, rate, pitch):
    if not text.strip():
        return None, gr.Warning("Please enter text to convert.")
    if not voice:
        return None, gr.Warning("Please select a voice.")

    # Split the input into segments on quote marks; each segment is voiced separately.
    paragraphs = [p.strip() for p in re.split(r'"', text) if p.strip()]
    audio_files = []
    for paragraph in paragraphs:
        audio_path = await paragraph_to_speech(paragraph, voice, rate, pitch)
        if audio_path:
            audio_files.append(audio_path)

    if not audio_files:
        return None, None  # No audio generated

    # Combine audio files if there are multiple segments
    if len(audio_files) == 1:
        return audio_files[0], None
    # Simple byte concatenation for now - consider a proper audio library for smoother transitions
    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as outfile:
        combined_audio_path = outfile.name
        for filename in audio_files:
            with open(filename, 'rb') as infile:
                outfile.write(infile.read())
            os.remove(filename)  # Clean up individual files
    return combined_audio_path, None
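# Optional alternative to the byte concatenation above, a minimal sketch only: the comment in
# text_to_speech already suggests a proper audio library for smoother transitions. This assumes
# pydub (and ffmpeg) are available, which this Space's requirements may not include; the helper
# name is made up here and nothing in the app calls it.
def combine_audio_pydub(paths):
    from pydub import AudioSegment  # external dependency, assumed to be installed

    combined = AudioSegment.empty()
    for path in paths:
        combined += AudioSegment.from_mp3(path)  # decode and append each segment
        os.remove(path)  # mirror the cleanup done by the byte-concatenation path
    out_path = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3").name
    combined.export(out_path, format="mp3")  # re-encode the joined audio as a single MP3
    return out_path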
# Gradio interface function
@spaces.GPU
def tts_interface(text, voice, rate, pitch):
    audio, warning = asyncio.run(text_to_speech(text, voice, rate, pitch))
    return audio, warning
# Create Gradio application
async def create_demo():
    voices = await get_voices()
    default_voice = "vi-VN-HoaiMyNeural - vi-VN (Female)"  # 👈 Pick one of the available voices
    description = """
    Default = vi-VN-HoaiMyNeural - vi-VN (Female).
    Other voices:
    1F: en-US-EmmaMultilingualNeural - en-US (Female),
    1M: vi-VN-NamMinhNeural - vi-VN (Male),
    2F: de-DE-SeraphinaMultilingualNeural - de-DE (Female),
    2M: ko-KR-HyunsuMultilingualNeural - ko-KR (Male),
    1C: pt-BR-ThalitaMultilingualNeural - pt-BR (Female),
    3M: it-IT-GiuseppeMultilingualNeural - it-IT (Male),
    3F: fr-FR-VivienneMultilingualNeural - fr-FR (Female).
    You can add a pitch modifier after the voice prefix (e.g., 1M15 for +15 Hz, 1M-15 for -15 Hz).
    Enter your text, select a voice, and adjust the speech rate and pitch.
    The application will process your text segment by segment based on quote marks.
    """
    demo = gr.Interface(
        fn=tts_interface,
        inputs=[
            gr.Textbox(label="Input Text", lines=5, placeholder='Separate dialogue with quote marks. Add voice prefix (e.g., 1M, 1F) before dialogue. You can also add a pitch modifier like 1M-20 "Hello!"'),
            gr.Dropdown(choices=[""] + list(voices.keys()), label="Select Default Voice", value=default_voice),
            gr.Slider(minimum=-50, maximum=50, value=0, label="Default Speech Rate Adjustment (%)", step=1),
            gr.Slider(minimum=-20, maximum=100, value=0, label="Default Pitch Adjustment (Hz)", step=1)
        ],
        outputs=[
            gr.Audio(label="Generated Audio", type="filepath"),
            gr.Markdown(label="Warning", visible=False)
        ],
        title="Vietnamese TTS - all AI voices & pitch changes",
        description=description,
        article="Process text segments with voice prefixes and optional pitch modifiers.",
        analytics_enabled=False,
        allow_flagging="never"
    )
    return demo
# Run the application
if __name__ == "__main__":
    demo = asyncio.run(create_demo())
    demo.launch()