cnph001 committed on
Commit 4bccf88 · verified · 1 Parent(s): b430ff0

Update app.py


Testing YouTube transcript TTS
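For reference, this is the timestamp parsing the commit introduces in `process_transcript_line`, pulled out as a minimal standalone sketch. `parse_line` is a hypothetical helper name; the `* 10` fractional handling mirrors the committed code, which only reads the fraction as hundredths of a second when two digits are given:

```python
import re

# Same pattern as process_transcript_line: "minutes:seconds[.fraction] text"
LINE_RE = re.compile(r'(\d+):(\d+)(?:\.(\d+))?\s+(.*)')

def parse_line(line):  # hypothetical helper, for illustration only
    match = LINE_RE.match(line)
    if not match:
        return None
    minutes, seconds, fraction, text = match.groups()
    # Mirrors the commit: the fractional part is multiplied by 10,
    # so ".50" becomes 500 ms but ".5" becomes only 50 ms.
    start_ms = int(minutes) * 60000 + int(seconds) * 1000 + (int(fraction) * 10 if fraction else 0)
    return start_ms, text

print(parse_line("0:38 1F Grandma isn't feeling very well."))
# (38000, "1F Grandma isn't feeling very well.")
```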

Files changed (1)
  1. app.py +110 -158
app.py CHANGED
@@ -1,10 +1,11 @@
+# help me analyse this code, it is for a tts hugginface space
 import spaces
 import gradio as gr
 import edge_tts
 import asyncio
 import tempfile
 import os
-import re # Import the regular expression module
+import re
 from pathlib import Path
 from pydub import AudioSegment
@@ -14,11 +15,11 @@ def get_silence(duration_ms=1000):
         duration=duration_ms,
         frame_rate=24000 # 24 kHz sampling rate
     )
-
+
     # Set audio parameters
     silent_audio = silent_audio.set_channels(1) # Mono
     silent_audio = silent_audio.set_sample_width(4) # 32-bit (4 bytes per sample)
-
+
     with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
         # Export with specific bitrate and codec parameters
         silent_audio.export(
@@ -39,178 +40,131 @@ async def get_voices():
     voices = await edge_tts.list_voices()
     return {f"{v['ShortName']} - {v['Locale']} ({v['Gender']})": v['ShortName'] for v in voices}

-# Text-to-speech function for a single paragraph with SS handling
-async def paragraph_to_speech(text, voice, rate, pitch):
-    voice1 = "en-AU-WilliamNeural - en-AU (Male)"
-    voice1F = "en-GB-SoniaNeural - en-GB (Female)"
-    voice2 = "en-GB-RyanNeural - en-GB (Male)"
-    voice2F = "en-US-JennyNeural - en-US (Female)"
-    voice3 = "en-US-BrianMultilingualNeural - en-US (Male)" # good for reading
-    voice3F = "en-HK-YanNeural - en-HK (Female)"
-    voice4 = "en-GB-ThomasNeural - en-GB (Male)"
-    voice4F = "en-US-EmmaNeural - en-US (Female)"
-    voice5 = "en-GB-RyanNeural - en-GB (Male)" # Old Man
-    voice6 = "en-GB-MaisieNeural - en-GB (Female)" # Child
-
-    if not text.strip():
-        return None, [] # Return None for the audio path and an empty list for silence
-
-    audio_segments = []
-    silence_durations = []
-    parts = re.split(r'(SS\d+\.?\d*)', text) # separate out any SS## tags in the text
-    for part in parts:
-        if re.match(r'SS\d+\.?\d*', part): # check for a silence tag
-            # At the top of your file:
-            #SILENCE_PATH = Path(__file__).parent.absolute() / "Silence.mp3"
-            # At the top of your file (assuming you uploaded "Silence.mp3" to root)
-            #SILENCE_PATH = Path(__file__).parent.absolute() / "Silence.mp3"
-            # At the top of your file:
-            #SILENCE_PATH = Path(__file__).parent.absolute() / "static" / "intro.mp3"
-            #if SILENCE_PATH.exists():
-            #    audio_segments.append(str(SILENCE_PATH))
-            #    print(f"Silence.mp3 file found at {SILENCE_PATH} and is inserted")
-            #else:
-            silence_duration = float(part[2:]) * 1000 # Convert to milliseconds
-            print(f"Silence.mp3 file NOT FOUND")
-            silence_file_path = get_silence(silence_duration) # Store the returned filename
-            audio_segments.append(silence_file_path) # Use the stored filename
-        elif part.strip():
-            detect = 0
-            processed_text = part
-            current_voice = voice
-            current_rate = rate
-            current_pitch = pitch
-            if part.startswith("1F"):
-                detect = 1
-                current_voice = voice1F.split(" - ")[0]
-                current_pitch = 25
-            elif part.startswith("2F"):
-                detect = 1
-                current_voice = voice2F.split(" - ")[0]
-            elif part.startswith("3F"):
-                detect = 1
-                current_voice = voice3F.split(" - ")[0]
-            elif part.startswith("4F"):
-                detect = 1
-                current_voice = voice4F.split(" - ")[0]
-            elif part.startswith("1M"):
-                detect = 1
-                current_voice = voice1.split(" - ")[0]
-            elif part.startswith("2M"):
-                detect = 1
-                current_voice = voice2.split(" - ")[0]
-            elif part.startswith("3M"):
-                detect = 1
-                current_voice = voice3.split(" - ")[0]
-            elif part.startswith("4M"):
-                detect = 1
-                current_voice = voice4.split(" - ")[0]
-            elif part.startswith("1O"): # Old man voice
-                detect = 1
-                current_voice = voice5.split(" - ")[0]
-                current_pitch = -20
-                current_rate = -10
-            elif part.startswith("1C"): # Child voice
-                detect = 1
-                current_voice = voice6.split(" - ")[0]
-            else:
-                # Use the selected voice, or fall back to the default
-                #voice_short_name = (voice or default_voice).split(" - ")[0]
-                current_voice = (voice or default_voice).split(" - ")[0]
-                processed_text = part[:]
-            # Step 1: Use regex to find the first number, possibly negative, after a prefix (e.g., F-)
-            #match = re.search(r'[A-Za-z]\d+', part) # Look for a letter followed by one or more digits
-            match = re.search(r'[A-Za-z]+\-?\d+', part) # Look for letter(s) followed by an optional '-' and digits
-            if match:
-                # Extract the prefix (e.g., '2F') and number (e.g., '-20')
-                prefix = ''.join([ch for ch in match.group() if ch.isalpha()]) # Extract letters (prefix)
-                number = int(''.join([ch for ch in match.group() if ch.isdigit() or ch == '-'])) # Extract digits (number)
-                current_pitch = number
-                # Step 2: Remove the found number from the string
-                new_text = re.sub(r'[A-Za-z]+\-?\d+', '', part, count=1).strip() # Remove prefix and number (e.g., '2F-20')
-                #processed_text = new_text[2:] # cut out the prefix like 1F, 3M etc
-                processed_text = new_text[len(prefix):] # Dynamically remove the prefix part
-            else:
-                if detect:
-                    processed_text = part[2:]
-            rate_str = f"{current_rate:+d}%"
-            #if part[2:4].isdigit():
-            #    processed_text = part[4:]
-            #    pitch = int(part[2:4])
-            pitch_str = f"{current_pitch:+d}Hz"
-            communicate = edge_tts.Communicate(processed_text, current_voice, rate=rate_str, pitch=pitch_str)
-            with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
-                tmp_path = tmp_file.name
-            await communicate.save(tmp_path)
-            audio_segments.append(tmp_path)
-        else:
-            audio_segments.append(None) # Empty string
-
-    return audio_segments, silence_durations
-
-# Main text-to-speech function that processes paragraphs and silence
-async def text_to_speech(text, voice, rate, pitch):
-    if not text.strip():
-        return None, gr.Warning("Please enter text to convert.")
+async def process_transcript_line(line, voice, rate, pitch):
+    """Processes a single transcript line to extract time, voice commands, and generate audio."""
+    match = re.match(r'(\d+):(\d+)(?:\.(\d+))?\s+(.*)', line)
+    if match:
+        minutes, seconds, milliseconds_str, text_with_commands = match.groups()
+        start_time_ms = int(minutes) * 60000 + int(seconds) * 1000 + (int(milliseconds_str) * 10 if milliseconds_str else 0)
+        if not text_with_commands.strip():
+            return start_time_ms, None
+
+        current_voice = voice.split(" - ")[0]
+        current_rate = rate
+        current_pitch = pitch
+        processed_text = text_with_commands
+
+        voice1 = "en-AU-WilliamNeural - en-AU (Male)"
+        voice1F = "en-GB-SoniaNeural - en-GB (Female)"
+        voice2 = "en-GB-RyanNeural - en-GB (Male)"
+        voice2F = "en-US-JennyNeural - en-US (Female)"
+        voice3 = "en-US-BrianMultilingualNeural - en-US (Male)" # good for reading
+        voice3F = "en-HK-YanNeural - en-HK (Female)"
+        voice4 = "en-GB-ThomasNeural - en-GB (Male)"
+        voice4F = "en-US-EmmaNeural - en-US (Female)"
+        voice5 = "en-GB-RyanNeural - en-GB (Male)" # Old Man
+        voice6 = "en-GB-MaisieNeural - en-GB (Female)" # Child
+
+        if text_with_commands.startswith("1F"):
+            current_voice = voice1F.split(" - ")[0]
+            current_pitch = 25
+            processed_text = text_with_commands[2:].strip()
+        elif text_with_commands.startswith("2F"):
+            current_voice = voice2F.split(" - ")[0]
+            processed_text = text_with_commands[2:].strip()
+        elif text_with_commands.startswith("3F"):
+            current_voice = voice3F.split(" - ")[0]
+            processed_text = text_with_commands[2:].strip()
+        elif text_with_commands.startswith("4F"):
+            current_voice = voice4F.split(" - ")[0]
+            processed_text = text_with_commands[2:].strip()
+        elif text_with_commands.startswith("1M"):
+            current_voice = voice1.split(" - ")[0]
+            processed_text = text_with_commands[2:].strip()
+        elif text_with_commands.startswith("2M"):
+            current_voice = voice2.split(" - ")[0]
+            processed_text = text_with_commands[2:].strip()
+        elif text_with_commands.startswith("3M"):
+            current_voice = voice3.split(" - ")[0]
+            processed_text = text_with_commands[2:].strip()
+        elif text_with_commands.startswith("4M"):
+            current_voice = voice4.split(" - ")[0]
+            processed_text = text_with_commands[2:].strip()
+        elif text_with_commands.startswith("1O"): # Old man voice
+            current_voice = voice5.split(" - ")[0]
+            current_pitch = -20
+            current_rate = -10
+            processed_text = text_with_commands[2:].strip()
+        elif text_with_commands.startswith("1C"): # Child voice
+            current_voice = voice6.split(" - ")[0]
+            processed_text = text_with_commands[2:].strip()
+
+        rate_str = f"{current_rate:+d}%"
+        pitch_str = f"{current_pitch:+d}Hz"
+        communicate = edge_tts.Communicate(processed_text, current_voice, rate=rate_str, pitch=pitch_str)
+        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
+            audio_path = tmp_file.name
+        await communicate.save(audio_path)
+        return start_time_ms, audio_path
+    return None, None
+
+async def transcript_to_speech(transcript_text, voice, rate, pitch):
+    if not transcript_text.strip():
+        return None, gr.Warning("Please enter transcript text.")
     if not voice:
         return None, gr.Warning("Please select a voice.")

-    #paragraphs = [p.strip() for p in re.split(r'"', text) if p.strip()]
-    # Split the text using straight quotes (") and curly quotes (“ and ”)
-    paragraphs = [p.strip() for p in re.split(r'[“”"]', text) if p.strip()]
-    final_audio_segments = []
-
-    for paragraph in paragraphs:
-        audio_paths, silence_times = await paragraph_to_speech(paragraph, voice, rate, pitch)
-        if audio_paths:
-            for i, path in enumerate(audio_paths):
-                final_audio_segments.append(path)
-                if i < len(silence_times):
-                    final_audio_segments.append(silence_times[i])
-
-    if not any(isinstance(item, str) for item in final_audio_segments):
-        return None, None # No actual audio generated
-
-    if all(not isinstance(item, str) for item in final_audio_segments):
-        return None, "Only silence markers found."
+    lines = transcript_text.strip().split('\n')
+    audio_segments_with_time = []
+    max_end_time_ms = 0
+
+    for line in lines:
+        start_time, audio_path = await process_transcript_line(line, voice, rate, pitch)
+        if start_time is not None and audio_path:
+            audio = AudioSegment.from_mp3(audio_path)
+            audio_segments_with_time.append({'start': start_time, 'audio': audio, 'path': audio_path})
+            max_end_time_ms = max(max_end_time_ms, start_time + len(audio))
+        elif audio_path:
+            os.remove(audio_path) # Clean up even if no timestamp
+
+    if not audio_segments_with_time:
+        return None, "No valid transcript lines found."
+
+    # Create initial silence audio
+    final_audio = AudioSegment.silent(duration=max_end_time_ms, frame_rate=24000)
+
+    for segment in audio_segments_with_time:
+        final_audio = final_audio.overlay(segment['audio'], position=segment['start'])
+        os.remove(segment['path']) # Clean up individual audio files

     combined_audio_path = tempfile.mktemp(suffix=".mp3")
-    with open(combined_audio_path, 'wb') as outfile:
-        for segment in final_audio_segments:
-            if isinstance(segment, str):
-                try:
-                    with open(segment, 'rb') as infile:
-                        outfile.write(infile.read())
-                    os.remove(segment) # Clean up individual files
-                except FileNotFoundError:
-                    print(f"Warning: Audio file not found: {segment}")
+    final_audio.export(combined_audio_path, format="mp3")
     return combined_audio_path, None

-# Gradio interface function
 @spaces.GPU
-def tts_interface(text, voice, rate, pitch):
-    audio, warning = asyncio.run(text_to_speech(text, voice, rate, pitch))
+def tts_interface(transcript, voice, rate, pitch):
+    audio, warning = asyncio.run(transcript_to_speech(transcript, voice, rate, pitch))
     return audio, warning

-# Create Gradio application
-import gradio as gr
-
 async def create_demo():
     voices = await get_voices()
-    default_voice = "en-US-AndrewMultilingualNeural - en-US (Male)" # 👈 Pick one of the available voices
+    default_voice = "en-US-AndrewMultilingualNeural - en-US (Male)"
     description = """
-    Default = <b>"en-US-AndrewMultilingualNeural - en-US (Male),
-    other voices 1F:en-GB-SoniaNeural, 2F:en-US-JennyNeural, 3F:en-HK-YanNeural, 4F:en-US-EmmaNeural
-    1M:en-AU-WilliamNeural, 2M:en-GB-RyanNeural, 3M:en-US-BrianMultilingualNeural, 4M:en-GB-ThomasNeural
-    1C: en-GB-MaisieNeural (Childvoice), 1O = en-GB-RyanNeural (OldMan)"</b>
-    You can insert silence using the marker 'SS##' example "SS2.0"
-    Enter your text, select a voice, and adjust the speech rate and pitch. Can also set like 1F-20 or 1M24.
+    Process YouTube transcript text with timestamps to generate synchronized audio.
+    Each line should be in the format: `minutes:seconds[.milliseconds] text`.
+    Voice prefixes (e.g., 1F, 1C) can be used at the beginning of a line to switch voices.
+    Example:
+    ```
+    0:00 This
+    0:14 is the story of little Red Riding Hood
+    0:38 1F Grandma isn’t feeling very well.
+    0:48 1C Yes, said Little Red Riding Hood.
+    ```
     """
-
     demo = gr.Interface(
         fn=tts_interface,
         inputs=[
-            gr.Textbox(label="Input Text", lines=5, placeholder="Separate paragraphs with two blank lines. Use 'SS[duration]' for silence."),
+            gr.Textbox(label="YouTube Transcript", lines=10, placeholder="0:00 This\n0:14 is the story...\n0:38 1F Grandma..."),
            gr.Dropdown(choices=[""] + list(voices.keys()), label="Select Voice", value=default_voice),
            gr.Slider(minimum=-50, maximum=50, value=0, label="Speech Rate Adjustment (%)", step=1),
            gr.Slider(minimum=-50, maximum=50, value=0, label="Pitch Adjustment (Hz)", step=1)
@@ -219,15 +173,13 @@ async def create_demo():
            gr.Audio(label="Generated Audio", type="filepath"),
            gr.Markdown(label="Warning", visible=False)
        ],
-        title="TTS using Edge Engine.. ENGLISH!",
+        title="TTS for YouTube Transcripts with Voice Switching",
        description=description,
-        article="Process text paragraph by paragraph for smoother output and insert silence markers.",
        analytics_enabled=False,
        allow_flagging=False
    )
    return demo

-# Run the application
 if __name__ == "__main__":
    demo = asyncio.run(create_demo())
    demo.launch()
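A minimal sketch of how the new `transcript_to_speech` assembles the timeline with pydub, assuming two MP3 clips already exist on disk and ffmpeg is installed; the `clips` list and file names are illustrative only:

```python
from pydub import AudioSegment

# Hypothetical (start_ms, path) pairs standing in for parsed transcript lines.
clips = [(0, "line1.mp3"), (14000, "line2.mp3")]

segments = [(start, AudioSegment.from_mp3(path)) for start, path in clips]
# len(audio) is the clip duration in milliseconds.
total_ms = max(start + len(audio) for start, audio in segments)

# Start from silence spanning the whole timeline, then overlay each clip
# at its timestamp, as the committed transcript_to_speech does.
timeline = AudioSegment.silent(duration=total_ms, frame_rate=24000)
for start, audio in segments:
    timeline = timeline.overlay(audio, position=start)

timeline.export("combined.mp3", format="mp3")
```

Overlaying onto a pre-sized silent base keeps each line anchored to its transcript timestamp instead of concatenating clips back to back.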