Update app.py
app.py CHANGED
@@ -47,12 +47,12 @@ def get_prediction(inputs, **kwargs):
 
 def transcribe(inputs: str,
                add_punctuation: bool,
+               add_silence_end: bool,
+               add_silence_start: bool,
                num_speakers: float,
                min_speakers: float,
                max_speakers: float,
-               chunk_length_s: float
-               add_silence_end: float,
-               add_silence_start: float):
+               chunk_length_s: float):
     if inputs is None:
         raise gr.Error("No audio file submitted! Please upload or record an audio file before submitting your request.")
     with open(inputs, "rb") as f:
@@ -65,8 +65,8 @@ def transcribe(inputs: str,
         min_speakers=int(min_speakers) if min_speakers != 0 else None,
         max_speakers=int(max_speakers) if max_speakers != 0 else None,
         chunk_length_s=int(chunk_length_s) if chunk_length_s != 30 else None,
-        add_silence_end=
-        add_silence_start=
+        add_silence_end=0.5 if add_silence_end else None,
+        add_silence_start=0.5 if add_silence_start else None
     )
     output = ""
     for n, s in enumerate(prediction["speaker_ids"]):
@@ -79,8 +79,8 @@ description = (f"Transcribe and diarize long-form microphone or audio inputs wit
                f"Kotoba-Whisper [{model_name}](https://huggingface.co/{model_name}).")
 title = f"Audio Transcription and Diarization with {os.path.basename(model_name)}"
 shared_config = {"fn": transcribe, "title": title, "description": description, "allow_flagging": "never", "examples": [
-    [example_file, True,
-    [example_file, True,
+    [example_file, True, True, True, 0, 0, 0, 30],
+    [example_file, True, True, True, 4, 0, 0, 30]
 ]}
 o_upload = gr.Markdown()
 o_mic = gr.Markdown()
@@ -91,12 +91,12 @@ i_upload = gr.Interface(
     inputs=[
         gr.Audio(sources="upload", type="filepath", label="Audio file"),
         gr.Checkbox(label="add punctuation", value=True),
+        gr.Checkbox(label="add silence at the end", value=True),
+        gr.Checkbox(label="add silence at the start", value=True),
         gr.Slider(0, 10, label="num speakers (set 0 for auto-detect mode)", value=0, step=1),
         gr.Slider(0, 10, label="min speakers (set 0 for auto-detect mode)", value=0, step=1),
         gr.Slider(0, 10, label="max speakers (set 0 for auto-detect mode)", value=0, step=1),
         gr.Slider(5, 30, label="chunk length for ASR", value=30, step=1),
-        gr.Slider(0, 0.5, label="silence at the end", value=0.5, step=0.05),
-        gr.Slider(0, 0.5, label="silence at the start", value=0.5, step=0.05),
     ],
     outputs=gr.Markdown(),
     **shared_config
@@ -105,12 +105,12 @@ i_mic = gr.Interface(
     inputs=[
         gr.Audio(sources="microphone", type="filepath", label="Microphone input"),
         gr.Checkbox(label="add punctuation", value=True),
+        gr.Checkbox(label="add silence at the end", value=True),
+        gr.Checkbox(label="add silence at the start", value=True),
         gr.Slider(0, 10, label="num speakers (set 0 for auto-detect mode)", value=0, step=1),
         gr.Slider(0, 10, label="min speakers (set 0 for auto-detect mode)", value=0, step=1),
         gr.Slider(0, 10, label="max speakers (set 0 for auto-detect mode)", value=0, step=1),
         gr.Slider(5, 30, label="chunk length for ASR", value=30, step=1),
-        gr.Slider(0, 0.5, label="silence at the end", value=0.5, step=0.05),
-        gr.Slider(0, 0.5, label="silence at the start", value=0.5, step=0.05),
     ],
     outputs=gr.Markdown(),
     **shared_config
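After this change, the positional order of the transcribe() arguments matches the updated example rows: (inputs, add_punctuation, add_silence_end, add_silence_start, num_speakers, min_speakers, max_speakers, chunk_length_s). A minimal sketch of a direct call with the new signature, using the interface defaults (illustrative only, not part of the commit; example_file is assumed to be the app's bundled sample audio path):

    transcribe(
        inputs=example_file,      # path to an audio file
        add_punctuation=True,
        add_silence_end=True,     # pads 0.5 s of silence at the end when checked
        add_silence_start=True,   # pads 0.5 s of silence at the start when checked
        num_speakers=0,           # 0 = auto-detect
        min_speakers=0,           # 0 = auto-detect
        max_speakers=0,           # 0 = auto-detect
        chunk_length_s=30,        # 30 is passed through as None (model default)
    )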