import os

import gradio as gr
from huggingface_hub import login

from processing import run
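
# Authenticate with the Hugging Face Hub so gated models and datasets can be
# loaded; the token is read from the environment (e.g. a Space secret "HF_Token").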
hf_token = os.getenv("HF_Token")
login(hf_token)
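
# Models and datasets offered in the UI; model choices are Hugging Face Hub repo IDs.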
MODEL_OPTIONS = [
    "openai/whisper-tiny.en",
    "facebook/s2t-medium-librispeech-asr",
    "facebook/wav2vec2-base-960h",
    "openai/whisper-large-v2",
    "facebook/hf-seamless-m4t-medium",
]

DATASET_OPTIONS = [
    "Common Voice",
    "Librispeech ASR clean",
    "Librispeech ASR other",
    "OWN Recording/Sample",
]


def get_card(selected_model: str) -> str:
    """Return the markdown card for the selected model.

    Cards live in cards.txt, separated by "@@"; each card is matched
    by its "ID: <model>" line.
    """
    with open("cards.txt", "r") as f:
        cards = f.read()

    for card in cards.split("@@"):
        if "ID: " + selected_model in card:
            return card

    return "## Unknown Model"


def is_own(selected_option):
    """Show the recording components when "OWN Recording/Sample" is selected."""
    if selected_option == "OWN Recording/Sample":
        return gr.update(visible=True), gr.update(visible=True)
    return gr.update(visible=False), gr.update(visible=False)


def make_visible():
    """Reveal the components that display the evaluation results."""
    return gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)


INTRODUCTION = """### Welcome to ASR Model Comparison Hub! 🎉

Hey there, and welcome to an app designed for developers like you who are passionate about pushing the boundaries of Automatic Speech Recognition (ASR) technology!

Here you can easily compare ASR models: select a dataset, pick two models from the dropdowns, and see how they stack up against each other. If you're feeling creative, choose "OWN Recording/Sample" as your dataset option and record your own audio right in the app. Don't forget to provide a reference transcription, and the app will handle the rest!

ASR Model Comparison Hub uses the Word Error Rate (WER) ⬇️ metric (the lower the better) to give you a clear picture of each model's performance. And don't miss the **Amazing Leaderboard**, where a wide range of models have been evaluated: [check it out here](https://huggingface.co/spaces/hf-audio/open_asr_leaderboard).

Happy experimenting and comparing! 🚀"""


with gr.Blocks() as demo:

    gr.Markdown('# <p style="text-align: center;">ASR Model Comparison 💬</p>')
    gr.Markdown(INTRODUCTION)
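
    # Dataset selection; the empty side columns keep the radio centered.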
    with gr.Row():
        with gr.Column(scale=1):
            pass
        with gr.Column(scale=5):
            data_subset = gr.Radio(
                value="Common Voice",
                choices=DATASET_OPTIONS,
                label="Data subset / Own Sample",
            )

            # Hidden until "OWN Recording/Sample" is selected.
            own_audio = gr.Audio(sources=["microphone"], visible=False)
            own_transcription = gr.TextArea(lines=2, visible=False)

            data_subset.change(is_own, inputs=[data_subset], outputs=[own_audio, own_transcription])
        with gr.Column(scale=1):
            pass
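
    # Side-by-side model pickers; choosing a model renders its card below it.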
    with gr.Row():
        with gr.Column(scale=1):
            model_1 = gr.Dropdown(choices=MODEL_OPTIONS)
            model_1_card = gr.Markdown("")

        with gr.Column(scale=1):
            model_2 = gr.Dropdown(choices=MODEL_OPTIONS)
            model_2_card = gr.Markdown("")

    model_1.change(get_card, inputs=model_1, outputs=model_1_card)
    model_2.change(get_card, inputs=model_2, outputs=model_2_card)
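
    # Button that starts the evaluation; the results area stays hidden until it is clicked.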
    eval_btn = gr.Button(value="Evaluate", variant="primary", size="sm")

    # Results components, revealed by make_visible once an evaluation starts.
    results_title = gr.Markdown('## <p style="text-align: center;">Results</p>', visible=False)
    results_md = gr.Markdown("")
    results_plot = gr.Plot(show_label=False, visible=False)
    results_df = gr.DataFrame(
        visible=False,
        row_count=(5, "dynamic"),
        interactive=False,
        wrap=True,
    )
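
    # Two handlers on one click: reveal the result components immediately,
    # then run the evaluation and fill them in.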
    eval_btn.click(make_visible, outputs=[results_plot, results_df, results_title])
    eval_btn.click(
        run,
        inputs=[data_subset, model_1, model_2, own_audio, own_transcription],
        outputs=[results_md, results_plot, results_df],
        show_progress=False,
    )
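
# debug=True blocks the main thread and prints tracebacks to the console.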
demo.launch(debug=True)