Hugging Face Spaces commit view: "Update app.py" (the Space was previously in a runtime-error state).
File changed: app.py
Diff hunk: @@ -5,38 +5,44 @@ from jiwer import wer
|
|
5 |
|
6 |
# Load models
|
7 |
whisper_pipeline_1 = pipeline("automatic-speech-recognition", model="maliahson/Finetuned_Whisper_Medium_Model_2")
|
8 |
-
|
9 |
-
|
10 |
|
11 |
def transcribe_and_compare(audio_path, original_transcription=None):
|
12 |
"""
|
13 |
-
Transcribes an audio file using
|
14 |
|
15 |
Args:
|
16 |
audio_path (str): Path to the audio file.
|
17 |
original_transcription (str, optional): Ground truth transcription.
|
18 |
-
|
19 |
Returns:
|
20 |
dict: Results including transcriptions and WER calculations.
|
21 |
"""
|
22 |
transcription_1 = whisper_pipeline_1(audio_path)["text"]
|
23 |
transcription_2 = whisper_pipeline_2(audio_path)["text"]
|
|
|
|
|
24 |
comparison_result = {
|
25 |
-
"Model 1 Output (maliahson/
|
26 |
"Model 2 Output (openai/whisper-large-v3-turbo)": transcription_2,
|
|
|
27 |
}
|
28 |
|
29 |
if original_transcription:
|
30 |
-
# Calculate Word Error Rate
|
31 |
wer_1 = wer(original_transcription, transcription_1)
|
32 |
wer_2 = wer(original_transcription, transcription_2)
|
|
|
|
|
33 |
comparison_result["WER Model 1"] = wer_1
|
34 |
comparison_result["WER Model 2"] = wer_2
|
|
|
35 |
else:
|
36 |
-
# Compare outputs of
|
37 |
comparison_result["Difference Between Models"] = {
|
38 |
-
"Model 1 Unique Words": set(transcription_1.split()) - set(transcription_2.split()),
|
39 |
-
"Model 2 Unique Words": set(transcription_2.split()) - set(transcription_1.split()),
|
|
|
40 |
}
|
41 |
|
42 |
return comparison_result
|
@@ -55,4 +61,4 @@ with gr.Blocks() as demo:
|
|
55 |
outputs=output
|
56 |
)
|
57 |
|
58 |
-
demo.launch(debug=True)
|
|
|
5 |
|
6 |
# Load the ASR models once at import time so every Gradio request reuses them.
# Use GPU device 0 when CUDA is available, otherwise fall back to the CPU.
# Hoisted into one variable so all three pipelines agree on placement —
# previously whisper_pipeline_1 had no device argument and was stuck on CPU
# even when a GPU was present.
device = 0 if torch.cuda.is_available() else "cpu"

whisper_pipeline_1 = pipeline("automatic-speech-recognition", model="maliahson/Finetuned_Whisper_Medium_Model_2", device=device)
whisper_pipeline_2 = pipeline("automatic-speech-recognition", model="openai/whisper-large-v3-turbo", device=device)
whisper_pipeline_3 = pipeline("automatic-speech-recognition", model="ihanif/whisper-medium-urdu", device=device)
10 |
|
11 |
def transcribe_and_compare(audio_path, original_transcription=None):
    """
    Transcribe an audio file with three Whisper models and compare the results.

    Args:
        audio_path (str): Path to the audio file.
        original_transcription (str, optional): Ground-truth transcription.
            When provided, a per-model Word Error Rate is computed; otherwise
            the three model outputs are compared against each other.

    Returns:
        dict: The three transcriptions, plus either "WER Model N" float
        entries (when a ground truth was supplied) or a
        "Difference Between Models" mapping of the words unique to each
        model's output (sorted lists, so the result is JSON-serializable
        for the Gradio output component — plain sets are not).
    """
    transcription_1 = whisper_pipeline_1(audio_path)["text"]
    transcription_2 = whisper_pipeline_2(audio_path)["text"]
    transcription_3 = whisper_pipeline_3(audio_path)["text"]

    comparison_result = {
        "Model 1 Output (maliahson/Finetuned_Whisper_Medium_Model_2)": transcription_1,
        "Model 2 Output (openai/whisper-large-v3-turbo)": transcription_2,
        "Model 3 Output (ihanif/whisper-medium-urdu)": transcription_3,
    }

    if original_transcription:
        # Calculate Word Error Rate (WER) against the ground truth.
        comparison_result["WER Model 1"] = wer(original_transcription, transcription_1)
        comparison_result["WER Model 2"] = wer(original_transcription, transcription_2)
        comparison_result["WER Model 3"] = wer(original_transcription, transcription_3)
    else:
        # No ground truth: report the words only one model produced.
        # Build each word set once instead of re-splitting per comparison.
        words_1 = set(transcription_1.split())
        words_2 = set(transcription_2.split())
        words_3 = set(transcription_3.split())
        # sorted() lists instead of raw sets: deterministic order and
        # JSON-serializable (Python sets raise TypeError in json.dumps,
        # which breaks a Gradio JSON display of this dict).
        comparison_result["Difference Between Models"] = {
            "Model 1 Unique Words": sorted(words_1 - words_2 - words_3),
            "Model 2 Unique Words": sorted(words_2 - words_1 - words_3),
            "Model 3 Unique Words": sorted(words_3 - words_1 - words_2),
        }

    return comparison_result
|
|
|
61 |
outputs=output
|
62 |
)
|
63 |
|
64 |
+
# Start the Gradio app; debug=True surfaces tracebacks in the console/logs.
demo.launch(debug=True)
|