Update app.py
app.py CHANGED
@@ -3,11 +3,9 @@ from transformers import pipeline
 import gradio as gr
 import torch
 
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 asr = pipeline(model="asif00/whisper-bangla")
-# asr.to(device=device)
 ser = pipeline("text2text-generation", model="asif00/mbart_bn_error_correction")
-
+
 
 @spaces.GPU
 def transcribe(audio):
@@ -18,9 +16,9 @@ def transcribe(audio):
 
 @spaces.GPU
 def correction(text):
-    corrected_text = ser(text)
-    print(corrected_text)
-    return corrected_text
+    corrected_text = ser(text)
+    print(corrected_text["generated_text"])
+    return corrected_text["generated_text"]
 
 
 def transcribe_and_correct(audio):
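
For reference, a minimal, self-contained sketch of how these two pipelines could be exercised outside the Space, assuming the standard transformers pipeline behavior (a text2text-generation pipeline returns a list of dicts with a "generated_text" key, and an ASR pipeline returns a dict with a "text" key). The helper names and the example audio path below are illustrative, not part of app.py:

from transformers import pipeline

# Bangla ASR and error-correction pipelines, as configured in the diff above.
asr = pipeline(model="asif00/whisper-bangla")
ser = pipeline("text2text-generation", model="asif00/mbart_bn_error_correction")


def correct_text(text: str) -> str:
    # Illustrative helper: a text2text-generation pipeline typically returns
    # [{"generated_text": "..."}], so the first element is indexed here.
    outputs = ser(text)
    return outputs[0]["generated_text"]


def transcribe_and_correct_local(audio_path: str) -> str:
    # Illustrative helper: the ASR pipeline accepts an audio file path and
    # returns {"text": "..."}.
    transcript = asr(audio_path)["text"]
    return correct_text(transcript)


# Example usage (hypothetical file path):
# print(transcribe_and_correct_local("sample_bangla.wav"))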