# backend.py
from transformers import pipeline
import torch


class AudioTranscriber:
    def __init__(self, model_name="openai/whisper-small"):
        # Hugging Face pipelines take a device index: 0 = first GPU, -1 = CPU.
        self.device = 0 if torch.cuda.is_available() else -1
        print(f"Loading model '{model_name}' on device: {'GPU' if self.device == 0 else 'CPU'}")
        self.pipe = pipeline(
            "automatic-speech-recognition",
            model=model_name,
            chunk_length_s=30,  # split long audio into 30-second chunks
            device=self.device,
        )

    def transcribe(self, audio_path):
        """
        Transcribe an audio file.

        Args:
            audio_path (str): Path to the audio file

        Returns:
            str: Transcribed text
        """
        if audio_path is None:
            return "No audio file provided."
        try:
            result = self.pipe(audio_path)
            # The ASR pipeline returns a dict; the transcript lives under "text".
            return result.get("text", "").strip()
        except Exception as e:
            return f"Transcription error: {str(e)}"
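

# A minimal usage sketch for running this module directly. The file name
# "sample.wav" is a hypothetical placeholder, not part of the original code;
# substitute any audio file readable by the pipeline (e.g. WAV, MP3, FLAC).
if __name__ == "__main__":
    transcriber = AudioTranscriber()
    print(transcriber.transcribe("sample.wav"))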