# satish99017's picture
# Update app.py
# b1cb380
import streamlit as st
from transformers import pipeline
import os
import io
import tempfile
import base64
from audiorecorder import audiorecorder
from openai import OpenAI
from pydub import AudioSegment
# Expect the OpenAI key in the environment; only fall back to the placeholder
# when no key is already set.  (The original unconditionally assigned "" here,
# clobbering any externally-configured key and guaranteeing auth failure.)
os.environ.setdefault('OPENAI_API_KEY', "")  # TODO: supply the OpenAI key here
client = OpenAI()  # reads OPENAI_API_KEY from the environment

st.title("Whisper App")

# Browser-side recorder; `audio` behaves like a pydub AudioSegment
# (len() > 0 once something was recorded).
audio = audiorecorder("Click to record", "Click to stop recording")

if len(audio) > 0:
    # Persist the recording to a temp file.  NOTE: pydub/ffmpeg expects the
    # bare format name "wav", not ".wav" as the original passed.
    temp_dir = tempfile.mkdtemp()
    temp_file_path = os.path.join(temp_dir, 'temp_audio.wav')
    audio.export(temp_file_path, format="wav")

    # Re-load from the path we actually wrote (the original read a relative
    # "temp_audio.wav" from the CWD, which does not exist) and convert to
    # FLAC inside the same temp dir instead of polluting the CWD.
    song = AudioSegment.from_wav(temp_file_path)
    flac_path = os.path.join(temp_dir, 'temp_audio.flac')
    song.export(flac_path, format="flac")
######################## models
# model = pipeline("sentiment-analysis")
# st.title("Hugging Face Model Demo")
# input_text = st.text_input("Enter your text", "")
# if st.button("Analyze"):
# # Perform inference using the loaded model
# result = model(input_text)
# st.write("Prediction:", result[0]['label'], "| Score:", result[0]['score'])