import streamlit as st
from transformers import pipeline
from scipy.io.wavfile import write
import gradio as gr
import wavio

input_file = 'recorded.wav'
output_file = 'output_filtered_receiver.wav'
low_frequency = 18000
high_frequency = 19000
bit_duration = 0.007
sample_rate = 44100
amplitude_scaling_factor = 10.0


def record(audio):
    """
    Record audio and write it to a .wav file.

    Parameters:
        audio (tuple): A tuple containing the sample rate and the audio data.

    Returns:
        str: A success message if the audio is recorded correctly, otherwise an error message.
    """
    try:
        # Check that the audio tuple contains exactly two elements
        if len(audio) != 2:
            return f"Error: Expected a tuple with 2 elements, but got {len(audio)}"

        # Unpack the sample rate and data from the audio tuple
        sr, data = audio

        # Write the audio data to a .wav file
        wavio.write(input_file, data, sr)

        # Apply the bandpass filter to the recorded audio
        # (filtered() must be defined elsewhere in the app; see the sketch after the Gradio block)
        filtered()

        # Return a success message
        return "Audio received correctly"
    except Exception as e:
        # If an error occurs, return an error message
        return f"Error: {str(e)}"


with gr.Blocks() as demo:
    # Microphone input delivers a (sample_rate, numpy_array) tuple to record()
    # (on Gradio 3.x use source="microphone" instead of sources=["microphone"])
    input_audio = gr.Audio(sources=["microphone"], type="numpy")
    output_text = gr.Textbox(label="status")
    btn_record = gr.Button(value="record")
    btn_record.click(fn=record, inputs=input_audio, outputs=output_text)

demo.launch()

######################## models
# model = pipeline("sentiment-analysis")
# st.title("Hugging Face Model Demo")
# input_text = st.text_input("Enter your text", "")
# if st.button("Analyze"):
#     # Perform inference using the loaded model
#     result = model(input_text)
#     st.write("Prediction:", result[0]['label'], "| Score:", result[0]['score'])
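
######################## filtered() sketch
# record() calls filtered(), which is not defined in this file. The commented-out code
# below is only a minimal sketch of what it might do, assuming a Butterworth bandpass
# between low_frequency and high_frequency applied to input_file and written to
# output_file with scipy; it is not the app's actual filter. If used, paste it above
# record() so the function exists before the Gradio callback fires.
#
# import numpy as np
# from scipy.io import wavfile
# from scipy.signal import butter, filtfilt
#
# def filtered():
#     # Read the recorded audio: sr is the sample rate, data the samples
#     sr, data = wavfile.read(input_file)
#     # Design a 5th-order Butterworth bandpass around the 18-19 kHz carrier band
#     nyquist = sr / 2
#     b, a = butter(5, [low_frequency / nyquist, high_frequency / nyquist], btype='band')
#     # Zero-phase filter along the time axis, then apply the amplitude scaling
#     out = filtfilt(b, a, data.astype(float), axis=0) * amplitude_scaling_factor
#     # Clip to the int16 range and write the filtered signal for the receiver stage
#     wavfile.write(output_file, sr, np.clip(out, -32768, 32767).astype(np.int16))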