# app.py — Gradio text sentiment analyzer with confidence-score visualisation.
import gradio as gr
from transformers import pipeline
import matplotlib.pyplot as plt
import torch
from pydub import AudioSegment
import numpy as np
# Load sentiment analysis model
# NOTE(review): no checkpoint is pinned, so transformers falls back to its
# default sentiment model (DistilBERT SST-2 at time of writing) and emits a
# warning — consider pinning model= explicitly for reproducibility.
sentiment_pipeline = pipeline("sentiment-analysis")
# Function to analyze sentiment
def analyze_sentiment(text):
    """Classify *text* and return a label, confidence string, and bar chart.

    Parameters
    ----------
    text : str
        Raw user input to classify.

    Returns
    -------
    tuple
        (emoji-decorated sentiment label, "Confidence: x.xx" string,
        matplotlib Figure or None for empty input).
    """
    # Guard: an empty/whitespace prompt would otherwise still be scored,
    # producing a misleadingly confident result.
    if not text or not text.strip():
        return "⚪ Unknown ❓", "Confidence: N/A", None

    result = sentiment_pipeline(text)[0]
    sentiment, confidence = result['label'], result['score']

    # Sentiment Mapping for UI Display: model label -> (display text, bar colour)
    sentiment_map = {
        "POSITIVE": ("🟢 Positive 😊", "green"),
        "NEGATIVE": ("🔴 Negative 😠", "red"),
        "NEUTRAL": ("🟡 Neutral 😐", "orange")
    }
    sentiment_label, color = sentiment_map.get(sentiment.upper(), ("⚪ Unknown ❓", "gray"))

    # Generate Confidence Score Bar Chart (single bar, score in [0, 1]).
    fig, ax = plt.subplots(figsize=(4, 2))
    ax.bar(sentiment, confidence, color=color)
    ax.set_ylim([0, 1])
    ax.set_ylabel("Confidence Score")
    ax.set_title("Sentiment Confidence")
    return sentiment_label, f"Confidence: {confidence:.2f}", fig
# Function to process audio input
def process_audio(audio_path):
    """Decode a recorded audio file and return a sentiment-result triple.

    Speech-to-text is not implemented in this version: the audio is decoded
    (validating the recording) and a placeholder message is analyzed instead.

    Parameters
    ----------
    audio_path : str or None
        Filepath from the Gradio microphone widget; None when nothing
        was recorded.

    Returns
    -------
    tuple
        Same triple as analyze_sentiment, or an error triple on failure.
    """
    # The Audio widget passes None when no recording was made; handle it
    # explicitly instead of surfacing a raw decoder exception.
    if audio_path is None:
        return "Error processing audio", "No audio was recorded.", None
    try:
        # Normalise: mono, 16 kHz, float32 samples scaled to roughly [-1, 1).
        audio = AudioSegment.from_file(audio_path)
        audio = audio.set_channels(1).set_frame_rate(16000)
        audio_array = np.array(audio.get_array_of_samples(), dtype=np.float32) / 32768.0
        # NOTE(review): audio_array is currently unused — kept as the hook for
        # a future speech-to-text stage.
        return analyze_sentiment("Speech-to-text processing not available in this version.")
    except Exception as e:
        return "Error processing audio", str(e), None
# Gradio UI
def _dispatch_analysis(text, audio_path):
    """Route one click to text analysis when text is given, else to audio.

    Fixes the original double click-binding, where both handlers fired on
    every click and process_audio's output always overwrote the text result.
    """
    if text and text.strip():
        return analyze_sentiment(text)
    return process_audio(audio_path)

with gr.Blocks(theme=gr.themes.Soft()) as iface:
    gr.Markdown("# 📢 Text Sentiment Analyzer")
    gr.Markdown("Analyze the sentiment of your text input and visualize the confidence score.")
    with gr.Row():
        text_input = gr.Textbox(lines=2, placeholder="Enter text here...", label="Your Input")
        # NOTE(review): `source=` is the gradio 3.x keyword; gradio 4.x renamed
        # it to `sources=["microphone"]` — confirm the pinned gradio version.
        audio_input = gr.Audio(source="microphone", type="filepath", label="Or Speak")
    analyze_button = gr.Button("Analyze Sentiment ✨")
    with gr.Row():
        sentiment_output = gr.Textbox(label="Sentiment Result", interactive=False)
        confidence_output = gr.Textbox(label="Confidence Score", interactive=False)
    chart_output = gr.Plot(label="Confidence Score Chart")
    # Single handler: text takes priority, audio is the fallback.
    analyze_button.click(
        _dispatch_analysis,
        inputs=[text_input, audio_input],
        outputs=[sentiment_output, confidence_output, chart_output],
    )
# Launch the app only when executed as a script (not when imported).
if __name__ == "__main__":
    iface.launch()