Update app.py
app.py
CHANGED
@@ -1,24 +1,65 @@
 import gradio as gr
 from transformers import pipeline
+import matplotlib.pyplot as plt
+import torch
+from pydub import AudioSegment
+import numpy as np

-# Load
+# Load sentiment analysis model
 sentiment_pipeline = pipeline("sentiment-analysis")

 # Function to analyze sentiment
 def analyze_sentiment(text):
     result = sentiment_pipeline(text)[0]
-    sentiment = result['label']
-
-
-
-
-
-
-
-
-
-
-
+    sentiment, confidence = result['label'], result['score']
+
+    # Sentiment Mapping for UI Display
+    sentiment_map = {
+        "POSITIVE": ("🟢 Positive 😊", "green"),
+        "NEGATIVE": ("🔴 Negative 😠", "red"),
+        "NEUTRAL": ("🟡 Neutral 😐", "orange")
+    }
+
+    sentiment_label, color = sentiment_map.get(sentiment.upper(), ("⚪ Unknown ❓", "gray"))
+
+    # Generate Confidence Score Bar Chart
+    fig, ax = plt.subplots(figsize=(4, 2))
+    ax.bar(sentiment, confidence, color=color)
+    ax.set_ylim([0, 1])
+    ax.set_ylabel("Confidence Score")
+    ax.set_title("Sentiment Confidence")
+
+    return sentiment_label, f"Confidence: {confidence:.2f}", fig
+
+# Function to process audio input
+def process_audio(audio_path):
+    try:
+        audio = AudioSegment.from_file(audio_path)
+        audio = audio.set_channels(1).set_frame_rate(16000)
+        audio_array = np.array(audio.get_array_of_samples(), dtype=np.float32) / 32768.0
+        return analyze_sentiment("Speech-to-text processing not available in this version.")
+    except Exception as e:
+        return "Error processing audio", str(e), None
+
+# Gradio UI
+with gr.Blocks(theme=gr.themes.Soft()) as iface:
+    gr.Markdown("# 🟢 Text Sentiment Analyzer")
+    gr.Markdown("Analyze the sentiment of your text input and visualize the confidence score.")
+
+    with gr.Row():
+        text_input = gr.Textbox(lines=2, placeholder="Enter text here...", label="Your Input")
+        audio_input = gr.Audio(source="microphone", type="filepath", label="Or Speak")
+
+    analyze_button = gr.Button("Analyze Sentiment ✨")
+
+    with gr.Row():
+        sentiment_output = gr.Textbox(label="Sentiment Result", interactive=False)
+        confidence_output = gr.Textbox(label="Confidence Score", interactive=False)
+
+    chart_output = gr.Plot(label="Confidence Score Chart")
+
+    analyze_button.click(analyze_sentiment, inputs=text_input, outputs=[sentiment_output, confidence_output, chart_output])
+    analyze_button.click(process_audio, inputs=audio_input, outputs=[sentiment_output, confidence_output, chart_output])

 # Launch the app
 if __name__ == "__main__":
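The hunk ends at the if __name__ == "__main__": guard, so the body of the guard is not part of the visible diff. Below is a minimal sketch of how the updated analyze_sentiment() could be exercised locally, assuming the file above is saved as app.py and the usual dependencies (gradio, transformers, matplotlib, pydub, numpy) are installed; the iface.launch() call is an assumption, since the actual launch line is not shown in this hunk.

# Hypothetical local check of the updated analyze_sentiment(); assumes the diff above is saved as app.py.
from app import analyze_sentiment, iface

label, confidence_text, fig = analyze_sentiment("I really enjoy using this Space!")
print(label)             # e.g. "🟢 Positive 😊" for a POSITIVE prediction
print(confidence_text)   # e.g. "Confidence: 0.99"
fig.savefig("confidence.png")  # the returned matplotlib figure can be saved or displayed

# iface.launch()  # assumption: the __main__ guard presumably starts the Blocks UI like this

Note that in this version process_audio() only resamples the recording with pydub and then routes a fixed placeholder message through analyze_sentiment(); it does not transcribe speech.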