BIGBULL7777 committed on
Commit
36c32d8
·
1 Parent(s): e6bbe26

final project

Browse files
Files changed (3) hide show
  1. app.py +129 -0
  2. genai.png +0 -0
  3. requirements.txt +6 -0
app.py ADDED
@@ -0,0 +1,129 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import whisper
3
+ from transformers import pipeline
4
+
5
+ model = whisper.load_model("base")
6
+ sentiment_analysis = pipeline("sentiment-analysis", framework="pt", model="SamLowe/roberta-base-go_emotions")
7
+
8
+ def analyze_sentiment(text):
9
+ results = sentiment_analysis(text)
10
+ sentiment_results = {result['label']: result['score'] for result in results}
11
+ return sentiment_results
12
+
13
+ def get_sentiment_emoji(sentiment):
14
+ # Define the emojis corresponding to each sentiment
15
+ emoji_mapping = {
16
+ "disappointment": "๐Ÿ˜ž",
17
+ "sadness": "๐Ÿ˜ข",
18
+ "annoyance": "๐Ÿ˜ ",
19
+ "neutral": "๐Ÿ˜",
20
+ "disapproval": "๐Ÿ‘Ž",
21
+ "realization": "๐Ÿ˜ฎ",
22
+ "nervousness": "๐Ÿ˜ฌ",
23
+ "approval": "๐Ÿ‘",
24
+ "joy": "๐Ÿ˜„",
25
+ "anger": "๐Ÿ˜ก",
26
+ "embarrassment": "๐Ÿ˜ณ",
27
+ "caring": "๐Ÿค—",
28
+ "remorse": "๐Ÿ˜”",
29
+ "disgust": "๐Ÿคข",
30
+ "grief": "๐Ÿ˜ฅ",
31
+ "confusion": "๐Ÿ˜•",
32
+ "relief": "๐Ÿ˜Œ",
33
+ "desire": "๐Ÿ˜",
34
+ "admiration": "๐Ÿ˜Œ",
35
+ "optimism": "๐Ÿ˜Š",
36
+ "fear": "๐Ÿ˜จ",
37
+ "love": "โค๏ธ",
38
+ "excitement": "๐ŸŽ‰",
39
+ "curiosity": "๐Ÿค”",
40
+ "amusement": "๐Ÿ˜„",
41
+ "surprise": "๐Ÿ˜ฒ",
42
+ "gratitude": "๐Ÿ™",
43
+ "pride": "๐Ÿฆ"
44
+ }
45
+ return emoji_mapping.get(sentiment, "")
46
+
47
+ def display_sentiment_results(sentiment_results, option):
48
+ sentiment_text = ""
49
+ for sentiment, score in sentiment_results.items():
50
+ emoji = get_sentiment_emoji(sentiment)
51
+ if option == "Sentiment Only":
52
+ sentiment_text += f"{sentiment} {emoji}\n"
53
+ elif option == "Sentiment + Score":
54
+ sentiment_text += f"{sentiment} {emoji}: {score}\n"
55
+ return sentiment_text
56
+
57
+ def inference(audio, sentiment_option):
58
+ audio = whisper.load_audio(audio)
59
+ audio = whisper.pad_or_trim(audio)
60
+
61
+ mel = whisper.log_mel_spectrogram(audio).to(model.device)
62
+
63
+ _, probs = model.detect_language(mel)
64
+ lang = max(probs, key=probs.get)
65
+
66
+ options = whisper.DecodingOptions(fp16=False)
67
+ result = whisper.decode(model, mel, options)
68
+
69
+ sentiment_results = analyze_sentiment(result.text)
70
+ sentiment_output = display_sentiment_results(sentiment_results, sentiment_option)
71
+
72
+ return lang.upper(), result.text, sentiment_output
73
+
74
+ title = """<h1 align="center">๐ŸŽค Sentiment analysis for Voice Calls ๐Ÿ’ฌ</h1>"""
75
+ image_path = "genai.png"
76
+ description = """ This POC was developed for AI FINTECH HACKATHON @ BHARATPE"""
77
+
78
+ custom_css = """
79
+ #banner-image {
80
+ display: block;
81
+ margin-left: auto;
82
+ margin-right: auto;
83
+ }
84
+ #chat-message {
85
+ font-size: 14px;
86
+ min-height: 300px;
87
+ }
88
+ """
89
+
90
+ block = gr.Blocks(css=custom_css)
91
+
92
+ with block:
93
+ gr.HTML(title)
94
+
95
+ with gr.Row():
96
+ with gr.Column():
97
+ gr.Image(image_path, elem_id="banner-image", show_label=False)
98
+ with gr.Column():
99
+ gr.HTML(description)
100
+
101
+ with gr.Group():
102
+ with gr.Group():
103
+ audio = gr.Audio(
104
+ label="Input Audio",
105
+ show_label=False,
106
+ type="filepath"
107
+ )
108
+
109
+ sentiment_option = gr.Radio(
110
+ choices=["Sentiment Only", "Sentiment + Score"],
111
+ label="Select an option",
112
+ # default="Sentiment Only"
113
+ )
114
+
115
+ btn = gr.Button("Transcribe")
116
+
117
+ lang_str = gr.Textbox(label="Language")
118
+
119
+ text = gr.Textbox(label="Transcription")
120
+
121
+ sentiment_output = gr.Textbox(label="Sentiment Analysis Results",
122
+ # output=True
123
+ )
124
+
125
+ btn.click(inference, inputs=[audio, sentiment_option], outputs=[lang_str, text, sentiment_output])
126
+
127
+
128
+
129
+ block.launch()
genai.png ADDED
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ git+https://github.com/openai/whisper.git
2
+ transformers
3
+ gradio
4
+ torch
5
+ torchaudio
6
+ torchvision