loge-dot committed on
Commit 7a66365 · 1 Parent(s): 9952320

last_version_upload to huggingface

__pycache__/emotion_analyzer.cpython-313.pyc ADDED
Binary file (8.74 kB).
 
app.py CHANGED
@@ -11,20 +11,20 @@ except RuntimeError:
     sys.path.append(str(Path(__file__).parent))


-from pages import emotion_analyzer  # import the emotion analysis page and the Chatbot page
-
 def main():
     st.set_page_config(
         page_title="Audio Emotion Recognition System",
         page_icon="🎵",
         layout="wide"
     )
-
-    st.title("Audio Emotion Recognition System")
-    st.write("This is a web application for audio emotion recognition.")
-
-    # test only the emotion analysis page
-    emotion_analyzer.show()
+    st.sidebar.title("Navigation Bar")
+    st.sidebar.markdown("<small>(Chatbot is not available now, update soon...😉)</small>", unsafe_allow_html=True)
+    app_mode = st.sidebar.radio("Go to", ["Emotion Analyzer", "Chatbot"])
+    if app_mode == "Emotion Analyzer":
+        from emotion_analyzer import show  # import the module directly
+        show()
+    elif app_mode == "Chatbot":
+        st.write("Chatbot is not available now, update soon...😉")

 if __name__ == "__main__":
     main()
pages/chatbot.py → chatbot.py RENAMED
@@ -1,97 +1,97 @@
 import streamlit as st
 import torch
 import torchaudio
 import json
 from openai import AzureOpenAI
 from openai.types.beta.threads import Message
 from safetensors.torch import load_file
 from transformers import AutoTokenizer, Wav2Vec2Processor, BertModel, Wav2Vec2Model
 from huggingface_hub import hf_hub_download
 from dotenv import load_dotenv
 from utils import model_inference
 import os

 # Load environment variables
-load_dotenv("Group7/.env")
+load_dotenv(r"Group7/.env")
 api_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT")
 api_key = os.getenv("AZURE_OPENAI_API_KEY")
 api_version = os.getenv("AZURE_OPENAI_API_VERSION")
 api_deployment_name = os.getenv("AZURE_OPENAI_DEPLOYMENT")

 # Initialize the OpenAI client
 client = AzureOpenAI(api_key=api_key, api_version=api_version, azure_endpoint=api_endpoint)

 # Define the Chatbot persona
 instruction = (
     "You are a psychiatrist talking to a patient who may be depressed. "
     "You'll receive their emotional state and conversation text. "
     "Your goal is to help them open up and guide them to a positive path. "
     "Be friendly, professional, empathetic, and supportive."
 )

 # Set up the Chatbot thread and assistant
 if "thread" not in st.session_state:
     st.session_state.thread = client.beta.threads.create()

 if "assistant" not in st.session_state:
     assistant_id = "asst_Sb1W9jVTeL1iyzu6N5MilgA1"
     try:
         st.session_state.assistant = client.beta.assistants.retrieve(assistant_id=assistant_id)
     except Exception:
         st.session_state.assistant = client.beta.assistants.create(
             name="Depression Chatbot",
             instructions=instruction,
             model=api_deployment_name,
         )

 # Send a message to the Azure Chatbot
 def send_message_to_chatbot(user_input, emotion):
     chat_history = client.beta.threads.messages.list(thread_id=st.session_state.thread.id)
     messages = [{"role": msg.role, "content": msg.content} for msg in chat_history]

     messages.append({"role": "user", "content": f"Emotion: {emotion}. {user_input}"})

     client.beta.threads.messages.create(
         thread_id=st.session_state.thread.id,
         role="user",
         content=f"Emotion: {emotion}. {user_input}",
     )

     run = client.beta.threads.runs.create(
         thread_id=st.session_state.thread.id,
         assistant_id=st.session_state.assistant.id,
     )

     # Poll until the run completes; retrieving a run requires its thread id as well
     while run.status in ["queued", "in_progress"]:
         run = client.beta.threads.runs.retrieve(run.id, thread_id=st.session_state.thread.id)

     # messages.list returns newest-first, so data[0] is the latest reply
     response_messages = client.beta.threads.messages.list(thread_id=st.session_state.thread.id)
     return response_messages.data[0].content if response_messages.data else "No response."

 # Streamlit UI
 st.title("🧠 AI Depression Chatbot")

 if "chat_history" not in st.session_state:
     st.session_state.chat_history = []

 # User input
 user_input = st.text_input("Enter your message:")
 audio_file = st.file_uploader("Upload audio file", type=["wav", "mp3"])

 if st.button("Send"):
     if user_input or audio_file:
         emotion_probabilities = model_inference.predict_emotion(user_input, audio_file)
         # predict_emotion returns percent strings (e.g. "16.05%"), so parse before comparing
         dominant_emotion = max(emotion_probabilities, key=lambda k: float(emotion_probabilities[k].strip('%')))

         chatbot_response = send_message_to_chatbot(user_input, dominant_emotion)

         # Save the chat history
         st.session_state.chat_history.append({"role": "user", "content": user_input})
         st.session_state.chat_history.append({"role": "assistant", "content": chatbot_response})

         # Display the chat history
         for chat in st.session_state.chat_history:
             st.write(f"**{chat['role'].capitalize()}**: {chat['content']}")
     else:
         st.warning("Please enter a message or upload an audio file.")
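Note that the assistant reply stored in chatbot_response above is a list of content blocks rather than plain text, so st.write will render raw block objects. A small helper along these lines (a sketch, assuming the current openai SDK's text-block shape with block.text.value) would extract the displayable text:

def message_text(message) -> str:
    # Concatenate the text portions of an Assistants API message;
    # non-text blocks (e.g. images) are skipped.
    parts = []
    for block in message.content:
        if getattr(block, "type", None) == "text":
            parts.append(block.text.value)
    return "".join(parts)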
components/__pycache__/visualizations.cpython-313.pyc CHANGED
Binary files a/components/__pycache__/visualizations.cpython-313.pyc and b/components/__pycache__/visualizations.cpython-313.pyc differ
 
components/visualizations.py CHANGED
@@ -27,11 +27,9 @@ def plot_emotion_distribution(emotion_dict):
         polar=dict(
             radialaxis=dict(
                 visible=True,
-                range=[0, 0.3],  # set the range
-                tickvals=[0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3],  # set the tick positions
-                ticktext=['0%', '5%', '10%', '15%', '20%', '25%', '30%']  # set the tick labels
+                range=[0, 1]  # set the range
             )),
         showlegend=False
     )

-    st.plotly_chart(fig, use_container_width=True)
+    st.plotly_chart(fig, use_container_width=True)
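For context, here is a minimal sketch of how plot_emotion_distribution might assemble this radar chart around the diffed layout block; the Scatterpolar trace and the percent-string parsing are assumptions, not code taken from the repository:

import plotly.graph_objects as go
import streamlit as st

def plot_emotion_distribution(emotion_dict):
    # emotion_dict maps label -> percent string, e.g. {"Joy": "16.05%", ...}
    labels = list(emotion_dict.keys())
    values = [float(str(v).strip('%')) / 100 for v in emotion_dict.values()]

    fig = go.Figure(
        go.Scatterpolar(
            r=values + values[:1],        # repeat the first point to close the polygon
            theta=labels + labels[:1],
            fill='toself',
        )
    )
    fig.update_layout(
        polar=dict(
            radialaxis=dict(
                visible=True,
                range=[0, 1]              # probabilities, so a fixed 0-1 scale
            )),
        showlegend=False
    )
    st.plotly_chart(fig, use_container_width=True)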
emotion_analyzer.py ADDED
@@ -0,0 +1,162 @@
+import streamlit as st
+from components.visualizations import plot_emotion_distribution
+from utils import model_inference
+from components.audio_player import play_audio
+from components.debug_tools import DebugTools
+import json
+import os
+import time
+
+# ✅ Initialize session_state to store analysis results and feedback status
+if "emotion_result" not in st.session_state:
+    st.session_state.emotion_result = None
+if "feedback_submitted" not in st.session_state:
+    st.session_state.feedback_submitted = False
+if "user_feedback" not in st.session_state:
+    st.session_state.user_feedback = ""
+
+def show_history():
+    """📜 Display the history (most recent 5 records)"""
+    history_file = "history/history.json"  # forward slash keeps the path valid on Linux
+
+    if os.path.exists(history_file):
+        with open(history_file, 'r') as f:
+            history = json.load(f)
+
+        if history:
+            st.subheader("📜 History")
+            with st.expander("Click to view past records", expanded=False):
+                for record in history[-5:]:  # show only the 5 most recent records
+                    st.write(f"🎧 **Audio File**: {record['audio_file']}")
+                    st.write(f"📝 **Transcript**: {record['transcript']}")
+                    st.write(f"🎭 **Emotions**: {record['emotions']}")
+                    st.write("---")
+        else:
+            st.write("🚫 No history records found.")
+    else:
+        st.write("🚫 No history file.")
+
+def show():
+    st.markdown(
+        "<h1 style='text-align: center; color: #FF5733;'>😉 Emotion Analyzer 🎭</h1>",
+        unsafe_allow_html=True
+    )
+
+    # 📜 Display the history
+    show_history()
+
+    # 🛠️ Initialize the debug tools
+    debug = DebugTools()
+    debug.show_debug_info()
+
+    # 🎤 Let the user upload audio and enter text
+    st.markdown("---")
+    col1, col2 = st.columns(2)
+    with col1:
+        audio_file = st.file_uploader("Upload your audio file", type=['wav', 'mp3'])
+        # 🎧 If an audio file is present, show its info
+        if audio_file is not None:
+            debug.show_audio_info(audio_file)
+            st.audio(audio_file, format="audio/mp3")
+    with col2:
+        text_input = st.text_input("Enter your text input")
+        transcript = text_input  # by default, the text input is the transcript
+        st.write("📄 **Audio transcript:**", transcript)
+
+    # 🎯 **If the user removes the audio, automatically reset the analysis results**
+    if audio_file is None and "emotion_result" in st.session_state and st.session_state.emotion_result is not None:
+        st.warning("🚫 Audio file removed. Resetting analysis results.")
+        st.session_state.emotion_result = None
+
+    # # 🎧 If an audio file is present, show its info
+    # if audio_file is not None:
+    #     debug.show_audio_info(audio_file)
+    #     st.audio(audio_file, format="audio/mp3")
+
+    if audio_file is not None and text_input:
+        if st.button("Analyse Your Emotion! 😊"):
+            st.session_state.feedback_submitted = False  # reset the feedback state
+
+            # 📊 Show a progress bar
+            progress_bar = st.progress(0)
+            for percent in range(0, 101, 20):
+                progress_bar.progress(percent)
+                time.sleep(0.2)  # smooths out the loading experience
+
+            try:
+                transcript = text_input
+
+                # 🔍 Run the emotion analysis
+                with st.spinner("Analysing emotion..."):
+                    for percent in range(30, 101, 20):
+                        progress_bar.progress(percent)
+                        time.sleep(0.2)  # avoid flashing past too quickly
+                    emotions = model_inference.predict_emotion(text_input, audio_file)
+
+                if emotions:
+                    best_emotion = max(emotions, key=lambda x: float(emotions[x].strip('%')))
+                    # 🎭 Display the analysis results
+                    st.markdown("---")
+                    st.markdown("### 🎭 Emotion Analysis Results")
+                    st.success(f"🎭 **Predicted Emotion:** {best_emotion}")
+                    st.success(f"📊 **Emotion Probabilities:** {emotions}")
+
+                    # ✅ Store the results so a page refresh does not lose them
+                    st.session_state.emotion_result = {
+                        "best_emotion": best_emotion,
+                        "emotions": emotions,
+                        "transcript": transcript
+                    }
+
+            except Exception as e:
+                debug.log_error(e, context=f"Processing file: {audio_file.name}")
+
+    # 🎭 Only display results when an analysis result exists
+    if st.session_state.emotion_result:
+        best_emotion = st.session_state.emotion_result["best_emotion"]
+        emotions = st.session_state.emotion_result["emotions"]
+
+        # 🎈 Built-in Streamlit animations (only for "Joy" and "Sad")
+        if best_emotion == "Joy":
+            st.balloons()
+            st.markdown("**Yay! You seem so happy! 🎉😊**")
+            st.write("Keep smiling, it's contagious! 🌟")
+        elif best_emotion == "Sad":
+            st.snow()
+            st.markdown("**Oh no, you seem a bit down... 😔💭**")
+            st.write("I'm here if you need to talk or cheer up! 💬💖")
+        else:
+            emotion_messages = {
+                "Angry": ["😡 **Whoa! Looks like you're really angry...**", "Take a deep breath, let's cool off together! 🧘‍♂️"],
+                "Surprised": ["🤯 **Wow, you're really surprised!**", "That's a big reaction! Hope it's a good surprise! 🎉"],
+                "Fearful": ["😨 **Looks like you're feeling scared...**", "It's okay to be scared. I'm here to help! 💪"],
+                "Disgusted": ["🤢 **Yikes, you're feeling disgusted...**", "That doesn't sound pleasant. Let's turn it around! 🌈"],
+                "Neutral": ["🤔 **Hmm, your emotions seem mixed...**", "Can you tell me more? Let's figure this out together! 💬"]
+            }
+            if best_emotion in emotion_messages:
+                st.markdown(emotion_messages[best_emotion][0])
+                st.write(emotion_messages[best_emotion][1])
+
+        # 📊 Plot the emotion distribution
+        plot_emotion_distribution(emotions)
+
+        # 💾 Save the history
+        if audio_file is not None:
+            model_inference.save_history(audio_file, transcript, emotions)
+        else:
+            st.warning("⚠️ No audio file uploaded. History not saved.")
+
+    # 📣 Feedback feature
+    if st.session_state.emotion_result:
+        st.subheader("📣 Give us your feedback!")
+        feedback = st.radio("Do you agree with the prediction?", ["😀 Great!", "😐 Okay", "😡 Wrong"], index=1)
+        user_feedback = st.text_area("Tell us more (optional):", st.session_state.user_feedback)
+
+        if st.button("Submit Feedback"):
+            st.session_state.user_feedback = user_feedback
+            st.session_state.feedback_submitted = True
+
+        if st.session_state.feedback_submitted:
+            st.success("✅ Thank you for your feedback! It has been recorded.")
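The best_emotion line above works because predict_emotion formats each probability as a percent string; a quick illustration, using values taken from the history file deleted below:

emotions = {"Neutral": "11.89%", "Joy": "16.05%", "Sad": "14.53%"}
best_emotion = max(emotions, key=lambda x: float(emotions[x].strip('%')))
print(best_emotion)  # Joy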
history/history.json DELETED
@@ -1,142 +0,0 @@
-[
-    {
-        "audio_file": "test.wav",
-        "transcript": "please go now and make sure they stay from away, come back as soon as you can",
-        "emotions": {
-            "Neutral": "11.89%",
-            "Joy": "16.05%",
-            "Sad": "14.53%",
-            "Angry": "14.88%",
-            "Surprised": "14.02%",
-            "Fearful": "14.50%",
-            "Disgusted": "14.13%"
-        },
-        "probabilities": null
-    },
-    {
-        "audio_file": "test.wav",
-        "transcript": "please go now and make sure they stay from away, come back as soon as you can",
-        "emotions": {
-            "Neutral": "11.89%",
-            "Joy": "16.05%",
-            "Sad": "14.53%",
-            "Angry": "14.88%",
-            "Surprised": "14.02%",
-            "Fearful": "14.50%",
-            "Disgusted": "14.13%"
-        },
-        "probabilities": null
-    },
-    {
-        "audio_file": "test.wav",
-        "transcript": "please go now and make sure they stay from away, come back as soon as you can",
-        "emotions": {
-            "Neutral": "11.89%",
-            "Joy": "16.05%",
-            "Sad": "14.53%",
-            "Angry": "14.88%",
-            "Surprised": "14.02%",
-            "Fearful": "14.50%",
-            "Disgusted": "14.13%"
-        },
-        "probabilities": null
-    },
-    {
-        "audio_file": "test.wav",
-        "transcript": "please go now and make sure they stay from away, come back as soon as you can",
-        "emotions": {
-            "Neutral": "11.89%",
-            "Joy": "16.05%",
-            "Sad": "14.53%",
-            "Angry": "14.88%",
-            "Surprised": "14.02%",
-            "Fearful": "14.50%",
-            "Disgusted": "14.13%"
-        },
-        "probabilities": null
-    },
-    {
-        "audio_file": "test.wav",
-        "transcript": "please go now and make sure they stay from away, come back as soon as you can",
-        "emotions": {
-            "Neutral": "11.89%",
-            "Joy": "16.05%",
-            "Sad": "14.53%",
-            "Angry": "14.88%",
-            "Surprised": "14.02%",
-            "Fearful": "14.50%",
-            "Disgusted": "14.13%"
-        },
-        "probabilities": null
-    },
-    {
-        "audio_file": "test.wav",
-        "transcript": "Please go now, and make sure to stay safe on the way. Come back as soon as you can.",
-        "emotions": {
-            "Neutral": "11.93%",
-            "Joy": "16.90%",
-            "Sad": "14.75%",
-            "Angry": "14.36%",
-            "Surprised": "14.24%",
-            "Fearful": "14.24%",
-            "Disgusted": "13.57%"
-        },
-        "probabilities": null
-    },
-    {
-        "audio_file": "test.wav",
-        "transcript": "Please go now, and make sure to stay safe on the way. Come back as soon as you can.",
-        "emotions": {
-            "Neutral": "54.73%",
-            "Joy": "5.80%",
-            "Sad": "29.00%",
-            "Angry": "2.24%",
-            "Surprised": "0.51%",
-            "Fearful": "6.93%",
-            "Disgusted": "0.78%"
-        },
-        "probabilities": null
-    },
-    {
-        "audio_file": "en-US-DavisNeural_sad_2x(1).wav",
-        "transcript": "I am fine. Just thinking about some things, that's all.",
-        "emotions": {
-            "Neutral": "12.95%",
-            "Joy": "42.23%",
-            "Sad": "41.40%",
-            "Angry": "0.25%",
-            "Surprised": "0.75%",
-            "Fearful": "2.24%",
-            "Disgusted": "0.19%"
-        },
-        "probabilities": null
-    },
-    {
-        "audio_file": "en-US-NancyNeural_sad_2x(1).wav",
-        "transcript": "I am fine. Just thinking about some things, that's all.",
-        "emotions": {
-            "Neutral": "13.23%",
-            "Joy": "43.65%",
-            "Sad": "39.48%",
-            "Angry": "0.28%",
-            "Surprised": "0.79%",
-            "Fearful": "2.35%",
-            "Disgusted": "0.22%"
-        },
-        "probabilities": null
-    },
-    {
-        "audio_file": "301_26.wav",
-        "transcript": "stayed in school you know not let the accident just distract me and not i wouldve remained in school i probably wouldnt have had kids early you know um",
-        "emotions": {
-            "Neutral": "16.10%",
-            "Joy": "0.46%",
-            "Sad": "81.54%",
-            "Angry": "0.30%",
-            "Surprised": "0.18%",
-            "Fearful": "1.24%",
-            "Disgusted": "0.17%"
-        },
-        "probabilities": null
-    }
-]
pages/__init__.py DELETED
@@ -1 +0,0 @@
-# Empty file; this makes pages a Python package
 
 
pages/__pycache__/__init__.cpython-313.pyc DELETED
Binary file (144 Bytes)
 
pages/__pycache__/chatbot.cpython-313.pyc DELETED
Binary file (5.51 kB)
 
pages/__pycache__/emotion_analyzer.cpython-313.pyc DELETED
Binary file (3.53 kB)
 
pages/emotion_analyzer.py DELETED
@@ -1,80 +0,0 @@
-import streamlit as st
-from components.visualizations import plot_emotion_distribution
-from utils import model_inference
-from components.audio_player import play_audio
-from components.debug_tools import DebugTools
-import json
-import os
-
-def show_history():
-    """Display the history"""
-    history_file = "history.json"
-
-    if os.path.exists(history_file):
-        with open(history_file, 'r') as f:
-            history = json.load(f)
-
-        if history:
-            st.subheader("History")
-            for record in history:
-                st.write(f"Audio File: {record['audio_file']}")
-                st.write(f"Transcript: {record['transcript']}")
-                st.write(f"Emotions: {record['emotions']}")
-                st.write(f"Probabilities: {record['probabilities']}")
-                st.write("---")
-        else:
-            st.write("No history records.")
-    else:
-        st.write("No history file.")
-
-def show():
-    st.header("Emotion Analyzer")
-
-    # Display the history
-    show_history()
-
-    # Initialize the debug tools
-    debug = DebugTools()
-
-    # Show system debug info
-    debug.show_debug_info()
-
-    # File upload
-    audio_file = st.file_uploader("Upload audio file", type=['wav', 'mp3'])
-    text_input = st.text_input("Enter text input")
-
-    if audio_file is not None and text_input:
-        # Show audio file info
-        debug.show_audio_info(audio_file)
-
-        # Use the audio_player component
-        play_audio(audio_file)
-
-        if st.button("Analyse Your Emotion!😊"):
-            # Show a progress bar
-            progress_bar = st.progress(0)
-
-            try:
-                # Use the user's text input directly as the transcript
-                transcript = text_input
-                st.write("Audio transcript:", transcript)
-
-                # 2. Emotion analysis
-                with st.spinner("Analysing emotion..."):
-                    progress_bar.progress(30)
-                    emotions = model_inference.predict_emotion(text_input, audio_file)
-
-                # 3. Display the results
-                progress_bar.progress(30)
-
-                # Show the prediction
-                st.success(f"Predict: {emotions}")
-
-                # Plot the emotion probability distribution
-                plot_emotion_distribution(emotions)
-
-                # Save the history
-                model_inference.save_history(audio_file, transcript, emotions, None)  # adjust as needed
-
-            except Exception as e:
-                debug.log_error(e, context=f"Processing file: {audio_file.name}")
utils/__pycache__/model_inference.cpython-313.pyc CHANGED
Binary files a/utils/__pycache__/model_inference.cpython-313.pyc and b/utils/__pycache__/model_inference.cpython-313.pyc differ
 
utils/model_inference.py CHANGED
@@ -71,6 +71,7 @@ class MultimodalClassifier(nn.Module):
         audio_features = audio_outputs.hidden_states[-1][:, 0, :]

         # **Concatenate the features**
+
         combined_features = torch.cat((text_features, audio_features), dim=-1)

         # **Classification**
@@ -98,13 +99,11 @@ tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

 def preprocess_text(text):
     text_inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=128)
-    print(text_inputs)
     return text_inputs.to(device)

 def preprocess_audio(audio_path):
     waveform, sample_rate = torchaudio.load(audio_path)
     waveform = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=16000)(waveform)
-    print(waveform)
     return waveform.to(device)

 labels = ["Neutral", "Joy", "Sad", "Angry", "Surprised", "Fearful", "Disgusted"]
@@ -123,7 +122,7 @@ def generate_transcript(audio_file):
     """Generate a text transcript of the audio"""
     return audio_file.name  # simply return the audio file's name

-def save_history(audio_file, transcript, emotions, probabilities):
+def save_history(audio_file, transcript, emotions):
     """Save the analysis history to a file"""
     history_file = r"history/history.json"

@@ -138,7 +137,6 @@ def save_history(audio_file, transcript, emotions, probabilities):
         "audio_file": audio_file.name,
         "transcript": transcript,
         "emotions": emotions,
-        "probabilities": probabilities
     })

     with open(history_file, 'w') as f:
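Applied to the file, the new three-argument save_history reads roughly as follows; the empty-history initialisation in the middle is an assumption, since the hunks above omit the lines between the docstring and the append:

import json
import os

def save_history(audio_file, transcript, emotions):
    """Save the analysis history to a file"""
    history_file = r"history/history.json"

    # Load existing records if the file is already there (assumed; these
    # middle lines are not shown in the hunks above)
    history = []
    if os.path.exists(history_file):
        with open(history_file, 'r') as f:
            history = json.load(f)

    history.append({
        "audio_file": audio_file.name,
        "transcript": transcript,
        "emotions": emotions,
    })

    with open(history_file, 'w') as f:
        json.dump(history, f, indent=4)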