picard.tseng committed · ffc96c8
1 parent: 18a3ddb
first commit
- app.py +141 -0
- requirements.txt +3 -0
app.py
ADDED
@@ -0,0 +1,141 @@
import streamlit as st
import openai

import os
import outlines  # declared in requirements.txt; not used yet

# =============== Basic settings ===============
# openai.api_key = st.secrets["OPENAI_API_KEY"]  # recommended: keep API keys in .streamlit/secrets.toml


def choose_api_key(model_id):
    # Default to the Together key; OpenAI-hosted models (gpt-*, o3-*) use the ChatGPT key instead.
    api_key = os.environ["TOGETHER_API_KEY"]
    if 'o3-' in model_id or 'gpt-' in model_id:
        api_key = os.environ["CHATGPT_API_KEY"]
    print(f'api_key:{api_key}')
    return api_key

# =============== LLM call helper (used for both summarization and evaluation) ===============
def call_llm(model_id, content):
    api_key = choose_api_key(model_id)
    # gpt-* and o3-* models are served by OpenAI directly; everything else goes through the Together endpoint.
    if 'o3-' in model_id or 'gpt-' in model_id:
        client = openai.OpenAI(api_key=api_key)
    else:
        client = openai.OpenAI(api_key=api_key, base_url="https://api.together.xyz/v1")

    response = client.chat.completions.create(
        model=model_id,
        messages=[{"role": "user", "content": content}],
    )
    return response.choices[0].message.content

# =============== Streamlit UI ===============
st.title("📑 會議紀錄摘要與評估系統")

# 1️⃣ Upload the meeting-minutes text file
uploaded_file = st.file_uploader("請上傳會議紀錄文字檔 (.txt)", type="txt")

# 2️⃣ Choose the LLM used for summarization
llm_models = [
    "Qwen/Qwen2.5-Coder-32B-Instruct",
    "Qwen/Qwen2-VL-72B-Instruct",
    "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
    "gpt-4o",
    "o3-mini",
    "gpt-3.5-turbo",
]
selected_summary_model = st.selectbox("選擇用來摘要的 LLM 模型", llm_models)

# 3️⃣ Button → generate the summary
if uploaded_file and st.button("🚀 產生會議摘要"):
    meeting_text = uploaded_file.read().decode("utf-8")
    if meeting_text:
        prompt_summary = f'''
You are a helpful assistant specialized in summarizing business meeting transcripts. Given a detailed company meeting transcript, your task is to generate a well-structured summary that includes the following:
(1) Meeting Outline
A high-level bullet-point outline of the meeting topics discussed, in the order they appeared.
(2) Speaker Contributions
A section listing each participant’s key opinions, statements, and concerns raised during the meeting.
(3) Key Conclusions and Action Items
A clear list of the main conclusions reached by the team and the action plans assigned to each person or department.

The summary should be concise, clear, and organized, avoiding unnecessary repetition while preserving essential details. Your summary should be written in Traditional Chinese.

====
The following is the meeting transcript:
{meeting_text}
'''

        # Summarize with the full prompt (instructions plus transcript).
        summary_result = call_llm(selected_summary_model, prompt_summary)

        # Save the summary; keep only the part of the model id after "/" so the filename is a valid path.
        summary_filename = f"summary_{selected_summary_model.split('/')[-1]}.txt"
        with open(summary_filename, "w", encoding="utf-8") as f:
            f.write(summary_result)

        st.success(f"✅ 已完成摘要,檔名:{summary_filename}")
        st.text_area("📋 產生的會議摘要", summary_result, height=300)

        # Keep the transcript and summary in session state for the evaluation step
        st.session_state['meeting_text'] = meeting_text
        st.session_state['summary_result'] = summary_result

# 4️⃣ Choose the LLM used for evaluation
if 'summary_result' in st.session_state:
    selected_eval_model = st.selectbox("選擇用來評價的 LLM 模型", llm_models)

    # 5️⃣ Button → generate the evaluation
    if st.button("📝 產生摘要評價"):
        prompt_evaluation = f'''
You are a professional meeting summary reviewer. Please evaluate the following meeting summary based on these three criteria:

1. Coverage Completeness: Does the summary cover all the key points discussed in the meeting?
2. Accuracy: Is the summary faithful to the original meeting content, without adding, omitting, or distorting information?
3. Clarity and Conciseness: Is the summary clear, well-structured, and free of unnecessary repetition or irrelevant content?

Provide a score from 1 to 5 for each item.
Then, calculate a total score (maximum 15 points).
Finally, write a brief comment about the overall quality.

====
【會議原文】
{st.session_state['meeting_text']}

【會議摘要】
{st.session_state['summary_result']}

====
Please reply in the following format in Traditional Chinese:

完整性:X/5
準確性:X/5
簡潔性:X/5
總分:X/15
評語:XXXXX
'''

        evaluation_result = call_llm(selected_eval_model, prompt_evaluation)

        # Save the evaluation; keep only the part of the model id after "/" so the filename is a valid path.
        eval_filename = f"evaluation_{selected_eval_model.split('/')[-1]}.txt"
        with open(eval_filename, "w", encoding="utf-8") as f:
            f.write(evaluation_result)

        st.success(f"✅ 已完成評價,檔名:{eval_filename}")
        st.text_area("📖 評估結果", evaluation_result, height=300)
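Side note on key handling: the commented-out line near the top of app.py points at .streamlit/secrets.toml. The snippet below is a minimal, hypothetical sketch (the helpers get_key and choose_api_key_from_secrets are illustrative and not part of this commit) of how the same TOGETHER_API_KEY / CHATGPT_API_KEY lookups could fall back to Streamlit secrets when the environment variables are not set.

import os
import streamlit as st

def get_key(name: str) -> str:
    # Prefer an environment variable; fall back to .streamlit/secrets.toml if it is unset.
    value = os.environ.get(name)
    if value:
        return value
    return st.secrets[name]

def choose_api_key_from_secrets(model_id: str) -> str:
    # Same routing rule as choose_api_key(): gpt-*/o3-* models use the ChatGPT key,
    # everything else uses the Together key.
    if 'o3-' in model_id or 'gpt-' in model_id:
        return get_key("CHATGPT_API_KEY")
    return get_key("TOGETHER_API_KEY")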
requirements.txt
ADDED
@@ -0,0 +1,3 @@
streamlit
openai
outlines
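Because app.py uses the v1 client interface (openai.OpenAI), the openai dependency must be 1.x or newer. A pinned variant of requirements.txt (the single constraint below reflects that; anything stricter would be an untested assumption) could look like:

streamlit
openai>=1.0
outlines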