JenniferHJF committed on
Commit
4eeb268
·
verified ·
1 Parent(s): a99fdaa

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +35 -26
app.py CHANGED
@@ -1,39 +1,48 @@
1
  import streamlit as st
2
  from transformers import pipeline
3
 
4
- # 頁面設定
5
- st.set_page_config(page_title="輕量級文本分類器", page_icon="📊", layout="centered")
6
- st.title("🧠 輕量級 AI 文本判斷器")
7
  st.markdown("""
8
- 使用開源模型 `distilbert-base-uncased-finetuned-sst-2-english` 進行快速文本分類。
9
- 此模型主要訓練於英文情感分類任務,可初步判斷文本是否帶有正向或負面情緒。
10
 
11
- > ⚠️ 注意:模型非專門用於仇恨語言檢測,僅用作演示用途。
 
 
 
 
12
  """)
13
 
14
- # 示例句子
15
  examples = [
16
- "I really love how inclusive this community is!",
17
- "This is terrible and offensive.",
18
- "The movie was okay, not great.",
19
- "I hate everything about this.",
20
- "What a lovely message!",
21
- "You are stupid and shouldn't exist."
22
  ]
23
 
24
- selected_example = st.selectbox("📚 選擇範例句子", options=examples, index=0)
25
- text = st.text_area("✍️ 或自行輸入文本:", value=selected_example, height=150)
26
 
27
- if st.button("🚦 開始分析"):
28
- with st.spinner("模型正在分析中..."):
29
- classifier = pipeline("sentiment-analysis", model="distilbert-base-uncased-finetuned-sst-2-english")
30
- result = classifier(text)[0]
31
- label = result['label']
32
- score = result['score']
 
33
 
34
- st.markdown(f"### 🔍 預測結果:`{label}`")
35
- st.progress(score)
36
- st.write(f"模型信心指數:**{score:.2%}**")
37
- else:
38
- st.info("👈 請輸入內容,然後點擊「開始分析」")
 
39
 
 
 
 
 
 
1
  import streamlit as st
2
  from transformers import pipeline
3
 
4
+ # Page setup
5
+ st.set_page_config(page_title="Hate Speech Detector", page_icon="🚨", layout="centered")
6
+ st.title("🚨 Hate Speech Classification Demo")
7
  st.markdown("""
8
+ This app uses the fine-tuned model `JenniferHJF/qwen1.5-emoji-finetuned` to predict whether the given input text contains **offensive or hateful content**.
 
9
 
10
+ The model returns:
11
+ - `1` if the content is offensive
12
+ - `0` if it is not offensive
13
+
14
+ ⚠️ Note: This is a demo and the model may not be perfect in detecting nuanced or implicit hate speech.
15
  """)
16
 
17
+ # Example inputs
18
  examples = [
19
+ "You're a disgrace to this country.",
20
+ "Hope you have a great day!",
21
+ "Why are you even alive?",
22
+ "That was really rude and uncalled for.",
23
+ "You are amazing and smart!",
24
+ "Get lost, nobody wants you here."
25
  ]
26
 
27
+ selected_example = st.selectbox("📘 Choose an example sentence:", options=examples, index=0)
28
+ text = st.text_area("📝 Or enter your own text below:", value=selected_example, height=150)
29
 
30
+ if st.button("🚀 Analyze"):
31
+ with st.spinner("Running model inference..."):
32
+ classifier = pipeline("text-generation", model="JenniferHJF/qwen1.5-emoji-finetuned", max_new_tokens=20)
33
+ output = classifier(f"""Please determine whether the following text is offensive.
34
+ Reply with '1' for offensive, '0' for non-offensive.
35
+ Text: {text}
36
+ """)[0]["generated_text"]
37
 
38
+ # Extract the last '0' or '1' from output
39
+ prediction = "Unknown"
40
+ if "1" in output.strip().splitlines()[-1]:
41
+ prediction = "Offensive (1)"
42
+ elif "0" in output.strip().splitlines()[-1]:
43
+ prediction = "Non-Offensive (0)"
44
 
45
+ st.markdown(f"### ✅ Prediction: `{prediction}`")
46
+ st.code(output.strip(), language="text")
47
+ else:
48
+ st.info("👈 Enter text and click 'Analyze' to begin.")