Update app.py
app.py
CHANGED
@@ -1,39 +1,48 @@
 import streamlit as st
 from transformers import pipeline
 
-#
-st.set_page_config(page_title="
-st.title("
 st.markdown("""
-
-This model is trained mainly on English sentiment-classification tasks and can give a preliminary judgment of whether a text carries positive or negative sentiment.
 
-
 """)
 
-#
 examples = [
-    "
-    "
-    "
-    "
-    "
-    "
 ]
 
-selected_example = st.selectbox("
-text = st.text_area("
 
-if st.button("
-    with st.spinner("
-        classifier = pipeline("
-
-
-
 
-
-
-
-
-
 
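Most of the removed version is truncated in this view; the one surviving description line says the previous app was an English sentiment-classification demo built on the same `pipeline` API. A minimal sketch of that kind of flow is below, assuming the stock `sentiment-analysis` pipeline and hypothetical UI strings; the original model name, labels, and example sentences are not recoverable from the diff.

```python
import streamlit as st
from transformers import pipeline

# Hypothetical reconstruction of the removed sentiment demo: the model choice and
# all UI strings below are assumptions, since the diff truncates them.
st.set_page_config(page_title="Sentiment Demo")
st.title("Sentiment Classification Demo")
st.markdown("Trained mainly on English sentiment data; gives a rough positive/negative call.")

text = st.text_area("Enter text to analyze:")

if st.button("Analyze"):
    with st.spinner("Running model inference..."):
        # Default sentiment-analysis pipeline; returns a label and a confidence score.
        classifier = pipeline("sentiment-analysis")
        result = classifier(text)[0]
    st.write(f"**{result['label']}** (score: {result['score']:.3f})")
```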
 import streamlit as st
 from transformers import pipeline
 
+# Page setup
+st.set_page_config(page_title="Hate Speech Detector", page_icon="🚨", layout="centered")
+st.title("🚨 Hate Speech Classification Demo")
 st.markdown("""
+This app uses the fine-tuned model `JenniferHJF/qwen1.5-emoji-finetuned` to predict whether the given input text contains **offensive or hateful content**.
 
+The model returns:
+- `1` if the content is offensive
+- `0` if it is not offensive
+
+⚠️ Note: This is a demo and the model may not be perfect in detecting nuanced or implicit hate speech.
 """)
 
+# Example inputs
 examples = [
+    "You're a disgrace to this country.",
+    "Hope you have a great day!",
+    "Why are you even alive?",
+    "That was really rude and uncalled for.",
+    "You are amazing and smart!",
+    "Get lost, nobody wants you here."
 ]
 
+selected_example = st.selectbox("📘 Choose an example sentence:", options=examples, index=0)
+text = st.text_area("📝 Or enter your own text below:", value=selected_example, height=150)
 
+if st.button("🚀 Analyze"):
+    with st.spinner("Running model inference..."):
+        classifier = pipeline("text-generation", model="JenniferHJF/qwen1.5-emoji-finetuned", max_new_tokens=20)
+        output = classifier(f"""Please determine whether the following text is offensive.
+Reply with '1' for offensive, '0' for non-offensive.
+Text: {text}
+""")[0]["generated_text"]
 
+        # Extract the last '0' or '1' from output
+        prediction = "Unknown"
+        if "1" in output.strip().splitlines()[-1]:
+            prediction = "Offensive (1)"
+        elif "0" in output.strip().splitlines()[-1]:
+            prediction = "Non-Offensive (0)"
 
+        st.markdown(f"### ✅ Prediction: `{prediction}`")
+        st.code(output.strip(), language="text")
+else:
+    st.info("👈 Enter text and click 'Analyze' to begin.")
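Two details of the added script invite a small refinement: the text-generation pipeline is rebuilt on every click of the Analyze button, and the '0'/'1' heuristic looks only at the last line of `generated_text`, which by default still contains the echoed prompt, so a model that returns nothing beyond whitespace ends up being judged on the user's own `Text:` line. A possible adjustment is sketched below; `load_classifier` and `extract_label` are illustrative names, not part of the committed code.

```python
import re

import streamlit as st
from transformers import pipeline


# Illustrative refinement (not the committed app.py): cache the pipeline so the
# model is loaded once per process instead of on every "Analyze" click.
@st.cache_resource
def load_classifier():
    return pipeline(
        "text-generation",
        model="JenniferHJF/qwen1.5-emoji-finetuned",
        max_new_tokens=20,
    )


def extract_label(prompt: str, generated_text: str) -> str:
    """Search for '0'/'1' only in the model's continuation, not in the echoed prompt."""
    continuation = generated_text[len(prompt):] if generated_text.startswith(prompt) else generated_text
    match = re.search(r"[01]", continuation)
    if match is None:
        return "Unknown"
    return "Offensive (1)" if match.group() == "1" else "Non-Offensive (0)"
```

Inside the button handler, `classifier = load_classifier()` and `prediction = extract_label(prompt, output)` would then replace the inline `pipeline(...)` call and the last-line check, with the prompt held in a variable so it can be passed to both the pipeline and the extractor.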