Update app.py
app.py CHANGED
@@ -6,58 +6,64 @@ import gradio as gr
 dataset = load_dataset("Koushim/processed-jigsaw-toxic-comments", split="train", streaming=True)
 
 # Sample examples
-
+green, yellow, red = [], [], []
 for example in dataset:
     score = example['toxicity']
     text = example['text']
-    if score < 0.3 and len(
-
-    elif 0.3 <= score < 0.7 and len(
-
-    elif score >= 0.7 and len(
-
-    if len(
+    if score < 0.3 and len(green) < 3:
+        green.append((text, score))
+    elif 0.3 <= score < 0.7 and len(yellow) < 3:
+        yellow.append((text, score))
+    elif score >= 0.7 and len(red) < 3:
+        red.append((text, score))
+    if len(green) == 3 and len(yellow) == 3 and len(red) == 3:
         break
 
 examples_html = f"""
-###
-
-####
-- {
-- {
-- {
-
-####
-- {
-- {
-- {
-
-####
-- {
-- {
-- {
+### 🥰 Examples: Is your partner a Green Flag or Red Flag?
+
+#### 💚 Green Flag (Wholesome vibes 🌸)
+- {green[0][0]} (toxicity: {green[0][1]:.2f})
+- {green[1][0]} (toxicity: {green[1][1]:.2f})
+- {green[2][0]} (toxicity: {green[2][1]:.2f})
+
+#### 🟡 Yellow Flag (Eh… watch out 👀)
+- {yellow[0][0]} (toxicity: {yellow[0][1]:.2f})
+- {yellow[1][0]} (toxicity: {yellow[1][1]:.2f})
+- {yellow[2][0]} (toxicity: {yellow[2][1]:.2f})
+
+#### ❤️ Red Flag (🚨 Run bestie, run! 🚨)
+- {red[0][0]} (toxicity: {red[0][1]:.2f})
+- {red[1][0]} (toxicity: {red[1][1]:.2f})
+- {red[2][0]} (toxicity: {red[2][1]:.2f})
 """
 
 # Load toxicity detection pipeline
 classifier = pipeline("text-classification", model="cardiffnlp/twitter-roberta-base-offensive", top_k=None)
 
-def
+def predict_flag(text):
     preds = classifier(text)[0]
-
+    score = 0.0
     for pred in preds:
-
-
-
-
+        if pred['label'].lower() in ['toxic', 'offensive', 'abusive']:
+            score = pred['score']
+            break
+    # Decide flag
+    if score < 0.3:
+        return f"💚 **Green Flag!**\nNot toxic at all. Keep them! 🌷 (toxicity: {score:.2f})"
+    elif 0.3 <= score < 0.7:
+        return f"🟡 **Yellow Flag!**\nHmm… could be better. Watch out. 👀 (toxicity: {score:.2f})"
+    else:
+        return f"❤️ **Red Flag!**\n🚨 Yikes, that’s toxic! 🚨 (toxicity: {score:.2f})"
 
 with gr.Blocks() as demo:
-    gr.Markdown("#
-    gr.Markdown("
+    gr.Markdown("# 💌 Green Flag or Red Flag?")
+    gr.Markdown("Ever wondered if your partner’s texts are a green flag 💚 or a 🚨 red flag? Paste their messages below and let AI judge. Just for fun 😉")
     gr.Markdown(examples_html)
 
-    inp = gr.Textbox(label="
-    out = gr.Markdown(label="
-    btn = gr.Button("Check
-    btn.click(fn=
+    inp = gr.Textbox(label="📩 Paste your partner's message here")
+    out = gr.Markdown(label="🧪 Verdict")
+    btn = gr.Button("👀 Check Now")
+    btn.click(fn=predict_flag, inputs=inp, outputs=out)
 
 demo.launch()
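A minimal sketch of how the new predict_flag helper could be exercised on its own, illustrative only and not part of this commit: it assumes the classifier and predict_flag defined in app.py above are available, and the two messages are made-up examples.

# Hypothetical smoke test (not in the commit): reuse predict_flag from app.py above.
for msg in ["you always make my day better", "nobody can stand you"]:
    print(msg, "->", predict_flag(msg))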