Go-Raw committed
Commit 6ad54e6 · verified · 1 Parent(s): ad87d66

Update app.py

Files changed (1)
  1. app.py +42 -36
app.py CHANGED
@@ -6,58 +6,64 @@ import gradio as gr
 dataset = load_dataset("Koushim/processed-jigsaw-toxic-comments", split="train", streaming=True)

 # Sample examples
-low, medium, high = [], [], []
+green, yellow, red = [], [], []
 for example in dataset:
     score = example['toxicity']
     text = example['text']
-    if score < 0.3 and len(low) < 3:
-        low.append((text, score))
-    elif 0.3 <= score < 0.7 and len(medium) < 3:
-        medium.append((text, score))
-    elif score >= 0.7 and len(high) < 3:
-        high.append((text, score))
-    if len(low) == 3 and len(medium) == 3 and len(high) == 3:
+    if score < 0.3 and len(green) < 3:
+        green.append((text, score))
+    elif 0.3 <= score < 0.7 and len(yellow) < 3:
+        yellow.append((text, score))
+    elif score >= 0.7 and len(red) < 3:
+        red.append((text, score))
+    if len(green) == 3 and len(yellow) == 3 and len(red) == 3:
         break

 examples_html = f"""
-### 🧪 Examples of Toxicity Levels
-
-#### 🔷 Low Toxicity
-- {low[0][0]} (score: {low[0][1]:.2f})
-- {low[1][0]} (score: {low[1][1]:.2f})
-- {low[2][0]} (score: {low[2][1]:.2f})
-
-#### 🟠 Medium Toxicity
-- {medium[0][0]} (score: {medium[0][1]:.2f})
-- {medium[1][0]} (score: {medium[1][1]:.2f})
-- {medium[2][0]} (score: {medium[2][1]:.2f})
-
-#### 🔴 High Toxicity
-- {high[0][0]} (score: {high[0][1]:.2f})
-- {high[1][0]} (score: {high[1][1]:.2f})
-- {high[2][0]} (score: {high[2][1]:.2f})
+### 🥰 Examples: Is your partner a Green Flag or Red Flag?
+
+#### 💚 Green Flag (Wholesome vibes 🌸)
+- {green[0][0]} (toxicity: {green[0][1]:.2f})
+- {green[1][0]} (toxicity: {green[1][1]:.2f})
+- {green[2][0]} (toxicity: {green[2][1]:.2f})
+
+#### 🟡 Yellow Flag (Eh… watch out 👀)
+- {yellow[0][0]} (toxicity: {yellow[0][1]:.2f})
+- {yellow[1][0]} (toxicity: {yellow[1][1]:.2f})
+- {yellow[2][0]} (toxicity: {yellow[2][1]:.2f})
+
+#### ❤️ Red Flag (🚨 Run bestie, run! 🚨)
+- {red[0][0]} (toxicity: {red[0][1]:.2f})
+- {red[1][0]} (toxicity: {red[1][1]:.2f})
+- {red[2][0]} (toxicity: {red[2][1]:.2f})
 """

 # Load toxicity detection pipeline
 classifier = pipeline("text-classification", model="cardiffnlp/twitter-roberta-base-offensive", top_k=None)

-def predict_toxicity(text):
+def predict_flag(text):
     preds = classifier(text)[0]
-    results = []
+    score = 0.0
     for pred in preds:
-        label = pred['label']
-        score = pred['score']
-        results.append(f"**{label}**: {score:.2f}")
-    return "\n".join(results)
+        if pred['label'].lower() in ['toxic', 'offensive', 'abusive']:
+            score = pred['score']
+            break
+    # Decide flag
+    if score < 0.3:
+        return f"💚 **Green Flag!**\nNot toxic at all. Keep them! 🌷 (toxicity: {score:.2f})"
+    elif 0.3 <= score < 0.7:
+        return f"🟡 **Yellow Flag!**\nHmm… could be better. Watch out. 👀 (toxicity: {score:.2f})"
+    else:
+        return f"❤️ **Red Flag!**\n🚨 Yikes, that’s toxic! 🚨 (toxicity: {score:.2f})"

 with gr.Blocks() as demo:
-    gr.Markdown("# 🧹 Hate Speech & Toxicity Monitor")
-    gr.Markdown("This tool shows examples of toxic comments and lets you check your own text for toxicity using a Hugging Face model.")
+    gr.Markdown("# 💌 Green Flag or Red Flag?")
+    gr.Markdown("Ever wondered if your partner’s texts are a green flag 💚 or a 🚨 red flag? Paste their messages below and let AI judge. Just for fun 😉")
     gr.Markdown(examples_html)

-    inp = gr.Textbox(label="🔷 Enter your comment")
-    out = gr.Markdown(label="🔷 Toxicity Scores")
-    btn = gr.Button("Check Toxicity")
-    btn.click(fn=predict_toxicity, inputs=inp, outputs=out)
+    inp = gr.Textbox(label="📩 Paste your partner's message here")
+    out = gr.Markdown(label="🧪 Verdict")
+    btn = gr.Button("👀 Check Now")
+    btn.click(fn=predict_flag, inputs=inp, outputs=out)

 demo.launch()
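For reference, a minimal sketch of the score-to-flag mapping this commit introduces, replayed without loading the model. Only the 0.3 / 0.7 thresholds and the label filter `['toxic', 'offensive', 'abusive']` come from the diff above; the `flag_for` helper and the mocked pipeline outputs are hypothetical stand-ins for what a `text-classification` pipeline with `top_k=None` may return (label names vary by model, so treat them as assumptions):

```python
# Hypothetical sketch: exercise the new predict_flag() thresholds on mocked output.
# flag_for() and mock_preds are illustrative only; the cut-offs and the label
# filter mirror the diff above.

def flag_for(score: float) -> str:
    """Map a toxicity score to a flag, mirroring predict_flag's branches."""
    if score < 0.3:
        return "💚 Green Flag"
    elif score < 0.7:
        return "🟡 Yellow Flag"
    return "❤️ Red Flag"

# Mocked shape of classifier(text)[0]: one {label, score} dict per class.
mock_preds = [
    [{"label": "non-offensive", "score": 0.92}, {"label": "offensive", "score": 0.08}],
    [{"label": "offensive", "score": 0.55}, {"label": "non-offensive", "score": 0.45}],
    [{"label": "offensive", "score": 0.91}, {"label": "non-offensive", "score": 0.09}],
]

for preds in mock_preds:
    # Same selection rule as the diff: first "toxic-like" label wins, default 0.0.
    score = next((p["score"] for p in preds
                  if p["label"].lower() in ["toxic", "offensive", "abusive"]), 0.0)
    print(f"toxicity {score:.2f} -> {flag_for(score)}")
```

With `gradio`, `transformers`, and `datasets` installed, running `python app.py` should launch the updated demo locally via `demo.launch()`.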