import os
import re
import fitz # PyMuPDF
import tempfile
import base64
from datetime import datetime
import streamlit as st
from transformers import pipeline
from groq import Groq
import streamlit.components.v1 as components
from io import BytesIO
import random
import matplotlib.pyplot as plt
import numpy as np
import time
# Page configuration
st.set_page_config(
page_title="ZeroPhish Gate",
page_icon="🛡️",
layout="wide",
initial_sidebar_state="collapsed"
)
# ⛳ Access secrets securely from environment variables
GROQ_API_KEY = os.getenv("GROQ_API_KEY")
HF_TOKEN = os.getenv("HF_TOKEN")
# ✅ Validate secrets (simplified for Hugging Face)
if not HF_TOKEN:
st.warning("⚠️ HF_TOKEN not found. Using demo mode with limited features.")
# Use GROQ if available, otherwise show a warning
groq_client = None
if GROQ_API_KEY:
try:
groq_client = Groq(api_key=GROQ_API_KEY)
    except Exception:
st.warning("⚠️ Failed to initialize GROQ client. Expert analysis will be limited.")
# ✅ Load phishing detection pipeline from Hugging Face
@st.cache_resource(show_spinner="Loading AI model...")
def load_phishing_model():
try:
return pipeline(
"text-classification",
model="ealvaradob/bert-finetuned-phishing",
token=HF_TOKEN
)
except Exception as e:
st.error(f"❌ Error loading model: {e}")
# Return a simple lambda function as fallback
        return lambda text, **kwargs: [{'label': 'UNKNOWN', 'score': 0.5}]  # same call shape as the real pipeline
phishing_pipe = load_phishing_model()
# ✅ Language and role options
language_choices = ["English", "Urdu", "French", "Spanish", "German", "Chinese"]
role_choices = ["Admin", "Procurement", "Logistics", "Finance", "HR", "IT", "Executive"]
# ✅ Glossary terms
GLOSSARY = {
"phishing": "Phishing is a scam where attackers trick you into revealing personal information.",
"malware": "Malicious software designed to harm or exploit systems.",
"spam": "Unwanted or unsolicited messages.",
"tone": "The emotional character of the message.",
"spear phishing": "Targeted phishing attacks aimed at specific individuals or organizations.",
"smishing": "SMS phishing - phishing conducted via text messages.",
"vishing": "Voice phishing - phishing conducted via phone calls.",
"social engineering": "Manipulating people into revealing confidential information."
}
# ✅ Translations (demo dictionary-based)
TRANSLATIONS = {
"Phishing": {"Urdu": "فشنگ", "French": "Hameçonnage", "Spanish": "Suplantación de identidad", "German": "Phishing", "Chinese": "钓鱼"},
"Spam": {"Urdu": "سپیم", "French": "Courrier indésirable", "Spanish": "Correo basura", "German": "Spam", "Chinese": "垃圾邮件"},
"Malware": {"Urdu": "میلویئر", "French": "Logiciel malveillant", "Spanish": "Software malicioso", "German": "Schadware", "Chinese": "恶意软件"},
"Safe": {"Urdu": "محفوظ", "French": "Sûr", "Spanish": "Seguro", "German": "Sicher", "Chinese": "安全的"}
}
# ✅ In-memory history
if "history" not in st.session_state:
st.session_state.history = []
# =======================
# Custom CSS for Enhanced UI
# =======================
def load_css():
st.markdown("""
""", unsafe_allow_html=True)
# =======================
# Function Definitions
# =======================
def extract_text_from_file(file):
if file is None:
return ""
ext = file.name.split(".")[-1].lower()
if ext == "pdf":
        try:
            # Open the PDF from the uploaded bytes; the context manager closes it when done
            with fitz.open(stream=file.read(), filetype="pdf") as doc:
                return "\n".join(page.get_text() for page in doc)
except Exception as e:
st.error(f"❌ Error reading PDF: {e}")
return ""
elif ext == "txt":
try:
return file.read().decode("utf-8")
except Exception as e:
st.error(f"❌ Error reading text file: {e}")
return ""
return ""
def analyze_with_huggingface(text):
try:
        result = phishing_pipe(text, truncation=True)  # truncate long inputs (e.g. PDF text) to the model's max length
label = result[0]['label']
confidence = round(result[0]['score'] * 100, 2)
threat_type = {
"PHISHING": "Phishing",
"SPAM": "Spam",
"MALWARE": "Malware",
"LEGITIMATE": "Safe"
}.get(label.upper(), "Unknown")
return label, confidence, threat_type
except Exception as e:
st.error(f"❌ Model error: {e}")
return "Error", 0, f"Error: {e}"
def get_severity_class(threat_type, score):
if threat_type.lower() == "safe":
return "success"
elif score > 85:
return "danger"
else:
return "warning"
def semantic_analysis(text, role, language):
# If GROQ is not available, return a generic analysis
if not groq_client:
return f"This message shows signs of potentially being a {random.choice(['phishing attempt', 'spam', 'suspicious message'])}. Be cautious with any links or attachments. Always verify the sender through official channels before taking any action."
try:
prompt = f"""
You are a cybersecurity expert specialized in analyzing suspicious messages and explaining them in simple terms.
Analyze the following message for a {role} and provide:
1. Whether it appears to be a phishing attempt, spam, malware, or legitimate
2. The specific red flags or indicators that support your analysis
3. What actions the recipient should take
4. How this type of attack typically works
Keep your explanation concise (150-200 words) and informative, and do not ask follow-up questions.
Message to analyze:
{text}
"""
response = groq_client.chat.completions.create(
model="llama3-8b-8192",
messages=[
{"role": "system", "content": "You are a cybersecurity assistant specialized in explaining phishing and suspicious messages."},
{"role": "user", "content": prompt}
]
)
raw = response.choices[0].message.content
clean = re.sub(r"Is there anything else you'd like.*", "", raw, flags=re.I).strip()
return clean
except Exception as e:
st.warning(f"⚠️ LLM analysis unavailable: {e}")
return "This message shows signs of potentially malicious content. Be cautious with any links or attachments. Always verify the sender through official channels before taking any action."
def translate_label(threat_type, language="English"):
if language == "English":
return threat_type
return TRANSLATIONS.get(threat_type, {}).get(language, threat_type)
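# Build a plain-text report and write it to a temporary file so it can be offered for download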
def create_report(label, score, threat_type, explanation, text):
ts = datetime.now().strftime("%Y%m%d_%H%M%S")
filename = f"Zerophish_Report_{ts}.txt"
report = f"""
🔍 AI THREAT DETECTION REPORT
============================
Generated: {datetime.now().strftime("%Y-%m-%d %H:%M:%S")}
INPUT MESSAGE:
{text}
ANALYSIS RESULTS:
----------------
Prediction: {label}
Threat Type: {threat_type}
Confidence: {score}%
EXPERT EXPLANATION:
-----------------
{explanation}
RECOMMENDATIONS:
--------------
1. Do not click any links or download any attachments from this message if marked as suspicious
2. Report this message to your IT security team
3. Delete the message from your inbox
4. Be vigilant for similar messages in the future
============================
Generated by ZeroPhish Gate
"""
with tempfile.NamedTemporaryFile(mode="w+", delete=False, suffix=".txt") as temp:
temp.write(report)
return temp.name
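# Render stored analyses from session history, most recent first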
def render_history():
if not st.session_state.history:
st.info("🕒 No analysis history yet. Analyze messages to see your history here.")
return
for i, record in enumerate(reversed(st.session_state.history)):
severity = get_severity_class(record['threat'], record['score'])
with st.container():
            st.markdown(f"""
**Entry #{len(st.session_state.history) - i}** ({record['threat']}, confidence: {record['score']}%)

**Input:** {record['input'][:100]}{'...' if len(record['input']) > 100 else ''}

**Summary:** {record['summary'][:150]}{'...' if len(record['summary']) > 150 else ''}
""", unsafe_allow_html=True)
def create_threat_visualization(threat_type, score):
# Create figure and axis
fig, ax = plt.subplots(figsize=(8, 1))
# Define the color gradient based on threat type and score
if threat_type.lower() == "safe":
color = '#10B981' # Green for safe
elif score > 85:
color = '#EF4444' # Red for high confidence threats
else:
color = '#F59E0B' # Amber for medium confidence threats
# Create the gauge chart
ax.barh(0, score, height=0.6, color=color)
ax.barh(0, 100, height=0.6, color='#E5E7EB', zorder=0)
# Add score text
ax.text(score/2, 0, f"{score}%", ha='center', va='center', color='white', fontweight='bold')
# Clean up the chart
ax.set_xlim(0, 100)
ax.set_ylim(-0.5, 0.5)
ax.axis('off')
# Add threat level indicators
plt.text(25, -0.4, 'LOW', ha='center', fontsize=8, color='#4B5563')
plt.text(50, -0.4, 'MEDIUM', ha='center', fontsize=8, color='#4B5563')
plt.text(75, -0.4, 'HIGH', ha='center', fontsize=8, color='#4B5563')
# Create buffer for returning
buf = BytesIO()
plt.savefig(buf, format='png', bbox_inches='tight', dpi=100)
plt.close(fig)
buf.seek(0)
return buf
# Web-based text-to-speech using ResponsiveVoice (no server-side dependencies)
def add_responsive_voice(text, lang='English'):
# Map our language names to ResponsiveVoice API names
lang_map = {
'English': 'UK English Female',
'French': 'French Female',
'Spanish': 'Spanish Female',
'German': 'Deutsch Female',
'Chinese': 'Chinese Female',
'Urdu': 'Hindi Female' # Fallback since Urdu isn't directly supported
}
voice = lang_map.get(lang, 'UK English Female')
    # Reconstructed HTML (assumption): speak the text via the public ResponsiveVoice script, which may require an API key
    safe_text = text.replace("\\", " ").replace('"', " ").replace("'", " ").replace("\n", " ")
    html = f"""
    <script src="https://code.responsivevoice.org/responsivevoice.js"></script>
    <button onclick="responsiveVoice.speak('{safe_text}', '{voice}')">🔊 Listen to analysis</button>
    """
components.html(html, height=60)
# Create demo messages for user to try
def get_demo_messages():
return [
"Hello, I noticed an issue with your account. Please click this link to verify your details: http://amaz0n-secure.com/verify",
"URGENT: Your account has been compromised. Call this number immediately: +1-555-123-4567 to secure your account.",
"Dear user, we have detected unusual activity on your account. Please download this attachment to review the details.",
"This is a reminder that the company picnic is scheduled for this Saturday at 2pm in Central Park. Please RSVP by Thursday."
]
# =======================
# Streamlit App UI
# =======================
def main():
load_css()
# App Header
col1, col2 = st.columns([1, 5])
with col1:
st.image("https://img.icons8.com/fluency/96/shield.png", width=80)
with col2:
st.title("🛡️ ZeroPhish Gate")
st.markdown("**AI-powered phishing detection and security education platform**")
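
    # --- Input controls (reconstructed scaffolding; tab names and widget labels are assumptions) ---
    tab1, tab2, tab3 = st.tabs(["🔍 Analyze", "📊 History", "📚 Resources"])

    with tab1:
        demo_choice = st.selectbox("Try a demo message (optional)", ["None"] + get_demo_messages())
        text_input = st.text_area(
            "Paste the suspicious message or email here",
            value="" if demo_choice == "None" else demo_choice,
            height=160
        )
        uploaded_file = st.file_uploader("...or upload a PDF / TXT file", type=["pdf", "txt"])
        if uploaded_file is not None and not text_input.strip():
            text_input = extract_text_from_file(uploaded_file)

        col_role, col_lang = st.columns(2)
        with col_role:
            role = st.selectbox("Your role", role_choices)
        with col_lang:
            language = st.selectbox("Explanation language", language_choices)

        analyze_btn = st.button("🚀 Analyze", use_container_width=True)

        if analyze_btn and text_input.strip():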
# Create a placeholder for the progress bar
progress_placeholder = st.empty()
# Simulate a progress bar for better UX
for percent_complete in range(0, 101, 5):
time.sleep(0.05)
progress_placeholder.progress(percent_complete)
# Analysis logic
label, score, threat_type = analyze_with_huggingface(text_input)
translated_threat = translate_label(threat_type, language)
# Remove the progress bar
progress_placeholder.empty()
# Get severity class for styling
severity = get_severity_class(threat_type, score)
# Display results
            col1, col2 = st.columns([2, 1])
            with col1:
                # Result summary (the original styled HTML card was stripped)
                st.markdown(f"**Prediction:** {label}\n\n**Threat type:** {translated_threat}\n\n**Confidence:** {score}%")
with col2:
# Show confidence visualization
confidence_chart = create_threat_visualization(threat_type, score)
st.image(confidence_chart, caption="Threat Confidence Level")
# More detailed analysis if suspicious
if threat_type.lower() != "safe":
st.markdown("### 🧠 Expert Analysis")
with st.spinner("Generating detailed analysis..."):
summary = semantic_analysis(text_input, role, language)
st.write(summary)
# Add voice playback using ResponsiveVoice
add_responsive_voice(summary, language)
col1, col2 = st.columns(2)
with col1:
if st.button("📤 Send Report to IT Security Team", use_container_width=True):
st.success("📨 Report sent to IT security team successfully.")
with col2:
# Generate and offer download link
report_path = create_report(label, score, threat_type, summary, text_input)
with open(report_path, "rb") as f:
report_data = f.read()
b64_report = base64.b64encode(report_data).decode()
                    # Reconstructed download link: embed the report as a base64 data URI
                    href = f'<a href="data:text/plain;base64,{b64_report}" download="ZeroPhish_Report.txt">📥 Download Full Report</a>'
                    st.markdown(href, unsafe_allow_html=True)
# Security tips based on threat type
st.markdown("### 🔐 Security Tips")
if threat_type.lower() == "phishing":
st.info("• Never click on suspicious links\n• Check the sender's email address carefully\n• Contact the supposed sender through official channels to verify")
elif threat_type.lower() == "spam":
st.info("• Mark as spam in your email client\n• Consider using email filtering services\n• Don't reply or click on any links")
elif threat_type.lower() == "malware":
st.warning("• Don't download any attachments\n• Run a virus scan if you've interacted with this message\n• Report to your IT department immediately")
else:
st.success("✅ This message appears to be legitimate. No further action required.")
# Save to history
st.session_state.history.append({
"input": text_input,
"threat": threat_type,
"score": score,
"summary": summary if threat_type.lower() != "safe" else "Message appears to be safe. No detailed analysis required."
})
elif analyze_btn and not text_input.strip():
st.warning("⚠️ Please enter some text or upload a file to analyze.")
with tab2:
st.subheader("📊 Analysis History")
if clear_btn:
st.session_state.history.clear()
st.success("✅ History cleared!")
render_history()
with tab3:
st.subheader("📚 Security Resources")
st.markdown("### 📖 Glossary of Security Terms")
for term, definition in GLOSSARY.items():
st.markdown(f"**{term.capitalize()}**: {definition}")
st.markdown("### 🚨 How to Report Phishing")
st.markdown("""
**Internal Reporting:**
* Forward suspicious emails to your IT security team
* Report through your organization's security incident portal
**External Reporting:**
* [Report to the Anti-Phishing Working Group](https://apwg.org/reportphishing/)
* [Report to the FBI's Internet Crime Complaint Center](https://www.ic3.gov/)
* Forward phishing emails to [phishing-report@us-cert.gov](mailto:phishing-report@us-cert.gov)
""")
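
# Entry point (assumed): launch the app when the script is run with Streamlit
if __name__ == "__main__":
    main()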