Rithvickkr
Fixed Mistral-7B response, context repetition, and multi-threat detection 2
1911367
raw
history blame
30.2 kB
import gradio as gr
import requests
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Settings
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
import os
import re
from collections import defaultdict
from datetime import datetime, timedelta
import json
import time
import logging
from retrying import retry
# Set up logging at DEBUG so prompts/responses to Mistral-7B are traceable.
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
# Suppress Hugging Face symlink warning emitted when caching models.
os.environ["HF_HUB_DISABLE_SYMLINKS_WARNING"] = "1"
# Modal-hosted Mistral-7B API endpoint, used by call_mistral_llm below.
MODAL_API = "https://rithvickkumar27--mistral-7b-api-analyze.modal.run"
# Configure LlamaIndex to embed with a local HuggingFace model (BGE-small).
Settings.embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
# Initialize LlamaIndex over the local "corpus" directory.
def init_llama_index():
    """Build a VectorStoreIndex from ./corpus; return None if loading fails."""
    try:
        docs = SimpleDirectoryReader("corpus", filename_as_id=True).load_data()
        logger.info(f"Loaded {len(docs)} corpus documents")
        return VectorStoreIndex.from_documents(docs)
    except Exception as exc:
        logger.error(f"Error loading corpus: {exc}")
        return None
# Build the index once at import time; query_engine stays None when the corpus
# failed to load, and chatbot_response then skips context retrieval.
index = init_llama_index()
query_engine = index.as_retriever() if index else None
# Retry wrapper around the Modal-hosted Mistral-7B endpoint.
@retry(stop_max_attempt_number=3, wait_fixed=2000)
def call_mistral_llm(prompt):
    """Send `prompt` to the Mistral-7B API and return its text reply.

    Non-200 statuses and API-reported errors come back as error strings;
    transport failures are re-raised so @retry can try again (3 attempts,
    2 s apart).
    """
    logger.debug(f"Sending prompt to Mistral-7B API: {prompt[:100]}...")
    try:
        reply = requests.post(MODAL_API, json={"prompt": prompt}, timeout=120)
        logger.debug(f"Mistral-7B API response status: {reply.status_code}")
        # Guard clause: anything but 200 is reported as a plain error string.
        if reply.status_code != 200:
            logger.error(f"Mistral API error: Status {reply.status_code}")
            return f"Modal API error: Status {reply.status_code}"
        payload = reply.json()
        if "error" in payload:
            logger.error(f"Mistral API error: {payload['error']}")
            return f"Modal API error: {payload['error']}"
        logger.info("Mistral-7B response received successfully")
        return payload.get("response", "LLM error: No response")
    except requests.exceptions.RequestException as exc:
        logger.error(f"Mistral API request failed: {exc}")
        raise
# Enhanced DSATP log parsing
def dsatp_parse_log(text: str) -> dict:
    """Scan raw log text and return the single most severe threat detected.

    Detection layers:
      1. Direct keyword matches for threat classes that have no dedicated
         detector (e.g. "compromised", "firmware mismatch").
      2. Per-line regex detectors (YARA-style tags, malicious downloads,
         ufw blocks, sudo-to-root, reverse SSH, phishing, SQLi, XSS).
      3. Rate-based aggregation of failed logins per source IP for
         brute-force bursts and slower DDoS-style floods.
      4. A generic "Suspicious Activity" fallback when enough suspicious
         terms appear but nothing concrete was identified.

    Args:
        text: Raw log lines or a free-form alert string.

    Returns:
        Dict with keys "classification", "severity", "mitigation" and
        "confidence"; benign input yields the "No Threat"/"Safe" record.
    """
    logger = logging.getLogger(__name__)  # same underlying logger as the module-level one
    log = text.lower()
    lines = log.split('\n')
    detected_threats = []
    # Comprehensive threat dictionary: keyword -> canned classification record.
    threats = {
        "compromised": {"classification": "System Compromise", "severity": "Critical", "mitigation": "Isolate process, run port scan, reset credentials"},
        "unauthorized": {"classification": "Unauthorized Access", "severity": "High", "mitigation": "Quarantine MAC address, review access logs"},
        "high cpu": {"classification": "Resource Abuse", "severity": "Medium", "mitigation": "Check for crypto-miner or DoS, limit resource usage"},
        "inbound traffic": {"classification": "Network Intrusion", "severity": "Medium", "mitigation": "Block closed ports, enable firewall rules"},
        "firmware mismatch": {"classification": "Firmware Vulnerability", "severity": "High", "mitigation": "Validate OTA or rollback, update firmware"},
        "ddos": {"classification": "DDoS Attack", "severity": "Critical", "mitigation": "Rate-limit traffic, enable DDoS protection"},
        "phishing": {"classification": "Phishing Attempt", "severity": "High", "mitigation": "Block malicious URLs, educate users"},
        "sql injection": {"classification": "SQL Injection", "severity": "Critical", "mitigation": "Sanitize inputs, patch database"},
        "xss": {"classification": "Cross-Site Scripting", "severity": "High", "mitigation": "Escape HTML, update web apps"},
        "privilege escalation": {"classification": "Privilege Escalation", "severity": "Critical", "mitigation": "Patch vulnerabilities, restrict permissions"},
        "trojan": {"classification": "Malware Detected", "severity": "Critical", "mitigation": "Quarantine file, run antivirus"},
        "ransomware": {"classification": "Malware Detected", "severity": "Critical", "mitigation": "Quarantine file, run antivirus, restore from backup"},
        "heuristic.behavior.suspicious": {"classification": "Suspicious Activity", "severity": "High", "mitigation": "Monitor process, run memory scan"},
        "malicious": {"classification": "Malware Detected", "severity": "Critical", "mitigation": "Quarantine file, run antivirus, block malicious URLs"},
        "ufw block": {"classification": "Network Intrusion", "severity": "High", "mitigation": "Investigate blocked IP, strengthen firewall rules"},
        "sudo": {"classification": "Privilege Escalation", "severity": "High", "mitigation": "Audit user permissions, review sudo logs"},
        "reverse ssh": {"classification": "Persistence Mechanism", "severity": "Critical", "mitigation": "Disable unauthorized SSH services, inspect network connections"},
        "failed password": {"classification": "Brute-Force Attempt", "severity": "Critical", "mitigation": "Block suspicious IPs, disable password-based SSH, enable fail2ban"},
        "invalid user": {"classification": "Brute-Force Attempt", "severity": "Critical", "mitigation": "Block suspicious IPs, disable password-based SSH, enable fail2ban"}
    }
    # FIX: these dictionary keys previously had no detector at all, so inputs
    # such as "System compromised!" (the UI's own example prompt) came back as
    # "No Threat". Match them directly against the whole log. Keys with
    # dedicated regex or rate-based detectors below are deliberately excluded
    # here to avoid double-reporting.
    keyword_only = {
        "compromised": 0.85,
        "unauthorized": 0.8,
        "high cpu": 0.8,
        "inbound traffic": 0.75,
        "firmware mismatch": 0.85,
        "ddos": 0.9,
    }
    for keyword, confidence in keyword_only.items():
        if keyword in log:
            detected_threats.append(threats[keyword] | {"confidence": confidence})
    # Advanced threat detection
    failed_attempts = defaultdict(list)
    suspicious_terms = [
        "failed password", "invalid user", "error", "denied", "malformed packet",
        "flood", "syn flood", "http flood", "suspicious url", "script tag",
        "sqlmap", "union select", "escalation attempt", "rootkit", "yara_match",
        "wget", "curl", "bash", "sh", "payload", "ufw", "sudo", "root", "ssh",
        "cron", "systemd"
    ]
    ip_pattern = r'\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b'
    timestamp_pattern = r'\w{3}\s+\d{1,2}\s+\d{2}:\d{2}:\d{2}'
    url_pattern = r'(https?://[^\s]+)'
    sql_pattern = r'(union\s+select|select\s+.*\s+from|drop\s+table)'
    xss_pattern = r'(<script>|on\w+\s*=|javascript:)'
    yara_pattern = r'yara_match:\s*([\w\.]+)\s+detected'
    script_pattern = r'(wget|curl)\s+.*(http[s]?://[^\s]+)\s*.*\.(sh|bash)'
    sudo_pattern = r'sudo:.*user=root\s*;'
    ssh_pattern = r'reverse\s+ssh|tunnel\s+service'
    malicious_url_pattern = r'http[s]?://.*(malicious|payload)[^\s]*'
    for line in lines:
        # YARA-style tag embedded in the log, e.g. "yara_match: trojan.x detected"
        yara_match = re.search(yara_pattern, line)
        if yara_match:
            threat_type = yara_match.group(1).lower()
            if "trojan" in threat_type:
                detected_threats.append(threats["trojan"] | {"confidence": 0.95})
            elif "ransomware" in threat_type:
                detected_threats.append(threats["ransomware"] | {"confidence": 0.95})
            elif "heuristic" in threat_type:
                detected_threats.append(threats["heuristic.behavior.suspicious"] | {"confidence": 0.85})
        # Malicious script download (wget/curl of a *.sh, or a flagged URL)
        if re.search(script_pattern, line, re.IGNORECASE) or re.search(malicious_url_pattern, line, re.IGNORECASE):
            detected_threats.append(threats["malicious"] | {"confidence": 0.95})
        # Firewall block — include the blocked IP in the mitigation text
        if re.search(r'ufw\s+block', line, re.IGNORECASE):
            ip_match = re.search(ip_pattern, line)
            ip = ip_match.group() if ip_match else "unknown IP"
            detected_threats.append(threats["ufw block"] | {"mitigation": f"Investigate blocked IP {ip}, strengthen firewall rules", "confidence": 0.9})
        # Privilege escalation via sudo-to-root
        if re.search(sudo_pattern, line, re.IGNORECASE):
            detected_threats.append(threats["sudo"] | {"confidence": 0.85})
        # Reverse SSH tunnel (persistence)
        if re.search(ssh_pattern, line, re.IGNORECASE):
            detected_threats.append(threats["reverse ssh"] | {"confidence": 0.95})
        # Failed logins: bucket timestamps per source IP for rate analysis below
        if "failed password" in line or "invalid user" in line:
            ip_match = re.search(ip_pattern, line)
            time_match = re.search(timestamp_pattern, line)
            if ip_match and time_match:
                ip = ip_match.group()
                try:
                    # syslog timestamps carry no year; pin one so deltas work
                    timestamp = datetime.strptime(time_match.group(), "%b %d %H:%M:%S")
                    timestamp = timestamp.replace(year=2025)
                    failed_attempts[ip].append(timestamp)
                except ValueError:
                    continue
        # Phishing, SQL Injection, XSS
        if re.search(url_pattern, line) and "phishing" in line:
            detected_threats.append(threats["phishing"] | {"confidence": 0.85})
        if re.search(sql_pattern, line, re.IGNORECASE):
            detected_threats.append(threats["sql injection"] | {"confidence": 0.9})
        if re.search(xss_pattern, line, re.IGNORECASE):
            detected_threats.append(threats["xss"] | {"confidence": 0.85})
    # Rate-based brute-force and DDoS detection per source IP
    for ip, timestamps in failed_attempts.items():
        if len(timestamps) >= 5:
            time_window = max(timestamps) - min(timestamps)
            if time_window <= timedelta(seconds=60):
                detected_threats.append({
                    "classification": "Brute-Force Attempt",
                    "severity": "Critical",
                    "mitigation": f"Block IP {ip}, disable password-based SSH, enable fail2ban",
                    "confidence": 0.95
                })
            elif len(timestamps) >= 10 and time_window <= timedelta(minutes=5):
                detected_threats.append({
                    "classification": "DDoS Attack",
                    "severity": "Critical",
                    "mitigation": f"Rate-limit traffic from {ip}, enable DDoS protection",
                    "confidence": 0.9
                })
    # Generic fallback: many suspicious terms but no concrete detector hit
    suspicious_count = sum(line.count(term) for term in suspicious_terms for line in lines)
    if suspicious_count >= 5 and not detected_threats:
        detected_threats.append({
            "classification": "Suspicious Activity",
            "severity": "Medium",
            "mitigation": "Review logs for anomalies, monitor affected IPs",
            "confidence": 0.75
        })
    # Return the single worst hit, ranked by severity then confidence
    if detected_threats:
        severity_order = {"Critical": 3, "High": 2, "Medium": 1, "Safe": 0}
        highest_threat = max(detected_threats, key=lambda x: (severity_order.get(x["severity"], 0), x["confidence"]))
        logger.info(f"Detected threats: {len(detected_threats)}, Selected: {highest_threat}")
        return highest_threat
    logger.info("No threats detected")
    return {"classification": "No Threat", "severity": "Safe", "mitigation": "None", "confidence": 0.5}
# Enhanced DSATP YARA scanning
def dsatp_yara_scan(file_path: str) -> dict:
    """Run the bundled YARA ruleset against a file and report the worst hit.

    Compiles an in-source ruleset, maps each matched rule name to a canned
    threat record through a dispatch table, and returns the record with the
    highest (severity, confidence) ranking. Import/compile/scan failures are
    reported as an error dict rather than raised.
    """
    try:
        import yara
        rules = yara.compile(source="""
rule BruteForceLog {
strings:
$failed = "failed password" nocase
$invalid = "invalid user" nocase
$denied = "denied" nocase
condition:
($failed and #failed >= 3) or ($invalid and #invalid >= 3) or ($denied and #denied >= 3)
}
rule MalwareLog {
strings:
$mirai = "mirai" nocase
$botnet = "botnet" nocase
$exploit = "exploit" nocase
$rootkit = "rootkit" nocase
$trojan = "trojan" nocase
$ransomware = "ransomware" nocase
$wget = "wget" nocase
$payload = "payload" nocase
$malicious = "malicious" nocase
condition:
any of them
}
rule SuspiciousBehavior {
strings:
$heuristic = "heuristic" nocase
$suspicious = "suspicious" nocase
$ufw = "ufw block" nocase
condition:
any of them
}
rule PhishingLog {
strings:
$phish = "phishing" nocase
$url = "http://" nocase
$url2 = "https://" nocase
condition:
$phish or ($url and #url >= 2) or ($url2 and #url2 >= 2)
}
rule DDoSLog {
strings:
$flood = "flood" nocase
$syn = "syn" nocase
$http_flood = "http flood" nocase
condition:
any of them
}
rule SQLInjectionLog {
strings:
$sql = "union select" nocase
$sql2 = "drop table" nocase
condition:
any of them
}
rule XSSLog {
strings:
$xss = "<script>" nocase
$xss2 = "javascript:" nocase
condition:
any of them
}
rule PersistenceLog {
strings:
$ssh = "reverse ssh" nocase
$tunnel = "tunnel service" nocase
$sudo = "sudo:.*user=root" nocase
condition:
any of them
}
""")
        # Dispatch table: YARA rule name -> threat record (replaces if/elif chain).
        rule_threats = {
            "BruteForceLog": {"classification": "Brute-Force Attempt", "severity": "Critical", "mitigation": "Block suspicious IPs, disable password-based SSH, enable fail2ban", "confidence": 0.95},
            "MalwareLog": {"classification": "Malware Detected", "severity": "Critical", "mitigation": "Quarantine file, run antivirus", "confidence": 0.95},
            "SuspiciousBehavior": {"classification": "Suspicious Activity", "severity": "High", "mitigation": "Monitor process, run memory scan", "confidence": 0.85},
            "PhishingLog": {"classification": "Phishing Attempt", "severity": "High", "mitigation": "Block malicious URLs, educate users", "confidence": 0.85},
            "DDoSLog": {"classification": "DDoS Attack", "severity": "Critical", "mitigation": "Rate-limit traffic, enable DDoS protection", "confidence": 0.9},
            "SQLInjectionLog": {"classification": "SQL Injection", "severity": "Critical", "mitigation": "Sanitize inputs, patch database", "confidence": 0.9},
            "XSSLog": {"classification": "Cross-Site Scripting", "severity": "High", "mitigation": "Escape HTML, update web apps", "confidence": 0.85},
            "PersistenceLog": {"classification": "Persistence Mechanism", "severity": "Critical", "mitigation": "Disable unauthorized SSH services, inspect network connections", "confidence": 0.95},
        }
        matches = rules.match(file_path)
        detected_threats = [dict(rule_threats[m.rule]) for m in matches if m.rule in rule_threats]
        # Select the highest-severity threat
        if detected_threats:
            severity_order = {"Critical": 3, "High": 2, "Medium": 1, "Safe": 0}
            highest_threat = max(detected_threats, key=lambda x: (severity_order.get(x["severity"], 0), x["confidence"]))
            logger.info(f"YARA scan detected threats: {len(detected_threats)}, Selected: {highest_threat}")
            return highest_threat
        logger.info("YARA scan: No threats detected")
        return {
            "classification": "No Malware",
            "severity": "Safe",
            "mitigation": "None",
            "confidence": 0.7
        }
    except Exception as e:
        logger.error(f"YARA scan error: {e}")
        return {"error": str(e), "severity": "Unknown", "mitigation": "Check file format"}
# Chatbot function
def chatbot_response(user_input, file, history, state):
    """Handle one Analyze click: scan the input, gather context, and reply.

    Args:
        user_input: Free text from the textbox (log lines or an alert).
        file: Optional Gradio upload; when present its contents are read and
            scanned with YARA instead of parsing the textbox text.
        history: Chat transcript as a list of {"role", "content"} dicts.
        state: Previous threat-state dict (replaced wholesale below).

    Returns:
        Tuple of (updated_history, scan_result, state, threat_card_html,
        history_table_html) matching the five wired Gradio outputs.
    """
    if history is None:
        history = []
    input_text = user_input
    scan_result = None
    start_time = time.time()
    if file:
        try:
            # FIX: use a context manager so the upload handle is closed even
            # when the read fails (the original open(...).read() leaked it).
            with open(file.name, "r") as fh:
                input_text = fh.read()
            scan_result = dsatp_yara_scan(file.name)
        except Exception as e:
            scan_result = {"error": f"File error: {e}", "severity": "Unknown", "mitigation": "Check file"}
    else:
        scan_result = dsatp_parse_log(input_text)
    context_str = "No context available."
    if query_engine:
        try:
            # Map classification to precise keywords so retrieval pulls CVEs
            # relevant to this threat class rather than generic matches.
            threat_keywords = {
                "Brute-Force Attempt": "brute force, ssh, login attempt, authentication failure, openssh, password attack",
                "Malware Detected": "malware, trojan, ransomware, payload, malicious script, backdoor, virus",
                "Network Intrusion": "firewall, intrusion, ufw, network attack, port scan, unauthorized access",
                "Privilege Escalation": "privilege escalation, sudo, root, unauthorized access, cwe-269",
                "Persistence Mechanism": "ssh tunnel, reverse ssh, persistence, backdoor, remote access",
                "System Compromise": "compromise, breach, unauthorized access, cwe-284",
                "Unauthorized Access": "unauthorized access, login failure, cwe-287",
                "Resource Abuse": "resource abuse, crypto-miner, denial of service, cwe-400",
                "Firmware Vulnerability": "firmware, vulnerability, iot, cwe-119",
                "DDoS Attack": "ddos, denial of service, network flood, cwe-400",
                "Phishing Attempt": "phishing, malicious url, social engineering, cwe-601",
                "SQL Injection": "sql injection, database attack, cwe-89",
                "Cross-Site Scripting": "xss, cross-site scripting, web attack, cwe-79",
                "Suspicious Activity": "suspicious activity, anomaly, heuristic, cwe-693"
            }
            classification = scan_result.get("classification", "unknown")
            keywords = threat_keywords.get(classification, "security threat")
            query = f"Mitigation for: {keywords}"
            results = query_engine.retrieve(query)
            # Include the top-3 hits in full (no truncation).
            context_items = [res.text for res in results[:3]]
            context_str = "\n\n".join(context_items)
            logger.debug(f"LlamaIndex query: {query}, Results: {len(results)}")
        except Exception as e:
            logger.error(f"LlamaIndex error: {e}")
            context_str = f"Context error: {e}"
    if "error" not in scan_result:
        prompt = f"""
You are a Security Analyst. Based on:
Classification: {scan_result['classification']}
Severity: {scan_result['severity']}
Mitigation: {scan_result['mitigation']}
Confidence: {scan_result['confidence']}
Context: {context_str}
Provide a concise response to the user, summarizing the threat and recommended actions in a professional tone. If multiple threats are detected, mention the most severe and note others briefly. Include actionable steps tailored to the threat and avoid generic advice unless no specific mitigation is available.
"""
        try:
            llm_response = call_mistral_llm(prompt)
        except Exception as e:
            # All retries exhausted — degrade to a templated summary.
            logger.warning(f"Mistral-7B failed after retries: {e}, using fallback response")
            llm_response = f"A {scan_result['severity'].lower()} {scan_result['classification'].lower()} was detected. Recommended actions: {scan_result['mitigation']}."
        response = f"**Classification**: {scan_result['classification']}\n**Severity**: {scan_result['severity']}\n**Mitigation**: {scan_result['mitigation']}\n**Confidence**: {scan_result['confidence']:.1f}\n\n{llm_response}\n\n**Context**:\n{context_str}"
    else:
        response = f"**Error**: {scan_result['error']}\n**Context**:\n{context_str}"
    updated_history = history + [
        {"role": "user", "content": input_text or "File uploaded"},
        {"role": "assistant", "content": response}
    ]
    # Snapshot that feeds the threat card and history widgets.
    state = {
        "classification": scan_result.get("classification", "Awaiting Data"),
        "severity": scan_result.get("severity", "Unknown"),
        "mitigation": scan_result.get("mitigation", "None"),
        "confidence": scan_result.get("confidence", 0.0),
        "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    }
    # Generate the current-threat card HTML.
    threat_card_content = f"""
<div class="threat-card" id="threat-card">
<p><strong>Classification:</strong> <span id="classification">{state['classification']}</span></p>
<p><strong>Severity:</strong> <span class="severity-label severity-{state['severity'].lower()}" id="severity">{state['severity']}</span></p>
<p><strong>Mitigation:</strong> <span id="mitigation">{state['mitigation']}</span></p>
<p><strong>Confidence:</strong> <span id="confidence">{state['confidence']:.1f}</span></p>
<p><strong>Timestamp:</strong> <span id="timestamp">{state['timestamp']}</span></p>
</div>
"""
    # Rolling window of the last 10 scans, stored on the function object
    # (process-wide, shared across sessions).
    if not hasattr(chatbot_response, "threat_history"):
        chatbot_response.threat_history = []
    chatbot_response.threat_history.append(state)
    if len(chatbot_response.threat_history) > 10:
        chatbot_response.threat_history.pop(0)
    # Render the history table, newest first.
    history_table = "<table style='width:100%; border-collapse: collapse;'><tr><th>Timestamp</th><th>Classification</th><th>Severity</th><th>Confidence</th></tr>"
    for entry in reversed(chatbot_response.threat_history):
        history_table += f"<tr><td>{entry['timestamp']}</td><td>{entry['classification']}</td><td>{entry['severity']}</td><td>{entry['confidence']:.1f}</td></tr>"
    history_table += "</table>"
    logger.info(f"Response generated in {time.time() - start_time:.2f} seconds")
    return updated_history, scan_result, state, threat_card_content, history_table
# Gradio interface — dark glass-card theme; all styling is inline CSS below.
with gr.Blocks(css="""
body {
background: linear-gradient(135deg, #1a1a2e, #16213e);
color: #e0e0e0;
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, 'Open Sans', 'Helvetica Neue', sans-serif;
margin: 0;
padding: 20px;
}
.threat-card {
background: rgba(255, 255, 255, 0.08);
color: #e0e0e0;
padding: 24px;
border-radius: 16px;
backdrop-filter: blur(12px);
box-shadow: 0 6px 40px rgba(0, 0, 0, 0.3);
margin-bottom: 24px;
transition: transform 0.3s ease, box-shadow 0.3s ease;
animation: fadeIn 0.5s ease-in;
}
@keyframes fadeIn {
from { opacity: 0; transform: scale(0.95) translateY(10px); }
to { opacity: 1; transform: scale(1) translateY(0); }
}
.threat-card:hover {
transform: translateY(-8px);
box-shadow: 0 12px 60px rgba(0, 0, 0, 0.4);
}
.severity-critical {
background: #dc2626;
padding: 8px 16px;
border-radius: 8px;
font-weight: 600;
display: inline-block;
}
.severity-high {
background: #f59e0b;
padding: 8px 16px;
border-radius: 8px;
font-weight: 600;
}
.severity-medium {
background: #eab308;
padding: 8px 16px;
border-radius: 8px;
font-weight: 600;
}
.severity-safe {
background: #10b981;
padding: 8px 16px;
border-radius: 8px;
font-weight: 600;
}
.severity-unknown {
background: #6b7280;
padding: 8px 16px;
border-radius: 8px;
font-weight: 600;
}
.gr-button {
background: linear-gradient(90deg, #4f46e5, #7c3aed);
color: white;
border: none;
padding: 12px 24px;
border-radius: 12px;
font-size: 16px;
font-weight: 600;
transition: background 0.3s ease, transform 0.2s ease;
cursor: pointer;
}
.gr-button:hover {
background: linear-gradient(90deg, #4338ca, #6d28d9);
transform: translateY(-2px);
}
.gr-button:disabled {
background: #6b7280;
cursor: not-allowed;
}
.gr-button:disabled::after {
content: " Analyzing...";
animation: pulse 1.5s infinite;
}
@keyframes pulse {
0% { opacity: 1; }
50% { opacity: 0.5; }
100% { opacity: 1; }
}
.gr-textbox, .gr-file {
background: rgba(255, 255, 255, 0.05);
color: #e0e0e0;
border: 1px solid rgba(255, 255, 255, 0.15);
border-radius: 12px;
padding: 12px;
transition: border-color 0.3s ease;
}
.gr-textbox:focus, .gr-file:focus {
border-color: #4f46e5;
outline: none;
}
.gr-chatbot {
background: rgba(255, 255, 255, 0.03);
border-radius: 16px;
padding: 16px;
box-shadow: 0 4px 20px rgba(0, 0, 0, 0.2);
}
.gr-chatbot .message {
border-radius: 12px;
padding: 12px 16px;
margin: 8px;
max-width: 80%;
animation: slideIn 0.3s ease;
}
@keyframes slideIn {
from { opacity: 0; transform: translateY(10px); }
to { opacity: 1; transform: translateY(0); }
}
.gr-chatbot .user {
background: linear-gradient(90deg, #4f46e5, #7c3aed);
color: white;
margin-left: auto;
}
.gr-chatbot .assistant {
background: #2d3748;
color: #e0e0e0;
margin-right: auto;
}
table {
background: rgba(255, 255, 255, 0.05);
border-radius: 12px;
padding: 16px;
margin-top: 16px;
width: 100%;
border-collapse: separate;
border-spacing: 0;
}
th, td {
padding: 12px;
text-align: left;
border-bottom: 1px solid rgba(255, 255, 255, 0.1);
}
th {
background: rgba(255, 255, 255, 0.08);
font-weight: 600;
color: #a0aec0;
}
tr:last-child td {
border-bottom: none;
}
.text-center {
text-align: center;
font-size: 28px;
font-weight: 700;
margin-bottom: 24px;
color: #ffffff;
text-shadow: 0 2px 4px rgba(0, 0, 0, 0.3);
}
@media (max-width: 768px) {
.gr-row { flex-direction: column; }
.threat-card, table, .gr-chatbot { padding: 12px; }
.gr-button { padding: 10px 16px; }
.gr-chatbot .message { max-width: 90%; }
}
""") as demo:
    # App title and tagline.
    gr.Markdown(
        """
# AI Cybersecurity Agent
Secure your systems with real-time detection of all threats: malware, DDoS, brute-force, and more.
""",
        elem_classes="text-center"
    )
    # Per-session snapshot of the latest scan, threaded through chatbot_response.
    state = gr.State(value={"classification": "Awaiting Data", "severity": "Unknown", "mitigation": "None", "confidence": 0.0, "timestamp": ""})
    with gr.Row():
        with gr.Column(scale=2):
            # Left column: chat transcript plus text/file inputs.
            chatbot = gr.Chatbot(label="Security Analyst Chat", type="messages", height=500)
            user_input = gr.Textbox(placeholder="Enter log data or alert (e.g., 'System compromised!', 'Trojan detected')", lines=3)
            file_input = gr.File(label="Upload .txt/.log file", file_types=[".txt", ".log"])
            submit_btn = gr.Button("Analyze")
        with gr.Column(scale=1):
            # Right column: raw scan JSON, current threat card, history table.
            gr.Markdown("### Threat Analysis Results")
            output_json = gr.JSON(label="Scan Results")
            gr.Markdown("### Current Threat")
            # Placeholder card; replaced by chatbot_response's threat_card_content.
            threat_card = gr.Markdown(
                """
<div class="threat-card" id="threat-card">
<p><strong>Classification:</strong> <span id="classification">Awaiting Data</span></p>
<p><strong>Severity:</strong> <span class="severity-label severity-unknown" id="severity">Unknown</span></p>
<p><strong>Mitigation:</strong> <span id="mitigation">None</span></p>
<p><strong>Confidence:</strong> <span id="confidence">0.0</span></p>
<p><strong>Timestamp:</strong> <span id="timestamp">-</span></p>
</div>
""",
                visible=True
            )
            gr.Markdown("### Threat History")
            # Placeholder table; replaced by chatbot_response's history_table.
            history_table = gr.Markdown(
                """
<table style='width:100%; border-collapse: collapse;'>
<tr><th>Timestamp</th><th>Classification</th><th>Severity</th><th>Confidence</th></tr>
<tr><td colspan='4'>No threats detected yet</td></tr>
</table>
""",
                visible=True
            )
    # Wire the Analyze button to the handler; outputs refresh every panel.
    submit_btn.click(
        fn=chatbot_response,
        inputs=[user_input, file_input, chatbot, state],
        outputs=[chatbot, output_json, state, threat_card, history_table]
    )
# Launch the app when run as a script (mcp_server=True also exposes the
# Gradio MCP endpoint alongside the web UI).
if __name__ == "__main__":
    demo.launch(mcp_server=True)