|
import os |
|
from flask import Flask, request, render_template_string |
|
from PIL import Image |
|
import torch |
|
from transformers import pipeline, CLIPProcessor, CLIPModel |
|
|
|
# Flask application object; routes below render everything through one
# inline HTML template (HTML_TEMPLATE), so no template folder is needed.
app = Flask(__name__)


# Directory for uploaded images. Created eagerly at import time so the
# first upload never races against a missing folder.
# NOTE(review): uploads are opened in-memory by detect_image() and never
# actually saved here — confirm whether this folder is still needed.
upload_folder = os.path.join('static', 'uploads')

os.makedirs(upload_folder, exist_ok=True)


# Text-classification pipelines, keyed by the <select name="model"> values
# in the form. Instantiating these downloads the model weights on first
# run, so module import can be slow and requires network access.
# NOTE(review): only the mrm8488 model is fine-tuned for fake-news
# detection; electra-base-discriminator and bert-base-uncased carry
# untrained/generic classification heads — verify their outputs are
# meaningful for this task.
news_models = {

    "mrm8488": pipeline("text-classification", model="mrm8488/bert-tiny-finetuned-fake-news-detection"),

    "google-electra": pipeline("text-classification", model="google/electra-base-discriminator"),

    "bert-base": pipeline("text-classification", model="bert-base-uncased")

}


# CLIP model + processor for zero-shot image classification; used by
# detect_image() to score an image against two text prompts.
clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")

clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
|
|
|
|
|
# Single-page Jinja template shared by every route. Optional context keys:
# `news_prediction` and `image_prediction` (the latter rendered with |safe
# because detect_image() embeds <strong>/<br> markup).
# Fix: the section headings contained mojibake where emoji had been
# mis-decoded; the intended characters are restored below.
HTML_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>AI & News Detection</title>
    <style>
        body { font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif; background-color: #f5f5f5; padding: 20px; }
        .container { background: white; padding: 30px; border-radius: 12px; max-width: 850px; margin: auto; box-shadow: 0 4px 12px rgba(0, 0, 0, 0.1); }
        textarea, select, input[type='file'] { width: 100%; padding: 12px; margin-top: 10px; border-radius: 8px; border: 1px solid #ccc; }
        button { background-color: #4CAF50; color: white; border: none; padding: 12px 20px; border-radius: 8px; cursor: pointer; font-size: 16px; margin-top: 10px; }
        button:hover { background-color: #45a049; }
        .result { background: #e7f3fe; padding: 15px; border-radius: 10px; margin-top: 20px; }
    </style>
</head>
<body>
    <div class="container">
        <h1>📰 Fake News Detection</h1>
        <form method="POST" action="/detect">
            <textarea name="text" placeholder="Enter news text..." required></textarea>
            <label for="model">Select Fake News Model:</label>
            <select name="model" required>
                <option value="mrm8488">MRM8488 (BERT-Tiny)</option>
                <option value="google-electra">Google Electra (Base Discriminator)</option>
                <option value="bert-base">BERT-Base Uncased</option>
            </select>
            <button type="submit">Detect News Authenticity</button>
        </form>

        {% if news_prediction %}
        <div class="result">
            <h2>🧠 News Detection Result:</h2>
            <p>{{ news_prediction }}</p>
        </div>
        {% endif %}

        <h1>🖼️ AI vs. Human Image Detection</h1>
        <form method="POST" action="/detect_image" enctype="multipart/form-data">
            <input type="file" name="image" required>
            <button type="submit">Upload and Detect</button>
        </form>

        {% if image_prediction %}
        <div class="result">
            <h2>📷 Image Detection Result:</h2>
            <p>{{ image_prediction|safe }}</p>
            <p><strong>Explanation:</strong> The model compares the uploaded image against the text prompts "AI-generated image" and "Human-created image" to determine similarity. Higher similarity to the AI prompt suggests an AI-generated image, and vice versa.</p>
        </div>
        {% endif %}
    </div>
</body>
</html>
"""
|
|
|
@app.route("/", methods=["GET"])
def home():
    """Serve the landing page with no detection results populated."""
    page = render_template_string(HTML_TEMPLATE)
    return page
|
|
|
@app.route("/detect", methods=["POST"])
def detect():
    """Classify submitted news text as REAL or FAKE with the chosen model.

    Form fields:
        text:  news text to classify (required, non-blank after stripping).
        model: key into ``news_models`` selecting the pipeline (required).

    Returns the rendered page with ``news_prediction`` set to a verdict
    string, or to an error message on invalid input.
    """
    # Strip so whitespace-only submissions are rejected like empty ones
    # (previously "   " slipped past the guard and hit the model).
    text = (request.form.get("text") or "").strip()
    model_key = request.form.get("model")

    if not text or model_key not in news_models:
        return render_template_string(HTML_TEMPLATE, news_prediction="Invalid input or model selection.")

    # Pipeline returns a list of {'label': ..., 'score': ...}; take the top hit.
    result = news_models[model_key](text)[0]

    # NOTE(review): the three models use different label vocabularies
    # (e.g. LABEL_0/LABEL_1 vs. real/fake); mapping "real"/"label_1"/
    # "neutral" to REAL is an assumption — confirm against each model card.
    label = "REAL" if result['label'].lower() in ["real", "label_1", "neutral"] else "FAKE"
    confidence = result['score'] * 100

    prediction_text = f"News is {label} (Confidence: {confidence:.2f}%)"
    return render_template_string(HTML_TEMPLATE, news_prediction=prediction_text)
|
|
|
@app.route("/detect_image", methods=["POST"])
def detect_image():
    """Zero-shot classify an uploaded image as AI-generated or human-created.

    Scores the image with CLIP against two text prompts and reports the
    prompt with the higher softmax similarity, plus both percentages.

    Returns the rendered page with ``image_prediction`` set (may contain
    <strong>/<br> markup, rendered with |safe in the template), or an error
    message when no usable image was uploaded.
    """
    if "image" not in request.files:
        return render_template_string(HTML_TEMPLATE, image_prediction="No image uploaded.")

    file = request.files["image"]
    # Browsers submit an empty-filename part when no file was chosen.
    if not file.filename:
        return render_template_string(HTML_TEMPLATE, image_prediction="No image uploaded.")

    try:
        img = Image.open(file).convert("RGB")
    except Exception:
        # PIL raises UnidentifiedImageError (among others) on corrupt or
        # non-image uploads; report cleanly instead of a 500 error page.
        return render_template_string(HTML_TEMPLATE, image_prediction="Could not read the uploaded file as an image.")

    prompts = ["AI-generated image", "Human-created image"]
    inputs = clip_processor(text=prompts, images=img, return_tensors="pt", padding=True)

    # Inference only — no autograd bookkeeping needed.
    with torch.no_grad():
        outputs = clip_model(**inputs)
        # logits_per_image: one row per image, one column per prompt;
        # softmax turns the two logits into probabilities.
        similarity = outputs.logits_per_image.softmax(dim=1).squeeze().tolist()

    ai_similarity, human_similarity = similarity
    prediction = "AI-Generated" if ai_similarity > human_similarity else "Human-Created"

    prediction_text = (
        f"Prediction: <strong>{prediction}</strong><br>"
        f"AI Similarity: {ai_similarity * 100:.2f}% | Human Similarity: {human_similarity * 100:.2f}%"
    )

    return render_template_string(HTML_TEMPLATE, image_prediction=prediction_text)
|
|
|
if __name__ == "__main__":
    # Listen on all interfaces; 7860 is the conventional Hugging Face
    # Spaces port. Debug mode is left at its default (off).
    app.run(host="0.0.0.0", port=7860)
|
|