import os

from flask import Flask, request, render_template_string
from PIL import Image
import torch
from transformers import pipeline, CLIPProcessor, CLIPModel

app = Flask(__name__)

# Create the 'static/uploads' folder if it doesn't exist
upload_folder = os.path.join('static', 'uploads')
os.makedirs(upload_folder, exist_ok=True)

# Fake News Detection Models
news_models = {
    "mrm8488": pipeline("text-classification", model="mrm8488/bert-tiny-finetuned-fake-news-detection"),
    "google-electra": pipeline("text-classification", model="google/electra-base-discriminator"),
    "bert-base": pipeline("text-classification", model="bert-base-uncased")
}

# Image Detection Model (CLIP-based)
clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

# HTML Template with both Fake News and Image Detection
HTML_TEMPLATE = """
<!DOCTYPE html>
<html>
<head>
    <title>AI & News Detection</title>
</head>
<body>

    <h1>📰 Fake News Detection</h1>

    <!-- Field names match what the /detect route reads: "text" and "model" -->
    <form action="/detect" method="post">
        <textarea name="text" rows="4" cols="60" placeholder="Paste a news headline or article here"></textarea>
        <br>
        <select name="model">
            <option value="mrm8488">mrm8488/bert-tiny-finetuned-fake-news-detection</option>
            <option value="google-electra">google/electra-base-discriminator</option>
            <option value="bert-base">bert-base-uncased</option>
        </select>
        <button type="submit">Check News</button>
    </form>

    {% if news_prediction %}
        <h2>🧠 News Detection Result:</h2>
        <p>{{ news_prediction }}</p>
    {% endif %}

    <h1>🖼️ AI vs. Human Image Detection</h1>

    <!-- Field name matches what the /detect_image route reads: "image" -->
    <form action="/detect_image" method="post" enctype="multipart/form-data">
        <input type="file" name="image" accept="image/*">
        <button type="submit">Check Image</button>
    </form>

    {% if image_prediction %}
        <h2>📷 Image Detection Result:</h2>
        <p>{{ image_prediction|safe }}</p>
        <p><b>Explanation:</b> The model compares the uploaded image against the text prompts
        "AI-generated image" and "Human-created image" to determine similarity. Higher similarity
        to the AI prompt suggests an AI-generated image, and vice versa.</p>
    {% endif %}
</body>
</html>
""" @app.route("/", methods=["GET"]) def home(): return render_template_string(HTML_TEMPLATE) @app.route("/detect", methods=["POST"]) def detect(): text = request.form.get("text") model_key = request.form.get("model") if not text or model_key not in news_models: return render_template_string(HTML_TEMPLATE, news_prediction="Invalid input or model selection.") result = news_models[model_key](text)[0] label = "REAL" if result['label'].lower() in ["real", "label_1", "neutral"] else "FAKE" confidence = result['score'] * 100 prediction_text = f"News is {label} (Confidence: {confidence:.2f}%)" return render_template_string(HTML_TEMPLATE, news_prediction=prediction_text) @app.route("/detect_image", methods=["POST"]) def detect_image(): if "image" not in request.files: return render_template_string(HTML_TEMPLATE, image_prediction="No image uploaded.") file = request.files["image"] img = Image.open(file).convert("RGB") # Compare with AI and Human prompts prompts = ["AI-generated image", "Human-created image"] inputs = clip_processor(text=prompts, images=img, return_tensors="pt", padding=True) with torch.no_grad(): outputs = clip_model(**inputs) similarity = outputs.logits_per_image.softmax(dim=1).squeeze().tolist() ai_similarity, human_similarity = similarity prediction = "AI-Generated" if ai_similarity > human_similarity else "Human-Created" prediction_text = ( f"Prediction: {prediction}
" f"AI Similarity: {ai_similarity * 100:.2f}% | Human Similarity: {human_similarity * 100:.2f}%" ) return render_template_string(HTML_TEMPLATE, image_prediction=prediction_text) if __name__ == "__main__": app.run(host="0.0.0.0", port=7860)