Upload 2 files
- app.py +88 -0
- requirements.txt +7 -0
app.py
ADDED
@@ -0,0 +1,88 @@
import os
import cv2
import numpy as np
import gradio as gr
import tensorflow as tf
from mtcnn import MTCNN
from huggingface_hub import hf_hub_download
from tensorflow.keras.models import load_model
from tensorflow.keras.applications.xception import preprocess_input as xcp_pre
from tensorflow.keras.applications.efficientnet import preprocess_input as eff_pre

# ---------------------------------------------------------
# Load models from Hugging Face Hub
# ---------------------------------------------------------
xcp_path = hf_hub_download(repo_id="Zeyadd-Mostaffa/deepfake-image-detector_final", filename="xception_model.h5")
eff_path = hf_hub_download(repo_id="Zeyadd-Mostaffa/deepfake-image-detector_final", filename="efficientnet_model.h5")
xcp_model = load_model(xcp_path)
eff_model = load_model(eff_path)

# ---------------------------------------------------------
# Face Detection
# ---------------------------------------------------------
detector = MTCNN()

def extract_faces(image):
    faces = detector.detect_faces(image)
    if not faces:
        return []
    results = []
    for face in faces:
        x, y, w, h = face['box']
        x, y = max(0, x), max(0, y)
        cropped = image[y:y+h, x:x+w]
        if cropped.shape[0] >= 60 and cropped.shape[1] >= 60:
            results.append((cropped, (x, y, w, h)))
    return results

# ---------------------------------------------------------
# Inference Function
# ---------------------------------------------------------
def predict_faces(image):
    faces = extract_faces(image)
    if not faces:
        return "No faces detected", None

    annotated = image.copy()
    results = []

    for i, (face, (x, y, w, h)) in enumerate(faces):
        # Preprocess
        xcp_img = cv2.resize(face, (299, 299))
        eff_img = cv2.resize(face, (224, 224))

        xcp_tensor = xcp_pre(xcp_img.astype(np.float32))[np.newaxis, ...]
        eff_tensor = eff_pre(eff_img.astype(np.float32))[np.newaxis, ...]

        # Predict
        xcp_pred = xcp_model.predict(xcp_tensor, verbose=0).flatten()[0]
        eff_pred = eff_model.predict(eff_tensor, verbose=0).flatten()[0]
        avg_pred = (xcp_pred + eff_pred) / 2

        label = "Real" if avg_pred > 0.5 else "Fake"
        confidence = f"{avg_pred:.2f}"

        # Annotate
        color = (0, 255, 0) if label == "Real" else (255, 0, 0)  # RGB: green = real, red = fake (Gradio passes RGB arrays)
        cv2.rectangle(annotated, (x, y), (x + w, y + h), color, 2)
        cv2.putText(annotated, f"{label} ({confidence})", (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.7, color, 2)

        results.append(f"Face {i+1}: {label} (Avg: {avg_pred:.3f}, XCP: {xcp_pred:.3f}, EFF: {eff_pred:.3f})")

    return "\n".join(results), annotated

# ---------------------------------------------------------
# Gradio Interface
# ---------------------------------------------------------
interface = gr.Interface(
    fn=predict_faces,
    inputs=gr.Image(type="numpy", label="Upload Image"),
    outputs=[
        gr.Textbox(label="Predictions"),
        gr.Image(type="numpy", label="Annotated Image")
    ],
    title="Deepfake Detector (Multi-Face Ensemble)",
    description="This model detects all faces in an image and classifies each one as real or fake using an Xception and EfficientNetB4 ensemble."
)

interface.launch()
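A quick way to sanity-check the pipeline outside the Gradio UI (a minimal sketch, assuming the definitions above are already in scope, e.g. run in place of interface.launch(); the file name test.jpg is a hypothetical placeholder, not part of this commit). OpenCV reads images as BGR, so convert to RGB to match the numpy arrays Gradio passes to predict_faces:

    img_bgr = cv2.imread("test.jpg")                      # hypothetical local image
    img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)    # predict_faces expects RGB, as Gradio provides
    summary, annotated = predict_faces(img_rgb)
    print(summary)
    if annotated is not None:
        cv2.imwrite("annotated.jpg", cv2.cvtColor(annotated, cv2.COLOR_RGB2BGR))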
requirements.txt
ADDED
@@ -0,0 +1,7 @@
tensorflow>=2.9.0
mtcnn
opencv-python
numpy
pandas
gradio
huggingface_hub
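To run the app outside of Spaces, the usual workflow would be pip install -r requirements.txt followed by python app.py, which starts the Gradio server on its default local port.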