Martin Natale committed
Commit b1fb35d · Parent: 57f4f03

Migrate background image to LFS

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ client/static/images/background.jpg filter=lfs diff=lfs merge=lfs -text
Dockerfile ADDED
@@ -0,0 +1,19 @@
+ FROM python:3.9
+
+ # Install system dependencies
+ RUN apt-get update && apt-get install -y \
+     libgl1-mesa-glx \
+     libglib2.0-0 \
+     && rm -rf /var/lib/apt/lists/*
+
+ RUN useradd -m -u 1000 user
+ USER user
+ ENV PATH="/home/user/.local/bin:$PATH"
+
+ WORKDIR /app
+
+ COPY --chown=user ./requirements.txt requirements.txt
+ RUN pip install --no-cache-dir --upgrade -r requirements.txt
+
+ COPY --chown=user ./client /app/client
+ CMD ["python", "-m", "client.main"]
client/__pycache__/main.cpython-310.pyc ADDED
Binary file (1.22 kB)
 
client/__pycache__/main.cpython-311.pyc ADDED
Binary file (1.79 kB)
 
client/config/__init__.py ADDED
@@ -0,0 +1 @@
+ from .settings import settings
client/config/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (168 Bytes)
 
client/config/__pycache__/settings.cpython-310.pyc ADDED
Binary file (698 Bytes)
 
client/config/settings.py ADDED
@@ -0,0 +1,15 @@
+ from pydantic_settings import BaseSettings
+ from pydantic import Field
+
+
+ class Settings(BaseSettings):
+     """
+     Config values for the service. Values are picked up automatically if a .env file is
+     found in the project root.
+     """
+
+     SERVICE_PORT: int | None = Field(default=7860)
+     TEMP_VIDEO_PATH: str = Field(default="/tmp/video.webm")
+
+     
+ settings = Settings()  # type: ignore
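Note: because Settings is a pydantic-settings BaseSettings, each field can also be overridden through an environment variable of the same name (standard pydantic-settings behaviour). A minimal sketch, with a purely illustrative port value:

    import os
    from client.config.settings import Settings

    os.environ["SERVICE_PORT"] = "8080"      # matched to the SERVICE_PORT field (case-insensitive)
    assert Settings().SERVICE_PORT == 8080   # parsed from the environment instead of the 7860 default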
client/dependencies/__init__.py ADDED
@@ -0,0 +1 @@
+ from .inference_service import inference_service
client/dependencies/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (183 Bytes)
 
client/dependencies/__pycache__/inference_service.cpython-310.pyc ADDED
Binary file (341 Bytes)
 
client/dependencies/inference_service.py ADDED
@@ -0,0 +1,5 @@
+ from client.services import InferenceService
+
+
+ def inference_service() -> InferenceService:
+     return InferenceService()
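This provider is what the routes pull in through Depends(inference_service), so the concrete service can be swapped without touching the router. A sketch of how that could look in a test; the fake class and TestClient usage are illustrative, not part of this commit, and it assumes the app is created from the repository root so client/static exists:

    from fastapi.testclient import TestClient
    from client.main import create_app
    from client.dependencies import inference_service

    class FakeInferenceService:
        def save_video(self, video_content, user_id): ...
        def save_frames(self, user_id): return "temp/fake/"
        def train_model(self, frames_path, user_id): return "temp/model/fake_id.zip"
        def push_model(self, model_path, user_id): ...

    app = create_app()
    app.dependency_overrides[inference_service] = lambda: FakeInferenceService()
    client = TestClient(app)  # requests to /submitAccount now hit the fake service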
client/main.py ADDED
@@ -0,0 +1,37 @@
+ import uvicorn
+
+ from fastapi import FastAPI
+ from fastapi.staticfiles import StaticFiles
+ from client.routers import client_router
+ from client.config import settings
+ from fastapi.middleware.cors import CORSMiddleware
+
+
+ def create_app() -> FastAPI:
+     """Create and configure the FastAPI application."""
+     app = FastAPI(
+         title="Client Service",
+         description="PPAI HACKATHON",
+         version="1.0.0",
+     )
+     app.add_middleware(
+         CORSMiddleware,
+         allow_origins=["*"],  # Allows all origins
+         allow_credentials=True,
+         allow_methods=["*"],  # Allows all methods
+         allow_headers=["*"],  # Allows all headers
+     )
+     app.include_router(router=client_router)
+     app.mount("/static", StaticFiles(directory="client/static"), name="static")
+     return app
+
+
+ def main() -> None:
+     """Main function to set up logging and run the server."""
+     app: FastAPI = create_app()
+
+     uvicorn.run(app=app, host="0.0.0.0", port=settings.SERVICE_PORT)  # type: ignore
+
+
+ if __name__ == "__main__":
+     main()
client/routers/__init__.py ADDED
@@ -0,0 +1 @@
+ from .client import router as client_router
client/routers/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (192 Bytes)
 
client/routers/__pycache__/client.cpython-310.pyc ADDED
Binary file (2.45 kB)
 
client/routers/client.py ADDED
@@ -0,0 +1,68 @@
+ from fastapi import APIRouter, UploadFile, File, Depends, Form, Request
+ from fastapi.responses import FileResponse, JSONResponse
+ from client.dependencies import inference_service
+ from client.services import InferenceService
+
+ import logging
+
+ router = APIRouter(tags=["Client"])
+
+
+ @router.get(path="/")
+ def login() -> FileResponse:
+     return FileResponse(path="client/static/login.html")
+
+
+ @router.get(path="/index")
+ def static_index() -> FileResponse:
+     return FileResponse(path="client/static/index.html")
+
+
+ @router.get(path="/createAccount")
+ def create_account(request: Request) -> FileResponse:
+     return FileResponse(path="client/static/createAccount.html")
+
+
+ @router.post("/get_image")
+ async def get_image(
+     image: UploadFile = File(...),
+     inference_service: InferenceService = Depends(inference_service),
+ ) -> JSONResponse:
+     # Read the contents of the file
+     contents = await image.read()
+
+     # Get the size of the image
+     size = len(contents)
+
+     # Return a confirmation message with the size of the image
+     return JSONResponse(
+         content={
+             "message": "Image received successfully",
+             "filename": image.filename,
+             "size": size,
+         }
+     )
+
+
+ @router.post("/submitAccount")
+ async def submit_account(
+     video: UploadFile = File(...),
+     email: str = Form(...),
+     i_service: InferenceService = Depends(inference_service),
+ ):
+     """
+     Process the create-account form: read the uploaded video, extract some of
+     its frames, and save them in a temporary directory.
+     """
+     # Log video file details
+     logging.info(
+         f"Received video: filename={video.filename}, content_type={video.content_type}"
+     )
+     logging.info(f"Received email: {email}")
+
+     video_content: bytes = await video.read()
+     i_service.save_video(video_content=video_content, user_id=email)
+     frames_path: str = i_service.save_frames(user_id=email)
+     model_path: str = i_service.train_model(frames_path=frames_path, user_id=email)
+     i_service.push_model(model_path=model_path, user_id=email)
+     return JSONResponse(content={"message": "Account created successfully"})
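For reference, the /submitAccount endpoint above can be exercised directly from Python; a minimal sketch, where the host, port, and test.webm file are illustrative assumptions:

    import requests

    with open("test.webm", "rb") as f:  # any short locally recorded clip
        resp = requests.post(
            "http://localhost:7860/submitAccount",
            data={"email": "user@example.com"},
            files={"video": ("video_user.webm", f, "video/webm")},
        )
    print(resp.status_code, resp.json())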
client/services/__init__.py ADDED
@@ -0,0 +1 @@
+ from .inference import InferenceService
client/services/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (191 Bytes)
 
client/services/__pycache__/inference.cpython-310.pyc ADDED
Binary file (2.64 kB)
 
client/services/__pycache__/preprocess.cpython-310.pyc ADDED
Binary file (3.18 kB)
 
client/services/inference.py ADDED
@@ -0,0 +1,85 @@
+ import numpy as np
+ from deepface import DeepFace
+ from PIL import Image
+ from io import BytesIO
+ from typing import Union, List
+ import os
+ from fastapi import HTTPException
+ import uuid
+ import cv2
+ from .preprocess import load_dataset, RegNet
+ from sklearn.linear_model import LogisticRegression
+ from concrete.ml.torch.compile import compile_torch_model
+ import logging
+
+
+ class InferenceService:
+     def __init__(self):
+         self.model = None
+
+     def save_video(self, video_content: bytes, user_id: str) -> None:
+         """
+         Save the uploaded video content to a per-user temporary directory.
+         """
+         try:
+             video_id = str(uuid.uuid4())
+             os.makedirs(f"temp/{user_id}", exist_ok=True)
+             with open(file=f"temp/{user_id}/video.mp4", mode="wb") as f:
+                 f.write(video_content)
+         except Exception as e:
+             raise HTTPException(status_code=500, detail=f"Error saving video: {e}")
+
+     def save_frames(self, user_id: str) -> str:
+         """
+         Save the first 10 frames of the video using cv2 and return the path to the frames.
+         """
+         video_path = f"temp/{user_id}/video.mp4"
+         if not os.path.exists(video_path):
+             raise HTTPException(status_code=404, detail="Video not found")
+
+         vidcap = cv2.VideoCapture(video_path)
+         success, image = vidcap.read()
+         count = 0
+         while success and count < 10:
+             cv2.imwrite(
+                 f"temp/{user_id}/frame{count}.jpg", image
+             )  # save frame as a JPEG file
+             success, image = vidcap.read()
+             count += 1
+         return f"temp/{user_id}/"
+
+     def train_model(self, frames_path: str, user_id: str) -> str:
+         """
+         Train a model on the frames and return the path to the model. NOTE: the training pipeline below is kept as an inert string; a placeholder path is returned for now.
+         """
+         """
+         embeddings, labels = load_dataset(frames_path, cache=True)
+         embeddings = (embeddings - np.mean(embeddings, axis=0)) / np.std(
+             embeddings, axis=0
+         )
+         model = LogisticRegression(C=1 / 5)
+         model.fit(embeddings, y=labels)
+         nb_sample = 100
+         W = model.coef_
+         b = model.intercept_.reshape(-1, 1)
+         X_train_rand = np.random.normal(0, 1, [nb_sample, embeddings.shape[1]])
+         W_rand = np.random.normal(0, 1, [nb_sample, W.shape[1]])
+         X_rand_stack = np.hstack([X_train_rand, W_rand])
+         reg_net = RegNet(b)
+         quantized_module = compile_torch_model(
+             reg_net,  # our model
+             X_rand_stack,  # a representative input-set used for both quantization and compilation
+             n_bits=6,
+             rounding_threshold_bits={"n_bits": 6, "method": "approximate"},
+         )
+         save_model_path: str = f"temp/model/{user_id}.zip"
+         quantized_module.fhe_circuit.server.save(save_model_path)
+         return save_model_path
+         """
+         return "temp/model/fake_id.zip"
+
+     def push_model(self, model_path: str, user_id: str) -> None:
+         """
+         Push the model to the server.
+         """
+         # TODO
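The same service can be driven outside FastAPI for a quick local check; a sketch in which sample.mp4 and the user id are illustrative, and train_model currently returns only the placeholder path:

    from client.services import InferenceService

    svc = InferenceService()
    with open("sample.mp4", "rb") as f:                          # any short local clip
        svc.save_video(video_content=f.read(), user_id="demo@example.com")
    frames_dir = svc.save_frames(user_id="demo@example.com")     # writes frame0.jpg ... frame9.jpg
    model_path = svc.train_model(frames_path=frames_dir, user_id="demo@example.com")
    print(frames_dir, model_path)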
client/services/preprocess.py ADDED
@@ -0,0 +1,88 @@
+ import numpy as np
+ from tqdm import tqdm
+ from glob import glob
+ import os
+
+
+ def load_image_folder(folder_path: str, max_nb_images: int) -> list[str]:
+     image_path = f"{folder_path}/*"
+     image_paths = glob(image_path)
+     images = [image_path for image_path in image_paths[:max_nb_images]]
+     return images
+
+
+ def compute_embeddings_and_labels(images: list[str], label: int) -> tuple[np.ndarray, np.ndarray]:
+     from deepface import DeepFace
+
+     embeddings = []
+     labels = []
+     for image in tqdm(images):
+         try:
+             embedding_obj = DeepFace.represent(
+                 img_path=image,
+                 model_name="Facenet",
+             )
+             embedding = embedding_obj[0]["embedding"]
+             embeddings.append(embedding)
+             labels.append(label)
+         except Exception as e:
+             print(f"Error while processing {image}: {e}")
+     return np.vstack(embeddings), np.array(labels)
+
+
+ def load_embeddings_and_labels(
+     folder_path: str, label: int, max_nb_images: int, cache: bool
+ ) -> tuple[np.ndarray, np.ndarray]:
+     if (
+         not os.path.exists(f"{folder_path}/embeddings.npy")
+         or not os.path.exists(f"{folder_path}/labels.npy")
+         or (not cache)
+     ):
+         images = load_image_folder(folder_path, max_nb_images)
+         embeddings, labels = compute_embeddings_and_labels(images, label)
+         np.save(f"{folder_path}/embeddings.npy", embeddings)
+         np.save(f"{folder_path}/labels.npy", labels)
+     embeddings = np.load(f"{folder_path}/embeddings.npy")
+     labels = np.load(f"{folder_path}/labels.npy")
+     return embeddings, labels
+
+
+ def load_dataset(
+     target_folder: str,
+     max_nb_images=500,
+     cache=True,
+     deep_fake_folder: str = "./data/deepfake",
+ ) -> tuple[np.ndarray, np.ndarray]:
+     deep_fake_images_embeddings, deep_fake_labels = load_embeddings_and_labels(
+         deep_fake_folder, max_nb_images=max_nb_images, label=0, cache=cache
+     )
+     target_images_embeddings, target_images_labels = load_embeddings_and_labels(
+         target_folder, max_nb_images=max_nb_images, label=1, cache=cache
+     )
+     embeddings = np.vstack([target_images_embeddings, deep_fake_images_embeddings])
+     labels = np.hstack([target_images_labels, deep_fake_labels])
+     return embeddings, labels
+
+
+ def featurisation(embeddings: np.ndarray) -> np.ndarray:
+     rms = np.sqrt(np.mean(embeddings**2, axis=1))
+     mean = np.mean(embeddings, axis=1)
+     median = np.median(embeddings, axis=1)
+     features = np.stack([rms, mean, median], axis=1)
+     return features
+
+
+ from concrete.ml.torch.compile import compile_torch_model
+ import torch
+ import torch.nn as nn
+
+
+ class RegNet(nn.Module):
+     def __init__(self, b):
+         super().__init__()
+         self.b = nn.Parameter(torch.ones(1) * b)
+
+     def forward(self, x):
+         X = x[:, :128]
+         W = x[:, 128:]
+         return ((X @ W.T + self.b) > 0).float()
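RegNet packs the logistic-regression decision into one matrix expression so it can be handed to compile_torch_model (see the disabled train_model pipeline above): the first 128 input columns carry the Facenet embeddings X, the remaining columns carry the classifier weights W, and the forward pass is the thresholded score X @ W.T + b. A small numeric sketch, with illustrative shapes and values:

    import numpy as np
    import torch
    from client.services.preprocess import RegNet

    X = np.random.normal(size=(4, 128))   # stand-ins for Facenet embeddings
    W = np.random.normal(size=(4, 128))   # stand-ins for classifier weight rows
    packed = torch.tensor(np.hstack([X, W]), dtype=torch.float32)
    out = RegNet(0.3)(packed)             # equals ((X @ W.T + 0.3) > 0).float()
    print(out.shape, out[:2])             # a (4, 4) matrix of 0/1 decisions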
client/static/createAccount.html ADDED
@@ -0,0 +1,210 @@
+ <!DOCTYPE html>
+ <html lang="en">
+
+ <head>
+   <meta charset="UTF-8">
+   <meta name="viewport" content="width=device-width, initial-scale=1.0">
+   <title>TechMart - Create Account</title>
+   <script src="https://cdn.tailwindcss.com"></script>
+   <script src="https://unpkg.com/scrollreveal"></script>
+   <link href="https://fonts.googleapis.com/css2?family=Playfair+Display:wght@400;700&family=Inter:wght@300;400;500&display=swap" rel="stylesheet">
+   <link href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0-beta3/css/all.min.css" rel="stylesheet">
+   <link rel="stylesheet" href="/static/css/styles.css">
+ </head>
+
+ <body class="bg-gray-900 text-white min-h-screen flex flex-col" style="background-image: url('/static/images/background.jpg');">
+   <!-- <header class="bg-gray-800 p-4">
+     <h1 class="text-3xl font-bold text-center"><i class="fas fa-user-plus mr-2"></i>Create Account</h1>
+   </header> -->
+
+
+   <main class="flex-grow container mx-auto p-6 flex flex-col lg:flex-row items-center justify-center space-y-8 lg:space-y-0 lg:space-x-12">
+
+     <!-- Camera Section -->
+     <div class="max-w-md w-full p-6 lg:p-0 lg:w-1/2 flex flex-col items-center video-reveal">
+       <div class="relative">
+         <video id="video-preview" autoplay muted class="w-full max-w-md max-h-md aspect-square bg-black rounded-full object-cover shadow-2xl ring-4 ring-gray-300 ring-opacity-50 hover:ring-blue-500 hover:ring-opacity-75 transition-all duration-300 ease-in-out transform hover:scale-105"></video>
+
+         <!-- Recording indicator -->
+         <div id="rec-indicator" class="hidden absolute top-2 right-2 w-4 h-4 bg-red-600 rounded-full animate-pulse"></div>
+       </div>
+
+       <!-- Start Recording Button -->
+       <div class="text-center mt-6">
+         <button type="button" id="start-recording" class="bg-red-600 hover:bg-red-700 text-white font-bold w-16 h-16 rounded-full flex items-center justify-center animate-pulse hover:scale-110 active:scale-95 transition-all duration-300 ease-in-out">
+           <i class="fas fa-video"></i>
+         </button>
+       </div>
+     </div>
+
+     <!-- Form Section -->
+     <div class="max-w-md w-full bg-gray-800/40 backdrop-blur-md rounded-xl shadow-lg p-6 lg:w-1/2 transform transition-all duration-300 hover:scale-102 hover:shadow-md form-reveal">
+       <form id="create-account-form" method="POST" action="/submitAccount" class="space-y-6" enctype="multipart/form-data">
+         <div>
+           <label for="email" class="block text-sm font-medium text-gray-300">Email Address</label>
+           <input type="email" id="email" name="email" required class="w-full px-4 py-3 mt-2 bg-gray-700 text-white rounded-lg focus:outline-none focus:ring-2 focus:ring-blue-500 focus:ring-opacity-50 transition duration-300 ease-in-out">
+         </div>
+         <button type="submit" class="w-full bg-green-600 hover:bg-green-700 text-white font-bold py-3 px-4 rounded-lg transition-all duration-300 ease-in-out transform hover:scale-102">
+           Create Account 🤗
+         </button>
+       </form>
+       <div id="error-message" class="mt-4 text-center text-red-500"></div>
+     </div>
+
+
+
+   </main>
+
+   <!-- <footer class="bg-gray-800 p-4 text-center">
+     <p>&copy; 2023 TechMart. All rights reserved.</p>
+   </footer> -->
+
+   <script>
+     document.addEventListener('DOMContentLoaded', function() {
+       // Animation for the video section: zoom-in and upward slide
+       ScrollReveal().reveal('.video-reveal', {
+         duration: 1200,        // Animation duration
+         scale: 0.9,            // Initial zoom (90%)
+         distance: '30px',      // Upward slide
+         origin: 'bottom',      // Animation starts from the bottom
+         opacity: 0,            // Gradual fade-in
+         easing: 'ease-in-out', // Smooth transition
+         delay: 200             // Delay before starting
+       });
+
+       // Animation for the form section: slide in from the right
+       ScrollReveal().reveal('.form-reveal', {
+         duration: 1200,        // Animation duration
+         distance: '60px',      // Slide distance from the right
+         origin: 'right',       // Animation starts from the right
+         opacity: 0,            // Gradual fade-in
+         easing: 'ease-in-out', // Smooth transition
+         delay: 400             // Slightly longer delay so the two effects stay coordinated
+       });
+
+       // Start the camera and prefill the email once the page has loaded
+       fillEmailFromUrl();
+       startCamera();
+     });
+
+     // Add this function at the beginning of the script
+     function fillEmailFromUrl() {
+       const urlParams = new URLSearchParams(window.location.search);
+       const email = urlParams.get('email');
+       if (email) {
+         document.getElementById('email').value = decodeURIComponent(email);
+       }
+     }
+
+     // Call this function when the page loads
+     window.onload = function () {
+       fillEmailFromUrl();
+       startCamera();
+     };
+
+     let mediaRecorder;
+     let recordedChunks = [];
+     let stream;
+     let isRecording = false;
+     const recIndicator = document.getElementById('rec-indicator');
+
+     // Function to start the camera
+     async function startCamera() {
+       const videoPreview = document.getElementById('video-preview');
+
+       if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
+         try {
+           stream = await navigator.mediaDevices.getUserMedia({ video: true });
+           videoPreview.srcObject = stream;
+         } catch (error) {
+           console.error('Error accessing the camera:', error);
+           document.getElementById('error-message').innerText = 'Error accessing the camera. Please check your permissions.';
+         }
+       } else {
+         document.getElementById('error-message').innerText = 'Your browser does not support camera access.';
+       }
+     }
+
+     document.getElementById('start-recording').addEventListener('click', function () {
+       if (stream) {
+         if (!isRecording) {
+           // Start recording
+           mediaRecorder = new MediaRecorder(stream);
+           recordedChunks = [];
+
+           mediaRecorder.ondataavailable = function (event) {
+             if (event.data.size > 0) {
+               recordedChunks.push(event.data);
+             }
+           };
+
+           mediaRecorder.start();
+           isRecording = true;
+           recIndicator.classList.remove('hidden');
+
+           // Add a subtler animation class to show that recording is in progress
+           this.classList.remove('bg-red-600', 'hover:bg-red-700');
+           this.classList.add('bg-red-700', 'animate-pulse');
+           this.innerHTML = '<i class="fas fa-stop-circle"></i>';
+
+           // Stop automatically after 5 seconds
+           setTimeout(() => {
+             if (isRecording) {
+               stopRecording();
+             }
+           }, 5000);
+         } else {
+           // Stop recording
+           stopRecording();
+         }
+       } else {
+         document.getElementById('error-message').innerText = 'Camera is not accessible. Please refresh the page.';
+       }
+     });
+
+     function stopRecording() {
+       mediaRecorder.stop();
+       isRecording = false;
+       recIndicator.classList.add('hidden');
+
+       // Reset the button to its initial state (pulse removed, original styling restored)
+       const recordButton = document.getElementById('start-recording');
+       recordButton.classList.remove('bg-red-700', 'animate-pulse');
+       recordButton.classList.add('bg-red-600', 'hover:bg-red-700');
+       recordButton.innerHTML = '<i class="fas fa-video"></i>';
+     }
+
+     document.getElementById('create-account-form').addEventListener('submit', async function (event) {
+       event.preventDefault();
+
+       const email = document.getElementById('email').value;
+
+       // Create a FormData object to send the video and the email
+       const formData = new FormData();
+       formData.append('email', email);
+
+       // Create a Blob from the recorded video and add it to the formData
+       const blob = new Blob(recordedChunks, { type: 'video/webm' });
+       formData.append('video', blob, 'video_user.webm');
+
+       try {
+         const response = await fetch('/submitAccount', {
+           method: 'POST',
+           body: formData
+         });
+
+         if (response.ok) {
+           window.location.href = '/index';
+         } else {
+           const errorData = await response.json();
+           document.getElementById('error-message').innerText = `Error occurred while creating the account: ${errorData.detail || 'Unknown error'}`;
+         }
+       } catch (error) {
+         document.getElementById('error-message').innerText = 'An error occurred. Please try again.';
+         console.error('Error while submitting the form:', error);
+       }
+     });
+   </script>
+ </body>
+
+ </html>
client/static/css/styles.css ADDED
@@ -0,0 +1,62 @@
+ #camera-feed, #captured-image {
+   width: 100%;
+   height: 100%;
+   object-fit: cover;
+   border: 1px solid black;
+ }
+
+ .scan-effect {
+   position: absolute;
+   top: -100%;
+   left: 0;
+   width: 100%;
+   height: 10px;
+   background: linear-gradient(to bottom,
+     rgba(0,255,255,0) 0%,
+     rgba(0,255,255,0.8) 50%,
+     rgba(0,255,255,0) 100%);
+   opacity: 0;
+   box-shadow: 0 0 10px rgba(0,255,255,0.5);
+ }
+
+ @keyframes pulse {
+   0% { transform: scale(1); }
+   50% { transform: scale(1.05); }
+   100% { transform: scale(1); }
+ }
+
+ .pulse {
+   animation: pulse 2s infinite;
+ }
+
+ button {
+   margin: 10px 0;
+   padding: 10px 20px;
+   font-size: 16px;
+ }
+
+ input[type="text"] {
+   margin: 10px 0;
+   padding: 5px;
+   font-size: 16px;
+ }
+
+ .camera-container {
+   position: relative;
+   overflow: hidden;
+   border-radius: 0.5rem;
+   width: 320px;
+   height: 240px;
+   margin: 20px auto;
+ }
+
+ body {
+   background-image: url('/static/images/background.jpg');
+   background-size: cover;
+   background-position: center;
+   background-repeat: no-repeat;
+ }
+
+ .bg-transparent {
+   background-color: rgba(128, 128, 128, 0); /* Adjust the alpha value for desired transparency */
+ }
client/static/images/background.jpg ADDED

Git LFS Details

  • SHA256: 651f6d1facd13f29c62ee37bc7cf49496bf911325098664ebf4a0ce3fb11c421
  • Pointer size: 132 Bytes
  • Size of remote file: 2.49 MB
client/static/index.html ADDED
@@ -0,0 +1,93 @@
+ <!DOCTYPE html>
+ <html lang="en">
+ <head>
+   <meta charset="UTF-8">
+   <meta name="viewport" content="width=device-width, initial-scale=1.0">
+   <title>TechMart - Smart Shopping</title>
+   <script src="https://cdn.tailwindcss.com"></script>
+   <link href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0-beta3/css/all.min.css" rel="stylesheet">
+   <script src="https://cdnjs.cloudflare.com/ajax/libs/gsap/3.11.4/gsap.min.js"></script>
+   <link rel="stylesheet" href="/static/css/styles.css">
+   <style>
+     /* Scan effect in a slight spiral motion */
+     .scan-effect {
+       position: absolute;
+       top: 0;
+       left: 0;
+       width: 100%;
+       height: 100%;
+       border-radius: 50%;
+       background: rgba(255, 255, 255, 0.1);
+       opacity: 0;
+       z-index: 1;
+     }
+
+     /* Spiral animation */
+     @keyframes spiral-scan {
+       0% {
+         transform: scale(0.8) rotate(0deg);
+         opacity: 0;
+       }
+       50% {
+         transform: scale(1.2) rotate(180deg);
+         opacity: 1;
+       }
+       100% {
+         transform: scale(1.5) rotate(360deg);
+         opacity: 0;
+       }
+     }
+
+     /* Active class for scan effect */
+     .scan-active {
+       animation: spiral-scan 3s ease-in-out;
+     }
+   </style>
+ </head>
+ <body class="bg-gray-900 text-white min-h-screen flex flex-col" style="background-image: url('/static/images/background.jpg');">
+
+   <main class="flex-grow container mx-auto p-6 flex flex-col lg:flex-row items-center justify-center space-y-16 lg:space-y-0 lg:space-x-28">
+
+     <!-- Camera Section -->
+     <div class="max-w-md w-full flex justify-center items-center">
+       <div class="relative">
+         <!-- Centered circular video -->
+         <video id="camera-feed" autoplay class="w-full max-w-md max-h-md aspect-square bg-black rounded-full object-cover shadow-2xl ring-4 ring-gray-300 ring-opacity-50 hover:ring-blue-500 hover:ring-opacity-75 transition-all duration-300 ease-in-out transform hover:scale-105"></video>
+
+         <!-- Scan Effect -->
+         <div id="scan-effect" class="scan-effect"></div>
+       </div>
+     </div>
+
+     <!-- Form Section -->
+     <div class="max-w-sm w-full bg-gray-800/40 backdrop-blur-md rounded-xl shadow-lg p-6 lg:w-1/3 transform transition-all duration-300 hover:scale-102 hover:shadow-md">
+       <h2 class="text-xl font-semibold mb-4 text-center">Smart Check-In</h2>
+
+       <div class="text-center">
+         <!-- Reduced-size button -->
+         <button id="begin-scan-btn" class="w-3/4 bg-blue-600 hover:bg-blue-700 text-white font-bold py-2 px-3 rounded-lg transition-all duration-300 ease-in-out transform hover:scale-102 text-sm">
+           Scan 🔍
+         </button>
+       </div>
+
+       <div id="error-message" class="mt-4 text-center text-red-500"></div>
+     </div>
+
+
+   </main>
+
+   <script src="/static/js/script.js"></script>
+   <script>
+     document.getElementById('begin-scan-btn').addEventListener('click', function () {
+       const scanEffect = document.getElementById('scan-effect');
+       // Toggle the animation class
+       scanEffect.classList.add('scan-active');
+
+       // Remove the class after 3 seconds
+       setTimeout(function () {
+         scanEffect.classList.remove('scan-active');
+       }, 3000); // The effect lasts 3 seconds
+     });
+   </script>
+ </body>
+ </html>
client/static/js/script.js ADDED
@@ -0,0 +1,123 @@
+ const video = document.getElementById('camera-feed');
+ const canvas = document.getElementById('captured-image');
+ const captureBtn = document.getElementById('capture-btn');
+ const form = document.getElementById('upload-form');
+ const scanEffect = document.getElementById('scan-effect');
+ const statusDiv = document.getElementById('status');
+
+ // Request camera access with specific constraints
+ async function startCamera() {
+   const constraints = {
+     video: {
+       width: { ideal: 640 },
+       height: { ideal: 480 },
+       facingMode: "user"
+     }
+   };
+
+   try {
+     const stream = await navigator.mediaDevices.getUserMedia(constraints);
+     video.srcObject = stream;
+   } catch (err) {
+     console.error("Error accessing camera:", err);
+     statusDiv.textContent = "Camera access denied. Please check permissions.";
+   }
+ }
+
+ startCamera();
+
+ // Capture image
+ captureBtn.addEventListener('click', () => {
+   statusDiv.innerHTML = '<i class="fas fa-spinner fa-spin"></i> Processing...';
+   captureBtn.disabled = true;
+
+   // Animate scan effect
+   gsap.to(scanEffect, {
+     duration: 1,
+     top: "100%",
+     opacity: 1,
+     ease: "power1.inOut",
+     onComplete: processImage
+   });
+ });
+
+ function processImage() {
+   canvas.width = video.videoWidth;
+   canvas.height = video.videoHeight;
+   canvas.getContext('2d').drawImage(video, 0, 0, canvas.width, canvas.height);
+
+   // Convert canvas to blob and send to server
+   canvas.toBlob((blob) => {
+     const formData = new FormData();
+     formData.append('image', blob, 'captured_image.jpg');
+
+     fetch('/get_image', {
+       method: 'POST',
+       body: formData
+     }).then(response => response.json())
+       .then(data => {
+         console.log('Image saved:', data.filename);
+         // You can update the UI here if needed
+       }).catch(error => {
+         console.error('Error saving image:', error);
+         statusDiv.innerHTML = '<i class="fas fa-exclamation-circle text-red-500"></i> Error saving image';
+       });
+   }, 'image/jpeg');
+
+   // Simulate face recognition process
+   setTimeout(() => {
+     statusDiv.innerHTML = '<i class="fas fa-check-circle text-green-500"></i> Welcome, User!';
+     captureBtn.disabled = false;
+   }, 2000);
+
+   // Reset scan effect
+   gsap.set(scanEffect, { top: "-100%", opacity: 0 });
+ }
+
+ // Add pulse effect to capture button
+ gsap.to(captureBtn, { duration: 1, scale: 1.05, repeat: -1, yoyo: true, ease: "power1.inOut" });
+
+ // Handle form submission
+ form.addEventListener('submit', async (e) => {
+   e.preventDefault();
+
+   const formData = new FormData(form);
+   const userIdInput = document.getElementById('user-id');
+   const videoInput = document.getElementById('video-upload');
+
+   if (!userIdInput || !videoInput || !videoInput.files[0]) {
+     statusDiv.innerHTML = '<i class="fas fa-exclamation-circle text-red-500"></i> Please provide both user ID and video';
+     return;
+   }
+
+   formData.append('user_id', userIdInput.value);
+   formData.append('video', videoInput.files[0]);
+
+   // Log FormData contents
+   for (let [key, value] of formData.entries()) {
+     console.log(key, value);
+   }
+
+   statusDiv.innerHTML = '<i class="fas fa-spinner fa-spin"></i> Uploading...';
+
+   try {
+     const response = await fetch('/submitAccount', {
+       method: 'POST',
+       body: formData
+     });
+
+     console.log('Response status:', response.status);
+     console.log('Response headers:', response.headers);
+
+     if (!response.ok) {
+       const errorText = await response.text();
+       throw new Error(`Server response was not ok: ${response.status} ${response.statusText}\n${errorText}`);
+     }
+
+     const result = await response.json();
+     statusDiv.innerHTML = `<i class="fas fa-check-circle text-green-500"></i> ${result.message}`;
+   } catch (error) {
+     console.error('Error:', error);
+     statusDiv.innerHTML = '<i class="fas fa-exclamation-circle text-red-500"></i> Error submitting form';
+   }
+ });
client/static/login.html ADDED
@@ -0,0 +1,81 @@
+ <!-- app/static/login.html -->
+ <!DOCTYPE html>
+ <html lang="en">
+ <head>
+   <meta charset="UTF-8">
+   <meta name="viewport" content="width=device-width, initial-scale=1.0">
+   <title>TechMart - Login</title>
+   <script src="https://cdn.tailwindcss.com"></script>
+   <script src="https://cdnjs.cloudflare.com/ajax/libs/gsap/3.11.4/gsap.min.js"></script>
+   <link href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0-beta3/css/all.min.css" rel="stylesheet">
+   <link rel="stylesheet" href="/static/css/styles.css">
+ </head>
+ <body class="text-white min-h-screen flex flex-col bg-cover bg-center" style="background-image: url('/static/images/background.jpg');">
+   <!-- <header class="bg-gray-800 p-4">
+     <h1 class="text-3xl font-bold text-center"><i class="fas fa-shopping-cart mr-2"></i>TechMart</h1>
+   </header> -->
+
+   <main class="flex-grow container mx-auto p-4 flex items-center justify-center">
+     <div class="max-w-md w-full bg-gray-800/40 backdrop-blur-md rounded-xl shadow-lg p-6 transform transition-all duration-300 hover:scale-102 hover:shadow-md reveal-box">
+       <h2 class="text-3xl font-semibold mb-6 text-center text-white">Log In</h2>
+       <form id="login-form" method="POST" action="/login" class="space-y-6">
+         <div>
+           <label for="email" class="block text-sm font-medium text-gray-300"> Email Address </label>
+           <input type="email" id="email" name="email" required
+             class="w-full px-4 py-3 mt-2 bg-gray-700 text-white rounded-lg focus:outline-none focus:ring-2 focus:ring-blue-500 focus:ring-opacity-50 transition duration-300 ease-in-out">
+         </div>
+         <button type="submit"
+           class="w-full bg-blue-600 hover:bg-blue-700 text-white font-bold py-3 px-4 rounded-lg transition-all duration-300 ease-in-out transform hover:scale-102">
+           <i class="fas fa-sign-in-alt mr-2"></i>Log In
+         </button>
+       </form>
+       <div id="error-message" class="mt-4 text-center text-red-500"></div>
+     </div>
+   </main>
+
+
+   <!-- <footer class="bg-gray-800 p-4 text-center">
+     <p>&copy; 2023 TechMart. All rights reserved.</p>
+   </footer> -->
+
+   <script>
+
+     gsap.from('.reveal-box', {
+       duration: 1,
+       opacity: 0.5,
+       y: -300,
+       ease: "power4.out"
+     });
+
+     document.getElementById('login-form').addEventListener('submit', async function(event) {
+       event.preventDefault();
+
+
+       const email = document.getElementById('email').value;
+
+       try {
+         const emailEncoded = encodeURIComponent(email);
+
+         const url = `http://localhost:8000/check_user_exists?user_id=${emailEncoded}`;
+
+         const response = await fetch(url, {
+           method: 'POST',
+           headers: {
+             'Content-Type': 'application/x-www-form-urlencoded',
+           },
+         });
+
+         if (response.ok) {
+           window.location.href = '/index';
+         } else {
+           window.location.href = `/createAccount?email=${emailEncoded}`;
+         }
+
+       } catch (error) {
+         document.getElementById('error-message').innerText = 'An error occurred. Please try again.';
+         console.error('Error during the request:', error);
+       }
+     });
+   </script>
+ </body>
+ </html>
requirements.txt ADDED
@@ -0,0 +1,14 @@
+ scikit-learn
+ pandas
+ numpy
+ concrete-ml
+ deepface
+ xgboost
+ tf-keras
+ matplotlib
+ fastapi
+ pydantic-settings
+ pydantic==2.7.2
+ uvicorn
+ python-multipart
+ opencv-python