aniudupa committed on
Commit
d01f0ec
·
verified ·
1 Parent(s): 458d029

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +31 -33
Dockerfile CHANGED
@@ -1,40 +1,38 @@
1
- # Use an image with Python and build tools pre-installed
2
- FROM python:3.10-slim
3
-
4
- # Set environment variables
5
- ENV DEBIAN_FRONTEND=noninteractive
6
-
7
- # Install system dependencies for dlib and face_recognition
8
- RUN apt-get update && \
9
- apt-get install -y \
10
- build-essential \
11
- cmake \
12
- gfortran \
13
- libopenblas-dev \
14
- liblapack-dev \
15
- libx11-dev \
16
- libgtk-3-dev \
17
- libboost-python-dev \
18
- libboost-thread-dev \
19
- libboost-system-dev \
20
- python3-dev \
21
- wget \
22
- && rm -rf /var/lib/apt/lists/*
23
-
24
- # Set working directory
25
  WORKDIR /app
26
 
27
- # Copy only requirements first to cache dependencies
28
- COPY requirements.txt .
 
 
 
 
 
 
 
 
 
 
 
29
 
30
- # Install Python dependencies (includes face_recognition and FastAPI)
31
- RUN pip install --upgrade pip && pip install --no-cache-dir -r requirements.txt
32
 
33
- # Copy the entire app
34
- COPY . .
 
35
 
36
- # Expose the app port
37
  EXPOSE 7860
38
 
39
- # Run the FastAPI app using uvicorn
40
- CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
 
1
# syntax=docker/dockerfile:1

# Base image: slim CPU-only Python. No CUDA runtime is present in this image.
FROM python:3.9-slim

# Cache locations for Hugging Face / torch downloads, kept under /app so the
# model pre-downloaded below ships inside the image (grouped in one ENV layer).
ENV HF_HOME=/app/.cache/huggingface \
    TRANSFORMERS_CACHE=/app/.cache/huggingface/transformers \
    SENTENCE_TRANSFORMERS_HOME=/app/.cache/sentence_transformers \
    TORCH_HOME=/app/.cache/torch

WORKDIR /app

# Copy only the dependency manifest first so the pip layer stays cached
# until requirements.txt itself changes.
COPY requirements.txt ./

# Install Python dependencies (no pip cache left in the layer).
RUN pip install --no-cache-dir -r requirements.txt

# FIX: this image has no CUDA runtime, so the faiss-gpu wheel cannot be used
# at runtime; install the CPU build instead.
RUN pip install --no-cache-dir faiss-cpu

# Pre-download the embedding model at build time so the container starts
# without network access. Note: cache_dir=None means "use the default cache"
# (HF_HOME above) — it does NOT disable caching; the download landing in
# /app/.cache is exactly what makes this bake into the image.
RUN python -c "from transformers import AutoModel, AutoTokenizer; \
    model = AutoModel.from_pretrained('nomic-ai/nomic-embed-text-v1', trust_remote_code=True, cache_dir=None); \
    tokenizer = AutoTokenizer.from_pretrained('nomic-ai/nomic-embed-text-v1', trust_remote_code=True, cache_dir=None)"

# Copy the application code last so code edits do not invalidate the
# dependency/model layers above.
COPY . /app

# Hugging Face Spaces runs the container as an arbitrary non-root UID, so the
# app dir and caches must be writable by that user. One chmod suffices —
# /app already contains /app/.cache, so the second chmod was redundant.
# NOTE(review): 777 is broad; narrow to the specific writable paths
# (e.g. only /app/.cache) if the app permits it.
RUN chmod -R 777 /app

# Documentation only (does not publish): FastAPI listens on 7860, the
# Hugging Face Spaces default.
EXPOSE 7860

# Exec-form CMD so uvicorn runs as PID 1 and receives SIGTERM on stop.
# FIX: dropped --reload — it is a development-only flag that spawns a file
# watcher and wastes resources in a deployed image.
CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "7860"]