# Use a lightweight, Debian-based Python image to keep the final image small
FROM python:3.10-slim

# Set working directory
WORKDIR /app

# Environment: stream logs unbuffered, skip .pyc files, silence apt prompts
ENV PYTHONUNBUFFERED=1 \
    PYTHONDONTWRITEBYTECODE=1 \
    DEBIAN_FRONTEND=noninteractive

# Install system-level dependencies
RUN apt-get update && \
    apt-get install -y \
    ffmpeg \
    libsndfile1 \
    wget \
    curl \
    git \
    build-essential \
    && rm -rf /var/lib/apt/lists/*
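
# Note: ffmpeg and libsndfile1 are presumably here for audio decoding/encoding
# (e.g. for soundfile/librosa-style loaders), and build-essential lets pip
# compile any dependencies that ship without prebuilt wheels.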

# Create non-root user to avoid git permission issues
RUN useradd -m appuser && chown -R appuser /app

# Set HOME explicitly to match the non-root user
ENV HOME=/home/appuser

# Switch to appuser for git config
USER appuser

# Configure git to avoid permission issues during HF build
RUN git config --global --add safe.directory /app && \
    git config --global user.email "[email protected]" && \
    git config --global user.name "peace2024"
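
# Note: marking /app as a safe.directory stops git from refusing to operate on
# a repository whose on-disk owner differs from the current user, which can
# happen during the Hugging Face Space build.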

# Switch back to root to install dependencies
USER root

# Copy requirements first to leverage Docker layer caching
COPY requirements-hf.txt .

# Upgrade pip and install Python dependencies
RUN pip install --no-cache-dir --upgrade pip && \
    pip install --no-cache-dir -r requirements-hf.txt

# Copy the entire app source code
COPY . .

# Create necessary directories
RUN mkdir -p vector_store logs
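
# Note: these directories are created while running as root; if the final USER
# is ever switched back to appuser, they will need a chown so the app can write
# to them.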

# Expose port 7860 (used by Hugging Face Spaces)
EXPOSE 7860

# Install CPU-only PyTorch wheels (the CPU index keeps the image small)
RUN pip install --no-cache-dir torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
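
# Assumption: this Space runs on CPU hardware. If a GPU is attached, the wheels
# can be swapped for a CUDA build by changing the index URL, e.g. (illustrative
# CUDA version only, check the PyTorch install matrix before using):
# RUN pip install --no-cache-dir torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu121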

# Health check
HEALTHCHECK --interval=30s --timeout=30s --start-period=5s --retries=3 \
    CMD curl -f http://localhost:7860/docs || exit 1
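
# Note: /docs is FastAPI's auto-generated Swagger UI, so a 200 from it is a
# cheap liveness signal (assumes interactive docs are not disabled in app.main).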

# Run the FastAPI app via Uvicorn
CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "7860", "--workers", "1"]