# dubswayAgenticV2 / Dockerfile
# Use a lightweight Python 3.10 base image
FROM python:3.10-slim
# Set working directory
WORKDIR /app
# Set environment variables for better performance
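# - PYTHONUNBUFFERED=1 streams Python output straight to the container logs (no buffering)
# - PYTHONDONTWRITEBYTECODE=1 skips writing .pyc files, keeping the image slightly smaller
# - DEBIAN_FRONTEND=noninteractive suppresses interactive prompts during apt-get installs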
ENV PYTHONUNBUFFERED=1 \
    PYTHONDONTWRITEBYTECODE=1 \
    DEBIAN_FRONTEND=noninteractive
# Install system-level dependencies
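# ffmpeg and libsndfile1 are presumably needed for audio decoding by the app's
# ML dependencies (an assumption based on typical audio pipelines); build-essential
# lets pip compile any packages that ship C extensions.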
RUN apt-get update && \
    apt-get install -y \
        ffmpeg \
        libsndfile1 \
        wget \
        curl \
        git \
        build-essential \
    && rm -rf /var/lib/apt/lists/*
# Create non-root user to avoid git permission issues
RUN useradd -m appuser && chown -R appuser /app
# Set HOME explicitly to match the non-root user
ENV HOME=/home/appuser
# Switch to appuser for git config
USER appuser
# Configure git to avoid permission issues during HF build
RUN git config --global --add safe.directory /app && \
    git config --global user.email "[email protected]" && \
    git config --global user.name "peace2024"
# Switch back to root to install dependencies
USER root
# Copy requirements first to leverage Docker layer caching
COPY requirements-hf.txt .
# Upgrade pip and install Python dependencies
RUN pip install --no-cache-dir --upgrade pip && \
    pip install --no-cache-dir -r requirements-hf.txt
# Copy the entire app source code
COPY . .
# Create necessary directories
RUN mkdir -p vector_store logs
# Expose port 7860 (used by Hugging Face Spaces)
EXPOSE 7860
# Install CPU-only PyTorch wheels (the index URL below serves CPU builds, not CUDA)
RUN pip install --no-cache-dir torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
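# For a GPU-enabled image you would typically swap the index URL for a CUDA wheel
# index instead, e.g. (illustrative only, not used by this Space):
#   pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu121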
# Health check: probe the FastAPI docs endpoint to confirm the server is responding
HEALTHCHECK --interval=30s --timeout=30s --start-period=5s --retries=3 \
    CMD curl -f http://localhost:7860/docs || exit 1
# Run the FastAPI app via Uvicorn
CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "7860", "--workers", "1"]