# syntax=docker/dockerfile:1
FROM python:3.12.5

# Fail pipelines on the first error — otherwise the `curl | sh` below would
# succeed even if curl itself failed (hadolint DL4006).
SHELL ["/bin/bash", "-o", "pipefail", "-c"]

# Install Ollama as root.
# NOTE(review): this pipes an unpinned remote script straight into sh —
# consider pinning a release tarball and verifying its checksum instead.
RUN curl -fsSL https://ollama.com/install.sh | sh

# Create the non-root runtime user before pulling the model so the model
# store can be placed in (and owned by) its home directory.
RUN useradd -m -u 1000 user

# Pin Ollama's model store to a fixed, user-owned path. Without this, the
# build-time pull (run as root) would land in /root/.ollama while the runtime
# process (run as `user`) would look in /home/user/.ollama and never find it.
ENV OLLAMA_MODELS=/home/user/.ollama/models

# Start Ollama temporarily and pull the model at build time, then make sure
# everything it wrote is owned by the runtime user.
RUN mkdir -p /home/user/.ollama/models \
 && (ollama serve & sleep 5 && ollama pull llama3:8b) \
 && chown -R user:user /home/user/.ollama

# Switch to the non-root user for everything below (pip install + runtime).
USER user
ENV PATH="/home/user/.local/bin:$PATH"

# Set working directory (created automatically if missing).
WORKDIR /app

# Copy the dependency manifest alone first so the pip layer stays cached
# when only application source changes.
COPY --chown=user ./requirements.txt requirements.txt
RUN pip install --no-cache-dir --upgrade -r requirements.txt

# Copy application code.
COPY --chown=user . /app

# Documentation only — FastAPI listens here; publish with -p at run time.
EXPOSE 7860

# Start Ollama in the background, then exec uvicorn in the foreground so it
# becomes PID 1 and receives SIGTERM from `docker stop`. (The original CMD
# claimed to start both but only ever launched uvicorn.)
CMD ["sh", "-c", "ollama serve & exec uvicorn app:app --host 0.0.0.0 --port 7860"]