# Read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
# You will also find guides on how best to write your Dockerfile.
FROM nvidia/cuda:12.1.0-devel-ubuntu22.04

# Set compute capabilities for nerfacc and tiny-cuda-nn.
# See https://developer.nvidia.com/cuda-gpus and limit the list to speed up the build.
ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0 7.5 8.0 8.6 8.9 9.0+PTX"
ENV TCNN_CUDA_ARCHITECTURES=90;89;86;80;75;70;61;60
# Speed up the build for RTX 30xx
# ENV TORCH_CUDA_ARCH_LIST="8.6"
# ENV TCNN_CUDA_ARCHITECTURES=86
# Speed up the build for RTX 40xx
# ENV TORCH_CUDA_ARCH_LIST="8.9"
# ENV TCNN_CUDA_ARCHITECTURES=89

# apt install as root user
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
    build-essential \
    curl \
    cmake \
    git \
    git-lfs \
    ffmpeg \
    libegl1-mesa-dev \
    libgl1-mesa-dev \
    libgles2-mesa-dev \
    libglib2.0-0 \
    libgl1-mesa-glx \
    libsm6 \
    libxext6 \
    libxrender1 \
    python-is-python3 \
    python3.10-dev \
    python3-pip \
    rsync \
    wget \
    && rm -rf /var/lib/apt/lists/*

# Run as a non-root user, as recommended for Docker Spaces
RUN useradd -m -u 1000 user
USER user

ENV HOME=/home/user
ENV CUDA_HOME=/usr/local/cuda
ENV PATH=${CUDA_HOME}/bin:/home/user/.local/bin:${PATH}
ENV LD_LIBRARY_PATH=${CUDA_HOME}/lib64:${LD_LIBRARY_PATH}
ENV LIBRARY_PATH=${CUDA_HOME}/lib64/stubs:${LIBRARY_PATH}

WORKDIR $HOME/app

# Install PyTorch built against CUDA 12.1 to match the base image
RUN pip install torch==2.2.1 torchvision==0.17.1 torchaudio==2.2.1 --index-url https://download.pytorch.org/whl/cu121
RUN pip install --no-cache-dir datasets "huggingface-hub>=0.19" "hf-transfer>=0.1.4" "protobuf<4" "click<8.1" "pydantic~=1.0"
RUN pip install --no-cache-dir "gradio[oauth]==4.44.1" "uvicorn>=0.14.0" spaces

# Install app-specific requirements in a separate layer so they are cached
COPY --chown=user ./requirements.txt requirements.txt
RUN pip install --no-cache-dir --upgrade -r requirements.txt

COPY --chown=user . $HOME/app

CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
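
# Note: the CMD above assumes an `app.py` at the repository root that exposes an
# ASGI object named `app` (for example, a FastAPI app with the Gradio UI mounted
# on it). A minimal hypothetical sketch of such an app.py, assuming Gradio's
# mount_gradio_app helper; the echo interface is only a placeholder:
#
#   import gradio as gr
#   from fastapi import FastAPI
#
#   app = FastAPI()
#   demo = gr.Interface(fn=lambda text: text, inputs="text", outputs="text")
#   app = gr.mount_gradio_app(app, demo, path="/")
#
# Port 7860 is the default port exposed by Hugging Face Spaces.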