# Use Ubuntu as the base image
FROM ubuntu:22.04

# Set the working directory in the container
WORKDIR /app

# Install system dependencies and Python
RUN apt-get update && apt-get install -y \
    python3 \
    python3-pip \
    curl \
    git \
    build-essential \
    cmake \
    ninja-build \
    wget \
    && rm -rf /var/lib/apt/lists/*

# Make `python` point to Python 3
RUN ln -sf /usr/bin/python3 /usr/bin/python

# Copy the requirements file and install dependencies
COPY requirements.txt ./
RUN pip install --no-cache-dir -r requirements.txt
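# (requirements.txt is assumed to list fastapi and uvicorn, which the CMD below relies on)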

# Install llama.cpp using CMake
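# Note: -DLLAMA_BUILD_SERVER=ON is passed below on the assumption that the checked-out
# llama.cpp revision gates the HTTP server example behind that CMake option; drop or
# rename the flag if the revision you build handles it differently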
RUN git clone https://github.com/ggerganov/llama.cpp.git /app/llama.cpp && \
    cd /app/llama.cpp && \
    mkdir build && cd build && \
    cmake .. -G Ninja -DLLAMA_BUILD_SERVER=ON && ninja install

# Ensure llama.cpp binaries are in the system path
ENV PATH="/usr/local/bin:$PATH"

# Copy the Llama model into the Docker image
COPY Meta-Llama-3-8B-Instruct.Q4_0.gguf /app/
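# The GGUF model file is assumed to be present in the build context; baking it into the
# image creates a very large layer, so mounting it at runtime is a reasonable alternative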

# Copy the application files
COPY . .
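# (a .dockerignore entry for the model file is assumed here, otherwise this copy
# duplicates the multi-gigabyte GGUF in a second layer)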

# Expose the FastAPI default port
EXPOSE 8000

# Start llama.cpp server, then start FastAPI
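# Assumes the installed server binary is /usr/local/bin/server (newer llama.cpp revisions
# install it as llama-server) and that main.py reaches it on the server's default
# address, 127.0.0.1:8080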
CMD ["sh", "-c", "/usr/local/bin/server -m /app/Meta-Llama-3-8B-Instruct.Q4_0.gguf & sleep 5 && uvicorn main:app --host 0.0.0.0 --port 8000"]