File size: 2,834 Bytes
7b2eca8
 
 
ee03864
 
 
9acc34b
 
 
7b2eca8
 
 
1017099
 
ac9c6c1
 
7b2eca8
 
 
bbf7f29
 
86103f1
7d83d86
79a071f
e497915
 
79a071f
e497915
 
79a071f
e497915
 
 
79a071f
b48ecb9
 
 
 
 
 
 
 
 
 
 
54c9e50
e3a01f4
 
 
 
 
54c9e50
e3a01f4
dddd691
0d739fa
 
5ab0e19
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
# syntax=docker/dockerfile:1
# 🐍 Use official Python
FROM python:3.11-slim

# Hugging Face caches must point somewhere writable at runtime.
# TRANSFORMERS_CACHE is deprecated in favor of HF_HOME in newer
# transformers releases, but is kept for backward compatibility.
ENV TRANSFORMERS_CACHE=/tmp \
    HF_HOME=/tmp

# All OS packages in ONE layer: wget (checkpoint downloads), curl
# (healthcheck), git + git-lfs (cloning the LFS-backed qwen repo).
# --no-install-recommends and the apt-list cleanup keep the layer small;
# combining update+install avoids the stale-apt-cache pitfall.
RUN apt-get update && apt-get install -y --no-install-recommends \
        curl \
        git \
        git-lfs \
        wget \
    && git lfs install \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app

# Copy only the dependency manifest first so the (slow) pip layer is
# cached until requirements.txt itself changes, not on every code edit.
COPY requirements.txt ./
RUN pip install --no-cache-dir --upgrade pip && \
    pip install --no-cache-dir -r requirements.txt

# Now bring in the application source.
COPY . .

# Fine-tuned CLIP encoder checkpoint, fetched at build time.
RUN mkdir -p vit_captioning/artifacts && \
    wget https://huggingface.co/datasets/ClemSummer/clip-checkpoints/resolve/main/CLIPEncoder_40epochs_unfreeze12.pth \
    -O vit_captioning/artifacts/CLIPEncoder_40epochs_unfreeze12.pth

# Pre-download HF models into the image so the container needs no
# network access (and no per-start download) at runtime.
RUN mkdir -p /models/clip && \
    python3 -c "from transformers import CLIPModel; CLIPModel.from_pretrained('openai/clip-vit-base-patch32').save_pretrained('/models/clip')"
RUN python3 -c "from transformers import AutoTokenizer; AutoTokenizer.from_pretrained('bert-base-uncased').save_pretrained('/models/bert-tokenizer')"
RUN python3 -c "from transformers import CLIPProcessor; CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32').save_pretrained('/models/clip')"

# CBOW word vectors: fetched pre-built from a HF dataset repo instead of
# rebuilding via gensim (original approach kept below for reference).
# RUN mkdir -p /models/cbow && \
#     python3 -c "import gensim.downloader as api; model = api.load('glove-twitter-200'); model.save('/models/cbow_model.kv')"
RUN mkdir -p /models/cbow && \
    wget https://huggingface.co/datasets/ClemSummer/cbow-model-cache/resolve/main/cbow_model.kv -O /models/cbow/cbow_model.kv && \
    wget https://huggingface.co/datasets/ClemSummer/cbow-model-cache/resolve/main/cbow_model.kv.vectors.npy -O /models/cbow/cbow_model.kv.vectors.npy

# Earlier qwen download attempts, kept for reference:
# RUN mkdir -p /models/qwen && \
#     python3 -c "from transformers import AutoTokenizer; AutoTokenizer.from_pretrained('Qwen/Qwen3-0.6B-Base').save_pretrained('/models/qwen')"
# RUN python3 -c "from transformers import AutoModelForCausalLM; AutoModelForCausalLM.from_pretrained('Qwen/Qwen3-0.6B-Base').save_pretrained('/models/qwen')"
# RUN mkdir -p /models/qwen && \
#     python3 -c "from transformers import AutoTokenizer; \
#                 AutoTokenizer.from_pretrained('ClemSummer/qwen-model-cache', trust_remote_code=True).save_pretrained('/models/qwen')"
# RUN python3 -c "from transformers import AutoModelForCausalLM; \
#                 AutoModelForCausalLM.from_pretrained('ClemSummer/qwen-model-cache', trust_remote_code=True).save_pretrained('/models/qwen')"

# Clone the LFS-backed qwen model dataset repo into /models/qwen
# (git + git-lfs were installed in the single apt layer above).
RUN git clone https://huggingface.co/datasets/ClemSummer/qwen-model-cache /models/qwen

# Documentation only: the app serves on 7860 (HF Spaces convention).
EXPOSE 7860

# Mark the container unhealthy if the app stops answering on its port.
HEALTHCHECK CMD curl --fail http://localhost:7860/ || exit 1

#CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860", "--log-level", "debug"]

# some change to trigger rebuild 2