# syntax=docker/dockerfile:1
# Initialize device type args
# use build args in the docker build command with --build-arg="BUILDARG=true"
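# For illustration only, a CUDA-enabled build might be invoked like the line below
# (the image tag "open-webui" is just a placeholder, not something this file defines):
#   docker build --build-arg USE_CUDA=true -t open-webui .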
ARG USE_CUDA=false
ARG USE_OLLAMA=false
# Tested with cu117 for CUDA 11 and cu121 for CUDA 12 (default)
ARG USE_CUDA_VER=cu121
# any sentence transformer model; models to use can be found at https://huggingface.co/models?library=sentence-transformers
# Leaderboard: https://huggingface.co/spaces/mteb/leaderboard 
# for better performance and multilanguage support use "intfloat/multilingual-e5-large" (~2.5GB) or "intfloat/multilingual-e5-base" (~1.5GB)
# IMPORTANT: If you change the default embedding model (sentence-transformers/all-MiniLM-L6-v2), you won't be able to use RAG Chat with documents previously loaded in the WebUI! You need to re-embed them.
ARG USE_EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2
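# As a sketch, selecting one of the multilingual models mentioned above is just another
# build arg (the image tag is again a placeholder):
#   docker build --build-arg USE_EMBEDDING_MODEL=intfloat/multilingual-e5-base -t open-webui .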

######## WebUI frontend ########
FROM --platform=$BUILDPLATFORM node:21-alpine3.19 as build

WORKDIR /app

COPY package.json package-lock.json ./
RUN npm ci

COPY . .
RUN npm run build

######## WebUI backend ########
FROM python:3.11-slim-bookworm as base

# Use args
ARG USE_CUDA
ARG USE_OLLAMA
ARG USE_CUDA_VER
ARG USE_EMBEDDING_MODEL

## Basis ##
ENV ENV=prod \
    PORT=8080 \
    # pass build args to the build
    USE_OLLAMA_DOCKER=${USE_OLLAMA} \
    USE_CUDA_DOCKER=${USE_CUDA} \
    USE_CUDA_DOCKER_VER=${USE_CUDA_VER} \
    USE_EMBEDDING_MODEL_DOCKER=${USE_EMBEDDING_MODEL}

## Basis URL Config ##
ENV OLLAMA_BASE_URL="/ollama" \
    OPENAI_API_BASE_URL=""
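# These are ordinary ENV defaults, so they can be overridden at container start with -e;
# the URL below is only illustrative, assuming an Ollama instance reachable from the container:
#   docker run -e OLLAMA_BASE_URL="http://host.docker.internal:11434" <image>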

## API Key and Security Config ##
ENV OPENAI_API_KEY="" \
    WEBUI_SECRET_KEY="" \
    SCARF_NO_ANALYTICS=true \
    DO_NOT_TRACK=true

# Use locally bundled version of the LiteLLM cost map json
# to avoid repetitive startup connections
ENV LITELLM_LOCAL_MODEL_COST_MAP="True"


#### Other models #########################################################
## whisper speech-to-text (STT) model settings ##
ENV WHISPER_MODEL="base" \
    WHISPER_MODEL_DIR="/app/backend/data/cache/whisper/models"
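# WHISPER_MODEL is passed to faster-whisper's WhisperModel in the build step below, so other
# size names such as "small" or "medium" should also be valid; overriding it at runtime is a
# sketch that assumes the backend reads the same variable:
#   docker run -e WHISPER_MODEL="small" <image>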

## RAG Embedding model settings ##
ENV RAG_EMBEDDING_MODEL="$USE_EMBEDDING_MODEL_DOCKER" \
    RAG_EMBEDDING_MODEL_DIR="/app/backend/data/cache/embedding/models" \
    SENTENCE_TRANSFORMERS_HOME="/app/backend/data/cache/embedding/models"
#### Other models ##########################################################

WORKDIR /app/backend

RUN if [ "$USE_OLLAMA" = "true" ]; then \
        apt-get update && \
        # Install pandoc and netcat
        apt-get install -y --no-install-recommends pandoc netcat-openbsd && \
        # for RAG OCR
        apt-get install -y --no-install-recommends ffmpeg libsm6 libxext6 && \
        # install helper tools
        apt-get install -y --no-install-recommends curl && \
        # install ollama
        curl -fsSL https://ollama.com/install.sh | sh && \
        # cleanup
        rm -rf /var/lib/apt/lists/*; \
    else \
        apt-get update && \
        # Install pandoc and netcat
        apt-get install -y --no-install-recommends pandoc netcat-openbsd && \
        # for RAG OCR
        apt-get install -y --no-install-recommends ffmpeg libsm6 libxext6 && \
        # cleanup
        rm -rf /var/lib/apt/lists/*; \
    fi

# install python dependencies
COPY ./backend/requirements.txt ./requirements.txt

RUN pip3 install uv && \
    if [ "$USE_CUDA" = "true" ]; then \
        # If you use CUDA, the whisper and embedding models will be downloaded on first use
        pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/$USE_CUDA_DOCKER_VER --no-cache-dir && \
        uv pip install --system -r requirements.txt --no-cache-dir && \
        python -c "import os; from sentence_transformers import SentenceTransformer; SentenceTransformer(os.environ['RAG_EMBEDDING_MODEL'], device='cpu')" && \
        python -c "import os; from faster_whisper import WhisperModel; WhisperModel(os.environ['WHISPER_MODEL'], device='cpu', compute_type='int8', download_root=os.environ['WHISPER_MODEL_DIR'])"; \
    else \
        pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu --no-cache-dir && \
        uv pip install --system -r requirements.txt --no-cache-dir && \
        python -c "import os; from sentence_transformers import SentenceTransformer; SentenceTransformer(os.environ['RAG_EMBEDDING_MODEL'], device='cpu')" && \
        python -c "import os; from faster_whisper import WhisperModel; WhisperModel(os.environ['WHISPER_MODEL'], device='cpu', compute_type='int8', download_root=os.environ['WHISPER_MODEL_DIR'])"; \
    fi



# copy embedding weight from build
# RUN mkdir -p /root/.cache/chroma/onnx_models/all-MiniLM-L6-v2
# COPY --from=build /app/onnx /root/.cache/chroma/onnx_models/all-MiniLM-L6-v2/onnx

# copy built frontend files
COPY --from=build /app/build /app/build
COPY --from=build /app/CHANGELOG.md /app/CHANGELOG.md
COPY --from=build /app/package.json /app/package.json

# copy backend files
COPY ./backend .

EXPOSE 8080

CMD [ "bash", "start.sh"]