# syntax=docker/dockerfile:1

# Initialize device type args (re-declared and consumed inside the backend stage)
ARG USE_CUDA=false
ARG USE_MPS=false

######## WebUI frontend ########
FROM node:21-alpine3.19 AS build

WORKDIR /app

#RUN apt-get update \
#    && apt-get install -y --no-install-recommends wget \
#    # cleanup
#    && rm -rf /var/lib/apt/lists/*

# wget embedding model weight from alpine (does not exist from slim-buster)
#RUN wget "https://chroma-onnx-models.s3.amazonaws.com/all-MiniLM-L6-v2/onnx.tar.gz" -O - | \
#    tar -xzf - -C /app

# Copy only the lockfiles first so the `npm ci` layer stays cached
# until dependencies actually change.
COPY package.json package-lock.json ./
RUN npm ci

# Copy the remaining source and produce the static frontend build in /app/build
COPY . .
RUN npm run build
######## WebUI backend ########
FROM python:3.11-slim-bookworm AS base

# Use args — ARGs declared before the first FROM are only visible in FROM
# lines, so they must be re-declared inside this stage to be usable below.
ARG USE_CUDA
ARG USE_MPS

## Basis ##
ENV ENV=prod \
    PORT=8080

## Basis URL Config ##
ENV OLLAMA_BASE_URL="/ollama" \
    OPENAI_API_BASE_URL=""

## API Key and Security Config ##
# Keys default to empty on purpose: real values are supplied at `docker run`
# time — never bake secrets into the image via ENV/ARG.
ENV OPENAI_API_KEY="" \
    WEBUI_SECRET_KEY="" \
    SCARF_NO_ANALYTICS=true \
    DO_NOT_TRACK=true
#### Preloaded models #########################################################

## whisper TTS Settings ##
ENV WHISPER_MODEL="base" \
    WHISPER_MODEL_DIR="/app/backend/data/cache/whisper/models"

## RAG Embedding Model Settings ##
# any sentence transformer model; models to use can be found at https://huggingface.co/models?library=sentence-transformers
# Leaderboard: https://huggingface.co/spaces/mteb/leaderboard
# for better performance and multilingual support use "intfloat/multilingual-e5-large" (~2.5GB) or "intfloat/multilingual-e5-base" (~1.5GB)
# IMPORTANT: If you change the default model (all-MiniLM-L6-v2) and vice versa, you aren't able to use RAG Chat with your previous documents loaded in the WebUI! You need to re-embed them.
ENV RAG_EMBEDDING_MODEL="all-MiniLM-L6-v2" \
    RAG_EMBEDDING_MODEL_DIR="/app/backend/data/cache/embedding/models" \
    SENTENCE_TRANSFORMERS_HOME="/app/backend/data/cache/embedding/models" \
    # device type for whisper tts and embedding models - "cpu" (default), "cuda" (nvidia gpu and CUDA required) or "mps" (apple silicon) - choosing this right can lead to better performance
    # Important:
    #  If you want to use CUDA you need to install the nvidia-container-toolkit (https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html)
    #  you can set this to "cuda", but it's recommended to use the --build-arg USE_CUDA=true flag when building the image
    RAG_EMBEDDING_MODEL_DEVICE_TYPE="cpu" \
    DEVICE_COMPUTE_TYPE="int8"

#### Preloaded models ##########################################################

WORKDIR /app/backend

# install python dependencies
COPY ./backend/requirements.txt ./requirements.txt

# Install torch from the wheel index matching the selected device, then the app
# requirements, and pre-download the whisper + embedding models into the image
# cache so first startup does not have to fetch them.
# NOTE: `export DEVICE_TYPE` only exists for the duration of this RUN layer
# (it is consumed by the preload `python -c` commands below); runtime device
# selection is controlled by RAG_EMBEDDING_MODEL_DEVICE_TYPE instead.
RUN if [ "$USE_CUDA" = "true" ]; then \
        # CUDA build: GPU wheels from the cu117 index. Models are NOT
        # preloaded in this branch — presumably because no GPU is available
        # at build time; they are downloaded on first start instead.
        export DEVICE_TYPE="cuda" && \
        pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu117 --no-cache-dir && \
        pip3 install -r requirements.txt --no-cache-dir; \
    elif [ "$USE_MPS" = "true" ]; then \
        # MPS build: CPU wheels (MPS acceleration only exists at container
        # runtime on Apple silicon). Whisper is preloaded on CPU; the
        # embedding preload passes DEVICE_TYPE through to the model loader.
        export DEVICE_TYPE="mps" && \
        pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu --no-cache-dir && \
        pip3 install -r requirements.txt --no-cache-dir && \
        python -c "import os; from faster_whisper import WhisperModel; WhisperModel(os.environ['WHISPER_MODEL'], device='cpu', compute_type='int8', download_root=os.environ['WHISPER_MODEL_DIR'])" && \
        python -c "import os; from chromadb.utils import embedding_functions; sentence_transformer_ef = embedding_functions.SentenceTransformerEmbeddingFunction(model_name=os.environ['RAG_EMBEDDING_MODEL'], device=os.environ['DEVICE_TYPE'])"; \
    else \
        # Default CPU build: CPU wheels and full model preload.
        export DEVICE_TYPE="cpu" && \
        pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu --no-cache-dir && \
        pip3 install -r requirements.txt --no-cache-dir && \
        python -c "import os; from faster_whisper import WhisperModel; WhisperModel(os.environ['WHISPER_MODEL'], device='cpu', compute_type='int8', download_root=os.environ['WHISPER_MODEL_DIR'])" && \
        python -c "import os; from chromadb.utils import embedding_functions; sentence_transformer_ef = embedding_functions.SentenceTransformerEmbeddingFunction(model_name=os.environ['RAG_EMBEDDING_MODEL'], device=os.environ['DEVICE_TYPE'])"; \
    fi

# install required OS packages — update + install + cleanup in one layer so
# the apt lists never persist in the image and the package index is never stale
RUN apt-get update \
    # Install pandoc and netcat
    && apt-get install -y --no-install-recommends pandoc netcat-openbsd \
    # for RAG OCR
    && apt-get install -y --no-install-recommends ffmpeg libsm6 libxext6 \
    # cleanup
    && rm -rf /var/lib/apt/lists/*

# copy embedding weight from build
# RUN mkdir -p /root/.cache/chroma/onnx_models/all-MiniLM-L6-v2
# COPY --from=build /app/onnx /root/.cache/chroma/onnx_models/all-MiniLM-L6-v2/onnx

# copy built frontend files from the node build stage
COPY --from=build /app/build /app/build
COPY --from=build /app/CHANGELOG.md /app/CHANGELOG.md
COPY --from=build /app/package.json /app/package.json

# copy backend files
COPY ./backend .

# Documentation only — the port must still be published at `docker run` time
EXPOSE 8080

# NOTE(review): the container runs as root (no USER directive); consider adding
# a non-root user if start.sh does not require root privileges — confirm first.
CMD [ "bash", "start.sh"]