"launch/dynamo-run/src/lib.rs" did not exist on "37a8ebaf7a8e9a3d503936cca34cb9546994015b"
Dockerfile.vllm 16.9 KB
Newer Older
1
2
3
# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0

ARG BASE_IMAGE="nvcr.io/nvidia/cuda-dl-base"
ARG BASE_IMAGE_TAG="25.03-cuda12.8-devel-ubuntu24.04"
ARG RELEASE_BUILD
ARG RUNTIME_IMAGE="nvcr.io/nvidia/cuda"
ARG RUNTIME_IMAGE_TAG="12.8.1-runtime-ubuntu24.04"
# TODO: Move to published pypi tags
ARG GENAI_PERF_TAG="e67e853413a07a778dd78a55e299be7fba9c9c24"

# Define general architecture ARGs for supporting both x86 and aarch64 builds.
#   ARCH: Used for package suffixes (e.g., amd64, arm64)
#   ARCH_ALT: Used for Rust targets, manylinux suffix (e.g., x86_64, aarch64)
#
# Default values are for x86/amd64:
#   --build-arg ARCH=amd64 --build-arg ARCH_ALT=x86_64
#
# For arm64/aarch64, build with:
#   --build-arg ARCH=arm64 --build-arg ARCH_ALT=aarch64
#
# NOTE: There isn't an easy way to define one of these values based on the other value
# without adding if statements everywhere, so just define both as ARGs for now.
ARG ARCH=amd64
ARG ARCH_ALT=x86_64
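
# Example (illustrative; the image tag and Dockerfile path are assumptions):
# an aarch64 build overriding both ARGs might look like:
#   docker build -f Dockerfile.vllm \
#       --build-arg ARCH=arm64 --build-arg ARCH_ALT=aarch64 \
#       -t dynamo-vllm:arm64 .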

FROM ${BASE_IMAGE}:${BASE_IMAGE_TAG} AS nixl_base

# Redeclare ARCH and ARCH_ALT so they're available in this stage
ARG ARCH
ARG ARCH_ALT

WORKDIR /opt/nixl
# Use the nixl commit hash as a cache hint: the RUN below is invalidated only
# when NIXL_COMMIT changes, so unrelated changes don't bust the cache.
ARG NIXL_COMMIT
RUN echo "NIXL commit: ${NIXL_COMMIT}" > /opt/nixl/commit.txt
# Copy the nixl source
COPY --from=nixl . .
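
# Note: `nixl` above is not a build stage in this file, so it is presumably
# supplied as a named BuildKit build context. A sketch (paths illustrative):
#   docker buildx build --build-context nixl=/path/to/nixl \
#       --build-arg NIXL_COMMIT=$(git -C /path/to/nixl rev-parse HEAD) ...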

##################################
########## Base Image ############
##################################

FROM ${BASE_IMAGE}:${BASE_IMAGE_TAG} AS base

# Redeclare ARCH and ARCH_ALT so they're available in this stage
ARG ARCH
ARG ARCH_ALT

USER root
ARG PYTHON_VERSION=3.12

RUN apt-get update -y && \
    apt-get install -y \
    # NIXL build dependencies
    cmake \
    meson \
    ninja-build \
    pybind11-dev \
    # Rust build dependencies
    libclang-dev \
    # Install utilities
    nvtop \
    tmux \
    vim

WORKDIR /workspace

### NIXL SETUP ###
# Copy nixl source, and use commit hash as cache hint
COPY --from=nixl_base /opt/nixl /opt/nixl
COPY --from=nixl_base /opt/nixl/commit.txt /opt/nixl/commit.txt

### NATS & ETCD SETUP ###
# nats
RUN wget --tries=3 --waitretry=5 https://github.com/nats-io/nats-server/releases/download/v2.10.24/nats-server-v2.10.24-${ARCH}.deb && \
    dpkg -i nats-server-v2.10.24-${ARCH}.deb && rm nats-server-v2.10.24-${ARCH}.deb
# etcd
ENV ETCD_VERSION="v3.5.18"
RUN wget --tries=3 --waitretry=5 https://github.com/etcd-io/etcd/releases/download/$ETCD_VERSION/etcd-$ETCD_VERSION-linux-${ARCH}.tar.gz -O /tmp/etcd.tar.gz && \
    mkdir -p /usr/local/bin/etcd && \
    tar -xvf /tmp/etcd.tar.gz -C /usr/local/bin/etcd --strip-components=1 && \
    rm /tmp/etcd.tar.gz
ENV PATH=/usr/local/bin/etcd/:$PATH
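
# Example (illustrative): a quick smoke test of both services inside the
# container might look like:
#   nats-server -js &
#   etcd --data-dir /tmp/etcd &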


### VIRTUAL ENVIRONMENT SETUP ###

# Install uv and create virtualenv
COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /bin/
RUN mkdir /opt/dynamo && \
    uv venv /opt/dynamo/venv --python $PYTHON_VERSION

# Activate virtual environment
ENV VIRTUAL_ENV=/opt/dynamo/venv
ENV PATH="${VIRTUAL_ENV}/bin:${PATH}"

# Install NIXL Python module
# TODO: Move gds_path selection based on arch into NIXL build
RUN if [ "$ARCH" = "arm64" ]; then \
        cd /opt/nixl && uv pip install . --config-settings=setup-args="-Dgds_path=/usr/local/cuda/targets/sbsa-linux/"; \
    else \
        cd /opt/nixl && uv pip install . ; \
    fi

# Install patched vllm - keep this early in Dockerfile to avoid
# rebuilds from unrelated source code changes
ARG VLLM_REF="0.8.4"
ARG VLLM_PATCH="vllm_v${VLLM_REF}-dynamo-kv-disagg-patch.patch"
ARG VLLM_PATCHED_PACKAGE_NAME="ai_dynamo_vllm"
ARG VLLM_PATCHED_PACKAGE_VERSION="0.8.4"
ARG VLLM_MAX_JOBS=4
RUN --mount=type=bind,source=./container/deps/,target=/tmp/deps \
    --mount=type=cache,target=/root/.cache/uv \
    mkdir /tmp/vllm && \
    uv pip install pip wheel && \
    # NOTE: Building vLLM from source on ARM can take several hours; see the MAX_JOBS note below.
    if [ "$ARCH" = "arm64" ]; then \
        # PyTorch 2.7 supports CUDA 12.8 and aarch64 installs
        uv pip install torch==2.7.0 torchvision torchaudio --index-url https://download.pytorch.org/whl/cu128 && \
        # Download vLLM source with version matching patch
        git clone --branch v${VLLM_REF} --depth 1 https://github.com/vllm-project/vllm.git /tmp/vllm/vllm-${VLLM_REF} && \
        cd /tmp/vllm/vllm-${VLLM_REF}/ && \
        # Patch vLLM source with dynamo additions
        patch -p1 < /tmp/deps/vllm/${VLLM_PATCH} && \
        # WAR: Set package version check to 'vllm' instead of 'ai_dynamo_vllm' to avoid
        # platform detection issues on ARM install.
        # TODO: Rename package from vllm to ai_dynamo_vllm like x86 path below to remove this WAR.
        sed -i 's/version("ai_dynamo_vllm")/version("vllm")/g' vllm/platforms/__init__.py && \
        # Remove pytorch from vllm install dependencies
        python use_existing_torch.py && \
        # Build/install vllm from source
        uv pip install -r requirements/build.txt && \
        # MAX_JOBS is set to avoid OOM during the vllm-flash-attn build; it can
        # significantly impact the overall build time. Each job can take up
        # to ~16GB of RAM, so tune according to available system memory.
        MAX_JOBS=${VLLM_MAX_JOBS} uv pip install . --no-build-isolation ; \
    # Handle x86_64: Download wheel, unpack, setup for later steps
    else \
        python -m pip download --only-binary=:all: --no-deps --dest /tmp/vllm vllm==v${VLLM_REF} && \
        # Patch vLLM pre-built download with dynamo additions
        cd /tmp/vllm && \
        wheel unpack *.whl && \
        cd vllm-${VLLM_REF}/ && \
        patch -p1 < /tmp/deps/vllm/${VLLM_PATCH} && \
        # Rename the package from vllm to ai_dynamo_vllm
        mv vllm-${VLLM_REF}.dist-info ${VLLM_PATCHED_PACKAGE_NAME}-${VLLM_PATCHED_PACKAGE_VERSION}.dist-info && \
        sed -i "s/^Name: vllm/Name: ${VLLM_PATCHED_PACKAGE_NAME}/g" ${VLLM_PATCHED_PACKAGE_NAME}-${VLLM_PATCHED_PACKAGE_VERSION}.dist-info/METADATA && \
        sed -i "s/^Version: ${VLLM_REF}/Version: ${VLLM_PATCHED_PACKAGE_VERSION}/g" ${VLLM_PATCHED_PACKAGE_NAME}-${VLLM_PATCHED_PACKAGE_VERSION}.dist-info/METADATA && \
        # Update wheel tag from linux_${ARCH_ALT} to manylinux1_${ARCH_ALT} in WHEEL file
        sed -i "s/Tag: cp38-abi3-linux_${ARCH_ALT}/Tag: cp38-abi3-manylinux1_${ARCH_ALT}/g" ${VLLM_PATCHED_PACKAGE_NAME}-${VLLM_PATCHED_PACKAGE_VERSION}.dist-info/WHEEL && \
        # Also update the tag in RECORD file to match
        sed -i "s/-cp38-abi3-linux_${ARCH_ALT}.whl/-cp38-abi3-manylinux1_${ARCH_ALT}.whl/g" ${VLLM_PATCHED_PACKAGE_NAME}-${VLLM_PATCHED_PACKAGE_VERSION}.dist-info/RECORD && \
        mkdir -p /workspace/dist && \
        wheel pack . --dest-dir /workspace/dist && \
        uv pip install /workspace/dist/${VLLM_PATCHED_PACKAGE_NAME}-*.whl ; \
    fi
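
# Example (illustrative): on a memory-constrained ARM host, cap the
# vllm-flash-attn build parallelism at build time:
#   docker build --build-arg ARCH=arm64 --build-arg ARCH_ALT=aarch64 \
#       --build-arg VLLM_MAX_JOBS=2 ...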

# Common dependencies
RUN --mount=type=bind,source=./container/deps/requirements.txt,target=/tmp/requirements.txt \
    uv pip install --requirement /tmp/requirements.txt

# Install test dependencies
RUN --mount=type=bind,source=./container/deps/requirements.test.txt,target=/tmp/requirements.txt \
    uv pip install --requirement /tmp/requirements.txt

### MISC UTILITY SETUP ###

# Finish the pyright install (its first run downloads the bundled node runtime)
RUN pyright --help > /dev/null 2>&1

# Enable Git operations in the /workspace directory
RUN printf "[safe]\n      directory=/workspace\n" > /root/.gitconfig

# Use bash instead of dash for /bin/sh so shell snippets can rely on bash features
RUN ln -sf /bin/bash /bin/sh

### BUILDS ###

# Rust build/dev dependencies
RUN apt update -y && \
    apt install --no-install-recommends -y \
    build-essential \
    protobuf-compiler \
    cmake \
    libssl-dev \
    pkg-config

ENV RUSTUP_HOME=/usr/local/rustup \
    CARGO_HOME=/usr/local/cargo \
    PATH=/usr/local/cargo/bin:$PATH \
    RUST_VERSION=1.86.0

# Define Rust target based on ARCH_ALT ARG
ARG RUSTARCH=${ARCH_ALT}-unknown-linux-gnu

# Install Rust using RUSTARCH derived from ARCH_ALT
RUN wget --tries=3 --waitretry=5 "https://static.rust-lang.org/rustup/archive/1.28.1/${RUSTARCH}/rustup-init" && \
    # TODO: Add SHA check back based on RUSTARCH
    chmod +x rustup-init && \
    ./rustup-init -y --no-modify-path --profile minimal --default-toolchain $RUST_VERSION --default-host ${RUSTARCH} && \
    rm rustup-init && \
    chmod -R a+w $RUSTUP_HOME $CARGO_HOME
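
# A SHA check for the TODO above might look like the sketch below, where
# RUSTUP_SHA256 would be a new ARG holding the checksum published for
# ${RUSTARCH} on static.rust-lang.org (illustrative, not currently wired in):
#   echo "${RUSTUP_SHA256} *rustup-init" | sha256sum -c -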

ARG CARGO_BUILD_JOBS
# Default CARGO_BUILD_JOBS to 16 if not provided.
# This prevents cargo from spawning $(nproc) parallel jobs, which can
# exceed the open file descriptor limit on large machines.
ENV CARGO_BUILD_JOBS=${CARGO_BUILD_JOBS:-16}
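
# Example (illustrative): lower the parallelism on a constrained builder:
#   docker build --build-arg CARGO_BUILD_JOBS=8 ...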

#######################################
########## Local Development ##########
#######################################

FROM base AS local-dev

# https://code.visualstudio.com/remote/advancedcontainers/add-nonroot-user
# Use the default ubuntu user, but grant it sudo access.
# Needed so file permissions aren't set to root ownership when writing from inside the container.

# The username itself is not configurable, only the uid and gid; user ubuntu is hardcoded in .devcontainer
ENV USERNAME=ubuntu
ARG USER_UID=1000
ARG USER_GID=1000

RUN apt-get update && apt-get install -y sudo gnupg2 gnupg1 \
    && echo "$USERNAME ALL=(root) NOPASSWD:ALL" > /etc/sudoers.d/$USERNAME \
    && chmod 0440 /etc/sudoers.d/$USERNAME \
    && mkdir -p /home/$USERNAME \
    && chown -R $USERNAME:$USERNAME /home/$USERNAME \
    && rm -rf /var/lib/apt/lists/* \
    && chsh -s /bin/bash $USERNAME

# Copying with --chown is slow (~40s) but far faster than a post-hoc
# chown -R $USERNAME:$USERNAME /opt/dynamo/venv (~10min)
COPY --from=base --chown=$USER_UID:$USER_GID /opt/dynamo/venv/ /opt/dynamo/venv/
RUN chown $USERNAME:$USERNAME /opt/dynamo/venv
COPY --from=base --chown=$USERNAME:$USERNAME /usr/local/bin /usr/local/bin

USER $USERNAME
ENV HOME=/home/$USERNAME
WORKDIR $HOME

# https://code.visualstudio.com/remote/advancedcontainers/persist-bash-history
RUN SNIPPET="export PROMPT_COMMAND='history -a' && export HISTFILE=$HOME/.commandhistory/.bash_history" \
    && mkdir -p $HOME/.commandhistory \
    && touch $HOME/.commandhistory/.bash_history \
    && echo "$SNIPPET" >> "$HOME/.bashrc"

RUN mkdir -p /home/$USERNAME/.cache/

# Tell vllm to use the Dynamo LLM C API for KV Cache Routing (debug build path for local development)
ENV VLLM_KV_CAPI_PATH=$HOME/dynamo/.build/target/debug/libdynamo_llm_capi.so

ENTRYPOINT ["/opt/nvidia/nvidia_entrypoint.sh"]

##################################
##### Wheel Build Image ##########
##################################

# Redeclare ARCH_ALT ARG so it's available for interpolation in the FROM instruction
ARG ARCH_ALT

FROM quay.io/pypa/manylinux_2_28_${ARCH_ALT} AS wheel_builder

ARG CARGO_BUILD_JOBS
# Default CARGO_BUILD_JOBS to 16 if not provided.
# This prevents cargo from spawning $(nproc) parallel jobs, which can
# exceed the open file descriptor limit on large machines.
ENV CARGO_BUILD_JOBS=${CARGO_BUILD_JOBS:-16}
# Use build arg RELEASE_BUILD = true to generate wheels for Python 3.10, 3.11 and 3.12.
ARG RELEASE_BUILD
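
# Example (illustrative): a release build producing wheels for Python 3.10-3.12:
#   docker build --target wheel_builder --build-arg RELEASE_BUILD=true ...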

WORKDIR /workspace

RUN yum update -y \
    && yum install -y python3.12-devel \
    && yum install -y protobuf-compiler \
    && yum clean all \
    && rm -rf /var/cache/yum

ENV RUSTUP_HOME=/usr/local/rustup \
    CARGO_HOME=/usr/local/cargo \
    CARGO_TARGET_DIR=/workspace/target \
    VIRTUAL_ENV=/opt/dynamo/venv

COPY --from=base $RUSTUP_HOME $RUSTUP_HOME
COPY --from=base $CARGO_HOME $CARGO_HOME
COPY --from=base /workspace /workspace
COPY --from=base $VIRTUAL_ENV $VIRTUAL_ENV
ENV PATH=$CARGO_HOME/bin:$VIRTUAL_ENV/bin:$PATH

# Copy configuration files
COPY pyproject.toml /workspace/
COPY README.md /workspace/
COPY LICENSE /workspace/
COPY Cargo.toml /workspace/
COPY Cargo.lock /workspace/
COPY rust-toolchain.toml /workspace/
COPY hatch_build.py /workspace/

# Copy source code
COPY lib/ /workspace/lib/
COPY components /workspace/components
COPY launch /workspace/launch
COPY deploy/dynamo/sdk /workspace/deploy/dynamo/sdk

# Build Rust crate binaries packaged with the wheel
RUN cargo build --release --locked --features mistralrs,sglang,vllm,python \
    -p dynamo-run \
    -p llmctl \
    # Multiple crates named `http` exist in the dependency graph, so disambiguate by path
    -p file://$PWD/components/http \
    -p metrics
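
# Note: -p accepts cargo package ID specs (see `cargo help pkgid`), so a
# file:// URL selects a crate by path when several crates share a name.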

# Build dynamo wheel
RUN uv build --wheel --out-dir /workspace/dist && \
    cd /workspace/lib/bindings/python && \
    uv build --wheel --out-dir /workspace/dist --python 3.12 && \
    if [ "$RELEASE_BUILD" = "true" ]; then \
        uv build --wheel --out-dir /workspace/dist --python 3.11 && \
        uv build --wheel --out-dir /workspace/dist --python 3.10; \
    fi
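
# The wheels built above land in /workspace/dist; the ci_minimum and runtime
# stages below install them from there.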

#######################################
########## CI Minimum Image ###########
#######################################
FROM base AS ci_minimum

ENV DYNAMO_HOME=/workspace
ENV CARGO_TARGET_DIR=/workspace/target

WORKDIR /workspace

COPY --from=wheel_builder /workspace/dist/ /workspace/dist/
COPY --from=wheel_builder /workspace/target/ /workspace/target/
# Copy Cargo cache to avoid re-downloading dependencies
COPY --from=wheel_builder $CARGO_HOME $CARGO_HOME

COPY . /workspace

# Build the rest of the crates
# TODO: Figure out Rust caching to avoid rebuilding, then remove the exclude flags
RUN cargo build --release --locked --workspace \
    --exclude dynamo-run \
    --exclude llmctl \
    --exclude file://$PWD/components/http \
    --exclude metrics

# Package the bindings
RUN mkdir -p /opt/dynamo/bindings/wheels && \
    mkdir /opt/dynamo/bindings/lib && \
    cp dist/ai_dynamo*cp312*.whl /opt/dynamo/bindings/wheels/. && \
    cp target/release/libdynamo_llm_capi.so /opt/dynamo/bindings/lib/. && \
    cp -r lib/bindings/c/include /opt/dynamo/bindings/.  && \
    cp target/release/dynamo-run /usr/local/bin && \
    cp target/release/http /usr/local/bin && \
    cp target/release/llmctl /usr/local/bin && \
    cp target/release/metrics /usr/local/bin && \
    cp target/release/mock_worker /usr/local/bin

RUN uv pip install /workspace/dist/ai_dynamo_runtime*cp312*.whl && \
    uv pip install /workspace/dist/ai_dynamo*any.whl

# Copy launch banner
RUN --mount=type=bind,source=./container/launch_message.txt,target=/workspace/launch_message.txt \
    sed '/^#\s/d' /workspace/launch_message.txt > ~/.launch_screen && \
    echo "cat ~/.launch_screen" >> ~/.bashrc

# Tell vllm to use the Dynamo LLM C API for KV Cache Routing
ENV VLLM_KV_CAPI_PATH=/opt/dynamo/bindings/lib/libdynamo_llm_capi.so

##########################################
########## Perf Analyzer Image ###########
##########################################
FROM ${BASE_IMAGE}:${BASE_IMAGE_TAG} AS perf_analyzer

ARG GENAI_PERF_TAG

WORKDIR /workspace

# Build and install Perf Analyzer for benchmarking
RUN apt-get update -y && apt-get -y install cmake g++ libssl-dev python3 rapidjson-dev zlib1g-dev
RUN git clone https://github.com/triton-inference-server/perf_analyzer.git
RUN git -C perf_analyzer checkout ${GENAI_PERF_TAG}
RUN mkdir perf_analyzer/build
RUN cmake -B perf_analyzer/build -S perf_analyzer -D TRITON_ENABLE_PERF_ANALYZER_OPENAI=ON
RUN cmake --build perf_analyzer/build -- -j8
RUN mkdir bin &&  \
    cp -r perf_analyzer/build/perf_analyzer/src/perf-analyzer-build /workspace/bin/

########################################
########## Development Image ###########
########################################
FROM ci_minimum AS dev

ARG GENAI_PERF_TAG

COPY --from=perf_analyzer /workspace/bin/perf-analyzer-build/ /perf/bin
COPY --from=perf_analyzer /workspace/perf_analyzer /perf_analyzer
ENV PATH="/perf/bin:${PATH}"

# Install genai-perf for benchmarking
RUN uv pip install "git+https://github.com/triton-inference-server/perf_analyzer.git@${GENAI_PERF_TAG}#subdirectory=genai-perf"
RUN uv pip uninstall tritonclient
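
# Example (illustrative; model name and URL are placeholders): a genai-perf
# smoke test against a locally running endpoint might look like:
#   genai-perf profile -m <model> --url localhost:8000 --endpoint-type chat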

COPY . /workspace

ENTRYPOINT ["/opt/nvidia/nvidia_entrypoint.sh"]

CMD []

####################################
########## Runtime Image ###########
####################################

FROM ${RUNTIME_IMAGE}:${RUNTIME_IMAGE_TAG} AS runtime

WORKDIR /workspace
ENV DYNAMO_HOME=/workspace
ENV VIRTUAL_ENV=/opt/dynamo/venv
ENV PATH="${VIRTUAL_ENV}/bin:${PATH}"

# Setup the python environment
COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /bin/
RUN apt-get update && \
    DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends python3-dev && \
    rm -rf /var/lib/apt/lists/* && \
    uv venv $VIRTUAL_ENV --python 3.12 && \
    echo "source $VIRTUAL_ENV/bin/activate" >> ~/.bashrc

# Install the wheels and symlink executables into /usr/local/bin so dynamo components can use them.
# Dynamo components don't currently have VIRTUAL_ENV on their PATH, hence the symlinks.
COPY --from=wheel_builder /workspace/dist/*.whl wheelhouse/
RUN uv pip install "ai-dynamo[vllm]" --find-links wheelhouse && \
    ln -sf $VIRTUAL_ENV/bin/* /usr/local/bin/ && \
    rm -r wheelhouse

# Tell vllm to use the Dynamo LLM C API for KV Cache Routing
ENV VLLM_KV_CAPI_PATH="/opt/dynamo/bindings/lib/libdynamo_llm_capi.so"

# Copy launch banner
RUN --mount=type=bind,source=./container/launch_message.txt,target=/workspace/launch_message.txt \
    sed '/^#\s/d' /workspace/launch_message.txt > ~/.launch_screen && \
    echo "cat ~/.launch_screen" >> ~/.bashrc

# Copy examples
COPY ./examples examples/

ENTRYPOINT [ "/usr/bin/bash" ]
CMD []
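
# Example (illustrative; the image tag is a placeholder): running the runtime
# image interactively might look like:
#   docker run --rm -it --gpus all --network host dynamo-vllm:latest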