Commit 9c9f0086 authored by aflowers, committed by Alec

[fix] add kv routing to vllm dockerfile

parent d0d35a9e
@@ -78,14 +78,26 @@ COPY runtime /workspace/runtime
 RUN cd runtime/rust && \
     cargo build --release --locked && cargo doc --no-deps
 
+# Generate C bindings for kv cache routing in vLLM
+COPY llm /workspace/llm
+RUN cd llm/rust/ && \
+    cargo build --release --locked && cargo doc --no-deps
+
 # Build triton_distributed_rs wheel
-RUN cd runtime/rust/python-wheel && \
+COPY python-wheel /workspace/python-wheel
+RUN cd python-wheel && \
     uv build && \
     uv pip install dist/triton_distributed_rs*cp312*.whl
 
+# Package the bindings
+RUN mkdir -p /opt/triton/llm_binding/wheels && mkdir /opt/triton/llm_binding/lib
+RUN cp python-wheel/dist/triton_distributed_rs*cp312*.whl /opt/triton/llm_binding/wheels/.
+RUN cp llm/rust/target/release/libtriton_llm_capi.so /opt/triton/llm_binding/lib/.
+RUN cp -r llm/rust/libtriton-llm/include /opt/triton/llm_binding/.
+
 # Install patched vllm
 ARG VLLM_REF="v0.7.2"
-ARG VLLM_PATCH="vllm_${VLLM_REF}.patch"
+ARG VLLM_PATCH="vllm_${VLLM_REF}-triton-kv-disagg-patch.patch"
 RUN --mount=type=bind,source=./container/deps/,target=/tmp/deps \
     bash /tmp/deps/vllm/install.sh --patch /tmp/deps/vllm/${VLLM_PATCH} --ref ${VLLM_REF} --install-cmd "uv pip install --editable" --use-precompiled --installation-dir /opt/vllm
 
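The hunk above builds the llm Rust crate that produces libtriton_llm_capi.so and stages three artifacts under /opt/triton/llm_binding: the triton_distributed_rs wheel, the C API shared library, and the headers from libtriton-llm. A minimal layout check that could be run inside the built image is sketched below; the paths are taken from the hunk, but the script itself is illustrative and not part of the Dockerfile.

```python
# Illustrative layout check for the KV-routing artifacts staged by the hunk above;
# the paths come from the Dockerfile, the script itself is not part of the image.
import glob
import os

BINDING_ROOT = "/opt/triton/llm_binding"

checks = {
    "wheel": glob.glob(os.path.join(BINDING_ROOT, "wheels", "triton_distributed_rs*cp312*.whl")),
    "c_api": glob.glob(os.path.join(BINDING_ROOT, "lib", "libtriton_llm_capi.so")),
    "headers": glob.glob(os.path.join(BINDING_ROOT, "include")),
}

for name, hits in checks.items():
    print(f"{name}: {'ok' if hits else 'MISSING'}")
```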
@@ -106,6 +118,8 @@ COPY . /workspace
 # Environment setup
 ENV PYTHONPATH="${PYTHONPATH}:/workspace/examples/python:/opt/tritonserver/python/openai/openai_frontend"
 ENV RAPIDS_LIBUCX_PREFER_SYSTEM_LIBRARY=true
+# Tell vllm to use the Triton LLM C API for KV Cache Routing
+ENV VLLM_KV_CAPI_PATH="/opt/triton/llm_binding/lib/libtriton_llm_capi.so"
 
 CMD []
 ENTRYPOINT ["/opt/nvidia/nvidia_entrypoint.sh"]
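The new VLLM_KV_CAPI_PATH variable points the patched vllm at the C API library staged earlier in the build. The actual loading logic lives in the vllm_${VLLM_REF}-triton-kv-disagg-patch.patch applied above and is not shown in this commit; the sketch below only illustrates how such an environment variable might be consumed, and the function name load_kv_capi is hypothetical.

```python
# Hypothetical consumer of VLLM_KV_CAPI_PATH (the real logic is in the vllm patch,
# which this commit does not show).
import ctypes
import os

def load_kv_capi() -> ctypes.CDLL:
    # Default matches the path baked into the image by the Dockerfile above.
    path = os.environ.get(
        "VLLM_KV_CAPI_PATH",
        "/opt/triton/llm_binding/lib/libtriton_llm_capi.so",
    )
    if not os.path.exists(path):
        raise FileNotFoundError(f"KV cache routing C API library not found: {path}")
    # dlopen the library; symbol lookups would follow the headers shipped in
    # /opt/triton/llm_binding/include.
    return ctypes.CDLL(path)
```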