# Dockerfile.xpu — vLLM container image for Intel XPU devices.
FROM intel/deep-learning-essentials:2025.3.2-0-devel-ubuntu24.04 AS vllm-base

# Fail piped commands on the left-hand side too (hadolint DL4006): the default
# `/bin/sh -c` would silently mask a failed wget in the `wget | gpg | tee`
# chain below and write an empty keyring.
SHELL ["/bin/bash", "-o", "pipefail", "-c"]

WORKDIR /workspace/

ARG PYTHON_VERSION=3.12
ARG PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/xpu"

# Register the Intel oneAPI apt repository, with its key dearmored into a
# dedicated keyring referenced via `signed-by`.
RUN wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB | gpg --dearmor | tee /usr/share/keyrings/oneapi-archive-keyring.gpg > /dev/null && \
    echo "deb [signed-by=/usr/share/keyrings/oneapi-archive-keyring.gpg] https://apt.repos.intel.com/oneapi all main" | tee /etc/apt/sources.list.d/oneAPI.list

# OS-level dependencies plus Python 3.12 and the oneAPI DPC++ compiler (from
# the repo registered above). Fixes vs. the previous version: use `apt-get`
# consistently instead of `apt` (hadolint DL3027), keep update/upgrade/install
# in one layer so the package index is never stale, and remove the apt lists
# in the same layer so they do not bloat the image (DL3009). The compiler is
# installed without --no-install-recommends, as before, so its recommended
# runtime components are kept.
RUN apt-get update -y && \
    apt-get install -y --no-install-recommends --fix-missing \
    curl \
    ffmpeg \
    git \
    libaio-dev \
    libgl1 \
    libsm6 \
    libsndfile1 \
    libxext6 \
    lsb-release \
    numactl \
    python3.12 \
    python3.12-dev \
    python3-pip \
    vim \
    wget && \
    apt-get upgrade -y && \
    apt-get install -y intel-oneapi-compiler-dpcpp-cpp-2025.3 && \
    rm -rf /var/lib/apt/lists/*

# Install the Intel GPU user-mode driver stack (IGC, compute-runtime, GMM,
# Level Zero) from pinned release .debs. Download, install, and cleanup all
# happen inside a single layer so nothing transient persists in the image.
RUN tmpdir="$(mktemp -d)" && cd "$tmpdir" && \
    for pkg in \
        https://github.com/intel/intel-graphics-compiler/releases/download/v2.24.8/intel-igc-core-2_2.24.8+20344_amd64.deb \
        https://github.com/intel/intel-graphics-compiler/releases/download/v2.24.8/intel-igc-opencl-2_2.24.8+20344_amd64.deb \
        https://github.com/intel/compute-runtime/releases/download/25.48.36300.8/intel-ocloc_25.48.36300.8-0_amd64.deb \
        https://github.com/intel/compute-runtime/releases/download/25.48.36300.8/intel-opencl-icd_25.48.36300.8-0_amd64.deb \
        https://github.com/intel/compute-runtime/releases/download/25.48.36300.8/libigdgmm12_22.8.2_amd64.deb \
        https://github.com/intel/compute-runtime/releases/download/25.48.36300.8/libze-intel-gpu1_25.48.36300.8-0_amd64.deb \
        https://github.com/oneapi-src/level-zero/releases/download/v1.26.0/level-zero_1.26.0+u24.04_amd64.deb; \
    do wget "$pkg"; done && \
    dpkg -i ./*.deb && \
    cd / && rm -rf "$tmpdir"

ENV PATH="/root/.local/bin:$PATH"
ENV VIRTUAL_ENV="/opt/venv"
ENV UV_PYTHON_INSTALL_DIR=/opt/uv/python
# Download the uv installer to a file and run it, instead of `curl | sh`:
# under the default shell a failed download would otherwise be masked and
# the build would continue with no uv installed.
RUN curl -LsSf -o /tmp/uv-install.sh https://astral.sh/uv/install.sh && \
    sh /tmp/uv-install.sh && \
    rm /tmp/uv-install.sh
# Create the project virtualenv (with pip seeded) under /opt/venv.
RUN uv venv --python ${PYTHON_VERSION} --seed ${VIRTUAL_ENV}
ENV PATH="$VIRTUAL_ENV/bin:$PATH"

# This oneCCL build contains BMG support, which the default oneCCL shipped
# with oneAPI 2025.2 does not.
ARG ONECCL_INSTALLER="intel-oneccl-2021.15.7.8_offline.sh"
RUN wget "https://github.com/uxlfoundation/oneCCL/releases/download/2021.15.7/${ONECCL_INSTALLER}" && \
    bash "${ONECCL_INSTALLER}" -a --silent --eula accept && \
    rm "${ONECCL_INSTALLER}" && \
    # Interactive shells pick up the oneAPI + oneCCL environment automatically.
    echo "source /opt/intel/oneapi/setvars.sh --force" >> /root/.bashrc && \
    echo "source /opt/intel/oneapi/ccl/2021.15/env/vars.sh --force" >> /root/.bashrc && \
    # Repoint the `latest` symlink at the freshly installed 2021.15 release.
    rm -f /opt/intel/oneapi/ccl/latest && \
    ln -s /opt/intel/oneapi/ccl/2021.15 /opt/intel/oneapi/ccl/latest

# bash (absolute path) with pipefail so piped RUN commands fail loudly
# (hadolint DL4006), and so `source` is available for the oneAPI env scripts
# used by later RUN steps.
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
CMD ["bash", "-c", "source /root/.bashrc && exec bash"]

WORKDIR /workspace/vllm

# Generous HTTP timeout for uv: the torch-xpu wheels are large.
ENV UV_HTTP_TIMEOUT=500

# Configure package index for XPU
ENV PIP_EXTRA_INDEX_URL=${PIP_EXTRA_INDEX_URL}
ENV UV_EXTRA_INDEX_URL=${PIP_EXTRA_INDEX_URL}
ENV UV_INDEX_STRATEGY="unsafe-best-match"
ENV UV_LINK_MODE="copy"

# Install the XPU Python requirements. The requirements files are bind-mounted
# rather than copied so changing them does not invalidate earlier layers, and
# the uv cache mount speeds up rebuilds without bloating the image.
RUN --mount=type=cache,target=/root/.cache/uv \
    --mount=type=bind,src=requirements/common.txt,target=/workspace/vllm/requirements/common.txt \
    --mount=type=bind,src=requirements/xpu.txt,target=/workspace/vllm/requirements/xpu.txt \
    uv pip install --upgrade pip && \
    uv pip install -r requirements/xpu.txt

# Used for suffix-method speculative decoding:
# build deps for proto + nanobind-based extensions to set up the build environment.
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install grpcio-tools protobuf nanobind
# arctic-inference is built from source, which needs torch-xpu properly installed
# first. The oneAPI/oneCCL env scripts must be sourced in this same shell before
# the build; CMAKE_PREFIX_PATH gains the venv site-packages (presumably so CMake
# finds the torch config there — confirm against arctic-inference's build).
RUN --mount=type=cache,target=/root/.cache/uv \
    source /opt/intel/oneapi/setvars.sh --force && \
    source /opt/intel/oneapi/ccl/2021.15/env/vars.sh --force && \
    export CMAKE_PREFIX_PATH="$(python -c 'import site; print(site.getsitepackages()[0])'):${CMAKE_PREFIX_PATH}" && \
    uv pip install --no-build-isolation arctic-inference==0.1.1

# Make libraries installed under /usr/local/lib resolvable at runtime.
ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/local/lib/"

COPY . .
ARG GIT_REPO_CHECK=0
# Optionally run the repo sanity check (skipped when GIT_REPO_CHECK=0);
# .git is bind-mounted only for this step so it never lands in a layer.
RUN --mount=type=bind,source=.git,target=.git \
    if [ "$GIT_REPO_CHECK" != 0 ]; then bash tools/check_repo.sh; fi

# Build vLLM for the XPU backend. Workers use spawn instead of fork —
# NOTE(review): presumably required by the XPU runtime; confirm upstream.
ENV VLLM_TARGET_DEVICE=xpu
ENV VLLM_WORKER_MULTIPROC_METHOD=spawn

RUN --mount=type=cache,target=/root/.cache/uv \
    --mount=type=bind,source=.git,target=.git \
    uv pip install --no-build-isolation .

CMD ["/bin/bash"]

FROM vllm-base AS vllm-openai

# install additional dependencies for openai api server
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install accelerate hf_transfer pytest pytest_asyncio lm_eval[api] modelscope

# install development dependencies (for testing); use the shared uv cache
# mount like every other uv invocation in this file.
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install -e tests/vllm_test_utils

# install NIXL and UCX from source code
ARG UCX_VERSION=e5d98879705239d254ede40b4a52891850cb5349
ARG NIXL_VERSION=0.7.0

# Build toolchain plus the RDMA/InfiniBand userspace stack required by
# UCX/NIXL. Packages are sorted for diffability, and the apt lists are
# removed in the same layer (hadolint DL3009).
RUN apt-get update && apt-get install -y \
    autoconf \
    automake \
    build-essential \
    curl \
    git \
    hwloc \
    ibutils \
    ibverbs-providers \
    ibverbs-utils \
    infiniband-diags \
    iproute2 \
    libibmad-dev \
    libibmad5 \
    libibumad-dev \
    libibumad3 \
    libibverbs-dev \
    libibverbs1 \
    libmlx4-1 \
    libmlx5-1 \
    librdmacm-dev \
    librdmacm1 \
    librdmacm1t64 \
    libtool \
    net-tools \
    numactl \
    pciutils \
    perftest \
    pkg-config \
    rdma-core \
    wget && \
    rm -rf /var/lib/apt/lists/*

# Build UCX (with Level Zero support) and NIXL from source. The install prefix
# /tmp/ucx_install is kept in the final image (referenced by the two ENV lines
# below); only the source trees are removed. UCX_VERSION is a commit hash, so
# a full clone is needed before checkout.
ENV PKG_CONFIG_PATH=/tmp/ucx_install/lib/pkgconfig:${PKG_CONFIG_PATH}
ENV LD_LIBRARY_PATH=/tmp/ucx_install/lib:${LD_LIBRARY_PATH}
RUN --mount=type=cache,target=/root/.cache/uv \
    git clone https://github.com/openucx/ucx /tmp/ucx_source && \
    cd /tmp/ucx_source && git checkout "${UCX_VERSION}" && \
    bash autogen.sh && \
    ./configure --prefix=/tmp/ucx_install --with-ze=yes --enable-examples --enable-mt && \
    make CFLAGS="-Wno-error=incompatible-pointer-types" -j8 && make install && \
    git clone https://github.com/ai-dynamo/nixl /tmp/nixl_source && \
    cd /tmp/nixl_source && git checkout "${NIXL_VERSION}" && \
    uv pip install --upgrade meson pybind11 patchelf && \
    uv pip install -r requirements.txt && \
    uv pip install . && \
    rm -rf /tmp/ucx_source /tmp/nixl_source

# FIX triton: drop whatever triton / triton-xpu came in transitively and pin
# the XPU build of triton explicitly.
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip uninstall triton triton-xpu && \
    uv pip install triton-xpu==3.6.0

# remove torch bundled oneccl to avoid conflicts
# (the oneCCL 2021.15 installed system-wide in the base stage remains).
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip uninstall oneccl oneccl-devel

ENTRYPOINT ["vllm", "serve"]