Commit 65798994 authored by dongjw

update Dockerfile

parent 56a18ad0
FROM node:20.16.0 as web_compile
WORKDIR /home
RUN <<EOF
git clone https://github.com/kvcache-ai/ktransformers.git &&
cd ktransformers/ktransformers/website/ &&
npm install @vue/cli &&
npm run build &&
rm -rf node_modules
EOF
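# Note: heredoc-style RUN blocks like the one above require BuildKit. A
# possible way to build and inspect just this stage (image tag illustrative):
#   DOCKER_BUILDKIT=1 docker build --target web_compile -t ktransformers-web .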
FROM pytorch/pytorch:2.5.1-cuda12.1-cudnn9-devel as compile_server
# Set the proxy
ENV http_proxy=http://127.0.0.1:20181
ENV https_proxy=http://127.0.0.1:20181
ENV all_proxy=http://127.0.0.1:20181
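# Note: a loopback proxy is only reachable during the build when it runs with
# host networking, and these ENV values persist into the final image. A
# possible alternative (proxy address illustrative) is to pass the proxy only
# at build time via the predefined proxy build args:
#   docker build --network=host \
#     --build-arg http_proxy=http://127.0.0.1:20181 \
#     --build-arg https_proxy=http://127.0.0.1:20181 .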
ARG CPU_INSTRUCT=NATIVE
# Set the working directory and the CUDA path
WORKDIR /workspace
ENV CUDA_HOME=/usr/local/cuda
# Install system dependencies
RUN apt update -y
RUN apt install -y --no-install-recommends \
libtbb-dev \
libssl-dev \
libcurl4-openssl-dev \
libaio1 \
libaio-dev \
libfmt-dev \
libgflags-dev \
zlib1g-dev \
patchelf \
git \
wget \
vim \
gcc \
g++ \
cmake
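# Note: a common pattern is to chain update, install, and cleanup in a single
# RUN so the apt lists never persist in an intermediate layer, e.g. (sketch,
# same package list as above):
#   RUN apt update -y && apt install -y --no-install-recommends ... \
#       && rm -rf /var/lib/apt/lists/*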
# Copy the code from the web_compile stage (includes the pre-built web UI)
COPY --from=web_compile /home/ktransformers /workspace/ktransformers
# Clean up the apt cache
RUN rm -rf /var/lib/apt/lists/*
# Enter the project directory
WORKDIR /workspace/ktransformers
RUN git checkout work-concurrent
# Initialize the submodules
RUN git submodule update --init --recursive
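# Note: if the sources were cloned here instead of copied from the web_compile
# stage, the checkout and submodule steps could be collapsed into one fetch
# (assuming the work-concurrent branch exists on the remote):
#   git clone --branch work-concurrent --recurse-submodules \
#     https://github.com/kvcache-ai/ktransformers.git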
# Upgrade pip
RUN pip install --upgrade pip
# Install build dependencies
RUN pip install ninja pyproject numpy cpufeature aiohttp zmq openai
# Install flash-attn first (installing it early avoids some later compile-time dependency errors)
RUN pip install flash-attn
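# Note: if no prebuilt flash-attn wheel matches this torch/CUDA combination, it
# compiles from source, which is memory-hungry; limiting parallel jobs is a
# common workaround (sketch):
#   MAX_JOBS=4 pip install flash-attn --no-build-isolation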
# Install ktransformers itself (includes compilation)
RUN CPU_INSTRUCT=${CPU_INSTRUCT} \
USE_BALANCE_SERVE=1 \
KTRANSFORMERS_FORCE_BUILD=TRUE \
TORCH_CUDA_ARCH_LIST="8.0;8.6;8.7;8.9;9.0+PTX" \
pip install . --no-build-isolation --verbose
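# Note: CPU_INSTRUCT is declared as an ARG above, so it can be overridden per
# build without editing this file (tag illustrative):
#   docker build --build-arg CPU_INSTRUCT=NATIVE -t ktransformers:dev .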
RUN pip install third_party/custom_flashinfer/
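# Note: third_party/custom_flashinfer is presumably populated by the submodule
# step above, so this install has to come after it. A quick sanity check
# (module name assumed to be flashinfer):
#   python -c "import flashinfer"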
# Clean up the pip cache
RUN pip cache purge
# Copy the C++ runtime library
RUN cp /usr/lib/x86_64-linux-gnu/libstdc++.so.6 /opt/conda/lib/
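# Note: this copy typically works around conda shipping an older libstdc++ than
# the one the freshly built extensions link against (GLIBCXX "version not
# found" errors at import time). To list the versions a libstdc++ provides:
#   strings /opt/conda/lib/libstdc++.so.6 | grep GLIBCXX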
# Keep the container running (for debugging)
ENTRYPOINT ["tail", "-f", "/dev/null"]
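
A possible way to build and enter the resulting image (image tag and container
name are illustrative; --gpus assumes the NVIDIA container toolkit on the host):

DOCKER_BUILDKIT=1 docker build --network=host -t ktransformers:dev .
docker run --gpus all -d --name ktransformers-dev ktransformers:dev
docker exec -it ktransformers-dev bash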