#!/bin/bash
# Install the dependencies in CI.
set -euxo pipefail
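# -e: exit on the first error; -u: treat unset variables as errors; -x: trace each command; -o pipefail: a pipeline fails if any stage fails.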

IS_BLACKWELL=${IS_BLACKWELL:-0}
RUN_DEEPSEEK_V32=${RUN_DEEPSEEK_V32:-0}
CU_VERSION="cu128"
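# Example invocation (script path illustrative), enabling the Blackwell and DeepSeek-V3.2 paths:
#   IS_BLACKWELL=1 RUN_DEEPSEEK_V32=1 bash ci_install_dependency.sh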

# Kill existing processes
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
bash "${SCRIPT_DIR}/../killall_sglang.sh"
echo "CUDA_VISIBLE_DEVICES=${CUDA_VISIBLE_DEVICES:-}"

# Clear torch compilation cache
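# TorchInductor stores compiled artifacts under $TORCHINDUCTOR_CACHE_DIR (default: <tmpdir>/torchinductor_<user>); removing it prevents stale compiled kernels from leaking between CI runs.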
python3 -c 'import os, shutil, tempfile, getpass; cache_dir = os.environ.get("TORCHINDUCTOR_CACHE_DIR") or os.path.join(tempfile.gettempdir(), "torchinductor_" + getpass.getuser()); shutil.rmtree(cache_dir, ignore_errors=True)'

# Install apt packages
apt install -y git libnuma-dev

# Choose the package installer (pip on Blackwell, uv elsewhere)
if [ "$IS_BLACKWELL" = "1" ]; then
    # The Blackwell CI runner has some issues with pip and uv,
    # so we can only use pip with `--break-system-packages`.
    PIP_CMD="pip"
    PIP_INSTALL_SUFFIX="--break-system-packages"
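    # (`--break-system-packages` bypasses the PEP 668 externally-managed-environment guard on the runner's system Python.)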

    # Clean up existing installations
    $PIP_CMD uninstall -y flashinfer_python sgl-kernel sglang vllm $PIP_INSTALL_SUFFIX || true
else
    # In normal cases, we use uv, which is much faster than pip.
    pip install --upgrade pip
    pip install uv
    export UV_SYSTEM_PYTHON=true

    PIP_CMD="uv pip"
    PIP_INSTALL_SUFFIX="--index-strategy unsafe-best-match"
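    # `--index-strategy unsafe-best-match` lets uv pick the best version across all configured indexes (PyPI plus the PyTorch extra index), matching pip's resolution behavior.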

    # Clean up existing installations
    $PIP_CMD uninstall flashinfer_python sgl-kernel sglang vllm || true
fi

# Install the main package
$PIP_CMD install -e "python[dev]" --extra-index-url https://download.pytorch.org/whl/${CU_VERSION} $PIP_INSTALL_SUFFIX --force-reinstall

# Install router for pd-disagg test
SGLANG_ROUTER_BUILD_NO_RUST=1 $PIP_CMD install -e "sgl-router" $PIP_INSTALL_SUFFIX
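# SGLANG_ROUTER_BUILD_NO_RUST=1 presumably skips the Rust build of sgl-router so only the Python package is installed (inferred from the flag name).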

# Install sgl-kernel
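# Two version sources: sgl-kernel/pyproject.toml carries the version of the locally built wheel, while python/pyproject.toml pins the published sgl-kernel release that sglang depends on.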
SGL_KERNEL_VERSION_FROM_KERNEL=$(grep -Po '(?<=^version = ")[^"]*' sgl-kernel/pyproject.toml)
SGL_KERNEL_VERSION_FROM_SRT=$(grep -Po -m1 '(?<=sgl-kernel==)[0-9A-Za-z\.\-]+' python/pyproject.toml)
echo "SGL_KERNEL_VERSION_FROM_KERNEL=${SGL_KERNEL_VERSION_FROM_KERNEL} SGL_KERNEL_VERSION_FROM_SRT=${SGL_KERNEL_VERSION_FROM_SRT}"

if [ "${CUSTOM_BUILD_SGL_KERNEL:-}" = "true" ]; then
    ls -alh sgl-kernel/dist
    $PIP_CMD install sgl-kernel/dist/sgl_kernel-${SGL_KERNEL_VERSION_FROM_KERNEL}-cp310-abi3-manylinux2014_x86_64.whl --force-reinstall $PIP_INSTALL_SUFFIX
else
    $PIP_CMD install sgl-kernel==${SGL_KERNEL_VERSION_FROM_SRT} --force-reinstall $PIP_INSTALL_SUFFIX
fi

# Show current packages
$PIP_CMD list

# Install additional dependencies
$PIP_CMD install mooncake-transfer-engine==0.3.6.post1 nvidia-cuda-nvrtc-cu12 py-spy huggingface_hub[hf_xet] $PIP_INSTALL_SUFFIX

if [ "$IS_BLACKWELL" != "1" ]; then
    # Install lmms-eval for evaluating MMMU
    git clone --branch v0.3.3 --depth 1 https://github.com/EvolvingLMMs-Lab/lmms-eval.git
    $PIP_CMD install -e lmms-eval/ $PIP_INSTALL_SUFFIX

    # Install xformers
    $PIP_CMD install xformers --index-url https://download.pytorch.org/whl/${CU_VERSION} --no-deps $PIP_INSTALL_SUFFIX
fi

# Install dependencies for DeepSeek-V3.2
if [ "$RUN_DEEPSEEK_V32" = "1" ]; then
    # Install FlashMLA
    FLASHMLA_COMMIT="1408756a88e52a25196b759eaf8db89d2b51b5a1"
    FLASH_MLA_DISABLE_SM100="0"
    if [ "$IS_BLACKWELL" != "1" ]; then
        FLASH_MLA_DISABLE_SM100="1"
    fi
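    # SM100 kernels target Blackwell GPUs; non-Blackwell runners disable them to shorten the FlashMLA build.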
    git clone https://github.com/deepseek-ai/FlashMLA.git flash-mla
    cd flash-mla
    git checkout ${FLASHMLA_COMMIT}
    git submodule update --init --recursive
    FLASH_MLA_DISABLE_SM100=${FLASH_MLA_DISABLE_SM100} $PIP_CMD install -v . $PIP_INSTALL_SUFFIX --no-build-isolation
    cd ..

    # Install fast-hadamard-transform
    FAST_HADAMARD_TRANSFORM_COMMIT="7fd811c2b47f63b0b08d2582619f939e14dad77c"
    git clone https://github.com/Dao-AILab/fast-hadamard-transform
    cd fast-hadamard-transform
    git checkout ${FAST_HADAMARD_TRANSFORM_COMMIT}
    $PIP_CMD install . $PIP_INSTALL_SUFFIX --no-build-isolation
    cd ..
fi

# Show current packages and the torch CUDA build
$PIP_CMD list
python3 -c "import torch; print(torch.version.cuda)"