Unverified commit e7aa4664, authored by Johnny and committed by GitHub
Browse files

[NVIDIA] Build CUDA 13 (#11299)


Co-authored-by: ishandhanani <ishandhanani@gmail.com>
Co-authored-by: Baizhou Zhang <sobereddiezhang@gmail.com>
parent 4d4feccb
......@@ -12,10 +12,10 @@ on:
description: "FlashInfer version"
required: true
type: choice
default: 'release'
default: "release"
options:
- 'release'
- 'nightly'
- "release"
- "nightly"
concurrency:
group: pr-test-${{ github.ref }}
......@@ -68,6 +68,8 @@ jobs:
include:
- python-version: "3.10"
cuda-version: "12.9"
- python-version: "3.10"
cuda-version: "13.0"
name: Build Wheel (CUDA ${{ matrix.cuda-version }})
steps:
- name: Cleanup
......@@ -84,7 +86,6 @@ jobs:
python-version: ${{ matrix.python-version }}
- name: Build wheel for Python ${{ matrix.python-version }} and CUDA ${{ matrix.cuda-version }}
if: github.event_name != 'push' || (matrix.cuda-version != '11.8')
run: |
cd sgl-kernel
./build.sh "${{ matrix.python-version }}" "${{ matrix.cuda-version }}"
......@@ -197,12 +198,43 @@ jobs:
echo "All benchmark tests completed!"
# Adding a single CUDA13 smoke test to verify that the kernel builds and runs
# TODO: Add back this test when it can pass on CI
# cuda13-kernel-smoke-test:
# needs: [check-changes, sgl-kernel-build-wheels]
# if: needs.check-changes.outputs.sgl_kernel == 'true'
# runs-on: x64-cu13-kernel-tests
# steps:
# - uses: actions/checkout@v4
# - name: Cleanup
# run: |
# ls -alh sgl-kernel/dist || true
# rm -rf sgl-kernel/dist/* || true
# - name: Download CUDA 13.0 artifacts
# uses: actions/download-artifact@v4
# with:
# path: sgl-kernel/dist/
# merge-multiple: true
# pattern: wheel-python3.10-cuda13.0
# - name: Install dependencies
# run: |
# CUSTOM_BUILD_SGL_KERNEL=${{needs.check-changes.outputs.sgl_kernel}} bash scripts/ci/ci_install_dependency.sh
# - name: Run kernel unit tests
# timeout-minutes: 30
# run: |
# cd sgl-kernel
# pytest tests/
# =============================================== primary ====================================================
unit-test-frontend:
needs: [check-changes, sgl-kernel-build-wheels]
if: always() && !failure() && !cancelled() &&
((needs.check-changes.outputs.main_package == 'true') || (needs.check-changes.outputs.sgl_kernel == 'true'))
((needs.check-changes.outputs.main_package == 'true') || (needs.check-changes.outputs.sgl_kernel == 'true'))
runs-on: 1-gpu-runner
steps:
- name: Checkout code
......@@ -229,7 +261,7 @@ jobs:
unit-test-backend-1-gpu:
needs: [check-changes, unit-test-frontend, sgl-kernel-build-wheels]
if: always() && !failure() && !cancelled() &&
((needs.check-changes.outputs.main_package == 'true') || (needs.check-changes.outputs.sgl_kernel == 'true'))
((needs.check-changes.outputs.main_package == 'true') || (needs.check-changes.outputs.sgl_kernel == 'true'))
runs-on: 1-gpu-runner
strategy:
fail-fast: false
......@@ -260,7 +292,7 @@ jobs:
unit-test-backend-2-gpu:
needs: [check-changes, sgl-kernel-build-wheels]
if: always() && !failure() && !cancelled() &&
((needs.check-changes.outputs.main_package == 'true') || (needs.check-changes.outputs.sgl_kernel == 'true'))
((needs.check-changes.outputs.main_package == 'true') || (needs.check-changes.outputs.sgl_kernel == 'true'))
runs-on: 2-gpu-runner
strategy:
fail-fast: false
......@@ -291,7 +323,7 @@ jobs:
unit-test-backend-4-gpu:
needs: [check-changes, unit-test-backend-2-gpu, sgl-kernel-build-wheels]
if: always() && !failure() && !cancelled() &&
((needs.check-changes.outputs.main_package == 'true') || (needs.check-changes.outputs.sgl_kernel == 'true'))
((needs.check-changes.outputs.main_package == 'true') || (needs.check-changes.outputs.sgl_kernel == 'true'))
runs-on: 4-gpu-h100
strategy:
fail-fast: false
......@@ -322,7 +354,7 @@ jobs:
unit-test-backend-8-gpu-h200:
needs: [check-changes, unit-test-backend-2-gpu, sgl-kernel-build-wheels]
if: always() && !failure() && !cancelled() &&
((needs.check-changes.outputs.main_package == 'true') || (needs.check-changes.outputs.sgl_kernel == 'true'))
((needs.check-changes.outputs.main_package == 'true') || (needs.check-changes.outputs.sgl_kernel == 'true'))
runs-on: 8-gpu-h200
strategy:
fail-fast: false
......@@ -353,7 +385,7 @@ jobs:
unit-test-backend-8-gpu-h20:
needs: [check-changes, unit-test-backend-2-gpu, sgl-kernel-build-wheels]
if: always() && !failure() && !cancelled() &&
((needs.check-changes.outputs.main_package == 'true') || (needs.check-changes.outputs.sgl_kernel == 'true'))
((needs.check-changes.outputs.main_package == 'true') || (needs.check-changes.outputs.sgl_kernel == 'true'))
runs-on: 8-gpu-h20
env:
SGLANG_CI_RDMA_ALL_DEVICES: "mlx5_1,mlx5_2,mlx5_3,mlx5_4"
......@@ -386,7 +418,7 @@ jobs:
performance-test-1-gpu-part-1:
needs: [check-changes, sgl-kernel-build-wheels]
if: always() && !failure() && !cancelled() &&
((needs.check-changes.outputs.main_package == 'true') || (needs.check-changes.outputs.sgl_kernel == 'true'))
((needs.check-changes.outputs.main_package == 'true') || (needs.check-changes.outputs.sgl_kernel == 'true'))
runs-on: 1-gpu-runner
steps:
- name: Checkout code
......@@ -445,7 +477,7 @@ jobs:
performance-test-1-gpu-part-2:
needs: [check-changes, sgl-kernel-build-wheels]
if: always() && !failure() && !cancelled() &&
((needs.check-changes.outputs.main_package == 'true') || (needs.check-changes.outputs.sgl_kernel == 'true'))
((needs.check-changes.outputs.main_package == 'true') || (needs.check-changes.outputs.sgl_kernel == 'true'))
runs-on: 1-gpu-runner
steps:
- name: Checkout code
......@@ -496,7 +528,7 @@ jobs:
performance-test-1-gpu-part-3:
needs: [check-changes, sgl-kernel-build-wheels]
if: always() && !failure() && !cancelled() &&
((needs.check-changes.outputs.main_package == 'true') || (needs.check-changes.outputs.sgl_kernel == 'true'))
((needs.check-changes.outputs.main_package == 'true') || (needs.check-changes.outputs.sgl_kernel == 'true'))
runs-on: 1-gpu-runner
steps:
- name: Checkout code
......@@ -529,7 +561,7 @@ jobs:
performance-test-2-gpu:
needs: [check-changes, unit-test-backend-2-gpu, sgl-kernel-build-wheels]
if: always() && !failure() && !cancelled() &&
((needs.check-changes.outputs.main_package == 'true') || (needs.check-changes.outputs.sgl_kernel == 'true'))
((needs.check-changes.outputs.main_package == 'true') || (needs.check-changes.outputs.sgl_kernel == 'true'))
runs-on: 2-gpu-runner
steps:
- name: Checkout code
......@@ -586,7 +618,7 @@ jobs:
accuracy-test-1-gpu:
needs: [check-changes, sgl-kernel-build-wheels]
if: always() && !failure() && !cancelled() &&
((needs.check-changes.outputs.main_package == 'true') || (needs.check-changes.outputs.sgl_kernel == 'true'))
((needs.check-changes.outputs.main_package == 'true') || (needs.check-changes.outputs.sgl_kernel == 'true'))
runs-on: 1-gpu-runner
steps:
- name: Checkout code
......@@ -616,7 +648,7 @@ jobs:
accuracy-test-2-gpu:
needs: [check-changes, accuracy-test-1-gpu, sgl-kernel-build-wheels]
if: always() && !failure() && !cancelled() &&
((needs.check-changes.outputs.main_package == 'true') || (needs.check-changes.outputs.sgl_kernel == 'true'))
((needs.check-changes.outputs.main_package == 'true') || (needs.check-changes.outputs.sgl_kernel == 'true'))
runs-on: 2-gpu-runner
steps:
- name: Checkout code
......@@ -646,7 +678,7 @@ jobs:
unit-test-deepep-4-gpu:
needs: [check-changes, unit-test-backend-2-gpu, sgl-kernel-build-wheels]
if: always() && !failure() && !cancelled() &&
((needs.check-changes.outputs.main_package == 'true') || (needs.check-changes.outputs.sgl_kernel == 'true'))
((needs.check-changes.outputs.main_package == 'true') || (needs.check-changes.outputs.sgl_kernel == 'true'))
runs-on: 4-gpu-h100
steps:
- name: Checkout code
......@@ -673,7 +705,7 @@ jobs:
unit-test-deepep-8-gpu:
needs: [check-changes, unit-test-backend-2-gpu, sgl-kernel-build-wheels]
if: always() && !failure() && !cancelled() &&
((needs.check-changes.outputs.main_package == 'true') || (needs.check-changes.outputs.sgl_kernel == 'true'))
((needs.check-changes.outputs.main_package == 'true') || (needs.check-changes.outputs.sgl_kernel == 'true'))
runs-on: 8-gpu-h200
steps:
- name: Checkout code
......@@ -700,7 +732,7 @@ jobs:
unit-test-backend-8-gpu-deepseek-v32:
needs: [check-changes, unit-test-backend-2-gpu, sgl-kernel-build-wheels]
if: always() && !failure() && !cancelled() &&
((needs.check-changes.outputs.main_package == 'true') || (needs.check-changes.outputs.sgl_kernel == 'true'))
((needs.check-changes.outputs.main_package == 'true') || (needs.check-changes.outputs.sgl_kernel == 'true'))
runs-on: 8-gpu-h200
steps:
- name: Checkout code
......@@ -727,7 +759,7 @@ jobs:
unit-test-backend-4-gpu-b200:
needs: [check-changes, unit-test-backend-2-gpu, sgl-kernel-build-wheels]
if: always() && !failure() && !cancelled() &&
((needs.check-changes.outputs.main_package == 'true') || (needs.check-changes.outputs.sgl_kernel == 'true'))
((needs.check-changes.outputs.main_package == 'true') || (needs.check-changes.outputs.sgl_kernel == 'true'))
runs-on: 4-gpu-b200
strategy:
fail-fast: false
......@@ -754,20 +786,30 @@ jobs:
python3 run_suite.py --suite per-commit-4-gpu-b200 --auto-partition-id 0 --auto-partition-size 1 --timeout-per-file 3600
pr-test-finish:
needs: [
check-changes,
sgl-kernel-build-wheels,
sgl-kernel-unit-test, sgl-kernel-mla-test, sgl-kernel-benchmark-test,
unit-test-frontend, unit-test-backend-1-gpu,
unit-test-backend-2-gpu, unit-test-backend-4-gpu, unit-test-backend-8-gpu-h200,
performance-test-1-gpu-part-1, performance-test-1-gpu-part-2, performance-test-1-gpu-part-3,
performance-test-2-gpu,
accuracy-test-1-gpu, accuracy-test-2-gpu,
unit-test-deepep-4-gpu, unit-test-deepep-8-gpu,
unit-test-backend-4-gpu-b200,
]
needs:
[
check-changes,
sgl-kernel-build-wheels,
sgl-kernel-unit-test,
sgl-kernel-mla-test,
sgl-kernel-benchmark-test,
unit-test-frontend,
unit-test-backend-1-gpu,
unit-test-backend-2-gpu,
unit-test-backend-4-gpu,
unit-test-backend-8-gpu-h200,
performance-test-1-gpu-part-1,
performance-test-1-gpu-part-2,
performance-test-1-gpu-part-3,
performance-test-2-gpu,
accuracy-test-1-gpu,
accuracy-test-2-gpu,
unit-test-deepep-4-gpu,
unit-test-deepep-8-gpu,
unit-test-backend-4-gpu-b200,
]
if: always()
runs-on: ubuntu-latest
steps:
......
......@@ -17,13 +17,18 @@ concurrency:
cancel-in-progress: true
jobs:
build-cu129:
build-cu129-matrix:
if: github.repository == 'sgl-project/sglang'
runs-on: x64-kernel-build-node
strategy:
matrix:
python-version: ["3.10"]
cuda-version: ["12.9"]
include:
- arch: x86_64
runner: x64-kernel-build-node
- arch: aarch64
runner: arm-kernel-build-node
runs-on: ${{ matrix.runner }}
steps:
- uses: actions/checkout@v4
with:
......@@ -38,7 +43,7 @@ jobs:
run: |
cd sgl-kernel
chmod +x ./build.sh
./build.sh "${{ matrix.python-version }}" "${{ matrix.cuda-version }}"
./build.sh "${{ matrix.python-version }}" "${{ matrix.cuda-version }}" ${{ matrix.arch == 'aarch64' && 'aarch64' || '' }}
- name: Upload to PyPI
working-directory: sgl-kernel
......@@ -49,11 +54,11 @@ jobs:
- name: Upload artifacts
uses: actions/upload-artifact@v4
with:
name: wheel-python${{ matrix.python-version }}-cuda${{ matrix.cuda-version }}
name: wheel-python${{ matrix.python-version }}-cuda${{ matrix.cuda-version }}${{ matrix.arch == 'aarch64' && '-aarch64' || '' }}
path: sgl-kernel/dist/*
release-cu129:
needs: build-cu129
needs: build-cu129-matrix
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
......@@ -101,13 +106,19 @@ jobs:
git commit -m "update whl index"
git push
build-cu129-aarch64:
# for now we do not release CUDA 13.0 wheels to pypi
build-cu130-matrix:
if: github.repository == 'sgl-project/sglang'
runs-on: arm-kernel-build-node
strategy:
matrix:
python-version: ["3.10"]
cuda-version: ["12.9"]
cuda-version: ["13.0"]
include:
- arch: x86_64
runner: x64-kernel-build-node
- arch: aarch64
runner: arm-kernel-build-node
runs-on: ${{ matrix.runner }}
steps:
- uses: actions/checkout@v4
with:
......@@ -122,22 +133,16 @@ jobs:
run: |
cd sgl-kernel
chmod +x ./build.sh
./build.sh "${{ matrix.python-version }}" "${{ matrix.cuda-version }}" aarch64
- name: Upload to PyPI
working-directory: sgl-kernel
run: |
pip install twine
python3 -m twine upload --skip-existing dist/* -u __token__ -p ${{ secrets.PYPI_TOKEN }}
./build.sh "${{ matrix.python-version }}" "${{ matrix.cuda-version }}" ${{ matrix.arch == 'aarch64' && 'aarch64' || '' }}
- name: Upload artifacts
uses: actions/upload-artifact@v4
with:
name: wheel-python${{ matrix.python-version }}-cuda${{ matrix.cuda-version }}-aarch64
name: wheel-python${{ matrix.python-version }}-cuda${{ matrix.cuda-version }}${{ matrix.arch == 'aarch64' && '-aarch64' || '' }}
path: sgl-kernel/dist/*
release-cu129-aarch64:
needs: build-cu129-aarch64
release-cu130:
needs: build-cu130-matrix
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
......@@ -174,7 +179,7 @@ jobs:
WHL_TOKEN: ${{ secrets.WHL_TOKEN }}
- name: Update wheel index
run: python3 scripts/update_kernel_whl_index.py --cuda 129
run: python3 scripts/update_kernel_whl_index.py --cuda 130
- name: Push wheel index
run: |
......
......@@ -47,7 +47,7 @@ PACKAGE_LIST = [
"tiktoken",
"anthropic",
"litellm",
"decord",
"decord2",
]
......
......@@ -4,7 +4,13 @@ set -euxo pipefail
IS_BLACKWELL=${IS_BLACKWELL:-0}
RUN_DEEPSEEK_V32=${RUN_DEEPSEEK_V32:-0}
CU_VERSION="cu128"
CU_VERSION="cu129"
if [ "$CU_VERSION" = "cu130" ]; then
NVRTC_SPEC="nvidia-cuda-nvrtc"
else
NVRTC_SPEC="nvidia-cuda-nvrtc-cu12"
fi
# Kill existing processes
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
......@@ -70,12 +76,11 @@ fi
# Show current packages
$PIP_CMD list
# Install additional dependencies
$PIP_CMD install mooncake-transfer-engine==0.3.6.post1 nvidia-cuda-nvrtc-cu12 py-spy scipy huggingface_hub[hf_xet] $PIP_INSTALL_SUFFIX
$PIP_CMD install mooncake-transfer-engine==0.3.6.post1 "${NVRTC_SPEC}" py-spy scipy huggingface_hub[hf_xet] $PIP_INSTALL_SUFFIX
if [ "$IS_BLACKWELL" != "1" ]; then
# For lmms_evals evaluating MMMU
git clone --branch v0.4.1 --depth 1 https://github.com/EvolvingLMMs-Lab/lmms-eval.git
git clone --branch v0.5 --depth 1 https://github.com/EvolvingLMMs-Lab/lmms-eval.git
$PIP_CMD install -e lmms-eval/ $PIP_INSTALL_SUFFIX
# Install xformers
......
......@@ -224,6 +224,12 @@ if (ENABLE_BELOW_SM90)
"-gencode=arch=compute_80,code=sm_80"
"-gencode=arch=compute_89,code=sm_89"
)
if (CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64")
list(APPEND SGL_KERNEL_CUDA_FLAGS
"-gencode=arch=compute_87,code=sm_87"
)
endif()
endif()
if ("${CUDA_VERSION}" VERSION_GREATER_EQUAL "12.8" OR SGL_KERNEL_ENABLE_SM100A)
......@@ -231,19 +237,24 @@ if ("${CUDA_VERSION}" VERSION_GREATER_EQUAL "12.8" OR SGL_KERNEL_ENABLE_SM100A)
"-gencode=arch=compute_100a,code=sm_100a"
"-gencode=arch=compute_120a,code=sm_120a"
)
# refer sm_121, sm_110 and sm_101 description https://github.com/pytorch/pytorch/pull/156176
if ("${CUDA_VERSION}" VERSION_GREATER_EQUAL "13.0")
list(APPEND SGL_KERNEL_CUDA_FLAGS
"-gencode=arch=compute_103a,code=sm_103a"
"-gencode=arch=compute_110a,code=sm_110a"
"-gencode=arch=compute_121a,code=sm_121a"
"--compress-mode=size"
)
if (CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64")
list(APPEND SGL_KERNEL_CUDA_FLAGS
"-gencode=arch=compute_110a,code=sm_110a"
"-gencode=arch=compute_121a,code=sm_121a"
)
endif()
else()
list(APPEND SGL_KERNEL_CUDA_FLAGS
"-gencode=arch=compute_101a,code=sm_101a"
)
if (CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64")
list(APPEND SGL_KERNEL_CUDA_FLAGS
"-gencode=arch=compute_101a,code=sm_101a"
)
endif()
endif()
endif()
......
......@@ -20,7 +20,10 @@ else
BUILDER_NAME="pytorch/manylinux2_28-builder"
fi
if [ ${CUDA_VERSION} = "12.9" ]; then
if [ ${CUDA_VERSION} = "13.0" ]; then
DOCKER_IMAGE="${BUILDER_NAME}:cuda${CUDA_VERSION}"
TORCH_INSTALL="pip install --no-cache-dir torch==2.9.0 --index-url https://download.pytorch.org/whl/cu130"
elif [ ${CUDA_VERSION} = "12.9" ]; then
DOCKER_IMAGE="${BUILDER_NAME}:cuda${CUDA_VERSION}"
TORCH_INSTALL="pip install --no-cache-dir torch==2.8.0 --index-url https://download.pytorch.org/whl/cu129"
elif [ ${CUDA_VERSION} = "12.8" ]; then
......@@ -148,6 +151,8 @@ docker run --rm \
export CUDA_VERSION=${CUDA_VERSION} && \
mkdir -p /usr/lib/${ARCH}-linux-gnu/ && \
ln -s /usr/local/cuda-${CUDA_VERSION}/targets/${LIBCUDA_ARCH}-linux/lib/stubs/libcuda.so /usr/lib/${ARCH}-linux-gnu/libcuda.so && \
export CPLUS_INCLUDE_PATH=/usr/local/cuda/include/cccl${CPLUS_INCLUDE_PATH:+:${CPLUS_INCLUDE_PATH}} && \
export C_INCLUDE_PATH=/usr/local/cuda/include/cccl${C_INCLUDE_PATH:+:${C_INCLUDE_PATH}} && \
cd /sgl-kernel && \
ls -la ${PYTHON_ROOT_PATH}/lib/python${PYTHON_VERSION}/site-packages/wheel/ && \
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment