# # Tests layout

# Each folder under tests/ corresponds to a test category for a sub-namespace in verl. For instance:
# - `tests/trainer` for testing functionality related to `verl/trainer`
# - `tests/models` for testing functionality related to `verl/models`
# - ...

# There are a few folders with `special_` prefix, created for special purposes:
# - `special_distributed`: unit tests that must run with multiple GPUs
# - `special_e2e`: end-to-end tests with training/generation scripts
# - `special_npu`: tests for NPUs
# - `special_sanity`: a suite of quick sanity tests
# - `special_standalone`: a set of tests designed to run in dedicated environments

# Accelerators for tests
# - By default, tests are run with GPUs available, except for those under `special_npu` and any test script whose name ends with `on_cpu.py`.
# - Test scripts with the `on_cpu.py` name suffix are run on CPU resources in a Linux environment.

# # Workflow layout

# All CI tests are configured by yaml files in `.github/workflows/`. Here's an overview of all test configs:
# 1. A list of always-triggered CPU sanity tests: `check-pr-title.yml`, `secrets_scan.yml`, `pre-commit.yml`, `doc.yml`
# 2. Some heavy multi-GPU unit tests, such as `model.yml`, `vllm.yml`, `sgl.yml`
# 3. End-to-end tests: `e2e_*.yml`
# 4. Unit tests
#   - `cpu_unit_tests.yml`: runs pytest on all scripts matching the file name pattern `tests/**/test_*_on_cpu.py`
#   - `gpu_unit_tests.yml`: runs pytest on all test scripts whose file name does not end with `on_cpu.py`
#   - Since the cpu/gpu unit tests by default run all tests under `tests`, please make sure tests are manually excluded from them when
#     - a new workflow yaml is added to `.github/workflows`
#     - new tests are added to a workflow mentioned in 2.

name: e2e_ascend

on:
  # Trigger the workflow on push or pull request,
  # but only for the main and v0.* branches
  push:
    branches:
      - main
      - v0.*
  pull_request:
    branches:
      - main
    paths:
      - "**/*.py"
      - "requirements-npu.txt"
      # Other entrypoints
      - "!examples/**"
      - "!tests/**"
      - "!verl/trainer/main_*.py"
      - "!verl/trainer/fsdp_sft_trainer.py"
      # Recipes
      - "!recipe/**"
      # Entrypoints
      - ".github/workflows/e2e_ascend.yml"
      - "examples/data_preprocess/gsm8k.py"
      - "examples/data_preprocess/geo3k.py"
      - "tests/special_e2e/ppo_trainer"
      - "verl/trainer/main_ppo.py"
      - "verl/trainer/config/ppo_trainer.yaml"

# Cancel jobs on the same ref if a new one is triggered
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}

permissions:
  contents: read

jobs:
  test:
    name: verl Ascend test (self-host)
    runs-on: [self-hosted, npu-0]
    timeout-minutes: 40 # Increase this timeout value as needed
    container:
      image: crispig/verl_npu:cann8.1rc1-py3.10-torch2.5.1-vllm-ascend0.7.3.post1-250616
      volumes:
        - /usr/local/dcmi:/usr/local/dcmi
        - /usr/local/bin/npu-smi:/usr/local/bin/npu-smi
        - /usr/local/Ascend/driver/lib64/:/usr/local/Ascend/driver/lib64/
        - /usr/local/Ascend/driver/version.info:/usr/local/Ascend/driver/version.info
        - /etc/ascend_install.info:/etc/ascend_install.info
        # Use the self-hosted cache to speed up pip and model downloads
        # - /home/action/actions-runner/_work/cache:/github/home/.cache/
      options: >-
        --device /dev/davinci0
        --device /dev/davinci_manager
        --device /dev/devmm_svm
        --device /dev/hisi_hdc
        --network host
        --privileged
        --shm-size 16g
    env:
      HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
      HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
      NO_PROXY: "localhost,127.0.0.1,hf-mirror.com"
      HF_ENDPOINT: "https://hf-mirror.com"
      HF_HUB_ENABLE_HF_TRANSFER: "0" # This is more stable
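    # The env above routes Hugging Face downloads through the hf-mirror.com mirror (HF_ENDPOINT),
    # keeps that host off the proxy via NO_PROXY, and disables the hf_transfer accelerated
    # downloader (HF_HUB_ENABLE_HF_TRANSFER=0). Each training step below starts with
    # `ray stop --force` so that a stale Ray cluster left over from a previous step or an
    # aborted run does not interfere with the next one.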
    steps:
      - name: Check npu and CANN info
        run: |
          cat /usr/local/Ascend/ascend-toolkit/latest/"$(uname -i)"-linux/ascend_toolkit_install.info
          npu-smi info
      - name: Checkout volcengine/verl repo
        uses: actions/checkout@v4
      - name: Install the current repository
        run: |
          pip3 install hf_transfer peft
          pip3 install -r requirements-npu.txt
          pip install -e .
      - name: Install torchvision
        run: |
          pip install torchvision==0.20.1+cpu --index-url https://download.pytorch.org/whl/cpu
      - name: Prepare gsm8k dataset
        run: |
          ray stop --force
          python3 examples/data_preprocess/gsm8k.py
      - name: Prepare geo3k dataset
        run: |
          ray stop --force
          python3 examples/data_preprocess/geo3k.py
      - name: Running gsm8k e2e training tests with peft sft on ASCEND NPU
        run: |
          ray stop --force
          bash tests/special_npu/run_qwen2_5_05b_sft_peft_sp2.sh
          rm -rf $HOME/ckpts
      - name: Running gsm8k e2e training tests with GRPO on ASCEND NPU
        run: |
          ray stop --force
          bash tests/special_npu/run_qwen2_5_05b_grpo.sh
          rm -rf $HOME/ckpts
      - name: Running geo3k e2e training tests with GRPO on ASCEND NPU
        run: |
          ray stop --force
          bash tests/special_npu/run_qwen2_5_vl_3b_npu.sh
          rm -rf $HOME/ckpts
      - name: Running gsm8k e2e training tests with DAPO on ASCEND NPU
        run: |
          ray stop --force
          bash tests/special_npu/run_qwen2_5_05b_dapo.sh
          rm -rf $HOME/ckpts
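# To reproduce one of the e2e steps outside CI (a sketch, assuming an Ascend NPU host with the same
# container image, device mounts, and `pip install -e .` setup as in the steps above):
#
#   ray stop --force
#   python3 examples/data_preprocess/gsm8k.py        # prepare the dataset
#   bash tests/special_npu/run_qwen2_5_05b_grpo.sh   # gsm8k GRPO e2e test
#   rm -rf $HOME/ckpts                               # clean up checkpoints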