name: e2e_ppo_trainer

on:
  # Trigger the workflow on pull requests targeting the main branches
  pull_request:
    branches:
      - main
      - v0.2.x
    paths:
      - "**/*.py"
      # Entrypoints
      - ".github/workflows/e2e_ppo_trainer.yml"
      - "examples/data_preprocess/gsm8k.py"
      - "examples/data_preprocess/geo3k.py"
      - "tests/e2e/ppo_trainer"
      - "verl/trainer/main_ppo.py"
      - "verl/trainer/config/ppo_trainer.yaml"
      - "!examples"
      - "!verl/trainer/main_*.py"
      - "!verl/trainer/fsdp_sft_trainer.py"
      # Recipes
      - "!recipe"
      # Megatron
      - "!verl/workers/**/megatron_*.py"

# Cancel jobs on the same ref if a new one is triggered
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}

# Declare read-only permissions for repository contents
permissions:
  contents: read

jobs:
  e2e_ppo_trainer_vllm:
    runs-on: [L20x8]
    timeout-minutes: 40 # Increase this timeout value as needed
    env:
      HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
      HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
      NO_PROXY: "localhost,127.0.0.1"
      HF_HUB_ENABLE_HF_TRANSFER: "0" # This is more stable
    container:
      image: whatcanyousee/verl:ngc-th2.6.0-cu124-vllm0.8.2-mcore0.11.0-te2.0
      options: --gpus all --shm-size=10g
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          fetch-depth: 0
      - name: Install the current repository
        run: |
          pip3 install -e .[test,vllm]
      - name: Prepare GSM8K dataset
        run: |
          ray stop --force
          python3 examples/data_preprocess/gsm8k.py
      # Function RM
      - name: Running GSM8K E2E training tests on 8 L20 GPUs with rmpad using function rm with validation and saving
        run: |
          ray stop --force
          VAL_BEFORE_TRAIN=True TEST_FREQ=1 SAVE_FREQ=1 bash tests/e2e/ppo_trainer/run_function_reward.sh
      - name: Running GSM8K E2E training tests on 8 L20 GPUs with rmpad using function rm after resuming
        run: |
          ray stop --force
          RESUME_MODE=auto bash tests/e2e/ppo_trainer/run_function_reward.sh
      - name: Running GSM8K E2E without rmpad using function rm
        run: |
          ray stop --force
          RM_PAD=False bash tests/e2e/ppo_trainer/run_function_reward.sh
      - name: Running GSM8K E2E training tests on 8 L20 GPUs with rmpad using function rm (GRPO)
        run: |
          ray stop --force
          ADV_ESTIMATOR=grpo USE_KL=True bash tests/e2e/ppo_trainer/run_function_reward.sh
      - name: Running GSM8K E2E training tests on 8 L20 GPUs with rmpad using function rm (ReMax)
        run: |
          ray stop --force
          ADV_ESTIMATOR=remax USE_KL=True bash tests/e2e/ppo_trainer/run_function_reward.sh
      - name: Running GSM8K E2E training tests on 8 L20 GPUs with rmpad using customized reward function
        run: |
          ray stop --force
          CUSTOM_REWARD_FN=True bash tests/e2e/ppo_trainer/run_function_reward.sh
      - name: Running GSM8K E2E training tests on 8 L20 GPUs with rmpad using function rm with in-reward kl and kl loss
        run: |
          ray stop --force
          USE_KL=True bash tests/e2e/ppo_trainer/run_function_reward.sh
      - name: Running GSM8K E2E training tests with FSDP on 8 L20 GPUs (DeepSeek)
        run: |
          ray stop --force
          MODEL_ID=deepseek-ai/deepseek-coder-1.3b-instruct bash tests/e2e/ppo_trainer/run_function_reward.sh
      # Model RM
      - name: Running GSM8K E2E with rmpad using model rm
        run: |
          ray stop --force
          bash tests/e2e/ppo_trainer/run_model_reward.sh
      - name: Running GSM8K E2E without rmpad using model rm
        run: |
          ray stop --force
          RM_PAD=False bash tests/e2e/ppo_trainer/run_model_reward.sh
      - name: Running GSM8K E2E with rmpad using model rm and ulysses sp=2
        run: |
          ray stop --force
          SP_SIZE=2 bash tests/e2e/ppo_trainer/run_model_reward.sh
      - name: Running GSM8K E2E with rmpad using model rm and dynamic batch size
        run: |
          ray stop --force
          SEQ_BALANCE=True bash tests/e2e/ppo_trainer/run_model_reward.sh
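      # Note: the model-rm steps all reuse run_model_reward.sh and toggle one
      # feature per step via environment variables (RM_PAD, SP_SIZE, SEQ_BALANCE,
      # LIGER); variables left unset presumably fall back to defaults inside the script.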
      - name: Running GSM8K E2E with rmpad using model rm with Liger Kernel enabled
        run: |
          ray stop --force
          LIGER=True bash tests/e2e/ppo_trainer/run_model_reward.sh

  e2e_ppo_trainer_vllm_vlm:
    runs-on: [L20x8]
    timeout-minutes: 40 # Increase this timeout value as needed
    env:
      HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
      HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
      NO_PROXY: "localhost,127.0.0.1"
      HF_HUB_ENABLE_HF_TRANSFER: "0" # This is more stable
    container:
      image: hiyouga/verl:ngc-th2.6.0-cu126-vllm0.8.3-flashinfer0.2.2-cxx11abi0
      options: --gpus all --shm-size=50g # Visual dataloader requires large memory
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          fetch-depth: 0
      - name: Install the current repository
        run: |
          pip3 install -e .[test,geo,vllm]
      # Geo3k
      - name: Prepare Geo3k dataset
        run: |
          ray stop --force
          python3 examples/data_preprocess/geo3k.py
      - name: Running Geo3k VLM E2E training tests on 8 L20 GPUs with rmpad using function rm
        run: |
          ray stop --force
          TRAIN_FILES=$HOME/data/geo3k/train.parquet VAL_FILES=$HOME/data/geo3k/test.parquet \
          MAX_PROMPT_LEN=1536 MAX_RESPONSE_LEN=1536 \
          MODEL_ID=Qwen/Qwen2-VL-2B-Instruct \
          ADV_ESTIMATOR=grpo RM_PAD=True USE_KL=True ENABLE_CHUNKED_PREFILL=False \
          bash tests/e2e/ppo_trainer/run_function_reward.sh

  e2e_ppo_trainer_sglang:
    runs-on: [L20x8]
    timeout-minutes: 40 # Increase this timeout value as needed
    env:
      HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
      HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
      NO_PROXY: "localhost,127.0.0.1"
      HF_HUB_ENABLE_HF_TRANSFER: "0" # This is more stable
    container:
      image: ocss884/verl-sglang:ngc-th2.5.1-cu126-sglang0.4.4.post4
      options: --gpus all --shm-size=10g
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          fetch-depth: 0
      - name: Install the current repository
        run: |
          pip3 install -e .[test,gpu,sglang] --no-deps
      - name: Prepare GSM8K dataset
        run: |
          ray stop --force
          python3 examples/data_preprocess/gsm8k.py
      - name: Running GSM8K E2E training tests on 8 L20 GPUs with rmpad using function rm and save ckpt
        run: |
          ray stop --force
          ENGINE=sglang bash tests/e2e/ppo_trainer/run_function_reward.sh

  e2e_ppo_trainer_sglang_vlm:
    runs-on: [L20x8]
    timeout-minutes: 40 # Increase this timeout value as needed
    env:
      HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
      HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
      NO_PROXY: "localhost,127.0.0.1"
      HF_HUB_ENABLE_HF_TRANSFER: "0" # This is more stable
    container:
      image: ocss884/verl-sglang:ngc-th2.5.1-cu126-sglang0.4.4.post4
      options: --gpus all --shm-size=50g # Visual dataloader requires large memory
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          fetch-depth: 0
      - name: Install the current repository
        run: |
          pip3 install -e .[test,geo,gpu,sglang]
      # Geo3k
      - name: Prepare Geo3k dataset
        run: |
          ray stop --force
          python3 examples/data_preprocess/geo3k.py
      - name: Running Geo3k VLM E2E training tests on 8 L20 GPUs with rmpad using function rm
        run: |
          ray stop --force
          TRAIN_FILES=$HOME/data/geo3k/train.parquet VAL_FILES=$HOME/data/geo3k/test.parquet \
          MAX_PROMPT_LEN=1536 MAX_RESPONSE_LEN=1536 \
          MODEL_ID=Qwen/Qwen2-VL-2B-Instruct \
          ADV_ESTIMATOR=grpo RM_PAD=True USE_KL=True ENABLE_CHUNKED_PREFILL=False \
          ENGINE=sglang bash tests/e2e/ppo_trainer/run_function_reward.sh
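
# Local reproduction (sketch, not part of CI): any job above can be approximated
# on a GPU machine by mirroring its steps, e.g. for the GRPO function-reward test:
#   pip3 install -e .[test,vllm]
#   python3 examples/data_preprocess/gsm8k.py
#   ray stop --force
#   ADV_ESTIMATOR=grpo USE_KL=True bash tests/e2e/ppo_trainer/run_function_reward.sh
# The [L20x8] runner label and container images assume a node with 8 L20 GPUs;
# adjust resources accordingly when running elsewhere.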