"vscode:/vscode.git/clone" did not exist on "aefc080e3037ec930ca45a9ec6731c75329cd876"
Unverified commit bc068e96, authored by Lianmin Zheng and committed by GitHub

[CI] Move AMD test to a separate file (#1500)

parent 8d4ed42a

name: PR Test (AMD)

on:
  push:
    branches: [ main ]
    paths:
      - "python/sglang/**"
      - "test/**"
  pull_request:
    branches: [ main ]
    paths:
      - "python/sglang/**"
      - "test/**"
  workflow_dispatch:
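
# Keep at most one run per ref active; a newer push cancels the in-flight run.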
concurrency:
  group: pr-test-${{ github.ref }}
  cancel-in-progress: true

jobs:
  accuracy-test-1-gpu:
    if: github.repository == 'sgl-project/sglang' || github.event_name == 'pull_request'
    runs-on: 1-gpu-runner-amd

    steps:
      - name: Checkout code
        uses: actions/checkout@v3

      - name: Install dependencies
        run: |
          pip install --upgrade pip
          pip install -e "python[all]" --no-deps
          git clone https://github.com/merrymercy/human-eval.git
          cd human-eval
          pip install -e .

      - name: Evaluate Accuracy
        timeout-minutes: 20
        run: |
          cd test/srt
          python3 test_eval_accuracy_large.py

  finish:
    needs: [
      accuracy-test-1-gpu
    ]
    runs-on: ubuntu-latest
    steps:
      - name: Finish
        run: echo "This is an empty step to ensure that all jobs are completed."
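
Because the workflow declares a workflow_dispatch trigger, it can also be started by hand. A minimal sketch with the GitHub CLI, assuming the file is saved as .github/workflows/pr-test-amd.yml (the path is not shown in this diff):

    # Manually dispatch the AMD test workflow against main.
    gh workflow run pr-test-amd.yml --ref main
    # Follow the run that was just started.
    gh run watch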
@@ -187,7 +187,6 @@ jobs:
           cd test/srt
           python3 -m unittest test_bench_latency.TestBenchLatency.test_moe_default
   accuracy-test-1-gpu:
     if: github.repository == 'sgl-project/sglang' || github.event_name == 'pull_request'
     runs-on: 1-gpu-runner
@@ -247,28 +246,6 @@ jobs:
           cd test/srt
           python3 test_data_parallelism.py
-  accuracy-test-1-gpu-amd:
-    if: github.repository == 'sgl-project/sglang' || github.event_name == 'pull_request'
-    runs-on: 1-gpu-runner-amd
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v3
-      - name: Install dependencies
-        run: |
-          pip install --upgrade pip
-          pip install -e "python[all]" --no-deps
-          git clone https://github.com/merrymercy/human-eval.git
-          cd human-eval
-          pip install -e .
-      - name: Evaluate Accuracy
-        timeout-minutes: 20
-        run: |
-          cd test/srt
-          python3 test_eval_accuracy_large.py
   finish:
     needs: [
       unit-test-frontend, unit-test-backend-part-1, unit-test-backend-part-2, unit-test-backend-part-3,
...
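
The removed accuracy-test-1-gpu-amd job is identical to the standalone workflow added above, so this change only relocates it. To reproduce the test outside CI, the same steps can be run by hand; a sketch assuming a checkout of sglang on a machine where an AMD GPU and its ROCm stack are already set up:

    # Mirror the CI steps from the workflow above, starting at the repo root.
    pip install --upgrade pip
    pip install -e "python[all]" --no-deps
    git clone https://github.com/merrymercy/human-eval.git
    cd human-eval && pip install -e .
    cd ../test/srt
    python3 test_eval_accuracy_large.py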