Unverified commit 74bc9184 authored by Yineng Zhang, committed by GitHub

minor: add random use case (#2408)

parent 0f8eb153
@@ -2,6 +2,10 @@ name: Experiment Runner
 
 on:
   workflow_dispatch:
+    inputs:
+      script:
+        description: "Experiment Runner Script"
+        default: "configs/sharegpt_config.yaml"
 
 concurrency:
   group: experiment-runner-${{ github.ref }}
@@ -20,7 +24,7 @@ jobs:
           bash scripts/ci_install_dependency.sh
 
       - name: Test experiment runner
-        timeout-minutes: 10
+        timeout-minutes: 120
         run: |
           cd test/srt
-          python3 experiment_runner.py --config configs/sharegpt_config.yaml
+          python3 experiment_runner.py --config ${{ inputs.script }}
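
With the new workflow_dispatch input, the config file is chosen at dispatch time instead of being hard-coded, and the timeout is raised from 10 to 120 minutes to accommodate longer experiment sweeps. As a hedged example, a run against the new config could be triggered from the GitHub CLI roughly like this (the workflow file name and config path below are assumptions for illustration, not confirmed by this diff):

# Hypothetical invocation; workflow file name and config path are assumed.
gh workflow run experiment-runner.yml -f script=configs/random_config.yaml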
tasks:
  - name: sglang-128-4
    server_cmd: python3 -m sglang.launch_server --model-path meta-llama/Llama-3.1-8B-Instruct --disable-radix-cache
    client_cmd: python3 -m sglang.bench_serving --backend sglang --dataset-name random --random-input 128 --random-output 4 --request-rate 24 --num-prompt 1440
  - name: vllm-128-4
    server_cmd: python3 -m vllm.entrypoints.openai.api_server --model meta-llama/Llama-3.1-8B-Instruct --disable-log-requests
    client_cmd: python3 -m sglang.bench_serving --backend vllm --dataset-name random --random-input 128 --random-output 4 --request-rate 24 --num-prompt 1440
  - name: sglang-2000-100
    server_cmd: python3 -m sglang.launch_server --model-path meta-llama/Llama-3.1-8B-Instruct --disable-radix-cache
    client_cmd: python3 -m sglang.bench_serving --backend sglang --dataset-name random --random-input 2000 --random-output 100 --request-rate 2 --num-prompt 120
  - name: vllm-2000-100
    server_cmd: python3 -m vllm.entrypoints.openai.api_server --model meta-llama/Llama-3.1-8B-Instruct --disable-log-requests
    client_cmd: python3 -m sglang.bench_serving --backend vllm --dataset-name random --random-input 2000 --random-output 100 --request-rate 2 --num-prompt 120
  - name: sglang-4000-200
    server_cmd: python3 -m sglang.launch_server --model-path meta-llama/Llama-3.1-8B-Instruct --disable-radix-cache
    client_cmd: python3 -m sglang.bench_serving --backend sglang --dataset-name random --random-input 4000 --random-output 200 --request-rate 8 --num-prompt 480
  - name: vllm-4000-200
    server_cmd: python3 -m vllm.entrypoints.openai.api_server --model meta-llama/Llama-3.1-8B-Instruct --disable-log-requests
    client_cmd: python3 -m sglang.bench_serving --backend vllm --dataset-name random --random-input 4000 --random-output 200 --request-rate 8 --num-prompt 480
  - name: sglang-32000-100
    server_cmd: python3 -m sglang.launch_server --model-path meta-llama/Llama-3.1-8B-Instruct --disable-radix-cache
    client_cmd: python3 -m sglang.bench_serving --backend sglang --dataset-name random --random-input 32000 --random-output 100 --request-rate 1 --num-prompt 60
  - name: vllm-32000-100
    server_cmd: python3 -m vllm.entrypoints.openai.api_server --model meta-llama/Llama-3.1-8B-Instruct --disable-log-requests
    client_cmd: python3 -m sglang.bench_serving --backend vllm --dataset-name random --random-input 32000 --random-output 100 --request-rate 1 --num-prompt 60
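
Each task in the new config pairs a server command with a bench_serving client at a fixed random input/output length. Note that in every task --num-prompt equals --request-rate × 60 (24 × 60 = 1440, 2 × 60 = 120, 8 × 60 = 480, 1 × 60 = 60), so each benchmark submits roughly one minute's worth of requests at the target rate. Below is a minimal sketch of how a config-driven runner might consume this file. It is illustrative only, not the repo's actual test/srt/experiment_runner.py, and it assumes PyYAML is installed; the 60-second warmup wait is a stand-in for proper readiness polling.

# Minimal sketch of a config-driven benchmark runner (illustrative only;
# the repo's actual test/srt/experiment_runner.py may differ).
import argparse
import shlex
import subprocess
import time

import yaml  # assumes PyYAML is installed


def run_task(task: dict, warmup_s: int = 60) -> None:
    """Start the server, wait for it to come up, run the client, then clean up."""
    server = subprocess.Popen(shlex.split(task["server_cmd"]))
    try:
        # Crude readiness wait; a real runner would poll the server's
        # health endpoint instead of sleeping a fixed interval.
        time.sleep(warmup_s)
        subprocess.run(shlex.split(task["client_cmd"]), check=True)
    finally:
        server.terminate()
        server.wait()


def main() -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", default="configs/sharegpt_config.yaml")
    args = parser.parse_args()

    with open(args.config) as f:
        config = yaml.safe_load(f)

    # Run each server/client pair in sequence, as the config lists them.
    for task in config["tasks"]:
        print(f"=== running {task['name']} ===")
        run_task(task)


if __name__ == "__main__":
    main()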