"git@developer.sourcefind.cn:yangql/googletest.git" did not exist on "447d58b4ee8ea96b4757a5bb5f0b3be75af6c2a1"
Commit 23d4b8c8 authored by sunzhq2's avatar sunzhq2
Browse files

init aisbench-tools

parents
# AISBench benchmark 链接
- https://gitee.com/aisbench/benchmark
- git clone https://gitee.com/aisbench/benchmark.git
- AISBench benchmark安装:具体安装可以参考 AISBench 对应的README
# aisbench-tools工具
- monitor_gpu.sh 用于记录DCU的使用状态
- bench-test.sh(run.sh)性能(perf)与精度(eval)测试入口脚本,用法示例如下
- 随机输入输出
```
bash ./bench-test.sh --gpus 3 \
--model vllm_api_stream_chat \
--dataset synthetic_gen \
--model-name test_model \
--batch-size "8 16 32" \
--input-len 512 \
--max-out-len 512 \
--ais-model /data2/models/qwen3-8B \
--model-path /data2/models/qwen3-8B \
--host-port 23456
```
- 对应数据集
```
bash ./bench-test.sh --gpus 3 \
--model vllm_api_stream_chat \
--dataset aime2025_gen \
--model-name test_model \
--batch-size "16" \
--max-out-len 512 \
--ais-model /data2/models/qwen3-8B \
--model-path /data2/models/qwen3-8B \
--host-port 23456
```
- 仅跑eval指令
```
ais_bench --models vllm_api_stream_chat \
--datasets aime2025_gen --mode eval \
--debug --reuse xxxxxx
```
\ No newline at end of file
#!/bin/bash
# perf+eval back-to-back runner:
# run perf first, auto-extract the --reuse path from its log, then run eval.
set -e
# ============ Configuration (defaults; overridable via the CLI flags parsed below) ============
MONITOR_SCRIPT="./monitor_gpu.sh" # path to monitor_gpu.sh
TARGET_GPUS="3" # target GPU id(s)
AISBENCH_BASE_DIR="./"
# Inference parameters
MODEL="vllm_api_stream_chat"
DATASET="aime2025_gen"
BATCH_SIZES="32" # space-separated for multiple batch sizes, e.g. "1 4 8"
MAX_OUT_LEN="512"
INPUT_LEN="512" # input length (used only by synthetic datasets)
AIS_MODEL="/data2/models/qwen3-8B" # model name actually served by vLLM, e.g. "Qwen/Qwen2.5-7B-Instruct"; auto-detected when empty
MODEL_PATH="/data2/models/qwen3-8B" # local model path, e.g. "/data/models/Qwen2.5-7B"; left unset when empty
HOST_PORT="23456" # vLLM service port
# Config files that get rewritten in place per run (backed up first, restored after)
SYNTHETIC_CONFIG="${AISBENCH_BASE_DIR}ais_bench/datasets/synthetic/synthetic_config.py"
SYNTHETIC_CONFIG_BAK="${SYNTHETIC_CONFIG}.bak"
VLLM_CONFIG="${AISBENCH_BASE_DIR}ais_bench/benchmark/configs/models/vllm_api/${MODEL}.py"
VLLM_CONFIG_BAK="${VLLM_CONFIG}.bak"
# Output directory naming
MODEL_NAME="test_model"
# ---- CLI argument parsing ----
# Each recognized flag overrides the corresponding default above.
# Fix: the usage text previously omitted --output-dir and --monitor-script,
# both of which the parser accepts; diagnostics now go to stderr.
while [[ $# -gt 0 ]]; do
  case $1 in
    --gpus)
      TARGET_GPUS="$2"
      shift 2
      ;;
    --model)
      MODEL="$2"
      shift 2
      ;;
    --dataset)
      DATASET="$2"
      shift 2
      ;;
    --output-dir)
      BASE_OUTPUT_DIR="$2"
      shift 2
      ;;
    --monitor-script)
      MONITOR_SCRIPT="$2"
      shift 2
      ;;
    --batch-size)
      BATCH_SIZES="$2"
      shift 2
      ;;
    --max-out-len)
      MAX_OUT_LEN="$2"
      shift 2
      ;;
    --input-len)
      INPUT_LEN="$2"
      shift 2
      ;;
    --ais-model)
      AIS_MODEL="$2"
      shift 2
      ;;
    --model-path)
      MODEL_PATH="$2"
      shift 2
      ;;
    --host-port)
      HOST_PORT="$2"
      shift 2
      ;;
    --model-name)
      MODEL_NAME="$2"
      shift 2
      ;;
    *)
      echo "未知参数: $1" >&2
      # NB: backslash-newline inside the quoted string is line continuation,
      # so this prints as a single usage line. Continuations stay unindented
      # to avoid embedding extra spaces into the message.
      echo "用法: $0 [--gpus 4,5,6,7] \
[--model vllm_api_stream_chat] \
[--dataset aime2025_gen] \
[--model-name test_model] \
[--batch-size \"1 4 8\"] \
[--max-out-len 512] \
[--input-len 512] \
[--ais-model Qwen/Qwen2.5-7B-Instruct] \
[--model-path /data/models/Qwen2.5-7B] \
[--host-port 8080] \
[--output-dir ./test_outputs] \
[--monitor-script ./monitor_gpu.sh]" >&2
      exit 1
      ;;
  esac
done
# Derive the output sub-directory when the caller has not preset SUB_DIR:
# synthetic runs encode input/output lengths, everything else uses the dataset name.
if [ -z "${SUB_DIR:-}" ]; then
  case "$DATASET" in
    synthetic_gen) SUB_DIR="synthetic_gen/input-${INPUT_LEN}-output-${MAX_OUT_LEN}" ;;
    *)             SUB_DIR="${DATASET}" ;;
  esac
fi

# Startup banner summarizing the effective configuration.
echo "=========================================="
echo "Perf + Eval 连续运行脚本"
echo "=========================================="
echo "目标GPU: $TARGET_GPUS"
echo "模型: $MODEL"
echo "模型名: $MODEL_NAME"
echo "数据集: $DATASET"
echo "子目录: $SUB_DIR"
echo "BatchSizes: $BATCH_SIZES"
echo "MaxOutLen: $MAX_OUT_LEN"
case "$DATASET" in
  *synthetic*) echo "InputLen: $INPUT_LEN" ;;
esac
echo "=========================================="
echo ""

OVERALL_EXIT=0
IS_SYNTHETIC=0
case "$DATASET" in
  *synthetic*)
    IS_SYNTHETIC=1
    echo ">>> 检测到synthetic数据集,将动态修改 synthetic_config.py 并仅运行Perf"
    ;;
esac
echo "[Setup] 备份 vllm_api_stream_chat.py ..."
cp "$VLLM_CONFIG" "$VLLM_CONFIG_BAK"
# Fix: the script runs under `set -e` with no trap, so any mid-run failure
# used to abort without restoring the clobbered config files. Restore both
# backups on every exit path; the explicit cleanup at the end of the script
# removes the backups first, making this trap a harmless no-op then.
restore_config_backups() {
  if [ -f "$VLLM_CONFIG_BAK" ]; then
    mv "$VLLM_CONFIG_BAK" "$VLLM_CONFIG"
  fi
  if [ -f "$SYNTHETIC_CONFIG_BAK" ]; then
    mv "$SYNTHETIC_CONFIG_BAK" "$SYNTHETIC_CONFIG"
  fi
}
trap restore_config_backups EXIT
# ============ Main loop: one perf(+eval) cycle per batch size ============
# Fix: a --output-dir passed on the CLI was previously overwritten inside the
# loop and silently ignored; capture it once here as the output root.
OUTPUT_ROOT="${BASE_OUTPUT_DIR:-./test_outputs}"
for BS in $BATCH_SIZES; do
  # Everything the embedded `python3 -c` config writers read via os.environ.
  export BS INPUT_LEN MAX_OUT_LEN AIS_MODEL MODEL_PATH HOST_PORT SYNTHETIC_CONFIG VLLM_CONFIG
  # Synthetic runs ignore EOS so generation reaches max_out_len; values are
  # Python literals spliced verbatim into the generated config.
  if [ $IS_SYNTHETIC -eq 1 ]; then
    export IGNORE_EOS="True"
  else
    export IGNORE_EOS="False"
  fi
  TIMESTAMP=$(date +"%Y%m%d_%H%M%S")
  BASE_OUTPUT_DIR="${OUTPUT_ROOT}/${MODEL_NAME}/${SUB_DIR}/bs_${BS}_bench_results_${TIMESTAMP}"
  mkdir -p "$BASE_OUTPUT_DIR"
  BASE_OUTPUT_DIR=$(cd "$BASE_OUTPUT_DIR" && pwd)  # absolutize for --reuse later
  echo ""
  echo "###############################################"
  echo "### BatchSize=$BS"
  echo "### 输出目录: $BASE_OUTPUT_DIR"
  echo "###############################################"
  echo ""
  # ============ Rewrite vllm_api_stream_chat.py for this batch size ============
  echo "[BS=$BS] 重写 vllm_api_stream_chat.py: batch_size=$BS, max_out_len=$MAX_OUT_LEN, model=$AIS_MODEL, model_path=$MODEL_PATH, host_port=$HOST_PORT, ignore_eos=$IGNORE_EOS"
  python3 -c "
import os
bs = int(os.environ.get('BS', '1'))
max_out_len = int(os.environ.get('MAX_OUT_LEN', '512'))
model = os.environ.get('AIS_MODEL', '')
model_path = os.environ.get('MODEL_PATH', '')
host_port = int(os.environ.get('HOST_PORT', '8080'))
ignore_eos = os.environ.get('IGNORE_EOS', 'False')
config_path = os.environ.get('VLLM_CONFIG', '')
content = '''from ais_bench.benchmark.models import VLLMCustomAPIChatStream
from ais_bench.benchmark.utils.model_postprocessors import extract_non_reasoning_content
models = [
    dict(
        attr=\"service\",
        type=VLLMCustomAPIChatStream,
        abbr='vllm-api-stream-chat',
        path=\"%s\",
        model=\"%s\",
        request_rate = 0,
        retry = 2,
        host_ip = \"localhost\",
        host_port = %d,
        max_out_len = %d,
        batch_size = %d,
        trust_remote_code=True,
        generation_kwargs = dict(
            temperature = 0.5,
            top_k = 10,
            top_p = 0.95,
            seed = None,
            repetition_penalty = 1.03,
            ignore_eos = %s,
        ),
        pred_postprocessor=dict(type=extract_non_reasoning_content)
    )
]
''' % (model_path, model, host_port, max_out_len, bs, ignore_eos)
with open(config_path, 'w') as f:
    f.write(content)
print('vllm_api_stream_chat.py 已更新')
"
  echo ""
  # ============ Synthetic dataset: rewrite its config too ============
  if [ $IS_SYNTHETIC -eq 1 ]; then
    echo "[BS=$BS] 修改 synthetic_config.py: Type=string, RequestCount=$BS, InputLen=$INPUT_LEN, OutputLen=$MAX_OUT_LEN"
    cp "$SYNTHETIC_CONFIG" "$SYNTHETIC_CONFIG_BAK"
    # NOTE(review): input_min == input_max (both input_len - 8), so the
    # "uniform" input range collapses to a single length — confirm intended.
    python3 -c "
import os
bs = int(os.environ.get('BS', '1'))
input_len = int(os.environ.get('INPUT_LEN', '512'))
max_out_len = int(os.environ.get('MAX_OUT_LEN', '512'))
config_path = os.environ.get('SYNTHETIC_CONFIG', '')
input_min = max(1, input_len - 8)
input_max = max(1, input_len - 8)
content = '''synthetic_config = {
    \"Type\": \"string\",
    \"RequestCount\": %d,
    \"TrustRemoteCode\": False,
    \"StringConfig\": {
        \"Input\": {
            \"Method\": \"uniform\",
            \"Params\": {\"MinValue\": %d, \"MaxValue\": %d}
        },
        \"Output\": {
            \"Method\": \"gaussian\",
            \"Params\": {\"Mean\": 100, \"Var\": 200, \"MinValue\": %d, \"MaxValue\": %d}
        }
    },
    \"TokenIdConfig\": {
        \"RequestSize\": 10
    }
}
''' % (bs, input_min, input_max, max_out_len, max_out_len)
with open(config_path, 'w') as f:
    f.write(content)
print('synthetic_config.py 已更新')
"
    echo ""
  fi
  # ============ Step 1: run Perf (with GPU monitoring) ============
  echo "=========================================="
  echo "[BS=$BS] 第一步: 运行 Perf 测试(带GPU监控)"
  echo "=========================================="
  PERF_DIR="$BASE_OUTPUT_DIR/perf"
  mkdir -p "$PERF_DIR"
  set +e  # capture the perf exit code ourselves instead of dying under -e
  bash "$MONITOR_SCRIPT" \
    --gpus "$TARGET_GPUS" \
    --log-name perf_test.log \
    --output-dir "$PERF_DIR" \
    --bench-dir "$AISBENCH_BASE_DIR" \
    ais_bench \
    --models "$MODEL" \
    --datasets "$DATASET" \
    --mode perf \
    --debug
  PERF_EXIT_CODE=$?
  set -e
  if [ $PERF_EXIT_CODE -ne 0 ]; then
    echo ""
    echo "❌ [BS=$BS] Perf 测试失败 (退出码: $PERF_EXIT_CODE),跳过eval"
    OVERALL_EXIT=1
    if [ $IS_SYNTHETIC -eq 1 ] && [ -f "$SYNTHETIC_CONFIG_BAK" ]; then
      mv "$SYNTHETIC_CONFIG_BAK" "$SYNTHETIC_CONFIG"
    fi
    continue
  fi
  echo ""
  echo "✓ [BS=$BS] Perf 测试完成"
  # ============ Synthetic dataset: restore config, skip eval ============
  if [ $IS_SYNTHETIC -eq 1 ]; then
    if [ -f "$SYNTHETIC_CONFIG_BAK" ]; then
      mv "$SYNTHETIC_CONFIG_BAK" "$SYNTHETIC_CONFIG"
      echo "[BS=$BS] 已恢复 synthetic_config.py,跳过Eval"
    fi
    echo ""
    continue
  fi
  # ============ Extract the --reuse path from the perf log ============
  echo ""
  echo "=========================================="
  echo "[BS=$BS] 获取 ais_bench 输出路径用于 --reuse"
  echo "=========================================="
  # Fix: the old `grep … | xargs basename` aborted the whole script under
  # `set -e` when the log had no match (basename errors on empty input),
  # skipping the diagnostics below. Guard the pipeline and take the basename
  # with parameter expansion instead.
  DIR_NAME=$(grep -oP 'outputs/default/\d{8}_\d{6}' "$PERF_DIR/perf_test.log" | head -1 || true)
  DIR_NAME=${DIR_NAME##*/}  # keep only the trailing YYYYMMDD_HHMMSS component
  REUSE_DIR="$PERF_DIR/aisbench_output_${DIR_NAME}"
  if [ -z "$DIR_NAME" ] || [ ! -d "$REUSE_DIR" ]; then
    echo "❌ [BS=$BS] 错误: --reuse 路径不存在: $REUSE_DIR"
    echo ""
    echo "调试信息:"
    echo "提取到的时间戳: $DIR_NAME"
    echo ""
    echo "perf 目录内容:"
    ls -la "$PERF_DIR/"
    OVERALL_EXIT=1
    continue
  fi
  echo "[BS=$BS] 获取到 --reuse 路径: $REUSE_DIR"
  echo ""
  # ============ Step 2: run Eval (no GPU monitoring) ============
  echo "=========================================="
  echo "[BS=$BS] 第二步: 运行 Eval 测试(无GPU监控)"
  echo "=========================================="
  EVAL_DIR="$BASE_OUTPUT_DIR/eval"
  mkdir -p "$EVAL_DIR"
  echo "等待GPU资源释放..."
  sleep 10
  EVAL_LOG="$EVAL_DIR/eval_test.log"
  set +e
  ais_bench \
    --models "$MODEL" \
    --datasets "$DATASET" \
    --mode eval \
    --debug \
    --reuse "$REUSE_DIR" \
    > "$EVAL_LOG" 2>&1
  EVAL_EXIT_CODE=$?
  set -e
  if [ $EVAL_EXIT_CODE -eq 0 ]; then
    echo "✓ [BS=$BS] Eval 测试完成"
  else
    echo "⚠ [BS=$BS] Eval 测试退出码: $EVAL_EXIT_CODE"
    OVERALL_EXIT=1
  fi
done
# Restore the original vLLM model config from its backup
# (the backup only exists if the earlier cp succeeded).
echo "[Cleanup] 恢复 vllm_api_stream_chat.py ..."
if [ -f "$VLLM_CONFIG_BAK" ]; then
mv "$VLLM_CONFIG_BAK" "$VLLM_CONFIG"
echo "[Cleanup] vllm_api_stream_chat.py 已恢复"
fi
echo ""
echo "=========================================="
echo "全部运行完成"
echo "=========================================="
# OVERALL_EXIT is 0 only if every batch size's perf (and eval, when run) succeeded;
# it becomes the script's exit code so CI callers can detect partial failures.
if [ $OVERALL_EXIT -eq 0 ]; then
echo "✓ 所有BatchSize测试完成"
else
echo "⚠ 部分BatchSize测试出现问题,请检查输出"
fi
exit $OVERALL_EXIT
\ No newline at end of file
This diff is collapsed.
# Example invocation: perf + eval on the aime2025 dataset.
# NOTE(review): --input-len is ignored for non-synthetic datasets — presumably
# kept here for easy switching to the synthetic variant below; confirm.
bash ./bench-test.sh --gpus 3 \
--model vllm_api_stream_chat \
--dataset aime2025_gen \
--model-name test_model \
--batch-size "16" \
--input-len 512 \
--max-out-len 512 \
--ais-model /data2/models/qwen3-8B \
--model-path /data2/models/qwen3-8B \
--host-port 23456
# Alternative (commented out): synthetic random-input perf sweep over several batch sizes.
# bash ./bench-test.sh --gpus 3 \
# --model vllm_api_stream_chat \
# --dataset synthetic_gen \
# --model-name test_model \
# --batch-size "8 16 32" \
# --input-len 512 \
# --max-out-len 512 \
# --ais-model /data2/models/qwen3-8B \
# --model-path /data2/models/qwen3-8B \
# --host-port 23456
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment