Commit 21591ae8 authored by jerrrrry

Update .gitlab-ci.yml

parent faf4ee06
@@ -5,51 +5,86 @@ image: image.sourcefind.cn:5000/dcu/admin/base/vllm:0.9.2-ubuntu22.04-dtk25.04.1
stages:
- test
# === The only job: run the benchmark using pre-staged files ===
# === The only job: full one-time setup + serial for-loop testing ===
benchmark_hunyuan-dit:
stage: test
tags:
- demos
# [Key change 1] Use the matrix strategy to create multiple parallel jobs automatically
# GitLab creates a separate job for each value in the BATCH_SIZE list
parallel:
matrix:
- BATCH_SIZE: [1, 2, 4, 8]
script:
- echo "========================================="
- echo "Step 1:Setting up the environment for BATCH_SIZE = $BATCH_SIZE"
- echo "Step 1:Setting up the environment from /workspace/packages/hunyuan-dit (One-time setup)"
- echo "========================================="
# Define the root directory of the pre-staged files
- export PACKAGE_DIR="/workspace/packages/hunyuan-dit"
# Extract the library archives from the pre-staged directory
# Extract the library archives from the pre-staged directory (these steps run only once!)
- tar -xzf "$PACKAGE_DIR/hipblaslt-install0925.tar.gz"
- tar -xzf "$PACKAGE_DIR/package_0915_ubuntu.tar.gz"
- echo "========================================="
- echo "Step 2:Installing Python packages from /workspace/packages/hunyuan-dit"
- echo "Step 2:Installing Python packages from /workspace/packages/hunyuan-dit (One-time setup)"
- echo "========================================="
# Install the Python packages from the pre-staged directory
# Install the Python packages from the pre-staged directory (these steps also run only once!)
- pip install "$PACKAGE_DIR/apex-1.5.0+das.opt1.dtk25041-cp310-cp310-linux_x86_64.whl" "$PACKAGE_DIR/deepspeed-0.14.2+das.opt1.dtk25041-cp310-cp310-manylinux_2_28_x86_64.whl" "$PACKAGE_DIR/lightop-0.5.0+das.dtk25041.unknown-cp310-cp310-linux_x86_64.whl"
- echo "========================================="
- echo "Step 3:Running the benchmark with BATCH_SIZE = $BATCH_SIZE"
- echo "Step 3:Starting serial benchmark with a for-loop"
- echo "========================================="
# [Key] Define the list of batch sizes to test
- BATCH_SIZES=(1 2 4 8)
# Create a final artifact directory to hold the results from all batch sizes
- mkdir -p all_results
# [Key] Execute the runs serially with a for loop
- |
for bs in "${BATCH_SIZES[@]}"; do
echo "============================================================"
echo ">>> Running benchmark for BATCH_SIZE = $bs"
echo "============================================================"
# Set the environment variables for this run (needed on every loop iteration)
export LD_LIBRARY_PATH=/workspace/packages/hunyuan-dit/hipblaslt-install/lib/:$LD_LIBRARY_PATH
export LD_LIBRARY_PATH=/workspace/packages/hunyuan-dit/package/miopen/lib/:$LD_LIBRARY_PATH
# Run the test script, passing $bs as the batch-size argument
python sample_t2i_dcu.py \
--model-root /workspace/OEM_ADVTG_TEST/hunyuan/HunyuanDiT-v1.2/ \
--batch-size $bs \
--infer-mode fa \
--prompt "青花瓷风格,一只可爱的哈士奇" \
--no-enhance \
--load-key module \
--image-size 1024 1024 \
--infer-steps 20
# [Important] Collect the artifacts from this iteration
# Check that the results directory exists and is not empty
if [ -d "results" ] && [ "$(ls -A results)" ]; then
echo ">>> Saving results for batch size $bs..."
# Rename this run's results directory and move it into all_results
mv results "all_results/results_bs_$bs"
else
echo ">>> No results found for batch size $bs, skipping."
fi
# Clean up to prepare for the next iteration (optional, but recommended)
rm -rf results # Make sure no empty leftover directory remains
echo ">>> Benchmark for batch size $bs finished."
echo ""
done
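# At this point all_results/ holds one results_bs_<N> subdirectory for every batch size that produced output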
- echo "========================================="
- echo "All benchmarks finished."
- echo "Final result directory structure:"
- ls -R all_results # Print the final directory structure for easy inspection
- echo "========================================="
# Set the environment variables; the model path points to the pre-staged directory
- export LD_LIBRARY_PATH=/workspace/packages/hunyuan-dit/hipblaslt-install/lib/:$LD_LIBRARY_PATH
- export LD_LIBRARY_PATH=/workspace/packages/hunyuan-dit/package/miopen/lib/:$LD_LIBRARY_PATH
# [Key change 2] Use the matrix variable $BATCH_SIZE
# Run the test script; batch-size changes from the fixed value 4 to the dynamic $BATCH_SIZE
- python sample_t2i_dcu.py --model-root /workspace/OEM_ADVTG_TEST/hunyuan/HunyuanDiT-v1.2/ --batch-size $BATCH_SIZE --infer-mode fa --prompt "青花瓷风格,一只可爱的哈士奇" --no-enhance --load-key module --image-size 1024 1024 --infer-steps 20
# [Key change 3] Improve the artifact definition
# [Optimization] Define the final artifacts
artifacts:
# Name the artifact with the batch size so it is easy to distinguish in the GitLab UI
name: "benchmark-hunyuan-dit-bs-$BATCH_SIZE"
# [Optimization] Target the 'results/' directory, since your Python script saves the images there
# This is more precise than "*.png" and ensures that only the files we want are collected
name: "benchmark-hunyuan-dit-serial-results"
# Upload only the final directory that holds the results of all batch sizes
paths:
- results/
- all_results/
expire_in: 1 week
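
For reference, the artifact-collection logic of the loop above can be exercised locally before pushing the pipeline. The sketch below is a minimal dry run under stated assumptions: generate_stub is a hypothetical stand-in for sample_t2i_dcu.py, and the dummy file names it creates are illustrative only; the batch sizes, the results/ guard, and the all_results/results_bs_<N> layout mirror the job script itself.

#!/usr/bin/env bash
# Minimal local dry run of the artifact-collection loop (illustration only).
# generate_stub is a hypothetical stand-in for sample_t2i_dcu.py; it just writes dummy files.
set -euo pipefail

BATCH_SIZES=(1 2 4 8)
mkdir -p all_results

generate_stub() {
  local bs="$1"
  mkdir -p results
  for i in $(seq 1 "$bs"); do
    touch "results/image_${i}.png"   # pretend the benchmark saved one image per batch element
  done
}

for bs in "${BATCH_SIZES[@]}"; do
  generate_stub "$bs"
  # Same guard and move as in the CI job: keep only non-empty results directories
  if [ -d "results" ] && [ "$(ls -A results)" ]; then
    mv results "all_results/results_bs_$bs"
  fi
  rm -rf results
done

ls -R all_results   # expected: all_results/results_bs_1 ... all_results/results_bs_8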