#!/usr/bin/env bash
set -xe
# Usage example: CUDA_VISIBLE_DEVICES=0 bash run_benchmark_det.sh ${run_mode} ${batch_size} ${fp_item} ${max_epoch} ${model_item}
# Parameter description
function _set_params(){
    run_mode=${1:-"sp"}          # sp: single GPU | mp: multi GPU
    batch_size=${2:-"64"}
    fp_item=${3:-"fp32"}         # fp32|fp16
    max_epoch=${4:-"10"}         # optional; change it here if training should stop early
    model_item=${5:-"model_item"}
    run_log_path=${TRAIN_LOG_DIR:-$(pwd)}  # TRAIN_LOG_DIR is set later by QA
#   Parameters required for log parsing
    base_batch_size=${batch_size}
    mission_name="OCR"
    direction_id="0"
    ips_unit="instance/sec"
    skip_steps=2                 # log parsing: skip the first few steps, which take longer for some models        (required)
    keyword="ips:"               # log parsing: keyword that marks the lines containing the throughput data        (required)
    index="1"
    model_name=${model_item}_${run_mode}_bs${batch_size}_${fp_item}        # model_item matches the yml config file name; model_name is used when results are stored and shown on the dashboard
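#   Illustrative only (assumed log format, not taken from an actual run): analysis.py is expected to
#   pick throughput from lines such as
#       ... epoch: [1/10] ... loss: 2.35 ... ips: 95.2 samples/sec
#   A rough manual check of the same filter, skipping the first skip_steps matches, could be:
#       grep "${keyword}" ${log_file} | tail -n +$((skip_steps + 1))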
#   No changes needed below
    device=${CUDA_VISIBLE_DEVICES//,/ }
    arr=(${device})
    num_gpu_devices=${#arr[*]}
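    # Worked example (illustrative): CUDA_VISIBLE_DEVICES="0,1,2,3" yields device="0 1 2 3",
    # arr=(0 1 2 3) and num_gpu_devices=4.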
    log_file=${run_log_path}/${model_item}_${run_mode}_bs${batch_size}_${fp_item}_${num_gpu_devices}
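    # With the defaults above and a hypothetical model_item such as det_mv3_db on one GPU,
    # this expands to e.g. ${run_log_path}/det_mv3_db_sp_bs64_fp32_1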
}
function _train(){
    echo "Train on ${num_gpu_devices} GPUs"
    echo "current CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES, gpus=$num_gpu_devices, batch_size=$batch_size"

    train_cmd="-c configs/det/${model_item}.yml -o Train.loader.batch_size_per_card=${batch_size} Global.epoch_num=${max_epoch} Global.eval_batch_step=[0,20000] Global.print_batch_step=2"
    case ${run_mode} in
      sp) 
        train_cmd="python tools/train.py "${train_cmd}""
        ;;
      mp)
        train_cmd="python -m paddle.distributed.launch --log_dir=./mylog --gpus=$CUDA_VISIBLE_DEVICES tools/train.py ${train_cmd}"
        ;;
      *) echo "choose run_mode(sp or mp)"; exit 1;
    esac
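
    # Fully expanded example (illustrative; det_mv3_db is an assumed model_item, with
    # CUDA_VISIBLE_DEVICES=0,1,2,3 in mp mode and the default batch_size/max_epoch):
    #   python -m paddle.distributed.launch --log_dir=./mylog --gpus=0,1,2,3 tools/train.py \
    #       -c configs/det/det_mv3_db.yml -o Train.loader.batch_size_per_card=64 \
    #       Global.epoch_num=10 Global.eval_batch_step=[0,20000] Global.print_batch_step=2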
# No changes needed below
    timeout 15m ${train_cmd} > ${log_file} 2>&1
    if [ $? -ne 0 ]; then
        echo -e "${model_name}, FAIL"
        export job_fail_flag=1
    else
        echo -e "${model_name}, SUCCESS"
        export job_fail_flag=0
    fi

    if [ $run_mode = "mp" -a -d mylog ]; then
        rm ${log_file}
        cp mylog/workerlog.0 ${log_file}
    fi
}

source ${BENCHMARK_ROOT}/scripts/run_model.sh      # run_model.sh parses performance data from benchmark-compliant logs with the analysis.py script; for integration it can be downloaded from the benchmark repo: https://github.com/PaddlePaddle/benchmark/blob/master/scripts/run_model.sh. If you only want to produce training logs without integration, comment out this line, but re-enable it before submitting.
_set_params $@
#_train      # uncomment (and comment out _run) to produce training logs only, without parsing
_run         # defined in run_model.sh; it calls _train internally. If you only want training logs without integration, comment out this line, but re-enable it before submitting.
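
# Illustrative invocations (det_mv3_db is an assumed model_item; any config under configs/det/ should work):
#   single GPU: CUDA_VISIBLE_DEVICES=0       bash run_benchmark_det.sh sp 64 fp32 1 det_mv3_db
#   multi GPU:  CUDA_VISIBLE_DEVICES=0,1,2,3 bash run_benchmark_det.sh mp 64 fp32 1 det_mv3_db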