#!/bin/bash
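# Usage (assumed invocation, based on the positional arguments read below):
#   bash test.sh <params_file> <mode> [gpu_id]
#   e.g. bash test.sh params.txt lite_train_infer   # "params.txt" is a hypothetical config name
# gpu_id ($3) is only read when MODE is 'infer'.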
FILENAME=$1
# MODE must be one of ['lite_train_infer', 'whole_infer', 'whole_train_infer', 'infer']
MODE=$2

dataline=$(cat ${FILENAME})

# parse params
IFS=$'\n'
lines=(${dataline})
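# each non-empty line of ${FILENAME} is expected to be a single "key:value" entry;
# the parser helpers below split on ":" (so values themselves should not contain ":")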
function func_parser_key(){
    strs=$1
    IFS=":"
    array=(${strs})
    tmp=${array[0]}
    echo ${tmp}
}
function func_parser_value(){
    strs=$1
    IFS=":"
    array=(${strs})
    tmp=${array[1]}
    echo ${tmp}
}
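# example for the two parsers above, with a hypothetical config line "Global.epoch_num:2":
#   func_parser_key   "Global.epoch_num:2"  -> "Global.epoch_num"
#   func_parser_value "Global.epoch_num:2"  -> "2"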
function status_check(){
    last_status=$1   # the exit code
    run_command=$2
    run_log=$3
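    # append a colored pass/fail line for ${run_command} to ${run_log}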
    if [ $last_status -eq 0 ]; then
        echo -e "\033[33m Run successfully with command - ${run_command}!  \033[0m" | tee -a ${run_log}
    else
        echo -e "\033[33m Run failed with command - ${run_command}!  \033[0m" | tee -a ${run_log}
    fi
}

IFS=$'\n'
# The training params
model_name=$(func_parser_value "${lines[0]}")
python=$(func_parser_value "${lines[1]}")
gpu_list=$(func_parser_value "${lines[2]}")
autocast_list=$(func_parser_value "${lines[3]}")
autocast_key=$(func_parser_key "${lines[3]}")
epoch_key=$(func_parser_key "${lines[4]}")
epoch_num=$(func_parser_value "${lines[4]}")
save_model_key=$(func_parser_key "${lines[5]}")
train_batch_key=$(func_parser_key "${lines[6]}")
train_use_gpu_key=$(func_parser_key "${lines[7]}")
pretrain_model_key=$(func_parser_key "${lines[8]}")
pretrain_model_value=$(func_parser_value "${lines[8]}")

trainer_list=$(func_parser_value "${lines[9]}")
norm_trainer=$(func_parser_value "${lines[10]}")
pact_trainer=$(func_parser_value "${lines[11]}")
fpgm_trainer=$(func_parser_value "${lines[12]}")
distill_trainer=$(func_parser_value "${lines[13]}")

eval_py=$(func_parser_value "${lines[14]}")

save_infer_key=$(func_parser_key "${lines[15]}")
export_weight=$(func_parser_key "${lines[16]}")
norm_export=$(func_parser_value "${lines[17]}")
pact_export=$(func_parser_value "${lines[18]}")
fpgm_export=$(func_parser_value "${lines[19]}")
distill_export=$(func_parser_value "${lines[20]}")

inference_py=$(func_parser_value "${lines[21]}")
use_gpu_key=$(func_parser_key "${lines[22]}")
use_gpu_list=$(func_parser_value "${lines[22]}")
use_mkldnn_key=$(func_parser_key "${lines[23]}")
use_mkldnn_list=$(func_parser_value "${lines[23]}")
cpu_threads_key=$(func_parser_key "${lines[24]}")
cpu_threads_list=$(func_parser_value "${lines[24]}")
batch_size_key=$(func_parser_key "${lines[25]}")
batch_size_list=$(func_parser_value "${lines[25]}")
use_trt_key=$(func_parser_key "${lines[26]}")
use_trt_list=$(func_parser_value "${lines[26]}")
precision_key=$(func_parser_key "${lines[27]}")
precision_list=$(func_parser_value "${lines[27]}")
infer_model_key=$(func_parser_key "${lines[28]}")
infer_model=$(func_parser_value "${lines[28]}")
image_dir_key=$(func_parser_key "${lines[29]}")
infer_img_dir=$(func_parser_value "${lines[29]}")
save_log_key=$(func_parser_key "${lines[30]}")

LOG_PATH="./test/output"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results.log"


function func_inference(){
    IFS='|'
    _python=$1
    _script=$2
    _model_dir=$3
    _log_path=$4
    _img_dir=$5
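    # args: $1 python binary, $2 inference script, $3 exported model dir, $4 log dir, $5 image dir;
    # runs the script over the CPU (mkldnn/threads) and GPU (TensorRT/precision) grids parsed above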
    
    # inference 
    for use_gpu in ${use_gpu_list[*]}; do 
        if [ ${use_gpu} = "False" ]; then
            for use_mkldnn in ${use_mkldnn_list[*]}; do
                for threads in ${cpu_threads_list[*]}; do
                    for batch_size in ${batch_size_list[*]}; do
                        _save_log_path="${_log_path}/infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_${batch_size}"
                        command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${cpu_threads_key}=${threads} ${infer_model_key}=${_model_dir} ${batch_size_key}=${batch_size} ${image_dir_key}=${_img_dir}  ${save_log_key}=${_save_log_path} --benchmark=True"
                        eval $command
                        status_check $? "${command}" "${status_log}"
                    done
                done
            done
        else
            for use_trt in ${use_trt_list[*]}; do
                for precision in ${precision_list[*]}; do
                    if [ ${use_trt} = "False" ] && [ ${precision} != "fp32" ]; then
                        continue
                    fi
                    for batch_size in ${batch_size_list[*]}; do
                        _save_log_path="${_log_path}/infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}"
                        command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_trt_key}=${use_trt} ${precision_key}=${precision} ${infer_model_key}=${_model_dir} ${batch_size_key}=${batch_size} ${image_dir_key}=${_img_dir}  ${save_log_key}=${_save_log_path}  --benchmark=True"
                        eval $command
                        status_check $? "${command}" "${status_log}"
                    done
                done
            done
        fi
    done
}

if [ ${MODE} != "infer" ]; then
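# for every gpu setting, autocast setting and trainer, run: train -> eval -> export -> inference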

IFS="|"
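# gpu_list entries, as handled below: "-1" = CPU, "0" = single GPU,
# "0,1,..." = multi-GPU on one machine, "<ips>;<gpus>" = multi-machine launch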
for gpu in ${gpu_list[*]}; do
    use_gpu=True
    if [ ${gpu} = "-1" ];then
        use_gpu=False
        env=""
    elif [ ${#gpu} -le 1 ];then
        env="export CUDA_VISIBLE_DEVICES=${gpu}"
        eval ${env}
    elif [ ${#gpu} -le 15 ];then
        IFS=","
        array=(${gpu})
        env="export CUDA_VISIBLE_DEVICES=${array[0]}"
        IFS="|"
    else
        IFS=";"
        array=(${gpu})
        ips=${array[0]}
        gpu=${array[1]}
        IFS="|"
        env=" "
    fi
    for autocast in ${autocast_list[*]}; do 
        for trainer in ${trainer_list[*]}; do 
            if [ ${trainer} = "pact" ]; then
                run_train=${pact_trainer}
                run_export=${pact_export}
            elif [ ${trainer} = "fpgm" ]; then
                run_train=${fpgm_trainer}
                run_export=${fpgm_export}
            elif [ ${trainer} = "distill" ]; then
                run_train=${distill_trainer}
                run_export=${distill_export}
            else
                run_train=${norm_trainer}
                run_export=${norm_export}
            fi

            if [ ${run_train} = "null" ]; then
                continue
            fi
            if [ ${run_export} = "null" ]; then
                continue
            fi

            # do not set autocast when autocast is null
            if [ ${autocast} = "null" ]; then
                set_autocast=" "
            else
                set_autocast="${autocast_key}=${autocast}"
            fi
            # do not set epoch when MODE is whole_train_infer
            if [ ${MODE} != "whole_train_infer" ]; then
                set_epoch="${epoch_key}=${epoch_num}"
            else
                set_epoch=" "
            fi
            # set pretrain
            if [ ${pretrain_model_value} != "null" ]; then
                set_pretrain="${pretrain_model_key}=${pretrain_model_value}"
            else
                set_pretrain=" "
            fi

            save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}"
            if [ ${#gpu} -le 2 ];then  # train with cpu or single gpu
                cmd="${python} ${run_train} ${train_use_gpu_key}=${use_gpu}  ${save_model_key}=${save_log} ${set_epoch} ${set_pretrain} ${set_autocast}"
            elif [ ${#gpu} -le 15 ];then  # train with multi-gpu
                cmd="${python} -m paddle.distributed.launch --gpus=${gpu} ${run_train} ${save_model_key}=${save_log}  ${set_epoch} ${set_pretrain} ${set_autocast}"
            else     # train with multi-machine
                cmd="${python} -m paddle.distributed.launch --ips=${ips} --gpus=${gpu} ${run_train} ${save_model_key}=${save_log} ${set_pretrain} ${set_epoch} ${set_autocast}"
            fi
            # run train
            eval $cmd
            status_check $? "${cmd}" "${status_log}"

            # run eval
            eval_cmd="${python} ${eval_py} ${save_model_key}=${save_log} ${pretrain_model_key}=${save_log}/latest" 
            eval $eval_cmd
            status_check $? "${eval_cmd}" "${status_log}"

            # run export model
            save_infer_path="${save_log}"
            export_cmd="${python} ${run_export} ${save_model_key}=${save_log} ${export_weight}=${save_log}/latest ${save_infer_key}=${save_infer_path}"
            eval $export_cmd
            status_check $? "${export_cmd}" "${status_log}"

            # run inference
            eval $env
            save_infer_path="${save_log}"
            func_inference "${python}" "${inference_py}" "${save_infer_path}" "${LOG_PATH}" "${infer_img_dir}"
        done
    done
done

else
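    # MODE is 'infer': skip training and run inference directly on the pre-exported model,
    # using the optional third argument as the visible GPU id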
    GPUID=$3
    if [ ${#GPUID} -le 0 ];then
        env=" "
    else
        env="export CUDA_VISIBLE_DEVICES=${GPUID}"
    fi
    echo $env
    # run inference
    func_inference "${python}" "${inference_py}" "${infer_model}" "${LOG_PATH}" "${infer_img_dir}"
fi