#!/bin/bash
FILENAME=$1
# MODE must be one of ['lite_train_infer', 'whole_infer', 'whole_train_infer', 'infer']
MODE=$2
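# Invocation sketch (the config filename here is just a placeholder, not a
# file that ships with this script):
#   bash test.sh params.txt lite_train_infer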

dataline=$(cat ${FILENAME})

# parse params
IFS=$'\n'
lines=(${dataline})
function func_parser_key(){
    strs=$1
    IFS=":"
    array=(${strs})
    tmp=${array[0]}
    echo ${tmp}
}
function func_parser_value(){
    strs=$1
    IFS=":"
    array=(${strs})
    tmp=${array[1]}
    echo ${tmp}
}
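# Illustration (hypothetical config line): for "Global.epoch_num:10",
# func_parser_key prints "Global.epoch_num" and func_parser_value prints "10".
# A value may itself be a "|"-separated list (e.g. "True|False"), which the
# callers below split with IFS='|'.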
function status_check(){
    last_status=$1   # the exit code
    run_command=$2
    run_log=$3
    if [ $last_status -eq 0 ]; then
        echo -e "\033[33m Run successfully with command - ${run_command}!  \033[0m" | tee -a ${run_log}
    else
        echo -e "\033[33m Run failed with command - ${run_command}!  \033[0m" | tee -a ${run_log}
    fi
}
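# Typical call pattern used throughout this script:
#   eval $cmd
#   status_check $? "${cmd}" "${status_log}"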

IFS=$'\n'
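# The config file is consumed positionally: ${lines[0]} is its first line and
# the indices below fix the expected order. A rough sketch of the format (the
# concrete keys and values are illustrative assumptions; only "key:value" per
# line and the line order matter to this script):
#   model_name:<model under test>
#   python:<interpreter, e.g. python3.7>
#   gpu_list:<-1 | single id | comma-separated ids | "ips;gpus">
#   ...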
# The training params
model_name=$(func_parser_value "${lines[0]}")
python=$(func_parser_value "${lines[1]}")
gpu_list=$(func_parser_value "${lines[2]}")
autocast_list=$(func_parser_value "${lines[3]}")
autocast_key=$(func_parser_key "${lines[3]}")
epoch_key=$(func_parser_key "${lines[4]}")
save_model_key=$(func_parser_key "${lines[5]}")
save_infer_key=$(func_parser_key "${lines[6]}")
train_batch_key=$(func_parser_key "${lines[7]}")
train_use_gpu_key=$(func_parser_key "${lines[8]}")
pretrain_model_key=$(func_parser_key "${lines[9]}")

trainer_list=$(func_parser_value "${lines[10]}")
norm_trainer=$(func_parser_value "${lines[11]}")
pact_trainer=$(func_parser_value "${lines[12]}")
fpgm_trainer=$(func_parser_value "${lines[13]}")
distill_trainer=$(func_parser_value "${lines[14]}")

eval_py=$(func_parser_value "${lines[15]}")
norm_export=$(func_parser_value "${lines[16]}")
pact_export=$(func_parser_value "${lines[17]}")
fpgm_export=$(func_parser_value "${lines[18]}")
distill_export=$(func_parser_value "${lines[19]}")

inference_py=$(func_parser_value "${lines[20]}")
use_gpu_key=$(func_parser_key "${lines[21]}")
use_gpu_list=$(func_parser_value "${lines[21]}")
use_mkldnn_key=$(func_parser_key "${lines[22]}")
use_mkldnn_list=$(func_parser_value "${lines[22]}")
cpu_threads_key=$(func_parser_key "${lines[23]}")
cpu_threads_list=$(func_parser_value "${lines[23]}")
batch_size_key=$(func_parser_key "${lines[24]}")
batch_size_list=$(func_parser_value "${lines[24]}")
use_trt_key=$(func_parser_key "${lines[25]}")
use_trt_list=$(func_parser_value "${lines[25]}")
precision_key=$(func_parser_key "${lines[26]}")
precision_list=$(func_parser_value "${lines[26]}")
model_dir_key=$(func_parser_key "${lines[27]}")
image_dir_key=$(func_parser_key "${lines[28]}")
save_log_key=$(func_parser_key "${lines[29]}")

LOG_PATH="./test/output"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results.log"

if [ ${MODE} = "lite_train_infer" ]; then
    export infer_img_dir="./train_data/icdar2015/text_localization/ch4_test_images/"
    export epoch_num=10
elif [ ${MODE} = "whole_infer" ]; then
    export infer_img_dir="./train_data/icdar2015/text_localization/ch4_test_images/"
    export epoch_num=10
elif [ ${MODE} = "whole_train_infer" ]; then
    export infer_img_dir="./train_data/icdar2015/text_localization/ch4_test_images/"
    export epoch_num=300
else
    export infer_img_dir="./inference/ch_det_data_50/all-sum-510"
    export infer_model_dir="./inference/ch_ppocr_mobile_v2.0_det_train/best_accuracy"
fi


function func_inference(){
    IFS='|'
    _python=$1
    _script=$2
    _model_dir=$3
    _log_path=$4
    _img_dir=$5
    
    # inference: on CPU, sweep use_mkldnn x cpu_threads x batch_size; on GPU,
    # sweep use_trt x precision x batch_size (non-fp32 precision is skipped
    # when TensorRT is off)
    for use_gpu in ${use_gpu_list[*]}; do 
        if [ ${use_gpu} = "False" ]; then
            for use_mkldnn in ${use_mkldnn_list[*]}; do
                for threads in ${cpu_threads_list[*]}; do
                    for batch_size in ${batch_size_list[*]}; do
                        _save_log_path="${_log_path}/infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_${batch_size}"
                        command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${cpu_threads_key}=${threads} ${model_dir_key}=${_model_dir} ${batch_size_key}=${batch_size} ${image_dir_key}=${_img_dir}  ${save_log_key}=${_save_log_path}  --benchmark=True "
                        eval $command
                        status_check $? "${command}" "${status_log}"
                    done
                done
            done
        else
            for use_trt in ${use_trt_list[*]}; do
                for precision in ${precision_list[*]}; do
                    if [ ${use_trt} = "False" ] && [ ${precision} != "fp32" ]; then
                        continue
                    fi
                    for batch_size in ${batch_size_list[*]}; do
                        _save_log_path="${_log_path}/infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}"
                        command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_trt_key}=${use_trt} ${precision_key}=${precision} ${model_dir_key}=${_model_dir} ${batch_size_key}=${batch_size} ${image_dir_key}=${_img_dir}  ${save_log_key}=${_save_log_path}  --benchmark=True "
                        eval $command
                        status_check $? "${command}" "${status_log}"
                    done
                done
            done
        fi
    done
}

if [ ${MODE} != "infer" ]; then
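# Training modes: for every gpu setting x autocast x trainer combination, run
# training, then evaluation, then model export, then inference on the exported
# model, checking the exit status of each step.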

IFS="|"
for gpu in ${gpu_list[*]}; do
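    # A gpu_list entry can be "-1" (CPU only), a single device id, a
    # comma-separated id list for single-machine multi-card training, or
    # "ips;gpus" for multi-machine launch (format inferred from the branches
    # below).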
    train_use_gpu=True
    if [ ${gpu} = "-1" ];then
        train_use_gpu=False
        env=""
    elif [ ${#gpu} -le 1 ];then
        env="export CUDA_VISIBLE_DEVICES=${gpu}"
    elif [ ${#gpu} -le 15 ];then
        IFS=","
        array=(${gpu})
        env="export CUDA_VISIBLE_DEVICES=${array[0]}"
        IFS="|"
    else
        IFS=";"
        array=(${gpu})
        ips=${array[0]}
        gpu=${array[1]}
        IFS="|"
    fi
    for autocast in ${autocast_list[*]}; do 
        for trainer in ${trainer_list[*]}; do 
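            # Trainer variants (assumed meanings): "pact" = quantization-aware
            # training, "fpgm" = filter pruning, "distill" = knowledge
            # distillation; anything else uses the normal trainer.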
            if [ ${trainer} = "pact" ]; then
                run_train=${pact_trainer}
                run_export=${pact_export}
            elif [ ${trainer} = "fpgm" ]; then
                run_train=${fpgm_trainer}
                run_export=${fpgm_export}
            elif [ ${trainer} = "distill" ]; then
                run_train=${distill_trainer}
                run_export=${distill_export}
            else
                run_train=${norm_trainer}
                run_export=${norm_export}
            fi

            if [ ${run_train} = "null" ]; then
                continue
            fi
            if [ ${run_export} = "null" ]; then
                continue
            fi

            save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}"
            if [ ${#gpu} -le 2 ];then  # epoch_num #TODO
                cmd="${python} ${run_train} ${train_use_gpu_key}=${train_use_gpu} ${autocast_key}=${autocast} ${epoch_key}=${epoch_num} ${save_model_key}=${save_log} "
            elif [ ${#gpu} -le 15 ];then
                cmd="${python} -m paddle.distributed.launch --gpus=${gpu} ${run_train} ${autocast_key}=${autocast} ${epoch_key}=${epoch_num}  ${save_model_key}=${save_log}"
            else
                cmd="${python} -m paddle.distributed.launch --ips=${ips} --gpus=${gpu} ${run_train} ${autocast_key}=${autocast} ${epoch_key}=${epoch_num} ${save_model_key}=${save_log}"
            fi
            # run train
            eval $cmd
            status_check $? "${cmd}" "${status_log}"

            # run eval
            eval_cmd="${python} ${eval_py} ${save_model_key}=${save_log} ${pretrain_model_key}=${save_log}/latest" 
            eval $eval_cmd
            status_check $? "${eval_cmd}" "${status_log}"

            # run export model
            save_infer_path="${save_log}"
            export_cmd="${python} ${run_export} ${save_model_key}=${save_log} ${pretrain_model_key}=${save_log}/latest ${save_infer_key}=${save_infer_path}"
            eval $export_cmd
            status_check $? "${export_cmd}" "${status_log}"

            # run inference
            save_infer_path="${save_log}"
            func_inference "${python}" "${inference_py}" "${save_infer_path}" "${LOG_PATH}" "${infer_img_dir}"
        done
    done
done

else
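    # MODE == "infer": skip training; export the pre-trained model pointed to
    # by ${infer_model_dir} and run inference on it.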
    save_infer_path="${LOG_PATH}/${MODE}"
    run_export=${norm_export}
    export_cmd="${python} ${run_export} ${save_model_key}=${save_infer_path} ${pretrain_model_key}=${infer_model_dir} ${save_infer_key}=${save_infer_path}"
    eval $export_cmd
    status_check $? "${export_cmd}" "${status_log}"

    # run inference
    func_inference "${python}" "${inference_py}" "${save_infer_path}" "${LOG_PATH}" "${infer_img_dir}"
fi