Commit 721c76b4 authored by LDOUBLEV

fix conflict

parents 98162be4 b77f9ec0
#!/bin/bash
source test_tipc/common_func.sh
FILENAME=$1
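# the first 16 lines of the config file hold the cpp-inference test settings, one "key:value" per line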
dataline=$(awk 'NR==1, NR==16{print}' $FILENAME)
# parser params
IFS=$'\n'
lines=(${dataline})
# parser cpp inference model
model_name=$(func_parser_value "${lines[1]}")
use_opencv=$(func_parser_value "${lines[2]}")
cpp_infer_model_dir_list=$(func_parser_value "${lines[3]}")
cpp_infer_is_quant=$(func_parser_value "${lines[4]}")
# parser cpp inference
inference_cmd=$(func_parser_value "${lines[5]}")
cpp_use_gpu_key=$(func_parser_key "${lines[6]}")
cpp_use_gpu_list=$(func_parser_value "${lines[6]}")
cpp_use_mkldnn_key=$(func_parser_key "${lines[7]}")
cpp_use_mkldnn_list=$(func_parser_value "${lines[7]}")
cpp_cpu_threads_key=$(func_parser_key "${lines[8]}")
cpp_cpu_threads_list=$(func_parser_value "${lines[8]}")
cpp_batch_size_key=$(func_parser_key "${lines[9]}")
cpp_batch_size_list=$(func_parser_value "${lines[9]}")
cpp_use_trt_key=$(func_parser_key "${lines[10]}")
cpp_use_trt_list=$(func_parser_value "${lines[10]}")
cpp_precision_key=$(func_parser_key "${lines[11]}")
cpp_precision_list=$(func_parser_value "${lines[11]}")
cpp_infer_model_key=$(func_parser_key "${lines[12]}")
cpp_image_dir_key=$(func_parser_key "${lines[13]}")
cpp_infer_img_dir=$(func_parser_value "${lines[13]}")
cpp_infer_key1=$(func_parser_key "${lines[14]}")
cpp_infer_value1=$(func_parser_value "${lines[14]}")
cpp_benchmark_key=$(func_parser_key "${lines[15]}")
cpp_benchmark_value=$(func_parser_value "${lines[15]}")
LOG_PATH="./test_tipc/output"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results_cpp.log"
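# status_check (from common_func.sh) appends the outcome of every executed command to this log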
function func_cpp_inference(){
IFS='|'
_script=$1
_model_dir=$2
_log_path=$3
_img_dir=$4
_flag_quant=$5
# inference
for use_gpu in ${cpp_use_gpu_list[*]}; do
if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then
for use_mkldnn in ${cpp_use_mkldnn_list[*]}; do
if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
continue
fi
for threads in ${cpp_cpu_threads_list[*]}; do
for batch_size in ${cpp_batch_size_list[*]}; do
precision="fp32"
if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
precision="int8"
fi
_save_log_path="${_log_path}/cpp_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
set_infer_data=$(func_set_params "${cpp_image_dir_key}" "${_img_dir}")
set_benchmark=$(func_set_params "${cpp_benchmark_key}" "${cpp_benchmark_value}")
set_batchsize=$(func_set_params "${cpp_batch_size_key}" "${batch_size}")
set_cpu_threads=$(func_set_params "${cpp_cpu_threads_key}" "${threads}")
set_model_dir=$(func_set_params "${cpp_infer_model_key}" "${_model_dir}")
set_infer_params1=$(func_set_params "${cpp_infer_key1}" "${cpp_infer_value1}")
command="${_script} ${cpp_use_gpu_key}=${use_gpu} ${cpp_use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} 2>&1 "
eval $command
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
status_check $last_status "${command}" "${status_log}"
done
done
done
elif [ ${use_gpu} = "True" ] || [ ${use_gpu} = "gpu" ]; then
for use_trt in ${cpp_use_trt_list[*]}; do
for precision in ${cpp_precision_list[*]}; do
if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then
continue
fi
if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then
continue
fi
if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [ ${_flag_quant} = "True" ]; then
continue
fi
for batch_size in ${cpp_batch_size_list[*]}; do
_save_log_path="${_log_path}/cpp_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
set_infer_data=$(func_set_params "${cpp_image_dir_key}" "${_img_dir}")
set_benchmark=$(func_set_params "${cpp_benchmark_key}" "${cpp_benchmark_value}")
set_batchsize=$(func_set_params "${cpp_batch_size_key}" "${batch_size}")
set_tensorrt=$(func_set_params "${cpp_use_trt_key}" "${use_trt}")
set_precision=$(func_set_params "${cpp_precision_key}" "${precision}")
set_model_dir=$(func_set_params "${cpp_infer_model_key}" "${_model_dir}")
set_infer_params1=$(func_set_params "${cpp_infer_key1}" "${cpp_infer_value1}")
command="${_script} ${cpp_use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} 2>&1 "
eval $command
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
status_check $last_status "${command}" "${status_log}"
done
done
done
else
echo "Does not support hardware other than CPU and GPU Currently!"
fi
done
}
cd deploy/cpp_infer
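# rebuild OpenCV 3.4.7 from source unless an extracted build whose tarball matches the expected md5 already exists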
if [ ${use_opencv} = "True" ]; then
if [ -d "opencv-3.4.7/opencv3/" ] && [ $(md5sum opencv-3.4.7.tar.gz | awk -F ' ' '{print $1}') = "faa2b5950f8bee3f03118e600c74746a" ];then
echo "################### build opencv skipped ###################"
else
echo "################### build opencv ###################"
rm -rf opencv-3.4.7.tar.gz opencv-3.4.7/
wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/opencv-3.4.7.tar.gz
tar -xf opencv-3.4.7.tar.gz
cd opencv-3.4.7/
install_path=$(pwd)/opencv3
rm -rf build
mkdir build
cd build
cmake .. \
-DCMAKE_INSTALL_PREFIX=${install_path} \
-DCMAKE_BUILD_TYPE=Release \
-DBUILD_SHARED_LIBS=OFF \
-DWITH_IPP=OFF \
-DBUILD_IPP_IW=OFF \
-DWITH_LAPACK=OFF \
-DWITH_EIGEN=OFF \
-DCMAKE_INSTALL_LIBDIR=lib64 \
-DWITH_ZLIB=ON \
-DBUILD_ZLIB=ON \
-DWITH_JPEG=ON \
-DBUILD_JPEG=ON \
-DWITH_PNG=ON \
-DBUILD_PNG=ON \
-DWITH_TIFF=ON \
-DBUILD_TIFF=ON
make -j
make install
cd ../
echo "################### build opencv finished ###################"
fi
fi
echo "################### build PaddleOCR demo ####################"
if [ ${use_opencv} = "True" ]; then
OPENCV_DIR=$(pwd)/opencv-3.4.7/opencv3/
else
OPENCV_DIR=''
fi
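# locate the Paddle inference library and the CUDA/cuDNN runtime directories required by the CMake build below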
LIB_DIR=$(pwd)/Paddle/build/paddle_inference_install_dir/
CUDA_LIB_DIR=$(dirname `find /usr -name libcudart.so`)
CUDNN_LIB_DIR=$(dirname `find /usr -name libcudnn.so`)
BUILD_DIR=build
rm -rf ${BUILD_DIR}
mkdir ${BUILD_DIR}
cd ${BUILD_DIR}
cmake .. \
-DPADDLE_LIB=${LIB_DIR} \
-DWITH_MKL=ON \
-DWITH_GPU=OFF \
-DWITH_STATIC_LIB=OFF \
-DWITH_TENSORRT=OFF \
-DOPENCV_DIR=${OPENCV_DIR} \
-DCUDNN_LIB=${CUDNN_LIB_DIR} \
-DCUDA_LIB=${CUDA_LIB_DIR} \
-DTENSORRT_DIR=${TENSORRT_DIR}
make -j
cd ../../../
echo "################### build PaddleOCR demo finished ###################"
# set cuda device
GPUID=$2
if [ ${#GPUID} -le 0 ];then
env=" "
else
env="export CUDA_VISIBLE_DEVICES=${GPUID}"
fi
# set CUDA_VISIBLE_DEVICES
eval $env
echo "################### run test ###################"
export Count=0
IFS="|"
infer_quant_flag=(${cpp_infer_is_quant})
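# cpp_infer_is_quant holds one "|"-separated quant flag per entry of cpp_infer_model_dir_list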
for infer_model in ${cpp_infer_model_dir_list[*]}; do
#run inference
is_quant=${infer_quant_flag[Count]}
func_cpp_inference "${inference_cmd}" "${infer_model}" "${LOG_PATH}" "${cpp_infer_img_dir}" ${is_quant}
Count=$(($Count + 1))
done
#!/bin/bash
source test_tipc/common_func.sh
source test_tipc/test_train_inference_python.sh
FILENAME=$1
# MODE be one of ['whole_infer']
MODE=$2
dataline=$(awk 'NR==1, NR==17{print}' $FILENAME)
# parser params
IFS=$'\n'
lines=(${dataline})
model_name=$(func_parser_value "${lines[1]}")
python=$(func_parser_value "${lines[2]}")
infer_model_dir_list=$(func_parser_value "${lines[3]}")
infer_export_list=$(func_parser_value "${lines[4]}")
infer_is_quant=$(func_parser_value "${lines[5]}")
# parser inference
inference_py=$(func_parser_value "${lines[6]}")
use_gpu_key=$(func_parser_key "${lines[7]}")
use_gpu_list=$(func_parser_value "${lines[7]}")
use_mkldnn_key=$(func_parser_key "${lines[8]}")
use_mkldnn_list=$(func_parser_value "${lines[8]}")
cpu_threads_key=$(func_parser_key "${lines[9]}")
cpu_threads_list=$(func_parser_value "${lines[9]}")
batch_size_key=$(func_parser_key "${lines[10]}")
batch_size_list=$(func_parser_value "${lines[10]}")
use_trt_key=$(func_parser_key "${lines[11]}")
use_trt_list=$(func_parser_value "${lines[11]}")
precision_key=$(func_parser_key "${lines[12]}")
precision_list=$(func_parser_value "${lines[12]}")
infer_model_key=$(func_parser_key "${lines[13]}")
image_dir_key=$(func_parser_key "${lines[14]}")
infer_img_dir=$(func_parser_value "${lines[14]}")
save_log_key=$(func_parser_key "${lines[15]}")
benchmark_key=$(func_parser_key "${lines[16]}")
benchmark_value=$(func_parser_value "${lines[16]}")
infer_key1=$(func_parser_key "${lines[17]}")
infer_value1=$(func_parser_value "${lines[17]}")
LOG_PATH="./test_tipc/output"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results_python.log"
if [ ${MODE} = "whole_infer" ]; then
GPUID=$3
if [ ${#GPUID} -le 0 ];then
env=" "
else
env="export CUDA_VISIBLE_DEVICES=${GPUID}"
fi
# set CUDA_VISIBLE_DEVICES
eval $env
export Count=0
IFS="|"
infer_run_exports=(${infer_export_list})
infer_quant_flag=(${infer_is_quant})
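# infer_export_list and infer_is_quant each hold one "|"-separated entry per model dir: the export command to run (or "null") and whether that model is quantized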
for infer_model in ${infer_model_dir_list[*]}; do
# run export
if [ ${infer_run_exports[Count]} != "null" ];then
save_infer_dir=$(dirname $infer_model)
set_export_weight=$(func_set_params "${export_weight}" "${infer_model}")
set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_dir}")
export_cmd="${python} ${infer_run_exports[Count]} ${set_export_weight} ${set_save_infer_key}"
echo ${infer_run_exports[Count]}
echo $export_cmd
eval $export_cmd
status_export=$?
status_check $status_export "${export_cmd}" "${status_log}"
else
save_infer_dir=${infer_model}
fi
#run inference
is_quant=${infer_quant_flag[Count]}
if [ ${MODE} = "klquant_infer" ]; then
is_quant="True"
fi
func_inference "${python}" "${inference_py}" "${save_infer_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant}
Count=$(($Count + 1))
done
fi
#!/bin/bash
source ./common_func.sh
export LD_LIBRARY_PATH=${PWD}:$LD_LIBRARY_PATH
FILENAME=$1
dataline=$(cat $FILENAME)
# parser params
IFS=$'\n'
lines=(${dataline})
# parser lite inference
inference_cmd=$(func_parser_value "${lines[1]}")
runtime_device=$(func_parser_value "${lines[2]}")
det_model_list=$(func_parser_value "${lines[3]}")
rec_model_list=$(func_parser_value "${lines[4]}")
cls_model_list=$(func_parser_value "${lines[5]}")
cpu_threads_list=$(func_parser_value "${lines[6]}")
det_batch_size_list=$(func_parser_value "${lines[7]}")
rec_batch_size_list=$(func_parser_value "${lines[8]}")
infer_img_dir_list=$(func_parser_value "${lines[9]}")
config_dir=$(func_parser_value "${lines[10]}")
rec_dict_dir=$(func_parser_value "${lines[11]}")
benchmark_value=$(func_parser_value "${lines[12]}")
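# select which .nb model lists to exercise according to whether the command runs det, rec, or the full system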
if [[ $inference_cmd =~ "det" ]]; then
lite_model_list=${det_lite_model_list}
elif [[ $inference_cmd =~ "rec" ]]; then
lite_model_list=(${rec_lite_model_list[*]} ${cls_lite_model_list[*]})
elif [[ $inference_cmd =~ "system" ]]; then
lite_model_list=(${det_lite_model_list[*]} ${rec_lite_model_list[*]} ${cls_lite_model_list[*]})
else
echo "inference_cmd is wrong, please check."
exit 1
fi
LOG_PATH="./output"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results.log"
function func_test_det(){
IFS='|'
_script=$1
_det_model=$2
_log_path=$3
_img_dir=$4
_config=$5
if [[ $_det_model =~ "slim" ]]; then
precision="INT8"
else
precision="FP32"
fi
# lite inference
for num_threads in ${cpu_threads_list[*]}; do
for det_batchsize in ${det_batch_size_list[*]}; do
_save_log_path="${_log_path}/lite_${_det_model}_runtime_device_${runtime_device}_precision_${precision}_det_batchsize_${det_batchsize}_threads_${num_threads}.log"
command="${_script} ${_det_model} ${runtime_device} ${precision} ${num_threads} ${det_batchsize} ${_img_dir} ${_config} ${benchmark_value} > ${_save_log_path} 2>&1"
eval ${command}
status_check $? "${command}" "${status_log}"
done
done
}
function func_test_rec(){
IFS='|'
_script=$1
_rec_model=$2
_cls_model=$3
_log_path=$4
_img_dir=$5
_config=$6
_rec_dict_dir=$7
if [[ $_rec_model =~ "slim" ]]; then
_precision="INT8"
else
_precision="FP32"
fi
# lite inference
for num_threads in ${cpu_threads_list[*]}; do
for rec_batchsize in ${rec_batch_size_list[*]}; do
_save_log_path="${_log_path}/lite_${_rec_model}_${cls_model}_runtime_device_${runtime_device}_precision_${_precision}_rec_batchsize_${rec_batchsize}_threads_${num_threads}.log"
command="${_script} ${_rec_model} ${_cls_model} ${runtime_device} ${_precision} ${num_threads} ${rec_batchsize} ${_img_dir} ${_config} ${_rec_dict_dir} ${benchmark_value} > ${_save_log_path} 2>&1"
eval ${command}
status_check $? "${command}" "${status_log}"
done
done
}
function func_test_system(){
IFS='|'
_script=$1
_det_model=$2
_rec_model=$3
_cls_model=$4
_log_path=$5
_img_dir=$6
_config=$7
_rec_dict_dir=$8
if [[ $_det_model =~ "slim" ]]; then
_precision="INT8"
else
_precision="FP32"
fi
# lite inference
for num_threads in ${cpu_threads_list[*]}; do
for det_batchsize in ${det_batch_size_list[*]}; do
for rec_batchsize in ${rec_batch_size_list[*]}; do
_save_log_path="${_log_path}/lite_${_det_model}_${_rec_model}_${_cls_model}_runtime_device_${runtime_device}_precision_${_precision}_det_batchsize_${det_batchsize}_rec_batchsize_${rec_batchsize}_threads_${num_threads}.log"
command="${_script} ${_det_model} ${_rec_model} ${_cls_model} ${runtime_device} ${_precision} ${num_threads} ${det_batchsize} ${_img_dir} ${_config} ${_rec_dict_dir} ${benchmark_value} > ${_save_log_path} 2>&1"
eval ${command}
status_check $? "${command}" "${status_log}"
done
done
done
}
echo "################### run test ###################"
if [[ $inference_cmd =~ "det" ]]; then
IFS="|"
det_model_list=(${det_model_list[*]})
for i in {0..1}; do
#run lite inference
for img_dir in ${infer_img_dir_list[*]}; do
func_test_det "${inference_cmd}" "${det_model_list[i]}_opt.nb" "${LOG_PATH}" "${img_dir}" "${config_dir}"
done
done
elif [[ $inference_cmd =~ "rec" ]]; then
IFS="|"
rec_model_list=(${rec_model_list[*]})
cls_model_list=(${cls_model_list[*]})
for i in {0..1}; do
#run lite inference
for img_dir in ${infer_img_dir_list[*]}; do
func_test_rec "${inference_cmd}" "${rec_model_list[i]}_opt.nb" "${cls_model_list[i]}_opt.nb" "${LOG_PATH}" "${img_dir}" "${config_dir}" "${rec_dict_dir}"
done
done
elif [[ $inference_cmd =~ "system" ]]; then
IFS="|"
det_model_list=(${det_model_list[*]})
rec_model_list=(${rec_model_list[*]})
cls_model_list=(${cls_model_list[*]})
for i in {0..1}; do
#run lite inference
for img_dir in ${infer_img_dir_list[*]}; do
func_test_system "${inference_cmd}" "${det_model_list[i]}_opt.nb" "${rec_model_list[i]}_opt.nb" "${cls_model_list[i]}_opt.nb" "${LOG_PATH}" "${img_dir}" "${config_dir}" "${rec_dict_dir}"
done
done
fi
#!/bin/bash
source test_tipc/common_func.sh
FILENAME=$1
dataline=$(cat ${FILENAME})
IFS=$'\n'
lines=(${dataline})
# common params
model_name=$(func_parser_value "${lines[1]}")
python=$(func_parser_value "${lines[2]}")
# parser params
dataline=$(awk 'NR==1, NR==12{print}' $FILENAME)
IFS=$'\n'
lines=(${dataline})
# parser paddle2onnx
model_name=$(func_parser_value "${lines[1]}")
python=$(func_parser_value "${lines[2]}")
paddle2onnx_cmd=$(func_parser_value "${lines[3]}")
infer_model_dir_key=$(func_parser_key "${lines[4]}")
infer_model_dir_value=$(func_parser_value "${lines[4]}")
model_filename_key=$(func_parser_key "${lines[5]}")
model_filename_value=$(func_parser_value "${lines[5]}")
params_filename_key=$(func_parser_key "${lines[6]}")
params_filename_value=$(func_parser_value "${lines[6]}")
save_file_key=$(func_parser_key "${lines[7]}")
save_file_value=$(func_parser_value "${lines[7]}")
opset_version_key=$(func_parser_key "${lines[8]}")
opset_version_value=$(func_parser_value "${lines[8]}")
enable_onnx_checker_key=$(func_parser_key "${lines[9]}")
enable_onnx_checker_value=$(func_parser_value "${lines[9]}")
# parser onnx inference
inference_py=$(func_parser_value "${lines[10]}")
use_gpu_key=$(func_parser_key "${lines[11]}")
use_gpu_value=$(func_parser_value "${lines[11]}")
det_model_key=$(func_parser_key "${lines[12]}")
image_dir_key=$(func_parser_key "${lines[13]}")
image_dir_value=$(func_parser_value "${lines[13]}")
LOG_PATH="./test_tipc/output"
mkdir -p ./test_tipc/output
status_log="${LOG_PATH}/results_paddle2onnx.log"
function func_paddle2onnx(){
IFS='|'
_script=$1
# paddle2onnx
_save_log_path="${LOG_PATH}/paddle2onnx_infer_cpu.log"
set_dirname=$(func_set_params "${infer_model_dir_key}" "${infer_model_dir_value}")
set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
set_save_model=$(func_set_params "${save_file_key}" "${save_file_value}")
set_opset_version=$(func_set_params "${opset_version_key}" "${opset_version_value}")
set_enable_onnx_checker=$(func_set_params "${enable_onnx_checker_key}" "${enable_onnx_checker_value}")
trans_model_cmd="${padlle2onnx_cmd} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_save_model} ${set_opset_version} ${set_enable_onnx_checker}"
eval $trans_model_cmd
last_status=${PIPESTATUS[0]}
status_check $last_status "${trans_model_cmd}" "${status_log}"
# python inference
set_gpu=$(func_set_params "${use_gpu_key}" "${use_gpu_value}")
set_model_dir=$(func_set_params "${det_model_key}" "${save_file_value}")
set_img_dir=$(func_set_params "${image_dir_key}" "${image_dir_value}")
infer_model_cmd="${python} ${inference_py} ${set_gpu} ${set_img_dir} ${set_model_dir} --use_onnx=True > ${_save_log_path} 2>&1 "
eval $infer_model_cmd
last_status=${PIPESTATUS[0]}
status_check $last_status "${infer_model_cmd}" "${status_log}"
}
echo "################### run test ###################"
export Count=0
IFS="|"
func_paddle2onnx
#!/bin/bash
source test_tipc/common_func.sh
FILENAME=$1
dataline=$(awk 'NR==1, NR==18{print}' $FILENAME)
# parser params
IFS=$'\n'
lines=(${dataline})
# parser serving
model_name=$(func_parser_value "${lines[1]}")
python=$(func_parser_value "${lines[2]}")
trans_model_py=$(func_parser_value "${lines[3]}")
infer_model_dir_key=$(func_parser_key "${lines[4]}")
infer_model_dir_value=$(func_parser_value "${lines[4]}")
model_filename_key=$(func_parser_key "${lines[5]}")
model_filename_value=$(func_parser_value "${lines[5]}")
params_filename_key=$(func_parser_key "${lines[6]}")
params_filename_value=$(func_parser_value "${lines[6]}")
serving_server_key=$(func_parser_key "${lines[7]}")
serving_server_value=$(func_parser_value "${lines[7]}")
serving_client_key=$(func_parser_key "${lines[8]}")
serving_client_value=$(func_parser_value "${lines[8]}")
serving_dir_value=$(func_parser_value "${lines[9]}")
web_service_py=$(func_parser_value "${lines[10]}")
web_use_gpu_key=$(func_parser_key "${lines[11]}")
web_use_gpu_list=$(func_parser_value "${lines[11]}")
web_use_mkldnn_key=$(func_parser_key "${lines[12]}")
web_use_mkldnn_list=$(func_parser_value "${lines[12]}")
web_cpu_threads_key=$(func_parser_key "${lines[13]}")
web_cpu_threads_list=$(func_parser_value "${lines[13]}")
web_use_trt_key=$(func_parser_key "${lines[14]}")
web_use_trt_list=$(func_parser_value "${lines[14]}")
web_precision_key=$(func_parser_key "${lines[15]}")
web_precision_list=$(func_parser_value "${lines[15]}")
pipeline_py=$(func_parser_value "${lines[16]}")
image_dir_key=$(func_parser_key "${lines[17]}")
image_dir_value=$(func_parser_value "${lines[17]}")
LOG_PATH="../../test_tipc/output"
mkdir -p ./test_tipc/output
status_log="${LOG_PATH}/results_serving.log"
function func_serving(){
IFS='|'
_python=$1
_script=$2
_model_dir=$3
# pdserving
set_dirname=$(func_set_params "${infer_model_dir_key}" "${infer_model_dir_value}")
set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
set_serving_server=$(func_set_params "${serving_server_key}" "${serving_server_value}")
set_serving_client=$(func_set_params "${serving_client_key}" "${serving_client_value}")
set_image_dir=$(func_set_params "${image_dir_key}" "${image_dir_value}")
trans_model_cmd="${python} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
eval $trans_model_cmd
cd ${serving_dir_value}
echo $PWD
unset https_proxy
unset http_proxy
for python in ${python[*]}; do
if [ ${python} = "cpp"]; then
for use_gpu in ${web_use_gpu_list[*]}; do
if [ ${use_gpu} = "null" ]; then
web_service_cpp_cmd="${python} -m paddle_serving_server.serve --model ppocr_det_mobile_2.0_serving/ ppocr_rec_mobile_2.0_serving/ --port 9293"
eval $web_service_cpp_cmd
sleep 2s
_save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log"
pipeline_cmd="${python} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/"
eval $pipeline_cmd
last_status=${PIPESTATUS[0]}
status_check $last_status "${pipeline_cmd}" "${status_log}"
sleep 2s
ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
else
web_service_cpp_cmd="${python} -m paddle_serving_server.serve --model ppocr_det_mobile_2.0_serving/ ppocr_rec_mobile_2.0_serving/ --port 9293 --gpu_id=0"
eval $web_service_cpp_cmd
sleep 2s
_save_log_path="${LOG_PATH}/server_infer_cpp_gpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log"
pipeline_cmd="${python} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/"
eval $pipeline_cmd
last_status=${PIPESTATUS[0]}
status_check $last_status "${pipeline_cmd}" "${status_log}"
sleep 2s
ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
fi
done
else
# python serving
for use_gpu in ${web_use_gpu_list[*]}; do
echo ${use_gpu}
if [ ${use_gpu} = "null" ]; then
for use_mkldnn in ${web_use_mkldnn_list[*]}; do
if [ ${use_mkldnn} = "False" ]; then
continue
fi
for threads in ${web_cpu_threads_list[*]}; do
set_cpu_threads=$(func_set_params "${web_cpu_threads_key}" "${threads}")
web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${web_use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} &"
eval $web_service_cmd
sleep 2s
for pipeline in ${pipeline_py[*]}; do
_save_log_path="${LOG_PATH}/server_infer_cpu_${pipeline%_client*}_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_1.log"
pipeline_cmd="${python} ${pipeline} ${set_image_dir} > ${_save_log_path} 2>&1 "
eval $pipeline_cmd
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
status_check $last_status "${pipeline_cmd}" "${status_log}"
sleep 2s
done
ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
done
done
elif [ ${use_gpu} = "0" ]; then
for use_trt in ${web_use_trt_list[*]}; do
for precision in ${web_precision_list[*]}; do
if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then
continue
fi
if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then
continue
fi
if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [[ ${_flag_quant} = "True" ]]; then
continue
fi
set_tensorrt=$(func_set_params "${web_use_trt_key}" "${use_trt}")
set_precision=$(func_set_params "${web_precision_key}" "${precision}")
web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} & "
eval $web_service_cmd
sleep 2s
for pipeline in ${pipeline_py[*]}; do
_save_log_path="${LOG_PATH}/server_infer_gpu_${pipeline%_client*}_usetrt_${use_trt}_precision_${precision}_batchsize_1.log"
pipeline_cmd="${python} ${pipeline} ${set_image_dir}> ${_save_log_path} 2>&1"
eval $pipeline_cmd
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
status_check $last_status "${pipeline_cmd}" "${status_log}"
sleep 2s
done
ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
done
done
else
echo "Does not support hardware other than CPU and GPU Currently!"
fi
done
fi
done
}
# set cuda device
GPUID=$2
if [ ${#GPUID} -le 0 ];then
env=" "
else
env="export CUDA_VISIBLE_DEVICES=${GPUID}"
fi
# set CUDA_VISIBLE_DEVICES
eval $env
echo "################### run test ###################"
export Count=0
IFS="|"
func_serving "${web_service_cmd}"
 #!/bin/bash
+source test_tipc/common_func.sh
 FILENAME=$1
-# MODE be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer', 'infer', 'cpp_infer']
+# MODE be one of ['lite_train_lite_infer' 'lite_train_whole_infer' 'whole_train_whole_infer', 'whole_infer', 'klquant_whole_infer']
 MODE=$2
-dataline=$(cat ${FILENAME})
+dataline=$(awk 'NR==1, NR==51{print}' $FILENAME)
 # parser params
 IFS=$'\n'
 lines=(${dataline})
-function func_parser_key(){
-strs=$1
-IFS=":"
-array=(${strs})
-tmp=${array[0]}
-echo ${tmp}
-}
-function func_parser_value(){
-strs=$1
-IFS=":"
-array=(${strs})
-tmp=${array[1]}
-echo ${tmp}
-}
-function func_set_params(){
-key=$1
-value=$2
-if [ ${key} = "null" ];then
-echo " "
-elif [[ ${value} = "null" ]] || [[ ${value} = " " ]] || [ ${#value} -le 0 ];then
-echo " "
-else
-echo "${key}=${value}"
-fi
-}
-function func_parser_params(){
-strs=$1
-IFS=":"
-array=(${strs})
-key=${array[0]}
-tmp=${array[1]}
-IFS="|"
-res=""
-for _params in ${tmp[*]}; do
-IFS="="
-array=(${_params})
-mode=${array[0]}
-value=${array[1]}
-if [[ ${mode} = ${MODE} ]]; then
-IFS="|"
-#echo $(func_set_params "${mode}" "${value}")
-echo $value
-break
-fi
-IFS="|"
-done
-echo ${res}
-}
-function status_check(){
-last_status=$1 # the exit code
-run_command=$2
-run_log=$3
-if [ $last_status -eq 0 ]; then
-echo -e "\033[33m Run successfully with command - ${run_command}! \033[0m" | tee -a ${run_log}
-else
-echo -e "\033[33m Run failed with command - ${run_command}! \033[0m" | tee -a ${run_log}
-fi
-}
-IFS=$'\n'
 # The training params
 model_name=$(func_parser_value "${lines[1]}")
 python=$(func_parser_value "${lines[2]}")
@@ -78,10 +20,10 @@ train_use_gpu_value=$(func_parser_value "${lines[4]}")
 autocast_list=$(func_parser_value "${lines[5]}")
 autocast_key=$(func_parser_key "${lines[5]}")
 epoch_key=$(func_parser_key "${lines[6]}")
-epoch_num=$(func_parser_params "${lines[6]}")
+epoch_num=$(func_parser_params "${lines[6]}" "${MODE}")
 save_model_key=$(func_parser_key "${lines[7]}")
 train_batch_key=$(func_parser_key "${lines[8]}")
-train_batch_value=$(func_parser_params "${lines[8]}")
+train_batch_value=$(func_parser_params "${lines[8]}" "${MODE}")
 pretrain_model_key=$(func_parser_key "${lines[9]}")
 pretrain_model_value=$(func_parser_value "${lines[9]}")
 train_model_name=$(func_parser_value "${lines[10]}")
@@ -117,6 +59,7 @@ export_key1=$(func_parser_key "${lines[33]}")
 export_value1=$(func_parser_value "${lines[33]}")
 export_key2=$(func_parser_key "${lines[34]}")
 export_value2=$(func_parser_value "${lines[34]}")
+inference_dir=$(func_parser_value "${lines[35]}")
 # parser inference model
 infer_model_dir_list=$(func_parser_value "${lines[36]}")
@@ -144,64 +87,44 @@ benchmark_key=$(func_parser_key "${lines[49]}")
 benchmark_value=$(func_parser_value "${lines[49]}")
 infer_key1=$(func_parser_key "${lines[50]}")
 infer_value1=$(func_parser_value "${lines[50]}")
-# parser serving
-trans_model_py=$(func_parser_value "${lines[67]}")
-infer_model_dir_key=$(func_parser_key "${lines[68]}")
-infer_model_dir_value=$(func_parser_value "${lines[68]}")
-model_filename_key=$(func_parser_key "${lines[69]}")
-model_filename_value=$(func_parser_value "${lines[69]}")
-params_filename_key=$(func_parser_key "${lines[70]}")
-params_filename_value=$(func_parser_value "${lines[70]}")
-serving_server_key=$(func_parser_key "${lines[71]}")
-serving_server_value=$(func_parser_value "${lines[71]}")
-serving_client_key=$(func_parser_key "${lines[72]}")
-serving_client_value=$(func_parser_value "${lines[72]}")
-serving_dir_value=$(func_parser_value "${lines[73]}")
-web_service_py=$(func_parser_value "${lines[74]}")
-web_use_gpu_key=$(func_parser_key "${lines[75]}")
-web_use_gpu_list=$(func_parser_value "${lines[75]}")
-web_use_mkldnn_key=$(func_parser_key "${lines[76]}")
-web_use_mkldnn_list=$(func_parser_value "${lines[76]}")
-web_cpu_threads_key=$(func_parser_key "${lines[77]}")
-web_cpu_threads_list=$(func_parser_value "${lines[77]}")
-web_use_trt_key=$(func_parser_key "${lines[78]}")
-web_use_trt_list=$(func_parser_value "${lines[78]}")
-web_precision_key=$(func_parser_key "${lines[79]}")
-web_precision_list=$(func_parser_value "${lines[79]}")
-pipeline_py=$(func_parser_value "${lines[80]}")
+# parser klquant_infer
if [ ${MODE} = "cpp_infer" ]; then if [ ${MODE} = "klquant_whole_infer" ]; then
# parser cpp inference model dataline=$(awk 'NR==1 NR==17{print}' $FILENAME)
cpp_infer_model_dir_list=$(func_parser_value "${lines[53]}") lines=(${dataline})
cpp_infer_is_quant=$(func_parser_value "${lines[54]}") model_name=$(func_parser_value "${lines[1]}")
# parser cpp inference python=$(func_parser_value "${lines[2]}")
inference_cmd=$(func_parser_value "${lines[55]}") # parser inference model
cpp_use_gpu_key=$(func_parser_key "${lines[56]}") infer_model_dir_list=$(func_parser_value "${lines[3]}")
cpp_use_gpu_list=$(func_parser_value "${lines[56]}") infer_export_list=$(func_parser_value "${lines[4]}")
cpp_use_mkldnn_key=$(func_parser_key "${lines[57]}") infer_is_quant=$(func_parser_value "${lines[5]}")
cpp_use_mkldnn_list=$(func_parser_value "${lines[57]}") # parser inference
cpp_cpu_threads_key=$(func_parser_key "${lines[58]}") inference_py=$(func_parser_value "${lines[6]}")
cpp_cpu_threads_list=$(func_parser_value "${lines[58]}") use_gpu_key=$(func_parser_key "${lines[7]}")
cpp_batch_size_key=$(func_parser_key "${lines[59]}") use_gpu_list=$(func_parser_value "${lines[7]}")
cpp_batch_size_list=$(func_parser_value "${lines[59]}") use_mkldnn_key=$(func_parser_key "${lines[8]}")
cpp_use_trt_key=$(func_parser_key "${lines[60]}") use_mkldnn_list=$(func_parser_value "${lines[8]}")
cpp_use_trt_list=$(func_parser_value "${lines[60]}") cpu_threads_key=$(func_parser_key "${lines[9]}")
cpp_precision_key=$(func_parser_key "${lines[61]}") cpu_threads_list=$(func_parser_value "${lines[9]}")
cpp_precision_list=$(func_parser_value "${lines[61]}") batch_size_key=$(func_parser_key "${lines[10]}")
cpp_infer_model_key=$(func_parser_key "${lines[62]}") batch_size_list=$(func_parser_value "${lines[10]}")
cpp_image_dir_key=$(func_parser_key "${lines[63]}") use_trt_key=$(func_parser_key "${lines[11]}")
cpp_infer_img_dir=$(func_parser_value "${lines[63]}") use_trt_list=$(func_parser_value "${lines[11]}")
cpp_infer_key1=$(func_parser_key "${lines[64]}") precision_key=$(func_parser_key "${lines[12]}")
cpp_infer_value1=$(func_parser_value "${lines[64]}") precision_list=$(func_parser_value "${lines[12]}")
cpp_benchmark_key=$(func_parser_key "${lines[65]}") infer_model_key=$(func_parser_key "${lines[13]}")
cpp_benchmark_value=$(func_parser_value "${lines[65]}") image_dir_key=$(func_parser_key "${lines[14]}")
infer_img_dir=$(func_parser_value "${lines[14]}")
save_log_key=$(func_parser_key "${lines[15]}")
benchmark_key=$(func_parser_key "${lines[16]}")
benchmark_value=$(func_parser_value "${lines[16]}")
infer_key1=$(func_parser_key "${lines[17]}")
infer_value1=$(func_parser_value "${lines[17]}")
fi fi
LOG_PATH="./test_tipc/output"
LOG_PATH="./tests/output"
mkdir -p ${LOG_PATH} mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results.log" status_log="${LOG_PATH}/results_python.log"
function func_inference(){ function func_inference(){
...@@ -221,18 +144,28 @@ function func_inference(){ ...@@ -221,18 +144,28 @@ function func_inference(){
 fi
 for threads in ${cpu_threads_list[*]}; do
 for batch_size in ${batch_size_list[*]}; do
-_save_log_path="${_log_path}/infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_${batch_size}.log"
-set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
-set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
-set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
-set_cpu_threads=$(func_set_params "${cpu_threads_key}" "${threads}")
-set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}")
-set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}")
-command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} 2>&1 "
-eval $command
-last_status=${PIPESTATUS[0]}
-eval "cat ${_save_log_path}"
-status_check $last_status "${command}" "${status_log}"
+for precision in ${precision_list[*]}; do
+if [ ${use_mkldnn} = "False" ] && [ ${precision} = "fp16" ]; then
+continue
+fi # skip when enable fp16 but disable mkldnn
+if [ ${_flag_quant} = "True" ] && [ ${precision} != "int8" ]; then
+continue
+fi # skip when quant model inference but precision is not int8
+set_precision=$(func_set_params "${precision_key}" "${precision}")
+_save_log_path="${_log_path}/python_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
+set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
+set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
+set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
+set_cpu_threads=$(func_set_params "${cpu_threads_key}" "${threads}")
+set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}")
+set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}")
+command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_precision} ${set_infer_params1} > ${_save_log_path} 2>&1 "
+eval $command
+last_status=${PIPESTATUS[0]}
+eval "cat ${_save_log_path}"
+status_check $last_status "${command}" "${status_log}"
+done
 done
 done
 done
@@ -249,7 +182,7 @@ function func_inference(){
 continue
 fi
 for batch_size in ${batch_size_list[*]}; do
-_save_log_path="${_log_path}/infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
+_save_log_path="${_log_path}/python_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
 set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
 set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
 set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
@@ -271,150 +204,8 @@ function func_inference(){
 fi
 done
 }
-function func_serving(){
-IFS='|'
-_python=$1
-_script=$2
-_model_dir=$3
-# pdserving
-set_dirname=$(func_set_params "${infer_model_dir_key}" "${infer_model_dir_value}")
-set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
-set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
-set_serving_server=$(func_set_params "${serving_server_key}" "${serving_server_value}")
-set_serving_client=$(func_set_params "${serving_client_key}" "${serving_client_value}")
-trans_model_cmd="${python} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
-eval $trans_model_cmd
-cd ${serving_dir_value}
-echo $PWD
-unset https_proxy
-unset http_proxy
-for use_gpu in ${web_use_gpu_list[*]}; do
-echo ${ues_gpu}
-if [ ${use_gpu} = "null" ]; then
-for use_mkldnn in ${web_use_mkldnn_list[*]}; do
-if [ ${use_mkldnn} = "False" ]; then
-continue
-fi
-for threads in ${web_cpu_threads_list[*]}; do
-_save_log_path="${_log_path}/server_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_1.log"
-set_cpu_threads=$(func_set_params "${web_cpu_threads_key}" "${threads}")
-web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${web_use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} &>${_save_log_path} &"
-eval $web_service_cmd
-sleep 2s
-pipeline_cmd="${python} ${pipeline_py}"
-eval $pipeline_cmd
-last_status=${PIPESTATUS[0]}
-eval "cat ${_save_log_path}"
-status_check $last_status "${pipeline_cmd}" "${status_log}"
-PID=$!
-kill $PID
-sleep 2s
-ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
-done
-done
-elif [ ${use_gpu} = "0" ]; then
-for use_trt in ${web_use_trt_list[*]}; do
-for precision in ${web_precision_list[*]}; do
-if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then
-continue
-fi
-if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then
-continue
-fi
-if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [[ ${_flag_quant} = "True" ]]; then
-continue
-fi
-_save_log_path="${_log_path}/infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_1.log"
-set_tensorrt=$(func_set_params "${web_use_trt_key}" "${use_trt}")
-set_precision=$(func_set_params "${web_precision_key}" "${precision}")
-web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} &>${_save_log_path} & "
-eval $web_service_cmd
-sleep 2s
-pipeline_cmd="${python} ${pipeline_py}"
-eval $pipeline_cmd
-last_status=${PIPESTATUS[0]}
-eval "cat ${_save_log_path}"
-status_check $last_status "${pipeline_cmd}" "${status_log}"
-PID=$!
-kill $PID
-sleep 2s
-ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
-done
-done
-else
-echo "Does not support hardware other than CPU and GPU Currently!"
-fi
-done
-}
-function func_cpp_inference(){
-IFS='|'
-_script=$1
-_model_dir=$2
-_log_path=$3
-_img_dir=$4
-_flag_quant=$5
-# inference
-for use_gpu in ${cpp_use_gpu_list[*]}; do
-if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then
-for use_mkldnn in ${cpp_use_mkldnn_list[*]}; do
-if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
-continue
-fi
-for threads in ${cpp_cpu_threads_list[*]}; do
-for batch_size in ${cpp_batch_size_list[*]}; do
-_save_log_path="${_log_path}/cpp_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_${batch_size}.log"
-set_infer_data=$(func_set_params "${cpp_image_dir_key}" "${_img_dir}")
-set_benchmark=$(func_set_params "${cpp_benchmark_key}" "${cpp_benchmark_value}")
-set_batchsize=$(func_set_params "${cpp_batch_size_key}" "${batch_size}")
-set_cpu_threads=$(func_set_params "${cpp_cpu_threads_key}" "${threads}")
-set_model_dir=$(func_set_params "${cpp_infer_model_key}" "${_model_dir}")
-set_infer_params1=$(func_set_params "${cpp_infer_key1}" "${cpp_infer_value1}")
-command="${_script} ${cpp_use_gpu_key}=${use_gpu} ${cpp_use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} 2>&1 "
-eval $command
-last_status=${PIPESTATUS[0]}
-eval "cat ${_save_log_path}"
-status_check $last_status "${command}" "${status_log}"
-done
-done
-done
-elif [ ${use_gpu} = "True" ] || [ ${use_gpu} = "gpu" ]; then
-for use_trt in ${cpp_use_trt_list[*]}; do
-for precision in ${cpp_precision_list[*]}; do
-if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then
-continue
-fi
-if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then
-continue
-fi
-if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [ ${_flag_quant} = "True" ]; then
-continue
-fi
-for batch_size in ${cpp_batch_size_list[*]}; do
-_save_log_path="${_log_path}/cpp_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
-set_infer_data=$(func_set_params "${cpp_image_dir_key}" "${_img_dir}")
-set_benchmark=$(func_set_params "${cpp_benchmark_key}" "${cpp_benchmark_value}")
-set_batchsize=$(func_set_params "${cpp_batch_size_key}" "${batch_size}")
-set_tensorrt=$(func_set_params "${cpp_use_trt_key}" "${use_trt}")
-set_precision=$(func_set_params "${cpp_precision_key}" "${precision}")
-set_model_dir=$(func_set_params "${cpp_infer_model_key}" "${_model_dir}")
-set_infer_params1=$(func_set_params "${cpp_infer_key1}" "${cpp_infer_value1}")
-command="${_script} ${cpp_use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} 2>&1 "
-eval $command
-last_status=${PIPESTATUS[0]}
-eval "cat ${_save_log_path}"
-status_check $last_status "${command}" "${status_log}"
-done
-done
-done
-else
-echo "Does not support hardware other than CPU and GPU Currently!"
-fi
-done
-}
if [ ${MODE} = "infer" ]; then if [ ${MODE} = "whole_infer" ] || [ ${MODE} = "klquant_whole_infer" ]; then
GPUID=$3 GPUID=$3
if [ ${#GPUID} -le 0 ];then if [ ${#GPUID} -le 0 ];then
env=" " env=" "
...@@ -435,7 +226,7 @@ if [ ${MODE} = "infer" ]; then ...@@ -435,7 +226,7 @@ if [ ${MODE} = "infer" ]; then
set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_dir}") set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_dir}")
export_cmd="${python} ${infer_run_exports[Count]} ${set_export_weight} ${set_save_infer_key}" export_cmd="${python} ${infer_run_exports[Count]} ${set_export_weight} ${set_save_infer_key}"
echo ${infer_run_exports[Count]} echo ${infer_run_exports[Count]}
echo $export_cmd echo $export_cmd
eval $export_cmd eval $export_cmd
status_export=$? status_export=$?
status_check $status_export "${export_cmd}" "${status_log}" status_check $status_export "${export_cmd}" "${status_log}"
...@@ -444,50 +235,20 @@ if [ ${MODE} = "infer" ]; then ...@@ -444,50 +235,20 @@ if [ ${MODE} = "infer" ]; then
fi fi
#run inference #run inference
is_quant=${infer_quant_flag[Count]} is_quant=${infer_quant_flag[Count]}
if [ ${MODE} = "klquant_infer" ]; then
is_quant="True"
fi
func_inference "${python}" "${inference_py}" "${save_infer_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant} func_inference "${python}" "${inference_py}" "${save_infer_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant}
Count=$(($Count + 1)) Count=$(($Count + 1))
done done
elif [ ${MODE} = "cpp_infer" ]; then
GPUID=$3
if [ ${#GPUID} -le 0 ];then
env=" "
else
env="export CUDA_VISIBLE_DEVICES=${GPUID}"
fi
# set CUDA_VISIBLE_DEVICES
eval $env
export Count=0
IFS="|"
infer_quant_flag=(${cpp_infer_is_quant})
for infer_model in ${cpp_infer_model_dir_list[*]}; do
#run inference
is_quant=${infer_quant_flag[Count]}
func_cpp_inference "${inference_cmd}" "${infer_model}" "${LOG_PATH}" "${cpp_infer_img_dir}" ${is_quant}
Count=$(($Count + 1))
done
elif [ ${MODE} = "serving_infer" ]; then
GPUID=$3
if [ ${#GPUID} -le 0 ];then
env=" "
else
env="export CUDA_VISIBLE_DEVICES=${GPUID}"
fi
# set CUDA_VISIBLE_DEVICES
eval $env
export Count=0
IFS="|"
#run serving
func_serving "${web_service_cmd}"
 else
 IFS="|"
 export Count=0
 USE_GPU_KEY=(${train_use_gpu_value})
 for gpu in ${gpu_list[*]}; do
-use_gpu=${USE_GPU_KEY[Count]}
+train_use_gpu=${USE_GPU_KEY[Count]}
 Count=$(($Count + 1))
+ips=""
 if [ ${gpu} = "-1" ];then
 env=""
 elif [ ${#gpu} -le 1 ];then
@@ -507,6 +268,11 @@ else
 env=" "
 fi
 for autocast in ${autocast_list[*]}; do
+if [ ${autocast} = "amp" ]; then
+set_amp_config="Global.use_amp=True Global.scale_loss=1024.0 Global.use_dynamic_loss_scaling=True"
+else
+set_amp_config=" "
+fi
 for trainer in ${trainer_list[*]}; do
 flag_quant=False
 if [ ${trainer} = ${pact_key} ]; then
@@ -533,27 +299,35 @@ else
 if [ ${run_train} = "null" ]; then
 continue
 fi
 set_autocast=$(func_set_params "${autocast_key}" "${autocast}")
 set_epoch=$(func_set_params "${epoch_key}" "${epoch_num}")
 set_pretrain=$(func_set_params "${pretrain_model_key}" "${pretrain_model_value}")
 set_batchsize=$(func_set_params "${train_batch_key}" "${train_batch_value}")
 set_train_params1=$(func_set_params "${train_param_key1}" "${train_param_value1}")
-set_use_gpu=$(func_set_params "${train_use_gpu_key}" "${use_gpu}")
-save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}"
+set_use_gpu=$(func_set_params "${train_use_gpu_key}" "${train_use_gpu}")
+if [ ${#ips} -le 26 ];then
+save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}"
+nodes=1
+else
+IFS=","
+ips_array=(${ips})
+IFS="|"
+nodes=${#ips_array[@]}
+save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}_nodes_${nodes}"
+fi
 # load pretrain from norm training if current trainer is pact or fpgm trainer
-if [ ${trainer} = ${pact_key} ] || [ ${trainer} = ${fpgm_key} ]; then
+if ([ ${trainer} = ${pact_key} ] || [ ${trainer} = ${fpgm_key} ]) && [ ${nodes} -le 1 ]; then
 set_pretrain="${load_norm_train_model}"
 fi
 set_save_model=$(func_set_params "${save_model_key}" "${save_log}")
 if [ ${#gpu} -le 2 ];then # train with cpu or single gpu
-cmd="${python} ${run_train} ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_autocast} ${set_batchsize} ${set_train_params1} "
-elif [ ${#gpu} -le 15 ];then # train with multi-gpu
-cmd="${python} -m paddle.distributed.launch --gpus=${gpu} ${run_train} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_autocast} ${set_batchsize} ${set_train_params1}"
+cmd="${python} ${run_train} ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_autocast} ${set_batchsize} ${set_train_params1} ${set_amp_config} "
+elif [ ${#ips} -le 26 ];then # train with multi-gpu
+cmd="${python} -m paddle.distributed.launch --gpus=${gpu} ${run_train} ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_autocast} ${set_batchsize} ${set_train_params1} ${set_amp_config}"
 else # train with multi-machine
-cmd="${python} -m paddle.distributed.launch --ips=${ips} --gpus=${gpu} ${run_train} ${set_save_model} ${set_pretrain} ${set_epoch} ${set_autocast} ${set_batchsize} ${set_train_params1}"
+cmd="${python} -m paddle.distributed.launch --ips=${ips} --gpus=${gpu} ${run_train} ${set_use_gpu} ${set_save_model} ${set_pretrain} ${set_epoch} ${set_autocast} ${set_batchsize} ${set_train_params1} ${set_amp_config}"
 fi
 # run train
 eval "unset CUDA_VISIBLE_DEVICES"
@@ -562,7 +336,7 @@ else
 set_eval_pretrain=$(func_set_params "${pretrain_model_key}" "${save_log}/${train_model_name}")
 # save norm trained models to set pretrain for pact training and fpgm training
-if [ ${trainer} = ${trainer_norm} ]; then
+if [ ${trainer} = ${trainer_norm} ] && [ ${nodes} -le 1 ]; then
 load_norm_train_model=${set_eval_pretrain}
 fi
 # run eval
@@ -585,10 +359,17 @@ else
 #run inference
 eval $env
 save_infer_path="${save_log}"
-func_inference "${python}" "${inference_py}" "${save_infer_path}" "${LOG_PATH}" "${train_infer_img_dir}" "${flag_quant}"
+if [[ ${inference_dir} != "null" ]] && [[ ${inference_dir} != '##' ]]; then
+infer_model_dir="${save_infer_path}/${inference_dir}"
+else
+infer_model_dir=${save_infer_path}
+fi
+func_inference "${python}" "${inference_py}" "${infer_model_dir}" "${LOG_PATH}" "${train_infer_img_dir}" "${flag_quant}"
 eval "unset CUDA_VISIBLE_DEVICES"
 fi
 done # done with: for trainer in ${trainer_list[*]}; do
 done # done with: for autocast in ${autocast_list[*]}; do
 done # done with: for gpu in ${gpu_list[*]}; do
 fi # end if [ ${MODE} = "infer" ]; then
===========================train_params===========================
model_name:ocr_server_rec
python:python3.7
gpu_list:0|0,1
Global.use_gpu:True|True
Global.auto_cast:null
Global.epoch_num:lite_train_infer=2|whole_train_infer=300
Global.save_model_dir:./output/
Train.loader.batch_size_per_card:lite_train_infer=128|whole_train_infer=128
Global.pretrained_model:null
train_model_name:latest
train_infer_img_dir:./inference/rec_inference
null:null
##
trainer:norm_train|pact_train
norm_train:tools/train.py -c tests/configs/rec_icdar15_r34_train.yml -o
pact_train:deploy/slim/quantization/quant.py -c tests/configs/rec_icdar15_r34_train.yml -o
fpgm_train:null
distill_train:null
null:null
null:null
##
===========================eval_params===========================
eval:tools/eval.py -c tests/configs/rec_icdar15_r34_train.yml -o
null:null
##
===========================infer_params===========================
Global.save_inference_dir:./output/
Global.pretrained_model:
norm_export:tools/export_model.py -c tests/configs/rec_icdar15_r34_train.yml -o
quant_export:deploy/slim/quantization/export_model.py -c tests/configs/rec_icdar15_r34_train.yml -o
fpgm_export:null
distill_export:null
export1:null
export2:null
##
infer_model:./inference/ch_ppocr_server_v2.0_rec_infer/
infer_export:null
infer_quant:False
inference:tools/infer/predict_rec.py
--use_gpu:True|False
--enable_mkldnn:True|False
--cpu_threads:1|6
--rec_batch_num:1|6
--use_tensorrt:True|False
--precision:fp32|fp16|int8
--rec_model_dir:
--image_dir:./inference/rec_inference
--save_log_path:./test/output/
--benchmark:True
null:null
===========================cpp_infer_params===========================
use_opencv:True
infer_model:./inference/ch_ppocr_server_v2.0_rec_infer/
infer_quant:False
inference:./deploy/cpp_infer/build/ppocr rec
--use_gpu:True|False
--enable_mkldnn:True|False
--cpu_threads:1|6
--rec_batch_num:1
--use_tensorrt:False|True
--precision:fp32|fp16
--rec_model_dir:
--image_dir:./inference/rec_inference/
null:null
--benchmark:True
===========================serving_params===========================
trans_model:-m paddle_serving_client.convert
--dirname:./inference/ch_ppocr_server_v2.0_rec_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--serving_server:./deploy/pdserving/ppocr_rec_server_2.0_serving/
--serving_client:./deploy/pdserving/ppocr_rec_server_2.0_client/
serving_dir:./deploy/pdserving
web_service:web_service_rec.py --config=config.yml --opt op.rec.concurrency=1
op.rec.local_service_conf.devices:null|0
op.rec.local_service_conf.use_mkldnn:True|False
op.rec.local_service_conf.thread_num:1|6
op.rec.local_service_conf.use_trt:False|True
op.rec.local_service_conf.precision:fp32|fp16|int8
pipeline:pipeline_http_client.py --image_dir=../../doc/imgs_words_en
# Introduction to the train-to-inference deployment toolchain tests
test.sh works together with the params.txt config files to test the complete training-to-prediction pipeline of the lightweight OCR detection and recognition models. The `key:value` parsing convention they share is sketched below.
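A minimal sketch of this convention (it assumes the helper functions from common_func.sh, shown earlier in this commit, can be sourced; the sample line is taken from the rec params file above):
```bash
#!/bin/bash
source test_tipc/common_func.sh  # provides func_parser_key / func_parser_value (path may differ per checkout)

# Each line of a params file is "key:value"; a value may hold several
# candidates separated by "|", and test.sh loops over every candidate.
line="--cpu_threads:1|6"
key=$(func_parser_key "${line}")      # -> --cpu_threads
value=$(func_parser_value "${line}")  # -> 1|6
IFS="|"
for threads in ${value}; do
    echo "would run with ${key}=${threads}"
done
```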
# Install dependencies
- Install PaddlePaddle >= 2.0
- Install the PaddleOCR dependencies
```
pip3 install -r ../requirements.txt
```
- Install autolog
```
git clone https://github.com/LDOUBLEV/AutoLog
cd AutoLog
pip3 install -r requirements.txt
python3 setup.py bdist_wheel
pip3 install ./dist/auto_log-1.0.0-py3-none-any.whl
cd ../
```
# Directory overview
```bash
tests/
├── ocr_det_params.txt # config file for testing the OCR detection model
├── ocr_rec_params.txt # config file for testing the OCR recognition model
├── ocr_ppocr_mobile_params.txt # config file for testing the cascaded OCR detection + recognition pipeline
├── prepare.sh # downloads the data and models that test.sh needs
└── test.sh # the main test script
```
# Usage
test.sh supports five run modes. Each mode uses a different amount of data and serves a different purpose when testing speed and accuracy:
- Mode 1: lite_train_infer — train on a small amount of data to quickly verify that the training-to-prediction pipeline runs end to end; accuracy and speed are not checked;
```shell
bash tests/prepare.sh ./tests/ocr_det_params.txt 'lite_train_infer'
bash tests/test.sh ./tests/ocr_det_params.txt 'lite_train_infer'
```
- Mode 2: whole_infer — train on a small amount of data and predict on a moderate amount of data, to verify that the trained model can run prediction and that the prediction speed is reasonable;
```shell
bash tests/prepare.sh ./tests/ocr_det_params.txt 'whole_infer'
bash tests/test.sh ./tests/ocr_det_params.txt 'whole_infer'
```
- Mode 3: infer — no training; predict on the full dataset to run open-source model evaluation and dynamic-to-static export, and to check the inference model's prediction time and accuracy;
```shell
bash tests/prepare.sh ./tests/ocr_det_params.txt 'infer'
# Usage 1:
bash tests/test.sh ./tests/ocr_det_params.txt 'infer'
# Usage 2: predict on a specified GPU card; the third argument is the GPU card id
bash tests/test.sh ./tests/ocr_det_params.txt 'infer' '1'
```
- Mode 4: whole_train_infer — CE: train on the full dataset and predict on the full dataset, to verify training accuracy, prediction accuracy, and prediction speed;
```shell
bash tests/prepare.sh ./tests/ocr_det_params.txt 'whole_train_infer'
bash tests/test.sh ./tests/ocr_det_params.txt 'whole_train_infer'
```
- Mode 5: cpp_infer — CE: verify that C++ prediction with the inference model runs end to end;
```shell
bash tests/prepare.sh ./tests/ocr_det_params.txt 'cpp_infer'
bash tests/test.sh ./tests/ocr_det_params.txt 'cpp_infer'
```
# Log output
The resulting log files (with the .log suffix) are written to the `tests/output` directory.
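Each entry in these logs is appended by the `status_check` helper in common_func.sh (shown earlier in this commit). Aside from the terminal color escape codes, a passing step produces a line of the following form (the command shown here is illustrative):
```
 Run successfully with command - python3.7 tools/infer/predict_rec.py --use_gpu=True --rec_batch_num=1 ...!
```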
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
@@ -27,7 +27,7 @@ from ppocr.data import build_dataloader
 from ppocr.modeling.architectures import build_model
 from ppocr.postprocess import build_post_process
 from ppocr.metrics import build_metric
-from ppocr.utils.save_load import init_model, load_dygraph_params
+from ppocr.utils.save_load import load_model
 from ppocr.utils.utility import print_dict
 import tools.program as program
@@ -61,7 +61,7 @@ def main():
     else:
         model_type = None
-    best_model_dict = load_dygraph_params(config, model, logger, None)
+    best_model_dict = load_model(config, model)
     if len(best_model_dict):
         logger.info('metric in ckpt ***************')
         for k, v in best_model_dict.items():
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import pickle
__dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(__dir__)
sys.path.append(os.path.abspath(os.path.join(__dir__, '..')))
from ppocr.data import build_dataloader
from ppocr.modeling.architectures import build_model
from ppocr.postprocess import build_post_process
from ppocr.utils.save_load import load_model
from ppocr.utils.utility import print_dict
import tools.program as program
def main():
global_config = config['Global']
    # build dataloader: reuse the Train dataset through the Eval pipeline so
    # the centers are computed on training data
config['Eval']['dataset']['name'] = config['Train']['dataset']['name']
config['Eval']['dataset']['data_dir'] = config['Train']['dataset'][
'data_dir']
config['Eval']['dataset']['label_file_list'] = config['Train']['dataset'][
'label_file_list']
eval_dataloader = build_dataloader(config, 'Eval', device, logger)
# build post process
post_process_class = build_post_process(config['PostProcess'],
global_config)
# build model
# for rec algorithm
if hasattr(post_process_class, 'character'):
char_num = len(getattr(post_process_class, 'character'))
config['Architecture']["Head"]['out_channels'] = char_num
    # set return_feats = True so the head also returns features
config['Architecture']["Head"]["return_feats"] = True
model = build_model(config['Architecture'])
best_model_dict = load_model(config, model)
if len(best_model_dict):
logger.info('metric in ckpt ***************')
for k, v in best_model_dict.items():
logger.info('{}:{}'.format(k, v))
# get features from train data
char_center = program.get_center(model, eval_dataloader, post_process_class)
    # serialize the character-center features to disk
with open("train_center.pkl", 'wb') as f:
pickle.dump(char_center, f)
return
if __name__ == '__main__':
config, device, logger, vdl_writer = program.preprocess()
main()
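The new script serializes the per-character center features with `pickle`, so downstream tooling can read them back directly. A minimal usage sketch, assuming `program.get_center` returns a dict-like mapping from character to feature vector:
```python
# Load the character-center features produced by the script above.
import pickle

with open("train_center.pkl", "rb") as f:
    char_center = pickle.load(f)
print("characters with centers:", len(char_center))
```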
...
@@ -26,7 +26,7 @@ from paddle.jit import to_static
 from ppocr.modeling.architectures import build_model
 from ppocr.postprocess import build_post_process
-from ppocr.utils.save_load import init_model
+from ppocr.utils.save_load import load_model
 from ppocr.utils.logging import get_logger
 from tools.program import load_config, merge_config, ArgsParser
...
@@ -107,7 +107,7 @@ def main():
     else:  # base rec model
         config["Architecture"]["Head"]["out_channels"] = char_num
     model = build_model(config["Architecture"])
-    init_model(config, model)
+    load_model(config, model)
     model.eval()
     save_path = config["Global"]["save_inference_dir"]
...
...
@@ -47,6 +47,7 @@ class TextClassifier(object):
         self.postprocess_op = build_post_process(postprocess_params)
         self.predictor, self.input_tensor, self.output_tensors, _ = \
             utility.create_predictor(args, 'cls', logger)
+        self.use_onnx = args.use_onnx

     def resize_norm_img(self, img):
         imgC, imgH, imgW = self.cls_image_shape
...
@@ -100,10 +101,16 @@ class TextClassifier(object):
             norm_img_batch = np.concatenate(norm_img_batch)
             norm_img_batch = norm_img_batch.copy()
-            self.input_tensor.copy_from_cpu(norm_img_batch)
-            self.predictor.run()
-            prob_out = self.output_tensors[0].copy_to_cpu()
-            self.predictor.try_shrink_memory()
+            if self.use_onnx:
+                input_dict = {}
+                input_dict[self.input_tensor.name] = norm_img_batch
+                outputs = self.predictor.run(self.output_tensors, input_dict)
+                prob_out = outputs[0]
+            else:
+                self.input_tensor.copy_from_cpu(norm_img_batch)
+                self.predictor.run()
+                prob_out = self.output_tensors[0].copy_to_cpu()
+                self.predictor.try_shrink_memory()
             cls_result = self.postprocess_op(prob_out)
             elapse += time.time() - starttime
             for rno in range(len(cls_result)):
...
@@ -131,14 +138,9 @@ def main(args):
         img_list.append(img)
     try:
         img_list, cls_res, predict_time = text_classifier(img_list)
-    except:
+    except Exception as E:
         logger.info(traceback.format_exc())
-        logger.info(
-            "ERROR!!!! \n"
-            "Please read the FAQ:https://github.com/PaddlePaddle/PaddleOCR#faq \n"
-            "If your model has tps module: "
-            "TPS does not support variable shape.\n"
-            "Please set --rec_image_shape='3,32,100' and --rec_char_type='en' ")
+        logger.info(E)
         exit()
     for ino in range(len(img_list)):
         logger.info("Predicts of {}:{}".format(valid_image_file_list[ino],
...
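The classifier hunk introduces the dual execution path that recurs in the detector, end-to-end, and recognizer diffs below: with `--use_onnx`, `utility.create_predictor` hands back an ONNX Runtime session, so inputs are fed as a name-to-array dict and `run` returns all outputs at once, while the Paddle Inference path still copies tensors to and from the device explicitly. A hedged sketch of the shared pattern, with hypothetical helper names:
```python
# Illustrative only: `predictor` is either an onnxruntime.InferenceSession
# (use_onnx=True, where output_tensors holds output names) or a Paddle
# Inference predictor with input/output tensor handles.
def run_predictor(predictor, input_tensor, output_tensors, data, use_onnx):
    if use_onnx:
        input_dict = {input_tensor.name: data}
        return predictor.run(output_tensors, input_dict)
    input_tensor.copy_from_cpu(data)   # host -> device
    predictor.run()
    return [t.copy_to_cpu() for t in output_tensors]  # device -> host
```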
...
@@ -38,6 +38,7 @@ class TextDetector(object):
     def __init__(self, args):
         self.args = args
         self.det_algorithm = args.det_algorithm
+        self.use_onnx = args.use_onnx
         pre_process_list = [{
             'DetResizeForTest': {
                 'limit_side_len': args.det_limit_side_len,
...
@@ -100,7 +101,12 @@ class TextDetector(object):
         else:
             logger.info("unknown det_algorithm:{}".format(self.det_algorithm))
             sys.exit(0)
+        if self.use_onnx:
+            pre_process_list[0] = {
+                'DetResizeForTest': {
+                    'image_shape': [640, 640]
+                }
+            }
         self.preprocess_op = create_operators(pre_process_list)
         self.postprocess_op = build_post_process(postprocess_params)
         self.predictor, self.input_tensor, self.output_tensors, self.config = utility.create_predictor(
...
@@ -198,15 +204,19 @@ class TextDetector(object):
         if self.args.benchmark:
             self.autolog.times.stamp()
-        self.input_tensor.copy_from_cpu(img)
-        self.predictor.run()
-        outputs = []
-        for output_tensor in self.output_tensors:
-            output = output_tensor.copy_to_cpu()
-            outputs.append(output)
-        if self.args.benchmark:
-            self.autolog.times.stamp()
+        if self.use_onnx:
+            input_dict = {}
+            input_dict[self.input_tensor.name] = img
+            outputs = self.predictor.run(self.output_tensors, input_dict)
+        else:
+            self.input_tensor.copy_from_cpu(img)
+            self.predictor.run()
+            outputs = []
+            for output_tensor in self.output_tensors:
+                output = output_tensor.copy_to_cpu()
+                outputs.append(output)
+            if self.args.benchmark:
+                self.autolog.times.stamp()
         preds = {}
         if self.det_algorithm == "EAST":
...
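Note the detector also pins `DetResizeForTest` to a fixed `image_shape` of [640, 640] in ONNX mode, presumably because the exported ONNX detection graph has a static input shape, unlike the Paddle Inference path, which accepts variable `limit_side_len` resizing. One way to confirm what shape a given ONNX file expects (illustrative; the model path is an assumption):
```python
# Inspect the declared input shape of an exported detection model.
import onnxruntime as ort

sess = ort.InferenceSession("inference/det.onnx")  # hypothetical path
print(sess.get_inputs()[0].shape)  # a static graph prints e.g. [1, 3, 640, 640]
```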
...
@@ -38,6 +38,7 @@ class TextE2E(object):
     def __init__(self, args):
         self.args = args
         self.e2e_algorithm = args.e2e_algorithm
+        self.use_onnx = args.use_onnx
         pre_process_list = [{
             'E2EResizeForTest': {}
         }, {
...
@@ -67,7 +68,6 @@ class TextE2E(object):
             postprocess_params["character_dict_path"] = args.e2e_char_dict_path
             postprocess_params["valid_set"] = args.e2e_pgnet_valid_set
             postprocess_params["mode"] = args.e2e_pgnet_mode
-            self.e2e_pgnet_polygon = args.e2e_pgnet_polygon
         else:
             logger.info("unknown e2e_algorithm:{}".format(self.e2e_algorithm))
             sys.exit(0)
...
@@ -106,21 +106,31 @@ class TextE2E(object):
         img = img.copy()
         starttime = time.time()
-        self.input_tensor.copy_from_cpu(img)
-        self.predictor.run()
-        outputs = []
-        for output_tensor in self.output_tensors:
-            output = output_tensor.copy_to_cpu()
-            outputs.append(output)
-        preds = {}
-        if self.e2e_algorithm == 'PGNet':
-            preds['f_border'] = outputs[0]
-            preds['f_char'] = outputs[1]
-            preds['f_direction'] = outputs[2]
-            preds['f_score'] = outputs[3]
-        else:
-            raise NotImplementedError
+        if self.use_onnx:
+            input_dict = {}
+            input_dict[self.input_tensor.name] = img
+            outputs = self.predictor.run(self.output_tensors, input_dict)
+            preds = {}
+            preds['f_border'] = outputs[0]
+            preds['f_char'] = outputs[1]
+            preds['f_direction'] = outputs[2]
+            preds['f_score'] = outputs[3]
+        else:
+            self.input_tensor.copy_from_cpu(img)
+            self.predictor.run()
+            outputs = []
+            for output_tensor in self.output_tensors:
+                output = output_tensor.copy_to_cpu()
+                outputs.append(output)
+            preds = {}
+            if self.e2e_algorithm == 'PGNet':
+                preds['f_border'] = outputs[0]
+                preds['f_char'] = outputs[1]
+                preds['f_direction'] = outputs[2]
+                preds['f_score'] = outputs[3]
+            else:
+                raise NotImplementedError
         post_result = self.postprocess_op(preds, shape_list)
         points, strs = post_result['points'], post_result['texts']
         dt_boxes = self.filter_tag_det_res_only_clip(points, ori_im.shape)
...
@@ -141,7 +151,6 @@ if __name__ == "__main__":
         img, flag = check_and_read_gif(image_file)
         if not flag:
             img = cv2.imread(image_file)
-            img = img[:, :, ::-1]
         if img is None:
             logger.info("error in loading image:{}".format(image_file))
             continue
...
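Besides the ONNX branch, this hunk drops the manual `img = img[:, :, ::-1]` channel flip in the `__main__` loop, which leaves `predict_e2e.py` feeding cv2's native BGR output like the other predict_* entry points; that rationale is inferred from the surrounding diffs, not stated in the commit. A tiny illustration of what the removed slice did:
```python
# The removed slice reverses the channel axis, i.e. BGR <-> RGB.
import numpy as np

bgr = np.zeros((2, 2, 3), dtype=np.uint8)
bgr[..., 0] = 255            # channel 0 (blue in OpenCV's BGR order)
rgb = bgr[:, :, ::-1]        # reversal moves it to channel 2
assert rgb[..., 2].all() and not rgb[..., 0].any()
```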
...
@@ -38,40 +38,34 @@ logger = get_logger()
 class TextRecognizer(object):
     def __init__(self, args):
         self.rec_image_shape = [int(v) for v in args.rec_image_shape.split(",")]
-        self.character_type = args.rec_char_type
         self.rec_batch_num = args.rec_batch_num
         self.rec_algorithm = args.rec_algorithm
         postprocess_params = {
             'name': 'CTCLabelDecode',
-            "character_type": args.rec_char_type,
             "character_dict_path": args.rec_char_dict_path,
             "use_space_char": args.use_space_char
         }
         if self.rec_algorithm == "SRN":
             postprocess_params = {
                 'name': 'SRNLabelDecode',
-                "character_type": args.rec_char_type,
                 "character_dict_path": args.rec_char_dict_path,
                 "use_space_char": args.use_space_char
             }
         elif self.rec_algorithm == "RARE":
             postprocess_params = {
                 'name': 'AttnLabelDecode',
-                "character_type": args.rec_char_type,
                 "character_dict_path": args.rec_char_dict_path,
                 "use_space_char": args.use_space_char
             }
         elif self.rec_algorithm == 'NRTR':
             postprocess_params = {
                 'name': 'NRTRLabelDecode',
-                "character_type": args.rec_char_type,
                 "character_dict_path": args.rec_char_dict_path,
                 "use_space_char": args.use_space_char
             }
         elif self.rec_algorithm == "SAR":
             postprocess_params = {
                 'name': 'SARLabelDecode',
-                "character_type": args.rec_char_type,
                 "character_dict_path": args.rec_char_dict_path,
                 "use_space_char": args.use_space_char
             }
...
@@ -79,6 +73,7 @@ class TextRecognizer(object):
         self.predictor, self.input_tensor, self.output_tensors, self.config = \
             utility.create_predictor(args, 'rec', logger)
         self.benchmark = args.benchmark
+        self.use_onnx = args.use_onnx
         if args.benchmark:
             import auto_log
             pid = os.getpid()
...
@@ -96,7 +91,7 @@ class TextRecognizer(object):
                 time_keys=[
                     'preprocess_time', 'inference_time', 'postprocess_time'
                 ],
-                warmup=2,
+                warmup=0,
                 logger=logger)

     def resize_norm_img(self, img, max_wh_ratio):
...
@@ -112,8 +107,9 @@ class TextRecognizer(object):
             return norm_img.astype(np.float32) / 128. - 1.

         assert imgC == img.shape[2]
-        max_wh_ratio = max(max_wh_ratio, imgW / imgH)
         imgW = int((32 * max_wh_ratio))
+        if self.use_onnx:
+            imgW = 100
         h, w = img.shape[:2]
         ratio = w / float(h)
         if math.ceil(imgH * ratio) > imgW:
...
@@ -303,51 +299,72 @@ class TextRecognizer(object):
                     gsrm_slf_attn_bias1_list,
                     gsrm_slf_attn_bias2_list,
                 ]
-                input_names = self.predictor.get_input_names()
-                for i in range(len(input_names)):
-                    input_tensor = self.predictor.get_input_handle(input_names[
-                        i])
-                    input_tensor.copy_from_cpu(inputs[i])
-                self.predictor.run()
-                outputs = []
-                for output_tensor in self.output_tensors:
-                    output = output_tensor.copy_to_cpu()
-                    outputs.append(output)
-                if self.benchmark:
-                    self.autolog.times.stamp()
-                preds = {"predict": outputs[2]}
+                if self.use_onnx:
+                    input_dict = {}
+                    input_dict[self.input_tensor.name] = norm_img_batch
+                    outputs = self.predictor.run(self.output_tensors,
+                                                 input_dict)
+                    preds = {"predict": outputs[2]}
+                else:
+                    input_names = self.predictor.get_input_names()
+                    for i in range(len(input_names)):
+                        input_tensor = self.predictor.get_input_handle(
+                            input_names[i])
+                        input_tensor.copy_from_cpu(inputs[i])
+                    self.predictor.run()
+                    outputs = []
+                    for output_tensor in self.output_tensors:
+                        output = output_tensor.copy_to_cpu()
+                        outputs.append(output)
+                    if self.benchmark:
+                        self.autolog.times.stamp()
+                    preds = {"predict": outputs[2]}
             elif self.rec_algorithm == "SAR":
                 valid_ratios = np.concatenate(valid_ratios)
                 inputs = [
                     norm_img_batch,
                     valid_ratios,
                 ]
-                input_names = self.predictor.get_input_names()
-                for i in range(len(input_names)):
-                    input_tensor = self.predictor.get_input_handle(input_names[
-                        i])
-                    input_tensor.copy_from_cpu(inputs[i])
-                self.predictor.run()
-                outputs = []
-                for output_tensor in self.output_tensors:
-                    output = output_tensor.copy_to_cpu()
-                    outputs.append(output)
-                if self.benchmark:
-                    self.autolog.times.stamp()
-                preds = outputs[0]
+                if self.use_onnx:
+                    input_dict = {}
+                    input_dict[self.input_tensor.name] = norm_img_batch
+                    outputs = self.predictor.run(self.output_tensors,
+                                                 input_dict)
+                    preds = outputs[0]
+                else:
+                    input_names = self.predictor.get_input_names()
+                    for i in range(len(input_names)):
+                        input_tensor = self.predictor.get_input_handle(
+                            input_names[i])
+                        input_tensor.copy_from_cpu(inputs[i])
+                    self.predictor.run()
+                    outputs = []
+                    for output_tensor in self.output_tensors:
+                        output = output_tensor.copy_to_cpu()
+                        outputs.append(output)
+                    if self.benchmark:
+                        self.autolog.times.stamp()
+                    preds = outputs[0]
             else:
-                self.input_tensor.copy_from_cpu(norm_img_batch)
-                self.predictor.run()
-                outputs = []
-                for output_tensor in self.output_tensors:
-                    output = output_tensor.copy_to_cpu()
-                    outputs.append(output)
-                if self.benchmark:
-                    self.autolog.times.stamp()
-                if len(outputs) != 1:
-                    preds = outputs
-                else:
-                    preds = outputs[0]
+                if self.use_onnx:
+                    input_dict = {}
+                    input_dict[self.input_tensor.name] = norm_img_batch
+                    outputs = self.predictor.run(self.output_tensors,
+                                                 input_dict)
+                    preds = outputs[0]
+                else:
+                    self.input_tensor.copy_from_cpu(norm_img_batch)
+                    self.predictor.run()
+                    outputs = []
+                    for output_tensor in self.output_tensors:
+                        output = output_tensor.copy_to_cpu()
+                        outputs.append(output)
+                    if self.benchmark:
+                        self.autolog.times.stamp()
+                    if len(outputs) != 1:
+                        preds = outputs
+                    else:
+                        preds = outputs[0]
             rec_result = self.postprocess_op(preds)
             for rno in range(len(rec_result)):
                 rec_res[indices[beg_img_no + rno]] = rec_result[rno]
...
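With `character_type` removed from every decoder configuration, the recognizer's post-process setup reduces to a decoder name plus the dictionary path and space-char flag. A compact equivalent of the branches above (hypothetical helper, mirroring the diff):
```python
# Map rec_algorithm to its label decoder; anything else falls back to CTC.
DECODER_BY_ALGORITHM = {
    "SRN": "SRNLabelDecode",
    "RARE": "AttnLabelDecode",
    "NRTR": "NRTRLabelDecode",
    "SAR": "SARLabelDecode",
}

def build_rec_postprocess_params(args):
    return {
        "name": DECODER_BY_ALGORITHM.get(args.rec_algorithm, "CTCLabelDecode"),
        "character_dict_path": args.rec_char_dict_path,
        "use_space_char": args.use_space_char,
    }
```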