wangsen / paddle_dbnet

Commit 12530a99, authored Nov 10, 2021 by LDOUBLEV

Merge branch 'dygraph' of https://github.com/PaddlePaddle/PaddleOCR into test_v10

Parents: f322e377, a8960021
Changes: 34 files in this commit; this page shows 14 changed files with 171 additions and 65 deletions (+171, -65).
test_tipc/configs/ppocr_det_server_params.txt  +2 -1
test_tipc/configs/ppocr_rec_mobile/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt  +12 -0
test_tipc/configs/ppocr_rec_mobile/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt  +18 -0
test_tipc/configs/ppocr_rec_mobile_params.txt  +2 -1
test_tipc/configs/ppocr_rec_server/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt  +12 -0
test_tipc/configs/ppocr_rec_server/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt  +18 -0
test_tipc/configs/ppocr_rec_server_params.txt  +2 -1
test_tipc/configs/win_ppocr_det_mobile_params.txt  +2 -1
test_tipc/docs/test_paddle2onnx.md  +2 -2
test_tipc/docs/test_serving.md  +2 -2
test_tipc/test_lite.sh  +1 -1
test_tipc/test_paddle2onnx.sh  +1 -1
test_tipc/test_serving.sh  +81 -48
test_tipc/test_train_inference_python.sh  +16 -7
test_tipc/configs/ppocr_det_server_params.txt
...
@@ -80,4 +80,5 @@ op.det.local_service_conf.use_mkldnn:True|False
 op.det.local_service_conf.thread_num:1|6
 op.det.local_service_conf.use_trt:False|True
 op.det.local_service_conf.precision:fp32|fp16|int8
-pipline:pipeline_http_client.py --image_dir=../../doc/imgs
+pipline:pipeline_http_client.py|pipeline_rpc_client.py
+--image_dir=../../doc/imgs
test_tipc/configs/ppocr_rec_mobile/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt (new file, mode 100644)
===========================paddle2onnx_params===========================
2onnx: paddle2onnx
--model_dir:./inference/ch_ppocr_mobile_v2.0_rec_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--save_file:./inference/rec_mobile_onnx/model.onnx
--opset_version:10
--enable_onnx_checker:True
inference:tools/infer/predict_rec.py
--use_gpu:True|False
--rec_model_dir:
--image_dir:./inference/rec_inference
\ No newline at end of file
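The fields above are the pieces the TIPC harness stitches into a Paddle2ONNX conversion step; as a rough, hedged sketch assembled from the config values above (not the harness's literal command line), the conversion would look like:

```shell
# Sketch only: flags taken from the config fields above; test_paddle2onnx.sh builds the real command.
paddle2onnx --model_dir ./inference/ch_ppocr_mobile_v2.0_rec_infer/ \
            --model_filename inference.pdmodel \
            --params_filename inference.pdiparams \
            --save_file ./inference/rec_mobile_onnx/model.onnx \
            --opset_version 10 \
            --enable_onnx_checker True
```

The `inference:` line then points the harness at `tools/infer/predict_rec.py`, with `./inference/rec_inference` as the test image directory.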
test_tipc/configs/ppocr_rec_mobile/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt (new file, mode 100644)
===========================serving_params===========================
model_name:ocr_rec_mobile
python:python3.7|cpp
trans_model:-m paddle_serving_client.convert
--dirname:./inference/ch_ppocr_mobile_v2.0_rec_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--serving_server:./deploy/pdserving/ppocr_rec_mobile_2.0_serving/
--serving_client:./deploy/pdserving/ppocr_rec_mobile_2.0_client/
serving_dir:./deploy/pdserving
web_service:web_service_rec.py --config=config.yml --opt op.rec.concurrency=1
op.rec.local_service_conf.devices:null|0
op.rec.local_service_conf.use_mkldnn:True|False
op.rec.local_service_conf.thread_num:1|6
op.rec.local_service_conf.use_trt:False|True
op.rec.local_service_conf.precision:fp32|fp16|int8
pipline:pipeline_rpc_client.py|pipeline_http_client.py
--image_dir:../../doc/imgs_words_en
\ No newline at end of file
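For orientation, the `trans_model` and `web_service` fields above roughly expand to the following commands; this is a hedged sketch assembled from the config values, and `test_serving.sh` is what actually builds and runs them:

```shell
# Sketch only: values copied from the config fields above.
python3.7 -m paddle_serving_client.convert \
    --dirname ./inference/ch_ppocr_mobile_v2.0_rec_infer/ \
    --model_filename inference.pdmodel \
    --params_filename inference.pdiparams \
    --serving_server ./deploy/pdserving/ppocr_rec_mobile_2.0_serving/ \
    --serving_client ./deploy/pdserving/ppocr_rec_mobile_2.0_client/
cd ./deploy/pdserving
python3.7 web_service_rec.py --config=config.yml --opt op.rec.concurrency=1 &
python3.7 pipeline_http_client.py --image_dir=../../doc/imgs_words_en
```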
test_tipc/configs/ppocr_rec_mobile_params.txt
...
@@ -80,4 +80,5 @@ op.rec.local_service_conf.use_mkldnn:True|False
 op.rec.local_service_conf.thread_num:1|6
 op.rec.local_service_conf.use_trt:False|True
 op.rec.local_service_conf.precision:fp32|fp16|int8
-pipline:pipeline_http_client.py --image_dir=../../doc/imgs_words_en
+pipline:pipeline_http_client.py|pipeline_rpc_client.py
+--image_dir=../../doc/imgs_words_en
test_tipc/configs/ppocr_rec_server/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt (new file, mode 100644)
===========================paddle2onnx_params===========================
2onnx: paddle2onnx
--model_dir:./inference/ch_ppocr_server_v2.0_rec_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--save_file:./inference/rec_server_onnx/model.onnx
--opset_version:10
--enable_onnx_checker:True
inference:tools/infer/predict_rec.py
--use_gpu:True|False
--rec_model_dir:
--image_dir:./inference/rec_inference
\ No newline at end of file
test_tipc/configs/ppocr_rec_server/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt (new file, mode 100644)
===========================serving_params===========================
model_name:ocr_rec_server
python:python3.7
trans_model:-m paddle_serving_client.convert
--dirname:./inference/ch_ppocr_server_v2.0_rec_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--serving_server:./deploy/pdserving/ppocr_rec_server_2.0_serving/
--serving_client:./deploy/pdserving/ppocr_rec_server_2.0_client/
serving_dir:./deploy/pdserving
web_service:web_service_rec.py --config=config.yml --opt op.rec.concurrency=1
op.rec.local_service_conf.devices:null|0
op.rec.local_service_conf.use_mkldnn:True|False
op.rec.local_service_conf.thread_num:1|6
op.rec.local_service_conf.use_trt:False|True
op.rec.local_service_conf.precision:fp32|fp16|int8
pipline:pipeline_rpc_client.py|pipeline_http_client.py
--image_dir:../../doc/imgs_words_en
\ No newline at end of file
test_tipc/configs/ppocr_rec_server_params.txt
...
@@ -80,4 +80,5 @@ op.rec.local_service_conf.use_mkldnn:True|False
 op.rec.local_service_conf.thread_num:1|6
 op.rec.local_service_conf.use_trt:False|True
 op.rec.local_service_conf.precision:fp32|fp16|int8
-pipline:pipeline_http_client.py --image_dir=../../doc/imgs_words_en
+pipline:pipeline_http_client.py|pipeline_rpc_client.py
+--image_dir=../../doc/imgs_words_en
test_tipc/configs/win_ppocr_det_mobile_params.txt
...
@@ -80,7 +80,8 @@ op.det.local_service_conf.use_mkldnn:True|False
 op.det.local_service_conf.thread_num:1|6
 op.det.local_service_conf.use_trt:False|True
 op.det.local_service_conf.precision:fp32|fp16|int8
-pipline:pipeline_http_client.py --image_dir=../../doc/imgs
+pipline:pipeline_http_client.py|pipeline_rpc_client.py
+--image_dir=../../doc/imgs
 ===========================kl_quant_params===========================
 infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/
 infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o
...
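The `kl_quant_params` block above pairs an inference model directory with an `infer_export` command stub that ends in `-o`; a hedged illustration of how such an export call is typically completed (the `Global.*` overrides here are placeholders, not values from this config) is:

```shell
# Illustrative only: the Global.* overrides are placeholders, not part of win_ppocr_det_mobile_params.txt.
python tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml \
    -o Global.pretrained_model=<path_to_trained_weights> \
       Global.save_inference_dir=<output_inference_dir>
```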
test_tipc/docs/test_paddle2onnx.md
...
@@ -18,10 +18,10 @@ The main program of the PaddleServing prediction functional test is `test_paddle2onnx.sh`, which can test
First run `prepare.sh` to prepare the data and models, then run `test_paddle2onnx.sh` to run the test; log files with the suffix `paddle2onnx_infer_*.log` are generated in the `test_tipc/output` directory.
```shell
-bash test_tipc/prepare.sh ./test_tipc/configs/ppocr_det_mobile_params.txt "paddle2onnx_infer"
+bash test_tipc/prepare.sh ./test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt "paddle2onnx_infer"
# Usage:
-bash test_tipc/test_paddle2onnx.sh ./test_tipc/configs/ppocr_det_mobile_params.txt
+bash test_tipc/test_paddle2onnx.sh ./test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
```
#### Run results
...
...
test_tipc/docs/test_serving.md
...
@@ -20,10 +20,10 @@ The main program of the PaddleServing prediction functional test is `test_serving.sh`, which can test
First run `prepare.sh` to prepare the data and models, then run `test_serving.sh` to run the test; log files with the suffix `serving_infer_*.log` are generated in the `test_tipc/output` directory.
```shell
-bash test_tipc/prepare.sh ./test_tipc/configs/ppocr_det_mobile_params.txt "serving_infer"
+bash test_tipc/prepare.sh ./test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt "serving_infer"
# Usage:
-bash test_tipc/test_serving.sh ./test_tipc/configs/ppocr_det_mobile_params.txt
+bash test_tipc/test_serving.sh ./test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
```
#### Run results
...
...
test_tipc/test_lite.sh
...
@@ -3,7 +3,7 @@ source ./common_func.sh
 export LD_LIBRARY_PATH=${PWD}:$LD_LIBRARY_PATH
 FILENAME=$1
-dataline=$(awk 'NR==101, NR==110{print}' $FILENAME)
+dataline=$(awk 'NR==102, NR==111{print}' $FILENAME)
 echo $dataline
 # parser params
 IFS=$'\n'
...
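The only change in this script is the `awk` line range used to slice the config file; as a reminder of the idiom, `awk 'NR==a, NR==b{print}' file` prints lines a through b, for example:

```shell
# Prints lines 102 through 111 of a TIPC config file (the range the updated script now reads).
awk 'NR==102, NR==111{print}' ./test_tipc/configs/ppocr_det_mobile_params.txt
```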
test_tipc/test_paddle2onnx.sh
...
@@ -11,7 +11,7 @@ python=$(func_parser_value "${lines[2]}")
 # parser params
-dataline=$(awk 'NR==111, NR==123{print}' $FILENAME)
+dataline=$(awk 'NR==1, NR==12{print}' $FILENAME)
 IFS=$'\n'
 lines=(${dataline})
...
test_tipc/test_serving.sh
...
@@ -2,7 +2,7 @@
 source test_tipc/common_func.sh
 FILENAME=$1
-dataline=$(awk 'NR==67, NR==83{print}' $FILENAME)
+dataline=$(awk 'NR==1, NR==18{print}' $FILENAME)
 # parser params
 IFS=$'\n'
...
...
@@ -35,6 +35,8 @@ web_use_trt_list=$(func_parser_value "${lines[14]}")
 web_precision_key=$(func_parser_key "${lines[15]}")
 web_precision_list=$(func_parser_value "${lines[15]}")
 pipeline_py=$(func_parser_value "${lines[16]}")
+image_dir_key=$(func_parser_key "${lines[17]}")
+image_dir_value=$(func_parser_value "${lines[17]}")
 LOG_PATH="../../test_tipc/output"
 mkdir -p ./test_tipc/output
...
...
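`func_parser_key` and `func_parser_value` come from `test_tipc/common_func.sh`; a minimal paraphrased sketch of what they do (assuming the usual `key:value` line format of these configs, not code copied from this commit) is:

```shell
# Paraphrased sketch of the common_func.sh helpers: split a "key:value" config line on ':' and return one side.
function func_parser_key(){
    IFS=":"
    array=($1)
    echo ${array[0]}
}
function func_parser_value(){
    IFS=":"
    array=($1)
    echo ${array[1]}
}
```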
@@ -51,67 +53,98 @@ function func_serving(){
    set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
    set_serving_server=$(func_set_params "${serving_server_key}" "${serving_server_value}")
    set_serving_client=$(func_set_params "${serving_client_key}" "${serving_client_value}")
    set_image_dir=$(func_set_params "${image_dir_key}" "${image_dir_value}")
    trans_model_cmd="${python} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
    eval $trans_model_cmd
    cd ${serving_dir_value}
    echo $PWD
    unset https_proxy
    unset http_proxy
    for use_gpu in ${web_use_gpu_list[*]}; do
        echo ${ues_gpu}
        if [ ${use_gpu} = "null" ]; then
            for use_mkldnn in ${web_use_mkldnn_list[*]}; do
                if [ ${use_mkldnn} = "False" ]; then
                    continue
                fi
                for threads in ${web_cpu_threads_list[*]}; do
                    _save_log_path="${LOG_PATH}/server_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_1.log"
                    set_cpu_threads=$(func_set_params "${web_cpu_threads_key}" "${threads}")
                    web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${web_use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} &"
                    eval $web_service_cmd
                    sleep 2s
                    pipeline_cmd="${python} ${pipeline_py} > ${_save_log_path} 2>&1 "
                    eval $pipeline_cmd
                    last_status=${PIPESTATUS[0]}
                    eval "cat ${_save_log_path}"
                    status_check $last_status "${pipeline_cmd}" "${status_log}"
                    PID=$!
                    kill $PID
                    sleep 2s
                    ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
                done
            done
        elif [ ${use_gpu} = "0" ]; then
            for use_trt in ${web_use_trt_list[*]}; do
                for precision in ${web_precision_list[*]}; do
                    if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then
                        continue
                    fi
                    if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then
                        continue
                    fi
                    if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [[ ${_flag_quant} = "True" ]]; then
                        continue
                    fi
                    _save_log_path="${LOG_PATH}/server_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_1.log"
                    set_tensorrt=$(func_set_params "${web_use_trt_key}" "${use_trt}")
                    set_precision=$(func_set_params "${web_precision_key}" "${precision}")
                    web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} & "
    for python in ${python[*]}; do
        if [ ${python} = "cpp" ]; then
            for use_gpu in ${web_use_gpu_list[*]}; do
                if [ ${use_gpu} = "null" ]; then
                    web_service_cpp_cmd="${python} -m paddle_serving_server.serve --model ppocr_det_mobile_2.0_serving/ ppocr_rec_mobile_2.0_serving/ --port 9293"
                    eval $web_service_cmd
                    sleep 2s
                    pipeline_cmd="${python} ${pipeline_py} > ${_save_log_path} 2>&1"
                    _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log"
                    pipeline_cmd="${python} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/"
                    eval $pipeline_cmd
                    last_status=${PIPESTATUS[0]}
                    eval "cat ${_save_log_path}"
                    status_check $last_status "${pipeline_cmd}" "${status_log}"
                    PID=$!
                    kill $PID
                    sleep 2s
                    ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
                done
                else
                    web_service_cpp_cmd="${python} -m paddle_serving_server.serve --model ppocr_det_mobile_2.0_serving/ ppocr_rec_mobile_2.0_serving/ --port 9293 --gpu_id=0"
                    eval $web_service_cmd
                    sleep 2s
                    _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log"
                    pipeline_cmd="${python} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/"
                    eval $pipeline_cmd
                    status_check $last_status "${pipeline_cmd}" "${status_log}"
                    sleep 2s
                    ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
                fi
            done
        else
            echo "Does not support hardware other than CPU and GPU Currently!"
            # python serving
            for use_gpu in ${web_use_gpu_list[*]}; do
                echo ${ues_gpu}
                if [ ${use_gpu} = "null" ]; then
                    for use_mkldnn in ${web_use_mkldnn_list[*]}; do
                        if [ ${use_mkldnn} = "False" ]; then
                            continue
                        fi
                        for threads in ${web_cpu_threads_list[*]}; do
                            set_cpu_threads=$(func_set_params "${web_cpu_threads_key}" "${threads}")
                            web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${web_use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} &"
                            eval $web_service_cmd
                            sleep 2s
                            for pipeline in ${pipeline_py[*]}; do
                                _save_log_path="${LOG_PATH}/server_infer_cpu_${pipeline%_client*}_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_1.log"
                                pipeline_cmd="${python} ${pipeline} ${set_image_dir} > ${_save_log_path} 2>&1 "
                                eval $pipeline_cmd
                                last_status=${PIPESTATUS[0]}
                                eval "cat ${_save_log_path}"
                                status_check $last_status "${pipeline_cmd}" "${status_log}"
                                sleep 2s
                            done
                            ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
                        done
                    done
                elif [ ${use_gpu} = "0" ]; then
                    for use_trt in ${web_use_trt_list[*]}; do
                        for precision in ${web_precision_list[*]}; do
                            if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then
                                continue
                            fi
                            if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then
                                continue
                            fi
                            if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [[ ${_flag_quant} = "True" ]]; then
                                continue
                            fi
                            set_tensorrt=$(func_set_params "${web_use_trt_key}" "${use_trt}")
                            set_precision=$(func_set_params "${web_precision_key}" "${precision}")
                            web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} & "
                            eval $web_service_cmd
                            sleep 2s
                            for pipeline in ${pipeline_py[*]}; do
                                _save_log_path="${LOG_PATH}/server_infer_gpu_${pipeline%_client*}_usetrt_${use_trt}_precision_${precision}_batchsize_1.log"
                                pipeline_cmd="${python} ${pipeline} ${set_image_dir} > ${_save_log_path} 2>&1"
                                eval $pipeline_cmd
                                last_status=${PIPESTATUS[0]}
                                eval "cat ${_save_log_path}"
                                status_check $last_status "${pipeline_cmd}" "${status_log}"
                                sleep 2s
                            done
                            ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
                        done
                    done
                else
                    echo "Does not support hardware other than CPU and GPU Currently!"
                fi
            done
        fi
    done
}
...
...
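The added `image_dir` handling and the `pipeline_http_client.py|pipeline_rpc_client.py` config value feed the new `for pipeline in ${pipeline_py[*]}` loops above; a small hedged sketch of how a `|`-separated value expands in that loop (assuming `IFS` is set to `|`, as elsewhere in the TIPC scripts) is:

```shell
# Sketch: with IFS='|', an 'a|b' config value word-splits into a list the loop can iterate.
IFS='|'
pipeline_py="pipeline_http_client.py|pipeline_rpc_client.py"
for pipeline in ${pipeline_py[*]}; do
    # ${pipeline%_client*} strips the "_client*" tail, as used when naming the log files
    echo "tag=${pipeline%_client*}  cmd=python3.7 ${pipeline} --image_dir=../../doc/imgs"
done
```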
test_tipc/test_train_inference_python.sh
...
...
@@ -90,7 +90,7 @@ infer_value1=$(func_parser_value "${lines[50]}")
 # parser klquant_infer
 if [ ${MODE} = "klquant_whole_infer" ]; then
-    dataline=$(awk 'NR==82, NR==98{print}' $FILENAME)
+    dataline=$(awk 'NR==85 NR==101{print}' $FILENAME)
     lines=(${dataline})
     # parser inference model
     infer_model_dir_list=$(func_parser_value "${lines[1]}")
...
...
@@ -244,7 +244,7 @@ else
     export Count=0
     USE_GPU_KEY=(${train_use_gpu_value})
     for gpu in ${gpu_list[*]}; do
-        use_gpu=${USE_GPU_KEY[Count]}
+        train_use_gpu=${USE_GPU_KEY[Count]}
         Count=$(($Count + 1))
         ips=""
         if [ ${gpu} = "-1" ]; then
...
...
@@ -302,11 +302,20 @@ else
         set_pretrain=$(func_set_params "${pretrain_model_key}" "${pretrain_model_value}")
         set_batchsize=$(func_set_params "${train_batch_key}" "${train_batch_value}")
         set_train_params1=$(func_set_params "${train_param_key1}" "${train_param_value1}")
-        set_use_gpu=$(func_set_params "${train_use_gpu_key}" "${use_gpu}")
-        save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}"
+        set_use_gpu=$(func_set_params "${train_use_gpu_key}" "${train_use_gpu}")
+        if [ ${#ips} -le 26 ]; then
+            save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}"
+            nodes=1
+        else
+            IFS=","
+            ips_array=(${ips})
+            IFS="|"
+            nodes=${#ips_array[@]}
+            save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}_nodes_${nodes}"
+        fi
         # load pretrain from norm training if current trainer is pact or fpgm trainer
-        if [ ${trainer} = ${pact_key} ] || [ ${trainer} = ${fpgm_key} ]; then
+        if ([ ${trainer} = ${pact_key} ] || [ ${trainer} = ${fpgm_key} ]) && [ ${nodes} -le 1 ]; then
             set_pretrain="${load_norm_train_model}"
         fi
...
...
@@ -325,7 +334,7 @@ else
         set_eval_pretrain=$(func_set_params "${pretrain_model_key}" "${save_log}/${train_model_name}")
         # save norm trained models to set pretrain for pact training and fpgm training
-        if [ ${trainer} = ${trainer_norm} ]; then
+        if [ ${trainer} = ${trainer_norm} ] && [ ${nodes} -le 1]; then
             load_norm_train_model=${set_eval_pretrain}
         fi
         # run eval
...
...
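The new branch above decides between single-node and multi-node training from the length of `ips` and a comma split; a standalone hedged sketch of that logic (illustrative values, not taken from any config in this commit) is:

```shell
# Sketch of the node-count logic added above; the ips value here is made up for illustration.
ips="10.10.10.1,10.10.10.2,10.10.10.3"   # an empty or short value (<= 26 chars) means a single node
if [ ${#ips} -le 26 ]; then
    nodes=1
else
    IFS=","
    ips_array=(${ips})
    IFS="|"
    nodes=${#ips_array[@]}
fi
echo "nodes=${nodes}"   # prints nodes=3 for the value above
```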