Unverified commit 7ed7d859, authored by andyjpaddle, committed by GitHub

Merge branch 'PaddlePaddle:release/2.5' into onnx

parents bd61fb5e d8a8ca81
@@ -115,4 +115,4 @@ ValueError: The results of python_infer_gpu_usetrt_True_precision_fp32_batchsize
 ## 3. 更多教程
 本文档为功能测试用,更丰富的训练预测使用教程请参考:
 [模型训练](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/doc/doc_ch/training.md)
-[基于Python预测引擎推理](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/doc/doc_ch/inference.md)
+[基于Python预测引擎推理](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/doc/doc_ch/inference_ppocr.md)

@@ -152,4 +152,4 @@ ValueError: The results of python_infer_cpu_usemkldnn_False_threads_1_batchsize_
 ## 3. 更多教程
 本文档为功能测试用,更丰富的训练预测使用教程请参考:
 [模型训练](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/doc/doc_ch/training.md)
-[基于Python预测引擎推理](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/doc/doc_ch/inference.md)
+[基于Python预测引擎推理](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/doc/doc_ch/inference_ppocr.md)

@@ -153,4 +153,4 @@ python3.7 test_tipc/compare_results.py --gt_file=./test_tipc/results/python_*.tx
 ## 3. 更多教程
 本文档为功能测试用,更丰富的训练预测使用教程请参考:
 [模型训练](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/doc/doc_ch/training.md)
-[基于Python预测引擎推理](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/doc/doc_ch/inference.md)
+[基于Python预测引擎推理](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/doc/doc_ch/inference_ppocr.md)

@@ -156,4 +156,4 @@ ValueError: The results of python_infer_cpu_usemkldnn_False_threads_1_batchsize_
 ## 3. 更多教程
 本文档为功能测试用,更丰富的训练预测使用教程请参考:
 [模型训练](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/doc/doc_ch/training.md)
-[基于Python预测引擎推理](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/doc/doc_ch/inference.md)
+[基于Python预测引擎推理](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/doc/doc_ch/inference_ppocr.md)
@@ -17,7 +17,7 @@ import sys
 __dir__ = os.path.dirname(os.path.abspath(__file__))
 sys.path.append(__dir__)
-sys.path.append(os.path.abspath(os.path.join(__dir__, "..")))
+sys.path.insert(0, os.path.abspath(os.path.join(__dir__, "..")))
 import argparse
@@ -34,6 +34,7 @@ def init_args():
     parser = argparse.ArgumentParser()
     # params for prediction engine
     parser.add_argument("--use_gpu", type=str2bool, default=True)
+    parser.add_argument("--use_xpu", type=str2bool, default=False)
     parser.add_argument("--ir_optim", type=str2bool, default=True)
     parser.add_argument("--use_tensorrt", type=str2bool, default=False)
     parser.add_argument("--min_subgraph_size", type=int, default=15)
@@ -285,6 +286,8 @@ def create_predictor(args, mode, logger):
                 config.set_trt_dynamic_shape_info(
                     min_input_shape, max_input_shape, opt_input_shape)
+    elif args.use_xpu:
+        config.enable_xpu(10 * 1024 * 1024)
     else:
         config.disable_gpu()
         if hasattr(args, "cpu_threads"):
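
Here config is a paddle.inference.Config, and enable_xpu reserves an L3 cache workspace on the XPU (10 MB in this hunk). A self-contained sketch of the resulting device-selection branch, assuming an exported model directory containing inference.pdmodel/inference.pdiparams (illustrative, not the full create_predictor logic):

    import os
    from paddle import inference

    def build_predictor(model_dir, use_gpu=False, use_xpu=False, cpu_threads=4):
        # point the predictor at the exported inference model files
        config = inference.Config(
            os.path.join(model_dir, "inference.pdmodel"),
            os.path.join(model_dir, "inference.pdiparams"))
        if use_gpu:
            config.enable_use_gpu(500, 0)        # 500 MB initial GPU workspace on card 0
        elif use_xpu:
            config.enable_xpu(10 * 1024 * 1024)  # 10 MB L3 workspace on the XPU
        else:
            config.disable_gpu()
            config.set_cpu_math_library_num_threads(cpu_threads)
        return inference.create_predictor(config)
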
@@ -157,7 +157,7 @@ def main():
             if info is not None:
                 logger.info("\t result: {}".format(info))
-                fout.write(file + "\t" + info)
+                fout.write(file + "\t" + info + "\n")
     logger.info("success!")
@@ -112,20 +112,25 @@ def merge_config(config, opts):
     return config


-def check_gpu(use_gpu):
+def check_device(use_gpu, use_xpu=False):
     """
     Log error and exit when set use_gpu=true in paddlepaddle
     cpu version.
     """
-    err = "Config use_gpu cannot be set as true while you are " \
-          "using paddlepaddle cpu version ! \nPlease try: \n" \
-          "\t1. Install paddlepaddle-gpu to run model on GPU \n" \
-          "\t2. Set use_gpu as false in config file to run " \
+    err = "Config {} cannot be set as true while your paddle " \
+          "is not compiled with {} ! \nPlease try: \n" \
+          "\t1. Install paddlepaddle to run model on {} \n" \
+          "\t2. Set {} as false in config file to run " \
           "model on CPU"
     try:
+        if use_gpu and use_xpu:
+            print("use_xpu and use_gpu cannot both be true.")
         if use_gpu and not paddle.is_compiled_with_cuda():
-            print(err)
+            print(err.format("use_gpu", "cuda", "gpu", "use_gpu"))
+            sys.exit(1)
+        if use_xpu and not paddle.device.is_compiled_with_xpu():
+            print(err.format("use_xpu", "xpu", "xpu", "use_xpu"))
             sys.exit(1)
     except Exception as e:
         pass
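
The main change is that the fixed use_gpu message becomes a template shared by the GPU and XPU checks. A quick, runnable illustration of what err.format produces for each case:

    err = "Config {} cannot be set as true while your paddle " \
          "is not compiled with {} ! \nPlease try: \n" \
          "\t1. Install paddlepaddle to run model on {} \n" \
          "\t2. Set {} as false in config file to run " \
          "model on CPU"

    # GPU requested on a build without CUDA support:
    print(err.format("use_gpu", "cuda", "gpu", "use_gpu"))
    # XPU requested on a build without XPU support:
    print(err.format("use_xpu", "xpu", "xpu", "use_xpu"))

Callers that previously used check_gpu(use_gpu) now call check_device(use_gpu, use_xpu), as the preprocess() hunk below shows.
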
@@ -301,6 +306,7 @@ def train(config,
                 stats['lr'] = lr
                 train_stats.update(stats)
                 if log_writer is not None and dist.get_rank() == 0:
                     log_writer.log_metrics(metrics=train_stats.get(), prefix="TRAIN", step=global_step)
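
log_writer here is whichever metrics logger the trainer was configured with; it only needs a log_metrics(metrics, prefix, step) method. A minimal sketch of such a wrapper backed by VisualDL (the class name, constructor, and log directory are illustrative assumptions; only the LogWriter/add_scalar calls reflect the actual VisualDL API):

    from visualdl import LogWriter

    class VDLMetricsLogger:
        # hypothetical wrapper exposing the log_metrics(metrics, prefix, step) interface
        def __init__(self, logdir="./output/vdl"):
            self.writer = LogWriter(logdir=logdir)

        def log_metrics(self, metrics, prefix="", step=0):
            for name, value in metrics.items():
                tag = "{}/{}".format(prefix, name) if prefix else name
                self.writer.add_scalar(tag=tag, value=value, step=step)

    # usage mirroring the training loop above
    log_writer = VDLMetricsLogger()
    log_writer.log_metrics(metrics={"loss": 0.42, "lr": 0.001}, prefix="TRAIN", step=100)
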
@@ -547,7 +553,7 @@ def preprocess(is_train=False):
     # check if set use_gpu=True in paddlepaddle cpu version
     use_gpu = config['Global']['use_gpu']
-    check_gpu(use_gpu)
+    use_xpu = config['Global'].get('use_xpu', False)
     # check if set use_xpu=True in paddlepaddle cpu/gpu version
     use_xpu = False

@@ -562,11 +568,13 @@
         'SEED', 'SDMGR', 'LayoutXLM', 'LayoutLM', 'PREN', 'FCE', 'SVTR'
     ]
+    device = 'cpu'
+    if use_gpu:
+        device = 'gpu:{}'.format(dist.ParallelEnv().dev_id)
     if use_xpu:
-        device = 'xpu'
-    else:
-        device = 'gpu:{}'.format(dist.ParallelEnv()
-                                 .dev_id) if use_gpu else 'cpu'
+        device = 'xpu:{0}'.format(os.getenv('FLAGS_selected_xpus', 0))
+    check_device(use_gpu, use_xpu)
     device = paddle.set_device(device)
     config['Global']['distributed'] = dist.get_world_size() != 1
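
Read together, the new logic builds the device string first and only then validates it with check_device. A standalone sketch of that flow, assuming FLAGS_selected_xpus is set by the launcher the same way FLAGS_selected_gpus is for GPUs:

    import os
    import paddle
    import paddle.distributed as dist

    def select_device(use_gpu=False, use_xpu=False):
        # default to CPU, then upgrade if a GPU or XPU was requested
        device = 'cpu'
        if use_gpu:
            device = 'gpu:{}'.format(dist.ParallelEnv().dev_id)
        if use_xpu:
            # pick the card chosen by the launcher, falling back to card 0
            device = 'xpu:{0}'.format(os.getenv('FLAGS_selected_xpus', 0))
        return device

    device = select_device(use_gpu=False, use_xpu=False)
    paddle.set_device(device)  # 'cpu', 'gpu:N' or 'xpu:N'
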
@@ -35,6 +35,7 @@ from ppocr.postprocess import build_post_process
 from ppocr.metrics import build_metric
 from ppocr.utils.save_load import load_model
 from ppocr.utils.utility import set_seed
+from ppocr.modeling.architectures import apply_to_static
 import tools.program as program
 dist.get_world_size()
@@ -121,6 +122,8 @@ def main(config, device, logger, vdl_writer):
     if config['Global']['distributed']:
         model = paddle.DataParallel(model)
+    model = apply_to_static(model, config, logger)
+
     # build loss
     loss_class = build_loss(config['Loss'])
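
apply_to_static is imported from ppocr.modeling.architectures; judging from the call site it converts the dygraph model to a static graph when the config asks for it and otherwise returns it unchanged. A minimal sketch of such a helper (the 'to_static' config key and the InputSpec shape are assumptions for illustration, not PaddleOCR's actual implementation):

    import paddle
    from paddle.static import InputSpec

    def apply_to_static(model, config, logger):
        # hypothetical: only convert when the config explicitly enables it
        if not config['Global'].get('to_static', False):
            return model
        # assumed input signature: NCHW image batch with a free batch dimension
        specs = [InputSpec(shape=[None, 3, 48, 320], dtype='float32')]
        model = paddle.jit.to_static(model, input_spec=specs)
        logger.info("Applied paddle.jit.to_static with input_spec {}".format(specs))
        return model
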