Commit 8485edfc authored by tink2123

Merge branch 'dygraph' of https://github.com/PaddlePaddle/PaddleOCR into paddle2onnx

parents 48a6ebad 0b37c118
...@@ -11,6 +11,10 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is referred from:
https://github.com/ayumiymk/aster.pytorch/blob/master/lib/models/attention_recognition_head.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
......
...@@ -75,7 +75,7 @@ class AttentionHead(nn.Layer):
                        probs_step, axis=1)], axis=1)
                next_input = probs_step.argmax(axis=1)
                targets = next_input
        probs = paddle.nn.functional.softmax(probs, axis=2)
        return probs
......
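The softmax added above turns the per-step logits accumulated during greedy decoding into normalized character probabilities, so downstream code can read them as confidences. A minimal sketch of the effect; the batch size, step count, and class count below are assumptions, not taken from the diff:

```python
# Minimal sketch (shapes are assumed): softmax over the class axis makes each
# decoding step a probability distribution that sums to 1.
import paddle
import paddle.nn.functional as F

logits = paddle.randn([2, 25, 38])      # batch=2, 25 decode steps, 38 classes
probs = F.softmax(logits, axis=2)
print(float(probs[0, 0].sum()))         # ~1.0
print(probs.argmax(axis=2).shape)       # [2, 25] -> predicted character ids
```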
# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is referred from:
https://github.com/open-mmlab/mmocr/blob/main/mmocr/models/textrecog/encoders/sar_encoder.py
https://github.com/open-mmlab/mmocr/blob/main/mmocr/models/textrecog/decoders/sar_decoder.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
...@@ -275,7 +294,6 @@ class ParallelSARDecoder(BaseDecoder):
        if img_metas is not None and self.mask:
            valid_ratios = img_metas[-1]

        label = label.cuda()
        lab_embedding = self.embedding(label)
        # bsz * seq_len * emb_dim
        out_enc = out_enc.unsqueeze(1)
......
...@@ -11,64 +11,102 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is referred from:
https://github.com/whai362/PSENet/blob/python3/models/neck/fpn.py
"""

import paddle.nn as nn
import paddle
import math
import paddle.nn.functional as F


class Conv_BN_ReLU(nn.Layer):
    def __init__(self,
                 in_planes,
                 out_planes,
                 kernel_size=1,
                 stride=1,
                 padding=0):
        super(Conv_BN_ReLU, self).__init__()
        self.conv = nn.Conv2D(
            in_planes,
            out_planes,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            bias_attr=False)
        self.bn = nn.BatchNorm2D(out_planes, momentum=0.1)
        self.relu = nn.ReLU()

        for m in self.sublayers():
            if isinstance(m, nn.Conv2D):
                n = m._kernel_size[0] * m._kernel_size[1] * m._out_channels
                m.weight = paddle.create_parameter(
                    shape=m.weight.shape,
                    dtype='float32',
                    default_initializer=paddle.nn.initializer.Normal(
                        0, math.sqrt(2. / n)))
            elif isinstance(m, nn.BatchNorm2D):
                m.weight = paddle.create_parameter(
                    shape=m.weight.shape,
                    dtype='float32',
                    default_initializer=paddle.nn.initializer.Constant(1.0))
                m.bias = paddle.create_parameter(
                    shape=m.bias.shape,
                    dtype='float32',
                    default_initializer=paddle.nn.initializer.Constant(0.0))

    def forward(self, x):
        return self.relu(self.bn(self.conv(x)))


class FPN(nn.Layer):
    def __init__(self, in_channels, out_channels):
        super(FPN, self).__init__()

        # Top layer
        self.toplayer_ = Conv_BN_ReLU(
            in_channels[3], out_channels, kernel_size=1, stride=1, padding=0)
        # Lateral layers
        self.latlayer1_ = Conv_BN_ReLU(
            in_channels[2], out_channels, kernel_size=1, stride=1, padding=0)
        self.latlayer2_ = Conv_BN_ReLU(
            in_channels[1], out_channels, kernel_size=1, stride=1, padding=0)
        self.latlayer3_ = Conv_BN_ReLU(
            in_channels[0], out_channels, kernel_size=1, stride=1, padding=0)

        # Smooth layers
        self.smooth1_ = Conv_BN_ReLU(
            out_channels, out_channels, kernel_size=3, stride=1, padding=1)
        self.smooth2_ = Conv_BN_ReLU(
            out_channels, out_channels, kernel_size=3, stride=1, padding=1)
        self.smooth3_ = Conv_BN_ReLU(
            out_channels, out_channels, kernel_size=3, stride=1, padding=1)

        self.out_channels = out_channels * 4

        for m in self.sublayers():
            if isinstance(m, nn.Conv2D):
                n = m._kernel_size[0] * m._kernel_size[1] * m._out_channels
                m.weight = paddle.create_parameter(
                    shape=m.weight.shape,
                    dtype='float32',
                    default_initializer=paddle.nn.initializer.Normal(
                        0, math.sqrt(2. / n)))
            elif isinstance(m, nn.BatchNorm2D):
                m.weight = paddle.create_parameter(
                    shape=m.weight.shape,
                    dtype='float32',
                    default_initializer=paddle.nn.initializer.Constant(1.0))
                m.bias = paddle.create_parameter(
                    shape=m.bias.shape,
                    dtype='float32',
                    default_initializer=paddle.nn.initializer.Constant(0.0))

    def _upsample(self, x, scale=1):
        return F.upsample(x, scale_factor=scale, mode='bilinear')
...@@ -81,15 +119,15 @@ class FPN(nn.Layer):
        p5 = self.toplayer_(f5)

        f4 = self.latlayer1_(f4)
        p4 = self._upsample_add(p5, f4, 2)
        p4 = self.smooth1_(p4)

        f3 = self.latlayer2_(f3)
        p3 = self._upsample_add(p4, f3, 2)
        p3 = self.smooth2_(p3)

        f2 = self.latlayer3_(f2)
        p2 = self._upsample_add(p3, f2, 2)
        p2 = self.smooth3_(p2)

        p3 = self._upsample(p3, 2)
...@@ -97,4 +135,4 @@ class FPN(nn.Layer):
        p5 = self._upsample(p5, 8)

        fuse = paddle.concat([p2, p3, p4, p5], axis=1)
        return fuse
\ No newline at end of file
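For orientation, a hedged sanity check of the PSE FPN neck above. The backbone channel list, input size, and the assumption that `forward` takes the four backbone feature maps are mine, not taken from the diff:

```python
# Hedged sketch: in_channels and feature-map sizes are assumed (ResNet-style
# strides 4/8/16/32 for a 640x640 input), not from this commit.
import paddle

fpn = FPN(in_channels=[256, 512, 1024, 2048], out_channels=256)

feats = [
    paddle.randn([1, 256, 160, 160]),
    paddle.randn([1, 512, 80, 80]),
    paddle.randn([1, 1024, 40, 40]),
    paddle.randn([1, 2048, 20, 20]),
]
fuse = fpn(feats)
print(fuse.shape)  # expected [1, 1024, 160, 160]: 4 * out_channels at stride 4
```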
...@@ -11,7 +11,10 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is referred from:
https://github.com/ayumiymk/aster.pytorch/blob/master/lib/models/stn_head.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
......
...@@ -11,6 +11,10 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is referred from:
https://github.com/clovaai/deep-text-recognition-benchmark/blob/master/modules/transformation.py
"""
from __future__ import absolute_import
from __future__ import division
......
...@@ -11,6 +11,10 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is referred from:
https://github.com/ayumiymk/aster.pytorch/blob/master/lib/models/tps_spatial_transformer.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
......
...@@ -18,7 +18,6 @@ from __future__ import print_function
from __future__ import unicode_literals

import copy
import platform

__all__ = ['build_post_process']

...@@ -26,21 +25,24 @@ from .db_postprocess import DBPostProcess, DistillationDBPostProcess
from .east_postprocess import EASTPostProcess
from .sast_postprocess import SASTPostProcess
from .rec_postprocess import CTCLabelDecode, AttnLabelDecode, SRNLabelDecode, DistillationCTCLabelDecode, \
    TableLabelDecode, NRTRLabelDecode, SARLabelDecode, SEEDLabelDecode
from .cls_postprocess import ClsPostProcess
from .pg_postprocess import PGPostProcess
from .pse_postprocess import PSEPostProcess


def build_post_process(config, global_config=None):
    support_dict = [
        'DBPostProcess', 'EASTPostProcess', 'SASTPostProcess', 'CTCLabelDecode',
        'AttnLabelDecode', 'ClsPostProcess', 'SRNLabelDecode', 'PGPostProcess',
        'DistillationCTCLabelDecode', 'TableLabelDecode',
        'DistillationDBPostProcess', 'NRTRLabelDecode', 'SARLabelDecode',
        'SEEDLabelDecode'
    ]

    if config['name'] == 'PSEPostProcess':
        from .pse_postprocess import PSEPostProcess
        support_dict.append('PSEPostProcess')

    config = copy.deepcopy(config)
    module_name = config.pop('name')
    if global_config is not None:
......
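The point of moving the PSE import inside `build_post_process` is that the compiled `pse` extension is only needed when a PSE config is actually requested. A hedged sketch of driving the factory; the parameter names passed through the config dicts are assumptions based on the post-processors shown elsewhere in this diff:

```python
# Hedged sketch of the factory above; parameter names are assumptions.
from ppocr.postprocess import build_post_process

# DB works without the compiled pse extension.
db_post = build_post_process({
    'name': 'DBPostProcess',
    'thresh': 0.3,
    'box_thresh': 0.6,
    'unclip_ratio': 1.5,
})

# Only this branch triggers `from .pse_postprocess import PSEPostProcess`,
# so environments that never use PSE never need to compile it.
pse_post = build_post_process({
    'name': 'PSEPostProcess',
    'thresh': 0,
    'box_thresh': 0.85,
    'scale': 1,
})
```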
...@@ -11,7 +11,10 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is referred from:
https://github.com/WenmuZhou/DBNet.pytorch/blob/master/post_processing/seg_detector_representer.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
...@@ -190,7 +193,8 @@ class DBPostProcess(object):


class DistillationDBPostProcess(object):
    def __init__(self,
                 model_name=["student"],
                 key=None,
                 thresh=0.3,
                 box_thresh=0.6,
...@@ -201,12 +205,13 @@ class DistillationDBPostProcess(object):
                 **kwargs):
        self.model_name = model_name
        self.key = key
        self.post_process = DBPostProcess(
            thresh=thresh,
            box_thresh=box_thresh,
            max_candidates=max_candidates,
            unclip_ratio=unclip_ratio,
            use_dilation=use_dilation,
            score_mode=score_mode)

    def __call__(self, predicts, shape_list):
        results = {}
......
""" """
Locality aware nms. Locality aware nms.
This code is refered from: https://github.com/songdejia/EAST/blob/master/locality_aware_nms.py
""" """
import numpy as np import numpy as np
......
## Compilation

This code is referred from:
https://github.com/whai362/PSENet/blob/python3/models/post_processing/pse

```python
python3 setup.py build_ext --inplace
```
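Once the extension is built, either manually as above or by the automatic `subprocess` call shown in the next hunk, the Cython routine can be imported and applied to predicted kernel maps. A hedged sketch; the argument order and return value are assumptions based on the upstream PSENet implementation:

```python
# Hedged sketch: pse(kernels, min_area) is assumed to expand the smallest kernel
# into full text-instance labels, as in the upstream PSENet implementation.
import numpy as np
from ppocr.postprocess.pse_postprocess.pse import pse

kernels = (np.random.rand(2, 160, 160) > 0.9).astype(np.uint8)  # dummy kernel maps
label_map = pse(kernels, 5)   # min_area=5; expected: (160, 160) instance-label map
print(np.asarray(label_map).shape)
```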
...@@ -21,8 +21,9 @@ ori_path = os.getcwd()
os.chdir('ppocr/postprocess/pse_postprocess/pse')
if subprocess.call(
        '{} setup.py build_ext --inplace'.format(python_path), shell=True) != 0:
    raise RuntimeError(
        'Cannot compile pse: {}, if your system is windows, you need to install all the default components of `desktop development using C++` in visual studio 2019+'.
        format(os.path.dirname(os.path.realpath(__file__))))
os.chdir(ori_path)

from .pse import pse
# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is referred from:
https://github.com/whai362/PSENet/blob/python3/models/head/psenet_head.py
"""
from __future__ import absolute_import
from __future__ import division
...@@ -47,7 +51,8 @@ class PSEPostProcess(object):
        pred = outs_dict['maps']
        if not isinstance(pred, paddle.Tensor):
            pred = paddle.to_tensor(pred)
        pred = F.interpolate(
            pred, scale_factor=4 // self.scale, mode='bilinear')

        score = F.sigmoid(pred[:, 0, :, :])
...@@ -60,7 +65,9 @@ class PSEPostProcess(object):
        boxes_batch = []
        for batch_index in range(pred.shape[0]):
            boxes, scores = self.boxes_from_bitmap(score[batch_index],
                                                   kernels[batch_index],
                                                   shape_list[batch_index])

            boxes_batch.append({'points': boxes, 'scores': scores})
        return boxes_batch
...@@ -98,15 +105,14 @@ class PSEPostProcess(object):
                mask = np.zeros((box_height, box_width), np.uint8)
                mask[points[:, 1], points[:, 0]] = 255

                contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                               cv2.CHAIN_APPROX_SIMPLE)
                bbox = np.squeeze(contours[0], 1)
            else:
                raise NotImplementedError

            bbox[:, 0] = np.clip(np.round(bbox[:, 0] / ratio_w), 0, src_w)
            bbox[:, 1] = np.clip(np.round(bbox[:, 1] / ratio_h), 0, src_h)
            boxes.append(bbox)
            scores.append(score_i)
        return boxes, scores
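The last lines of that hunk map box coordinates from network resolution back to the source image. A tiny NumPy illustration with made-up numbers; the ratios and image size are assumptions:

```python
# Illustration only: divide by the resize ratios, round, and clamp to the
# original image bounds so boxes never leave the source frame.
import numpy as np

bbox = np.array([[100, 40], [220, 40], [220, 90], [100, 90]], dtype=np.float32)
ratio_w, ratio_h = 0.5, 0.5    # resized size / original size (assumed)
src_w, src_h = 400, 160        # original image size (assumed)

bbox[:, 0] = np.clip(np.round(bbox[:, 0] / ratio_w), 0, src_w)
bbox[:, 1] = np.clip(np.round(bbox[:, 1] / ratio_h), 0, src_h)
print(bbox)   # x clipped to [0, 400], y clipped to [0, 160]
```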
# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
...@@ -11,18 +11,23 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is referred from:
https://github.com/whai362/PSENet/blob/python3/models/loss/iou.py
"""

import paddle

EPS = 1e-6


def iou_single(a, b, mask, n_class):
    valid = mask == 1
    a = a.masked_select(valid)
    b = b.masked_select(valid)
    miou = []
    for i in range(n_class):
        if a.shape == [0] and a.shape == b.shape:
            inter = paddle.to_tensor(0.0)
            union = paddle.to_tensor(0.0)
        else:
...@@ -32,6 +37,7 @@ def iou_single(a, b, mask, n_class):
    miou = sum(miou) / len(miou)
    return miou


def iou(a, b, mask, n_class=2, reduce=True):
    batch_size = a.shape[0]
...@@ -39,10 +45,10 @@ def iou(a, b, mask, n_class=2, reduce=True):
    b = b.reshape([batch_size, -1])
    mask = mask.reshape([batch_size, -1])

    iou = paddle.zeros((batch_size, ), dtype='float32')
    for i in range(batch_size):
        iou[i] = iou_single(a[i], b[i], mask[i], n_class)

    if reduce:
        iou = paddle.mean(iou)
    return iou
\ No newline at end of file
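A hedged usage sketch for the `iou` helper above, using dummy binary maps; it assumes the helpers are in scope (the module path is not shown in this excerpt) and that the elided branch computes the usual per-class intersection over union:

```python
# Hedged sketch: with all pixels valid, IoU is averaged over the background
# and text classes (n_class=2) and then over the batch.
import paddle

pred = paddle.to_tensor([[[1., 1.], [0., 0.]]])   # batch of one 2x2 prediction
gt = paddle.to_tensor([[[1., 0.], [0., 0.]]])     # ground truth
mask = paddle.ones_like(gt)                       # every pixel counts

print(iou(pred, gt, mask))   # expected ~0.583 = (2/3 + 1/2) / 2
```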
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
...@@ -11,6 +11,10 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is referred from:
https://github.com/WenmuZhou/PytorchOCR/blob/master/torchocr/utils/logging.py
"""
import os
import sys
......
...@@ -32,6 +32,7 @@ def run_shell_command(cmd):
    else:
        return None


def parser_results_from_log_by_name(log_path, names_list):
    if not os.path.exists(log_path):
        raise ValueError("The log file {} does not exists!".format(log_path))
...@@ -52,6 +53,7 @@ def parser_results_from_log_by_name(log_path, names_list):
        parser_results[name] = result
    return parser_results


def load_gt_from_file(gt_file):
    if not os.path.exists(gt_file):
        raise ValueError("The log file {} does not exists!".format(gt_file))
......
===========================train_params===========================
model_name:ocr_det
python:python3.7
gpu_list:0|0,1
Global.use_gpu:True|True
Global.auto_cast:amp
Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=300
Global.save_model_dir:./output/
Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_whole_infer=4
Global.pretrained_model:null
train_model_name:latest
train_infer_img_dir:./train_data/icdar2015/text_localization/ch4_test_images/
null:null
##
trainer:norm_train|pact_train|fpgm_train
norm_train:tools/train.py -c test_tipc/configs/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/MobileNetV3_large_x0_5_pretrained
pact_train:deploy/slim/quantization/quant.py -c test_tipc/configs/det_mv3_db.yml -o
fpgm_train:deploy/slim/prune/sensitivity_anal.py -c test_tipc/configs/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/det_mv3_db_v2.0_train/best_accuracy
distill_train:null
null:null
null:null
##
===========================eval_params===========================
eval:null
null:null
##
===========================infer_params===========================
Global.save_inference_dir:./output/
Global.pretrained_model:
norm_export:tools/export_model.py -c test_tipc/configs/det_mv3_db.yml -o
quant_export:deploy/slim/quantization/export_model.py -c test_tipc/configs/det_mv3_db.yml -o
fpgm_export:deploy/slim/prune/export_prune_model.py -c test_tipc/configs/det_mv3_db.yml -o
distill_export:null
export1:null
export2:null
inference_dir:null
train_model:./inference/ch_ppocr_mobile_v2.0_det_train/best_accuracy
infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o
infer_quant:False
inference:tools/infer/predict_det.py
--use_gpu:True|False
--enable_mkldnn:True|False
--cpu_threads:1|6
--rec_batch_num:1
--use_tensorrt:False|True
--precision:fp32|fp16|int8
--det_model_dir:
--image_dir:./inference/ch_det_data_50/all-sum-510/
null:null
--benchmark:True
null:null
===========================cpp_infer_params===========================
use_opencv:True
infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/
infer_quant:False
inference:./deploy/cpp_infer/build/ppocr det
--use_gpu:True|False
--enable_mkldnn:True|False
--cpu_threads:1|6
--rec_batch_num:1
--use_tensorrt:False|True
--precision:fp32|fp16
--det_model_dir:
--image_dir:./inference/ch_det_data_50/all-sum-510/
null:null
--benchmark:True
===========================serving_params===========================
model_name:ocr_det
python:python3.7
trans_model:-m paddle_serving_client.convert
--dirname:./inference/ch_ppocr_mobile_v2.0_det_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--serving_server:./deploy/pdserving/ppocr_det_mobile_2.0_serving/
--serving_client:./deploy/pdserving/ppocr_det_mobile_2.0_client/
serving_dir:./deploy/pdserving
web_service:web_service_det.py --config=config.yml --opt op.det.concurrency=1
op.det.local_service_conf.devices:null|0
op.det.local_service_conf.use_mkldnn:True|False
op.det.local_service_conf.thread_num:1|6
op.det.local_service_conf.use_trt:False|True
op.det.local_service_conf.precision:fp32|fp16|int8
pipline:pipeline_http_client.py --image_dir=../../doc/imgs
===========================kl_quant_params===========================
infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/
infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o
infer_quant:True
inference:tools/infer/predict_det.py
--use_gpu:True|False
--enable_mkldnn:True|False
--cpu_threads:1|6
--rec_batch_num:1
--use_tensorrt:False|True
--precision:int8
--det_model_dir:
--image_dir:./inference/ch_det_data_50/all-sum-510/
null:null
--benchmark:True
null:null
null:null
===========================lite_params===========================
inference:./ocr_db_crnn det
infer_model:./models/ch_ppocr_mobile_v2.0_det_opt.nb|./models/ch_ppocr_mobile_v2.0_det_slim_opt.nb
--cpu_threads:1|4
--batch_size:1
--power_mode:LITE_POWER_HIGH|LITE_POWER_LOW
--image_dir:./test_data/icdar2015_lite/text_localization/ch4_test_images/|./test_data/icdar2015_lite/text_localization/ch4_test_images/img_233.jpg
--config_dir:./config.txt
--rec_dict_dir:./ppocr_keys_v1.txt
--benchmark:True
===========================train_params===========================
model_name:ocr_det
python:python3.7
gpu_list:xx.xx.xx.xx,xx.xx.xx.xx;0,1
Global.use_gpu:True|True
Global.auto_cast:null|amp
Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=300
Global.save_model_dir:./output/
Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_whole_infer=4
Global.pretrained_model:null
train_model_name:latest
train_infer_img_dir:./train_data/icdar2015/text_localization/ch4_test_images/
null:null
##
trainer:norm_train|pact_train|fpgm_train
norm_train:tools/train.py -c test_tipc/configs/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/MobileNetV3_large_x0_5_pretrained
pact_train:deploy/slim/quantization/quant.py -c test_tipc/configs/det_mv3_db.yml -o
fpgm_train:deploy/slim/prune/sensitivity_anal.py -c test_tipc/configs/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/det_mv3_db_v2.0_train/best_accuracy
distill_train:null
null:null
null:null
##
===========================eval_params===========================
eval:null
null:null
##
===========================infer_params===========================
Global.save_inference_dir:./output/
Global.pretrained_model:
norm_export:tools/export_model.py -c test_tipc/configs/det_mv3_db.yml -o
quant_export:deploy/slim/quantization/export_model.py -c test_tipc/configs/det_mv3_db.yml -o
fpgm_export:deploy/slim/prune/export_prune_model.py -c test_tipc/configs/det_mv3_db.yml -o
distill_export:null
export1:null
export2:null
inference_dir:null
train_model:./inference/ch_ppocr_mobile_v2.0_det_train/best_accuracy
infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o
infer_quant:False
inference:tools/infer/predict_det.py
--use_gpu:True|False
--enable_mkldnn:True|False
--cpu_threads:1|6
--rec_batch_num:1
--use_tensorrt:False|True
--precision:fp32|fp16|int8
--det_model_dir:
--image_dir:./inference/ch_det_data_50/all-sum-510/
null:null
--benchmark:True
null:null
===========================cpp_infer_params===========================
use_opencv:True
infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/
infer_quant:False
inference:./deploy/cpp_infer/build/ppocr det
--use_gpu:True|False
--enable_mkldnn:True|False
--cpu_threads:1|6
--rec_batch_num:1
--use_tensorrt:False|True
--precision:fp32|fp16
--det_model_dir:
--image_dir:./inference/ch_det_data_50/all-sum-510/
null:null
--benchmark:True
===========================serving_params===========================
model_name:ocr_det
python:python3.7
trans_model:-m paddle_serving_client.convert
--dirname:./inference/ch_ppocr_mobile_v2.0_det_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--serving_server:./deploy/pdserving/ppocr_det_mobile_2.0_serving/
--serving_client:./deploy/pdserving/ppocr_det_mobile_2.0_client/
serving_dir:./deploy/pdserving
web_service:web_service_det.py --config=config.yml --opt op.det.concurrency=1
op.det.local_service_conf.devices:null|0
op.det.local_service_conf.use_mkldnn:True|False
op.det.local_service_conf.thread_num:1|6
op.det.local_service_conf.use_trt:False|True
op.det.local_service_conf.precision:fp32|fp16|int8
pipline:pipeline_http_client.py --image_dir=../../doc/imgs
===========================kl_quant_params===========================
infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/
infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o
infer_quant:True
inference:tools/infer/predict_det.py
--use_gpu:True|False
--enable_mkldnn:True|False
--cpu_threads:1|6
--rec_batch_num:1
--use_tensorrt:False|True
--precision:int8
--det_model_dir:
--image_dir:./inference/ch_det_data_50/all-sum-510/
null:null
--benchmark:True
null:null
null:null
===========================lite_params===========================
inference:./ocr_db_crnn det
infer_model:./models/ch_ppocr_mobile_v2.0_det_opt.nb|./models/ch_ppocr_mobile_v2.0_det_slim_opt.nb
--cpu_threads:1|4
--batch_size:1
--power_mode:LITE_POWER_HIGH|LITE_POWER_LOW
--image_dir:./test_data/icdar2015_lite/text_localization/ch4_test_images/|./test_data/icdar2015_lite/text_localization/ch4_test_images/img_233.jpg
--config_dir:./config.txt
--rec_dict_dir:./ppocr_keys_v1.txt
--benchmark:True
===========================train_params===========================
model_name:ocr_det
python:python
gpu_list:-1
Global.use_gpu:False
Global.auto_cast:null
Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=300
Global.save_model_dir:./output/
Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_whole_infer=4
Global.pretrained_model:null
train_model_name:latest
train_infer_img_dir:./train_data/icdar2015/text_localization/ch4_test_images/
null:null
##
trainer:norm_train|pact_train|fpgm_train
norm_train:tools/train.py -c test_tipc/configs/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/MobileNetV3_large_x0_5_pretrained
pact_train:deploy/slim/quantization/quant.py -c test_tipc/configs/det_mv3_db.yml -o
fpgm_train:deploy/slim/prune/sensitivity_anal.py -c test_tipc/configs/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/det_mv3_db_v2.0_train/best_accuracy
distill_train:null
null:null
null:null
##
===========================eval_params===========================
eval:null
null:null
##
===========================infer_params===========================
Global.save_inference_dir:./output/
Global.pretrained_model:
norm_export:tools/export_model.py -c test_tipc/configs/det_mv3_db.yml -o
quant_export:deploy/slim/quantization/export_model.py -c test_tipc/configs/det_mv3_db.yml -o
fpgm_export:deploy/slim/prune/export_prune_model.py -c test_tipc/configs/det_mv3_db.yml -o
distill_export:null
export1:null
export2:null
inference_dir:null
train_model:./inference/ch_ppocr_mobile_v2.0_det_train/best_accuracy
infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o
infer_quant:False
inference:tools/infer/predict_det.py
--use_gpu:False
--enable_mkldnn:False
--cpu_threads:1|6
--rec_batch_num:1
--use_tensorrt:False
--precision:fp32
--det_model_dir:
--image_dir:./inference/ch_det_data_50/all-sum-510/
null:null
--benchmark:True
null:null
===========================cpp_infer_params===========================
use_opencv:True
infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/
infer_quant:False
inference:./deploy/cpp_infer/build/ppocr det
--use_gpu:False
--enable_mkldnn:False
--cpu_threads:1|6
--rec_batch_num:1
--use_tensorrt:False
--precision:fp32
--det_model_dir:
--image_dir:./inference/ch_det_data_50/all-sum-510/
null:null
--benchmark:True
===========================serving_params===========================
model_name:ocr_det
python:python3.7
trans_model:-m paddle_serving_client.convert
--dirname:./inference/ch_ppocr_mobile_v2.0_det_infer/
--model_filename:inference.pdmodel
--params_filename:inference.pdiparams
--serving_server:./deploy/pdserving/ppocr_det_mobile_2.0_serving/
--serving_client:./deploy/pdserving/ppocr_det_mobile_2.0_client/
serving_dir:./deploy/pdserving
web_service:web_service_det.py --config=config.yml --opt op.det.concurrency=1
op.det.local_service_conf.devices:null|0
op.det.local_service_conf.use_mkldnn:True|False
op.det.local_service_conf.thread_num:1|6
op.det.local_service_conf.use_trt:False|True
op.det.local_service_conf.precision:fp32|fp16|int8
pipline:pipeline_http_client.py --image_dir=../../doc/imgs
===========================kl_quant_params===========================
infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/
infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o
infer_quant:True
inference:tools/infer/predict_det.py
--use_gpu:False
--enable_mkldnn:False
--cpu_threads:1|6
--rec_batch_num:1
--use_tensorrt:False
--precision:int8
--det_model_dir:
--image_dir:./inference/ch_det_data_50/all-sum-510/
null:null
--benchmark:True
null:null
null:null
===========================train_params===========================
model_name:ocr_det
python:python3.7
gpu_list:0|0,1
Global.use_gpu:True|True
Global.auto_cast:null
Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=300
Global.save_model_dir:./output/
Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_whole_infer=4
Global.pretrained_model:null
train_model_name:latest
train_infer_img_dir:./train_data/icdar2015/text_localization/ch4_test_images/
null:null
##
trainer:norm_train|pact_train|fpgm_train
norm_train:tools/train.py -c test_tipc/configs/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/MobileNetV3_large_x0_5_pretrained
pact_train:deploy/slim/quantization/quant.py -c test_tipc/configs/det_mv3_db.yml -o
fpgm_train:deploy/slim/prune/sensitivity_anal.py -c test_tipc/configs/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/det_mv3_db_v2.0_train/best_accuracy
distill_train:null
null:null
null:null
...@@ -27,13 +27,13 @@ null:null
===========================infer_params===========================
Global.save_inference_dir:./output/
Global.pretrained_model:
norm_export:tools/export_model.py -c test_tipc/configs/det_mv3_db.yml -o
quant_export:deploy/slim/quantization/export_model.py -c test_tipc/configs/det_mv3_db.yml -o
fpgm_export:deploy/slim/prune/export_prune_model.py -c test_tipc/configs/det_mv3_db.yml -o
distill_export:null
export1:null
export2:null
inference_dir:null
train_model:./inference/ch_ppocr_mobile_v2.0_det_train/best_accuracy
infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o
infer_quant:False
......