Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
wangsen
paddle_dbnet
Commits
4824c25b
Commit
4824c25b
authored
Jul 04, 2024
by
wangsen
Browse files
Initial commit
parents
Changes
396
Hide whitespace changes
Inline
Side-by-side
Showing
20 changed files
with
1501 additions
and
0 deletions
+1501
-0
benchmark/PaddleOCR_DBNet/models/head/DBHead.py
benchmark/PaddleOCR_DBNet/models/head/DBHead.py
+138
-0
benchmark/PaddleOCR_DBNet/models/head/__init__.py
benchmark/PaddleOCR_DBNet/models/head/__init__.py
+14
-0
benchmark/PaddleOCR_DBNet/models/losses/DB_loss.py
benchmark/PaddleOCR_DBNet/models/losses/DB_loss.py
+49
-0
benchmark/PaddleOCR_DBNet/models/losses/__init__.py
benchmark/PaddleOCR_DBNet/models/losses/__init__.py
+16
-0
benchmark/PaddleOCR_DBNet/models/losses/basic_loss.py
benchmark/PaddleOCR_DBNet/models/losses/basic_loss.py
+97
-0
benchmark/PaddleOCR_DBNet/models/model.py
benchmark/PaddleOCR_DBNet/models/model.py
+39
-0
benchmark/PaddleOCR_DBNet/models/neck/FPN.py
benchmark/PaddleOCR_DBNet/models/neck/FPN.py
+84
-0
benchmark/PaddleOCR_DBNet/models/neck/__init__.py
benchmark/PaddleOCR_DBNet/models/neck/__init__.py
+13
-0
benchmark/PaddleOCR_DBNet/multi_gpu_train.sh
benchmark/PaddleOCR_DBNet/multi_gpu_train.sh
+3
-0
benchmark/PaddleOCR_DBNet/post_processing/__init__.py
benchmark/PaddleOCR_DBNet/post_processing/__init__.py
+14
-0
benchmark/PaddleOCR_DBNet/post_processing/seg_detector_representer.py
...ddleOCR_DBNet/post_processing/seg_detector_representer.py
+192
-0
benchmark/PaddleOCR_DBNet/predict.sh
benchmark/PaddleOCR_DBNet/predict.sh
+2
-0
benchmark/PaddleOCR_DBNet/requirement.txt
benchmark/PaddleOCR_DBNet/requirement.txt
+14
-0
benchmark/PaddleOCR_DBNet/singlel_gpu_train.sh
benchmark/PaddleOCR_DBNet/singlel_gpu_train.sh
+2
-0
benchmark/PaddleOCR_DBNet/test/README.MD
benchmark/PaddleOCR_DBNet/test/README.MD
+9
-0
benchmark/PaddleOCR_DBNet/test_tipc/benchmark_train.sh
benchmark/PaddleOCR_DBNet/test_tipc/benchmark_train.sh
+287
-0
benchmark/PaddleOCR_DBNet/test_tipc/common_func.sh
benchmark/PaddleOCR_DBNet/test_tipc/common_func.sh
+68
-0
benchmark/PaddleOCR_DBNet/test_tipc/configs/det_res50_db/train_infer_python.txt
...Net/test_tipc/configs/det_res50_db/train_infer_python.txt
+61
-0
benchmark/PaddleOCR_DBNet/test_tipc/prepare.sh
benchmark/PaddleOCR_DBNet/test_tipc/prepare.sh
+55
-0
benchmark/PaddleOCR_DBNet/test_tipc/test_train_inference_python.sh
.../PaddleOCR_DBNet/test_tipc/test_train_inference_python.sh
+344
-0
No files found.
Too many changes to show.
To preserve performance only
396 of 396+
files are displayed.
Plain diff
Email patch
benchmark/PaddleOCR_DBNet/models/head/DBHead.py
0 → 100644
View file @
4824c25b
# -*- coding: utf-8 -*-
# @Time : 2019/12/4 14:54
# @Author : zhoujun
import
paddle
from
paddle
import
nn
,
ParamAttr
class DBHead(nn.Layer):
    """Differentiable Binarization (DB) head.

    Predicts a shrink (probability) map and a threshold map from the neck
    features, each upsampled 4x back to input resolution. During training an
    approximate binary map, produced by the differentiable step function, is
    concatenated as a third channel.

    Args:
        in_channels: channels of the incoming feature map.
        out_channels: unused; kept for interface compatibility with the
            head builder.
        k: steepness of the differentiable binarization (larger k means
            closer to a hard threshold).
    """

    def __init__(self, in_channels, out_channels, k=50):
        super().__init__()
        self.k = k

        def _kaiming():
            # A fresh ParamAttr per layer so parameters are never shared.
            return ParamAttr(initializer=nn.initializer.KaimingNormal())

        def _bn(channels):
            # BN initialised with weight=1 and a tiny bias, following the
            # reference DBNet implementation.
            return nn.BatchNorm2D(
                channels,
                weight_attr=ParamAttr(initializer=nn.initializer.Constant(1)),
                bias_attr=ParamAttr(initializer=nn.initializer.Constant(1e-4)))

        # Shrink-map branch: conv3x3 -> BN -> ReLU -> 2x deconv -> BN ->
        # ReLU -> 2x deconv -> sigmoid (restores 4x spatial resolution).
        self.binarize = nn.Sequential(
            nn.Conv2D(in_channels, in_channels // 4, 3, padding=1,
                      weight_attr=_kaiming()),
            _bn(in_channels // 4),
            nn.ReLU(),
            nn.Conv2DTranspose(in_channels // 4, in_channels // 4, 2, 2,
                               weight_attr=_kaiming()),
            _bn(in_channels // 4),
            nn.ReLU(),
            # FIX: wrap the initializer in ParamAttr like every other layer
            # in this head (the original passed the bare initializer).
            nn.Conv2DTranspose(in_channels // 4, 1, 2, 2,
                               weight_attr=_kaiming()),
            nn.Sigmoid())
        self.thresh = self._init_thresh(in_channels)

    def forward(self, x):
        """Return concatenated maps: [shrink, threshold(, binary)]."""
        shrink_maps = self.binarize(x)
        threshold_maps = self.thresh(x)
        if self.training:
            binary_maps = self.step_function(shrink_maps, threshold_maps)
            y = paddle.concat(
                (shrink_maps, threshold_maps, binary_maps), axis=1)
        else:
            y = paddle.concat((shrink_maps, threshold_maps), axis=1)
        return y

    def _init_thresh(self, inner_channels, serial=False, smooth=False,
                     bias=False):
        """Build the threshold-map branch.

        Args:
            inner_channels: channels of the incoming feature map.
            serial: if True, expect one extra input channel (the shrink map
                concatenated to the features).
            smooth: use nearest-upsample + conv instead of transposed conv.
            bias: whether the first convolution uses a bias term.
        """
        in_channels = inner_channels
        if serial:
            in_channels += 1
        self.thresh = nn.Sequential(
            nn.Conv2D(in_channels, inner_channels // 4, 3, padding=1,
                      bias_attr=bias,
                      weight_attr=ParamAttr(
                          initializer=nn.initializer.KaimingNormal())),
            nn.BatchNorm2D(
                inner_channels // 4,
                weight_attr=ParamAttr(initializer=nn.initializer.Constant(1)),
                bias_attr=ParamAttr(
                    initializer=nn.initializer.Constant(1e-4))),
            nn.ReLU(),
            self._init_upsample(inner_channels // 4, inner_channels // 4,
                                smooth=smooth, bias=bias),
            nn.BatchNorm2D(
                inner_channels // 4,
                weight_attr=ParamAttr(initializer=nn.initializer.Constant(1)),
                bias_attr=ParamAttr(
                    initializer=nn.initializer.Constant(1e-4))),
            nn.ReLU(),
            self._init_upsample(inner_channels // 4, 1,
                                smooth=smooth, bias=bias),
            nn.Sigmoid())
        return self.thresh

    def _init_upsample(self, in_channels, out_channels, smooth=False,
                       bias=False):
        """Return a 2x upsampling layer: transposed conv, or (if smooth)
        nearest-neighbour upsample followed by convolution."""
        if smooth:
            inter_out_channels = out_channels
            if out_channels == 1:
                inter_out_channels = in_channels
            module_list = [
                nn.Upsample(scale_factor=2, mode='nearest'),
                nn.Conv2D(in_channels, inter_out_channels, 3, 1, 1,
                          bias_attr=bias,
                          weight_attr=ParamAttr(
                              initializer=nn.initializer.KaimingNormal()))
            ]
            if out_channels == 1:
                # NOTE(review): this extra conv keeps in_channels as input
                # and padding=1 with kernel_size=1, mirroring the upstream
                # DBNet code; preserved for checkpoint compatibility —
                # confirm before changing.
                module_list.append(
                    nn.Conv2D(in_channels, out_channels, kernel_size=1,
                              stride=1, padding=1, bias_attr=True,
                              weight_attr=ParamAttr(
                                  initializer=nn.initializer.KaimingNormal())))
            # FIX: paddle.nn.Sequential takes layers as *args; the original
            # passed the list itself, which mis-registers the sublayers.
            return nn.Sequential(*module_list)
        else:
            return nn.Conv2DTranspose(
                in_channels, out_channels, 2, 2,
                weight_attr=ParamAttr(
                    initializer=nn.initializer.KaimingNormal()))

    def step_function(self, x, y):
        """Differentiable binarization: 1 / (1 + exp(-k * (x - y)))."""
        return paddle.reciprocal(1 + paddle.exp(-self.k * (x - y)))
benchmark/PaddleOCR_DBNet/models/head/__init__.py
0 → 100644
View file @
4824c25b
# -*- coding: utf-8 -*-
# @Time : 2020/6/5 11:35
# @Author : zhoujun
from
.DBHead
import
DBHead
__all__ = ['build_head']

# Registry of head class names that build_head may instantiate.
support_head = ['DBHead']


def build_head(head_name, **kwargs):
    """Instantiate a detection head by class name.

    Args:
        head_name: name of the head class; must appear in ``support_head``.
        **kwargs: forwarded to the head's constructor.

    Returns:
        The constructed head layer.

    Raises:
        AssertionError: if ``head_name`` is not a supported head.
    """
    assert head_name in support_head, f'all support head is {support_head}'
    return eval(head_name)(**kwargs)
\ No newline at end of file
benchmark/PaddleOCR_DBNet/models/losses/DB_loss.py
0 → 100644
View file @
4824c25b
import
paddle
from
models.losses.basic_loss
import
BalanceCrossEntropyLoss
,
MaskL1Loss
,
DiceLoss
class DBLoss(paddle.nn.Layer):
    """Loss for DBNet: balanced BCE on the shrink map, masked L1 on the
    threshold map and dice loss on the approximate binary map.

    Args:
        alpha: weight of the shrink-map (binary_map) loss term.
        beta: weight of the threshold-map loss term.
        ohem_ratio: negative:positive ratio used by OHEM in the BCE loss.
        reduction: 'mean' or 'sum' reduction over the batch.
        eps: numerical-stability epsilon forwarded to the sub-losses.
    """

    def __init__(self, alpha=1.0, beta=10, ohem_ratio=3, reduction='mean',
                 eps=1e-06):
        super().__init__()
        assert reduction in ['mean', 'sum'], " reduction must in ['mean','sum']"
        self.alpha = alpha
        self.beta = beta
        self.bce_loss = BalanceCrossEntropyLoss(negative_ratio=ohem_ratio)
        self.dice_loss = DiceLoss(eps=eps)
        self.l1_loss = MaskL1Loss(eps=eps)
        self.ohem_ratio = ohem_ratio
        self.reduction = reduction

    def forward(self, pred, batch):
        """Compute the combined DB loss.

        ``pred`` is (N, 2 or 3, H, W): shrink map, threshold map and — in
        training — the approximate binary map. ``batch`` supplies the
        ground-truth maps and their masks.

        Returns:
            dict of individual loss terms, with the total under ``'loss'``.
        """
        shrink_pred = pred[:, 0, :, :]
        threshold_pred = pred[:, 1, :, :]
        binary_pred = pred[:, 2, :, :]
        shrink_term = self.bce_loss(shrink_pred, batch['shrink_map'],
                                    batch['shrink_mask'])
        threshold_term = self.l1_loss(threshold_pred, batch['threshold_map'],
                                      batch['threshold_mask'])
        metrics = dict(loss_shrink_maps=shrink_term,
                       loss_threshold_maps=threshold_term)
        if pred.shape[1] > 2:
            binary_term = self.dice_loss(binary_pred, batch['shrink_map'],
                                         batch['shrink_mask'])
            metrics['loss_binary_maps'] = binary_term
            metrics['loss'] = (self.alpha * shrink_term +
                               self.beta * threshold_term + binary_term)
        else:
            metrics['loss'] = shrink_term
        return metrics
benchmark/PaddleOCR_DBNet/models/losses/__init__.py
0 → 100644
View file @
4824c25b
# -*- coding: utf-8 -*-
# @Time : 2020/6/5 11:36
# @Author : zhoujun
import
copy
from
.DB_loss
import
DBLoss
__all__ = ['build_loss']

# Loss class names that build_loss may instantiate.
support_loss = ['DBLoss']


def build_loss(config):
    """Build a loss layer from a config dict.

    The ``'type'`` key selects the loss class; every remaining key is passed
    to its constructor. The caller's dict is deep-copied and left untouched.

    Raises:
        AssertionError: if the requested type is not supported.
    """
    cfg = copy.deepcopy(config)
    loss_type = cfg.pop('type')
    assert loss_type in support_loss, f'all support loss is {support_loss}'
    return eval(loss_type)(**cfg)
benchmark/PaddleOCR_DBNet/models/losses/basic_loss.py
0 → 100644
View file @
4824c25b
# -*- coding: utf-8 -*-
# @Time : 2019/12/4 14:39
# @Author : zhoujun
import
paddle
import
paddle.nn
as
nn
class BalanceCrossEntropyLoss(nn.Layer):
    '''
    Balanced cross entropy loss with online hard example mining: negatives
    are capped at ``negative_ratio`` times the number of positives and only
    the hardest ones contribute.
    Shape:
        - Input: :math:`(N, 1, H, W)`
        - GT: :math:`(N, 1, H, W)`, same shape as the input
        - Mask: :math:`(N, H, W)`, same spatial shape as the input
        - Output: scalar.
    '''

    def __init__(self, negative_ratio=3.0, eps=1e-6):
        super(BalanceCrossEntropyLoss, self).__init__()
        self.negative_ratio = negative_ratio
        self.eps = eps

    def forward(self,
                pred: paddle.Tensor,
                gt: paddle.Tensor,
                mask: paddle.Tensor,
                return_origin=False):
        '''
        Args:
            pred: shape :math:`(N, 1, H, W)`, the prediction of network
            gt: shape :math:`(N, 1, H, W)`, the target
            mask: shape :math:`(N, H, W)`, the mask indicates positive regions
        '''
        pos_region = gt * mask
        neg_region = (1 - gt) * mask
        n_pos = int(pos_region.sum())
        # OHEM: keep at most negative_ratio * n_pos negatives.
        n_neg = min(int(neg_region.sum()),
                    int(n_pos * self.negative_ratio))
        raw_loss = nn.functional.binary_cross_entropy(pred, gt,
                                                      reduction='none')
        pos_loss = raw_loss * pos_region
        neg_loss = raw_loss * neg_region
        # Only the n_neg hardest (largest-loss) negatives contribute.
        hard_neg, _ = neg_loss.reshape([-1]).topk(n_neg)
        balance_loss = (pos_loss.sum() + hard_neg.sum()) / (
            n_pos + n_neg + self.eps)
        if return_origin:
            return balance_loss, raw_loss
        return balance_loss
class DiceLoss(nn.Layer):
    '''
    Loss function from https://arxiv.org/abs/1707.03237,
    where iou computation is introduced heatmap manner to measure the
    diversity between two heatmaps.
    '''

    def __init__(self, eps=1e-6):
        super(DiceLoss, self).__init__()
        self.eps = eps

    def forward(self, pred: paddle.Tensor, gt, mask, weights=None):
        '''
        pred: one or two heatmaps of shape (N, 1, H, W),
            the losses of tow heatmaps are added together.
        gt: (N, 1, H, W)
        mask: (N, H, W)
        '''
        return self._compute(pred, gt, mask, weights)

    def _compute(self, pred, gt, mask, weights):
        # Drop the singleton channel so everything is (N, H, W).
        if len(pred.shape) == 4:
            pred = pred[:, 0, :, :]
            gt = gt[:, 0, :, :]
        assert pred.shape == gt.shape
        assert pred.shape == mask.shape
        if weights is not None:
            assert weights.shape == mask.shape
            mask = weights * mask
        overlap = (pred * gt * mask).sum()
        total = (pred * mask).sum() + (gt * mask).sum() + self.eps
        loss = 1 - 2.0 * overlap / total
        assert loss <= 1
        return loss
class MaskL1Loss(nn.Layer):
    """Mean absolute error restricted to the masked region."""

    def __init__(self, eps=1e-6):
        super(MaskL1Loss, self).__init__()
        # eps avoids division by zero when the mask is empty.
        self.eps = eps

    def forward(self, pred: paddle.Tensor, gt, mask):
        """Return sum(|pred - gt| * mask) / (sum(mask) + eps)."""
        masked_abs_diff = (paddle.abs(pred - gt) * mask).sum()
        return masked_abs_diff / (mask.sum() + self.eps)
benchmark/PaddleOCR_DBNet/models/model.py
0 → 100644
View file @
4824c25b
# -*- coding: utf-8 -*-
# @Time : 2019/8/23 21:57
# @Author : zhoujun
from
addict
import
Dict
from
paddle
import
nn
import
paddle.nn.functional
as
F
from
models.backbone
import
build_backbone
from
models.neck
import
build_neck
from
models.head
import
build_head
class Model(nn.Layer):
    """Detector assembled from a backbone, a neck and a head.

    Args:
        model_config: dict with ``backbone``, ``neck`` and ``head``
            sections; each section's ``type`` key names the component and
            the remaining keys are its constructor arguments.
    """

    def __init__(self, model_config: dict):
        super().__init__()
        cfg = Dict(model_config)
        backbone_type = cfg.backbone.pop('type')
        neck_type = cfg.neck.pop('type')
        head_type = cfg.head.pop('type')
        self.backbone = build_backbone(backbone_type, **cfg.backbone)
        self.neck = build_neck(neck_type,
                               in_channels=self.backbone.out_channels,
                               **cfg.neck)
        self.head = build_head(head_type,
                               in_channels=self.neck.out_channels,
                               **cfg.head)
        self.name = f'{backbone_type}_{neck_type}_{head_type}'

    def forward(self, x):
        """Run the network; the head output is upsampled back to the
        spatial size of the input image."""
        _, _, h, w = x.shape
        features = self.backbone(x)
        fused = self.neck(features)
        maps = self.head(fused)
        return F.interpolate(maps, size=(h, w), mode='bilinear',
                             align_corners=True)
benchmark/PaddleOCR_DBNet/models/neck/FPN.py
0 → 100644
View file @
4824c25b
# -*- coding: utf-8 -*-
# @Time : 2019/9/13 10:29
# @Author : zhoujun
import
paddle
import
paddle.nn.functional
as
F
from
paddle
import
nn
from
models.basic
import
ConvBnRelu
class FPN(nn.Layer):
    """Feature Pyramid Network neck.

    Args:
        in_channels: channel counts of the four backbone stages (c2..c5).
        inner_channels: total output width; each pyramid level carries
            ``inner_channels // 4`` channels.
    """

    def __init__(self, in_channels, inner_channels=256, **kwargs):
        super().__init__()
        inplace = True
        self.conv_out = inner_channels
        inner_channels = inner_channels // 4
        # 1x1 projections of each backbone stage down to the pyramid width.
        self.reduce_conv_c2 = ConvBnRelu(
            in_channels[0], inner_channels, kernel_size=1, inplace=inplace)
        self.reduce_conv_c3 = ConvBnRelu(
            in_channels[1], inner_channels, kernel_size=1, inplace=inplace)
        self.reduce_conv_c4 = ConvBnRelu(
            in_channels[2], inner_channels, kernel_size=1, inplace=inplace)
        self.reduce_conv_c5 = ConvBnRelu(
            in_channels[3], inner_channels, kernel_size=1, inplace=inplace)
        # 3x3 convs that smooth each merged level.
        self.smooth_p4 = ConvBnRelu(inner_channels, inner_channels,
                                    kernel_size=3, padding=1, inplace=inplace)
        self.smooth_p3 = ConvBnRelu(inner_channels, inner_channels,
                                    kernel_size=3, padding=1, inplace=inplace)
        self.smooth_p2 = ConvBnRelu(inner_channels, inner_channels,
                                    kernel_size=3, padding=1, inplace=inplace)
        # Final fusion conv applied to the concatenated pyramid.
        self.conv = nn.Sequential(
            nn.Conv2D(self.conv_out, self.conv_out, kernel_size=3,
                      padding=1, stride=1),
            nn.BatchNorm2D(self.conv_out),
            nn.ReLU())
        self.out_channels = self.conv_out

    def forward(self, x):
        c2, c3, c4, c5 = x
        # Top-down pathway with lateral connections.
        p5 = self.reduce_conv_c5(c5)
        p4 = self.smooth_p4(self._upsample_add(p5, self.reduce_conv_c4(c4)))
        p3 = self.smooth_p3(self._upsample_add(p4, self.reduce_conv_c3(c3)))
        p2 = self.smooth_p2(self._upsample_add(p3, self.reduce_conv_c2(c2)))
        fused = self._upsample_cat(p2, p3, p4, p5)
        return self.conv(fused)

    def _upsample_add(self, x, y):
        """Upsample x to y's spatial size and add element-wise."""
        return F.interpolate(x, size=y.shape[2:]) + y

    def _upsample_cat(self, p2, p3, p4, p5):
        """Upsample all levels to p2's size and concatenate on channels."""
        h, w = p2.shape[2:]
        p3 = F.interpolate(p3, size=(h, w))
        p4 = F.interpolate(p4, size=(h, w))
        p5 = F.interpolate(p5, size=(h, w))
        return paddle.concat([p2, p3, p4, p5], axis=1)
benchmark/PaddleOCR_DBNet/models/neck/__init__.py
0 → 100644
View file @
4824c25b
# -*- coding: utf-8 -*-
# @Time : 2020/6/5 11:34
# @Author : zhoujun
from
.FPN
import
FPN
__all__ = ['build_neck']

# Registry of neck class names that build_neck may instantiate.
support_neck = ['FPN']


def build_neck(neck_name, **kwargs):
    """Instantiate a neck module by class name.

    Args:
        neck_name: name of the neck class; must appear in ``support_neck``.
        **kwargs: forwarded to the neck's constructor.

    Raises:
        AssertionError: if ``neck_name`` is not a supported neck.
    """
    assert neck_name in support_neck, f'all support neck is {support_neck}'
    return eval(neck_name)(**kwargs)
benchmark/PaddleOCR_DBNet/multi_gpu_train.sh
0 → 100644
View file @
4824c25b
# export NCCL_P2P_DISABLE=1
CUDA_VISIBLE_DEVICES
=
0,1,2,3 python3
-m
paddle.distributed.launch tools/train.py
--config_file
"config/icdar2015_resnet50_FPN_DBhead_polyLR.yaml"
\ No newline at end of file
benchmark/PaddleOCR_DBNet/post_processing/__init__.py
0 → 100644
View file @
4824c25b
# -*- coding: utf-8 -*-
# @Time : 2019/12/5 15:17
# @Author : zhoujun
from
.seg_detector_representer
import
SegDetectorRepresenter
def get_post_processing(config):
    """Create a post-processing object from ``config``.

    ``config['type']`` names a class defined in this module (e.g.
    ``SegDetectorRepresenter``) and ``config['args']`` holds its keyword
    arguments.

    Returns:
        The constructed post-processor, or ``None`` when the config is
        missing keys or names an unknown type.
    """
    try:
        # NOTE: eval on a config value — config files are trusted here, but
        # do not feed untrusted input into this function.
        cls = eval(config['type'])(**config['args'])
        return cls
    # FIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; narrow to Exception while keeping the deliberate
    # best-effort "return None on bad config" behaviour.
    except Exception:
        return None
\ No newline at end of file
benchmark/PaddleOCR_DBNet/post_processing/seg_detector_representer.py
0 → 100644
View file @
4824c25b
import
cv2
import
numpy
as
np
import
pyclipper
import
paddle
from
shapely.geometry
import
Polygon
class SegDetectorRepresenter():
    """Convert DBNet probability maps into text boxes or polygons.

    Args:
        thresh: binarization threshold applied to the probability map.
        box_thresh: minimum mean score inside a region for it to be kept.
        max_candidates: maximum number of contours considered per image.
        unclip_ratio: expansion ratio used when un-shrinking detected
            regions back to full text size.
    """

    def __init__(self, thresh=0.3, box_thresh=0.7, max_candidates=1000,
                 unclip_ratio=1.5):
        self.min_size = 3  # minimum side length (px) of a kept box
        self.thresh = thresh
        self.box_thresh = box_thresh
        self.max_candidates = max_candidates
        self.unclip_ratio = unclip_ratio

    def __call__(self, batch, pred, is_output_polygon=False):
        '''
        batch: a dict produced by dataloaders.
            image: tensor of shape (N, C, H, W).
            polygons: tensor of shape (N, K, 4, 2), the polygons of objective regions.
            ignore_tags: tensor of shape (N, K), indicates whether a region is ignorable or not.
            shape: the original shape of images.
            filename: the original filenames of images.
        pred:
            binary: text region segmentation map, with shape (N, H, W)
            thresh: [if exists] thresh hold prediction with shape (N, H, W)
            thresh_binary: [if exists] binarized with threshhold, (N, H, W)
        '''
        if isinstance(pred, paddle.Tensor):
            pred = pred.numpy()
        pred = pred[:, 0, :, :]
        segmentation = self.binarize(pred)
        boxes_batch = []
        scores_batch = []
        for batch_index in range(pred.shape[0]):
            height, width = batch['shape'][batch_index]
            if is_output_polygon:
                boxes, scores = self.polygons_from_bitmap(
                    pred[batch_index], segmentation[batch_index], width,
                    height)
            else:
                boxes, scores = self.boxes_from_bitmap(
                    pred[batch_index], segmentation[batch_index], width,
                    height)
            boxes_batch.append(boxes)
            scores_batch.append(scores)
        return boxes_batch, scores_batch

    def binarize(self, pred):
        """Hard-threshold the probability map into a boolean bitmap."""
        return pred > self.thresh

    def polygons_from_bitmap(self, pred, _bitmap, dest_width, dest_height):
        '''
        _bitmap: single map with shape (H, W),
            whose values are binarized as {0, 1}
        '''
        assert len(_bitmap.shape) == 2
        bitmap = _bitmap  # The first channel
        height, width = bitmap.shape
        boxes = []
        scores = []
        contours, _ = cv2.findContours((bitmap * 255).astype(np.uint8),
                                       cv2.RETR_LIST,
                                       cv2.CHAIN_APPROX_SIMPLE)
        for contour in contours[:self.max_candidates]:
            # Simplify the contour before polygon post-processing.
            epsilon = 0.005 * cv2.arcLength(contour, True)
            approx = cv2.approxPolyDP(contour, epsilon, True)
            points = approx.reshape((-1, 2))
            if points.shape[0] < 4:
                continue
            # _, sside = self.get_mini_boxes(contour)
            # if sside < self.min_size:
            #     continue
            score = self.box_score_fast(pred, contour.squeeze(1))
            if self.box_thresh > score:
                continue
            if points.shape[0] > 2:
                box = self.unclip(points, unclip_ratio=self.unclip_ratio)
                # unclip may split the region into several polygons; skip
                # those ambiguous cases.
                if len(box) > 1:
                    continue
            else:
                continue
            box = box.reshape(-1, 2)
            _, sside = self.get_mini_boxes(box.reshape((-1, 1, 2)))
            if sside < self.min_size + 2:
                continue
            if not isinstance(dest_width, int):
                dest_width = dest_width.item()
                dest_height = dest_height.item()
            # Rescale from bitmap coordinates to the original image size.
            box[:, 0] = np.clip(
                np.round(box[:, 0] / width * dest_width), 0, dest_width)
            box[:, 1] = np.clip(
                np.round(box[:, 1] / height * dest_height), 0, dest_height)
            boxes.append(box)
            scores.append(score)
        return boxes, scores

    def boxes_from_bitmap(self, pred, _bitmap, dest_width, dest_height):
        '''
        _bitmap: single map with shape (H, W),
            whose values are binarized as {0, 1}
        '''
        assert len(_bitmap.shape) == 2
        bitmap = _bitmap  # The first channel
        height, width = bitmap.shape
        contours, _ = cv2.findContours((bitmap * 255).astype(np.uint8),
                                       cv2.RETR_LIST,
                                       cv2.CHAIN_APPROX_SIMPLE)
        num_contours = min(len(contours), self.max_candidates)
        boxes = np.zeros((num_contours, 4, 2), dtype=np.int16)
        scores = np.zeros((num_contours, ), dtype=np.float32)
        for index in range(num_contours):
            contour = contours[index].squeeze(1)
            points, sside = self.get_mini_boxes(contour)
            if sside < self.min_size:
                continue
            points = np.array(points)
            score = self.box_score_fast(pred, contour)
            if self.box_thresh > score:
                continue
            box = self.unclip(
                points, unclip_ratio=self.unclip_ratio).reshape(-1, 1, 2)
            box, sside = self.get_mini_boxes(box)
            if sside < self.min_size + 2:
                continue
            box = np.array(box)
            if not isinstance(dest_width, int):
                dest_width = dest_width.item()
                dest_height = dest_height.item()
            # Rescale from bitmap coordinates to the original image size.
            box[:, 0] = np.clip(
                np.round(box[:, 0] / width * dest_width), 0, dest_width)
            box[:, 1] = np.clip(
                np.round(box[:, 1] / height * dest_height), 0, dest_height)
            boxes[index, :, :] = box.astype(np.int16)
            scores[index] = score
        return boxes, scores

    def unclip(self, box, unclip_ratio=1.5):
        """Expand a shrunk polygon outward by area/perimeter * ratio."""
        poly = Polygon(box)
        distance = poly.area * unclip_ratio / poly.length
        offset = pyclipper.PyclipperOffset()
        offset.AddPath(box, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)
        expanded = np.array(offset.Execute(distance))
        return expanded

    def get_mini_boxes(self, contour):
        """Return the min-area rectangle of a contour as 4 points ordered
        top-left, top-right, bottom-right, bottom-left, plus its short side."""
        bounding_box = cv2.minAreaRect(contour)
        points = sorted(list(cv2.boxPoints(bounding_box)), key=lambda x: x[0])
        index_1, index_2, index_3, index_4 = 0, 1, 2, 3
        if points[1][1] > points[0][1]:
            index_1 = 0
            index_4 = 1
        else:
            index_1 = 1
            index_4 = 0
        if points[3][1] > points[2][1]:
            index_2 = 2
            index_3 = 3
        else:
            index_2 = 3
            index_3 = 2
        box = [
            points[index_1], points[index_2], points[index_3],
            points[index_4]
        ]
        return box, min(bounding_box[1])

    def box_score_fast(self, bitmap, _box):
        """Mean of ``bitmap`` inside the polygon ``_box`` (box score)."""
        h, w = bitmap.shape[:2]
        box = _box.copy()
        # FIX: np.int was removed in NumPy >= 1.24; use np.int32 for the
        # clipped pixel indices instead.
        xmin = np.clip(np.floor(box[:, 0].min()).astype(np.int32), 0, w - 1)
        xmax = np.clip(np.ceil(box[:, 0].max()).astype(np.int32), 0, w - 1)
        ymin = np.clip(np.floor(box[:, 1].min()).astype(np.int32), 0, h - 1)
        ymax = np.clip(np.ceil(box[:, 1].max()).astype(np.int32), 0, h - 1)
        mask = np.zeros((ymax - ymin + 1, xmax - xmin + 1), dtype=np.uint8)
        box[:, 0] = box[:, 0] - xmin
        box[:, 1] = box[:, 1] - ymin
        cv2.fillPoly(mask, box.reshape(1, -1, 2).astype(np.int32), 1)
        return cv2.mean(bitmap[ymin:ymax + 1, xmin:xmax + 1], mask)[0]
benchmark/PaddleOCR_DBNet/predict.sh
0 → 100644
View file @
4824c25b
CUDA_VISIBLE_DEVICES
=
0 python tools/predict.py
--model_path
model_best.pth
--input_folder
./input
--output_folder
./output
--thre
0.7
--polygon
--show
--save_result
\ No newline at end of file
benchmark/PaddleOCR_DBNet/requirement.txt
0 → 100644
View file @
4824c25b
anyconfig
future
imgaug
matplotlib
numpy
opencv-python
Polygon3
pyclipper
PyYAML
scikit-image
Shapely
tqdm
addict
\ No newline at end of file
benchmark/PaddleOCR_DBNet/singlel_gpu_train.sh
0 → 100644
View file @
4824c25b
CUDA_VISIBLE_DEVICES
=
0 python3 tools/train.py
--config_file
"config/icdar2015_resnet50_FPN_DBhead_polyLR.yaml"
\ No newline at end of file
benchmark/PaddleOCR_DBNet/test/README.MD
0 → 100644
View file @
4824c25b
Place the images that you want to detect here. It is best to name them as follows:
img_10.jpg
img_11.jpg
img_{img_id}.jpg
For predicting single images, you can change the
`img_path`
in the
`/tools/predict.py`
to your image number.
The result will be saved in the `output_folder` (default: test/output) that you specify in predict.sh
\ No newline at end of file
benchmark/PaddleOCR_DBNet/test_tipc/benchmark_train.sh
0 → 100644
View file @
4824c25b
#!/bin/bash
source
test_tipc/common_func.sh
# run benchmark sh
# Usage:
# bash run_benchmark_train.sh config.txt params
# or
# bash run_benchmark_train.sh config.txt
# Print the value part of a "key=value" string ($1).
function func_parser_params(){
    strs=$1
    IFS="="
    array=(${strs})
    tmp=${array[1]}
    echo ${tmp}
}
# Scale an epoch count ($2) by the total card count encoded in a device
# descriptor ($1) such as "N1C8": epochs * nodes * cards-per-node.
# NOTE(review): assumes the descriptor has the exact "N<M>C<P>" shape — confirm.
function set_dynamic_epoch(){
    string=$1
    num=$2
    _str=${string:1:6}   # strip the leading "N"
    IFS="C"
    arr=(${_str})
    M=${arr[0]}          # node count
    P=${arr[1]}          # cards per node
    ep=`expr $num \* $M \* $P`
    echo $ep
}
# Rewrite line $2 of config file $1 as "key:$3", keeping the key that
# precedes the first ":" on that line.
function func_sed_params(){
    filename=$1
    line=$2
    param_value=$3
    params=`sed -n "${line}p" $filename`
    IFS=":"
    array=(${params})
    key=${array[0]}
    value=${array[1]}    # parsed but unused; kept for reference
    new_params="${key}:${param_value}"
    IFS=";"
    cmd="sed -i '${line}s/.*/${new_params}/' '${filename}'"
    eval $cmd
}
# Build a comma-separated GPU id list (e.g. "0,1,2,3") for a device
# descriptor ($1) such as "N1C4": ids 0..((P-1)/M).
function set_gpu_id(){
    string=$1
    _str=${string:1:6}   # strip the leading "N"
    IFS="C"
    arr=(${_str})
    M=${arr[0]}          # node count
    P=${arr[1]}          # cards per node
    gn=`expr $P - 1`
    gpu_num=`expr $gn / $M`
    seq=`seq -s "," 0 $gpu_num`
    echo $seq
}
# Echo the basename of the current working directory (the repository name).
function get_repo_name(){
    IFS=";"
    cur_dir=$(pwd)
    IFS="/"
    arr=(${cur_dir})
    echo ${arr[-1]}
}
FILENAME
=
$1
# copy FILENAME as new
new_filename
=
"./test_tipc/benchmark_train.txt"
cmd
=
`
yes
|cp
$FILENAME
$new_filename
`
FILENAME
=
$new_filename
# MODE must be one of ['benchmark_train']
MODE
=
$2
PARAMS
=
$3
to_static
=
""
# parse "to_static" options and modify trainer into "to_static_trainer"
if
[[
$PARAMS
=
~
"dynamicTostatic"
]]
;
then
to_static
=
"d2sT_"
sed
-i
's/trainer:norm_train/trainer:to_static_train/g'
$FILENAME
# clear PARAM contents
if
[
$PARAMS
=
"to_static"
]
;
then
PARAMS
=
""
fi
fi
# bash test_tipc/benchmark_train.sh test_tipc/configs/det_mv3_db_v2_0/train_benchmark.txt benchmark_train dynamic_bs8_fp32_DP_N1C8
# bash test_tipc/benchmark_train.sh test_tipc/configs/det_mv3_db_v2_0/train_benchmark.txt benchmark_train dynamicTostatic_bs8_fp32_DP_N1C8
# bash test_tipc/benchmark_train.sh test_tipc/configs/det_mv3_db_v2_0/train_benchmark.txt benchmark_train dynamic_bs8_null_DP_N1C1
IFS
=
$'
\n
'
# parser params from train_benchmark.txt
dataline
=
`
cat
$FILENAME
`
# parser params
IFS
=
$'
\n
'
lines
=(
${
dataline
}
)
model_name
=
$(
func_parser_value
"
${
lines
[1]
}
"
)
python_name
=
$(
func_parser_value
"
${
lines
[2]
}
"
)
# set env
python
=
${
python_name
}
export
str_tmp
=
$(
echo
`
pip list|grep paddlepaddle-gpu|awk
-F
' '
'{print $2}'
`
)
export
frame_version
=
${
str_tmp
%%.post*
}
export
frame_commit
=
$(
echo
`
${
python
}
-c
"import paddle;print(paddle.version.commit)"
`
)
# 获取benchmark_params所在的行数
line_num
=
`
grep
-n
-w
"train_benchmark_params"
$FILENAME
|
cut
-d
":"
-f
1
`
# for train log parser
batch_size
=
$(
func_parser_value
"
${
lines
[line_num]
}
"
)
line_num
=
`
expr
$line_num
+ 1
`
fp_items
=
$(
func_parser_value
"
${
lines
[line_num]
}
"
)
line_num
=
`
expr
$line_num
+ 1
`
epoch
=
$(
func_parser_value
"
${
lines
[line_num]
}
"
)
line_num
=
`
expr
$line_num
+ 1
`
profile_option_key
=
$(
func_parser_key
"
${
lines
[line_num]
}
"
)
profile_option_params
=
$(
func_parser_value
"
${
lines
[line_num]
}
"
)
profile_option
=
"
${
profile_option_key
}
:
${
profile_option_params
}
"
line_num
=
`
expr
$line_num
+ 1
`
flags_value
=
$(
func_parser_value
"
${
lines
[line_num]
}
"
)
# set flags
IFS
=
";"
flags_list
=(
${
flags_value
}
)
for
_flag
in
${
flags_list
[*]
}
;
do
cmd
=
"export
${
_flag
}
"
eval
$cmd
done
# set log_name
repo_name
=
$(
get_repo_name
)
SAVE_LOG
=
${
BENCHMARK_LOG_DIR
:-
$(
pwd
)
}
# */benchmark_log
mkdir
-p
"
${
SAVE_LOG
}
/benchmark_log/"
status_log
=
"
${
SAVE_LOG
}
/benchmark_log/results.log"
# The number of lines in which train params can be replaced.
line_python
=
3
line_gpuid
=
4
line_precision
=
6
line_epoch
=
7
line_batchsize
=
9
line_profile
=
13
line_eval_py
=
24
line_export_py
=
30
func_sed_params
"
$FILENAME
"
"
${
line_eval_py
}
"
"null"
func_sed_params
"
$FILENAME
"
"
${
line_export_py
}
"
"null"
func_sed_params
"
$FILENAME
"
"
${
line_python
}
"
"
$python
"
# if params
if
[
!
-n
"
$PARAMS
"
]
;
then
# PARAMS input is not a word.
IFS
=
"|"
batch_size_list
=(
${
batch_size
}
)
fp_items_list
=(
${
fp_items
}
)
device_num_list
=(
N1C4
)
run_mode
=
"DP"
elif
[[
${
PARAMS
}
=
"dynamicTostatic"
]]
;
then
IFS
=
"|"
model_type
=
$PARAMS
batch_size_list
=(
${
batch_size
}
)
fp_items_list
=(
${
fp_items
}
)
device_num_list
=(
N1C4
)
run_mode
=
"DP"
else
# parser params from input: modeltype_bs${bs_item}_${fp_item}_${run_mode}_${device_num}
IFS
=
"_"
params_list
=(
${
PARAMS
}
)
model_type
=
${
params_list
[0]
}
batch_size
=
${
params_list
[1]
}
batch_size
=
`
echo
${
batch_size
}
|
tr
-cd
"[0-9]"
`
precision
=
${
params_list
[2]
}
run_mode
=
${
params_list
[3]
}
device_num
=
${
params_list
[4]
}
IFS
=
";"
if
[
${
precision
}
=
"fp16"
]
;
then
precision
=
"amp"
fi
epoch
=
$(
set_dynamic_epoch
$device_num
$epoch
)
fp_items_list
=(
$precision
)
batch_size_list
=(
$batch_size
)
device_num_list
=(
$device_num
)
fi
IFS
=
"|"
for
batch_size
in
${
batch_size_list
[*]
}
;
do
for
train_precision
in
${
fp_items_list
[*]
}
;
do
for
device_num
in
${
device_num_list
[*]
}
;
do
# sed batchsize and precision
if
[
${
train_precision
}
=
"amp"
]
;
then
precision
=
"fp16"
else
precision
=
"fp32"
fi
func_sed_params
"
$FILENAME
"
"
${
line_precision
}
"
"
$train_precision
"
func_sed_params
"
$FILENAME
"
"
${
line_batchsize
}
"
"
$MODE
=
$batch_size
"
func_sed_params
"
$FILENAME
"
"
${
line_epoch
}
"
"
$MODE
=
$epoch
"
gpu_id
=
$(
set_gpu_id
$device_num
)
if
[
${#
gpu_id
}
-le
1
]
;
then
log_path
=
"
$SAVE_LOG
/profiling_log"
mkdir
-p
$log_path
log_name
=
"
${
repo_name
}
_
${
model_name
}
_bs
${
batch_size
}
_
${
precision
}
_
${
run_mode
}
_
${
device_num
}
_
${
to_static
}
profiling"
func_sed_params
"
$FILENAME
"
"
${
line_gpuid
}
"
"0"
# sed used gpu_id
# set profile_option params
tmp
=
`
sed
-i
"
${
line_profile
}
s/.*/
${
profile_option
}
/"
"
${
FILENAME
}
"
`
# run test_train_inference_python.sh
cmd
=
"bash test_tipc/test_train_inference_python.sh
${
FILENAME
}
benchmark_train >
${
log_path
}
/
${
log_name
}
2>&1 "
echo
$cmd
eval
$cmd
eval
"cat
${
log_path
}
/
${
log_name
}
"
# without profile
log_path
=
"
$SAVE_LOG
/train_log"
speed_log_path
=
"
$SAVE_LOG
/index"
mkdir
-p
$log_path
mkdir
-p
$speed_log_path
log_name
=
"
${
repo_name
}
_
${
model_name
}
_bs
${
batch_size
}
_
${
precision
}
_
${
run_mode
}
_
${
device_num
}
_
${
to_static
}
log"
speed_log_name
=
"
${
repo_name
}
_
${
model_name
}
_bs
${
batch_size
}
_
${
precision
}
_
${
run_mode
}
_
${
device_num
}
_
${
to_static
}
speed"
func_sed_params
"
$FILENAME
"
"
${
line_profile
}
"
"null"
# sed profile_id as null
cmd
=
"bash test_tipc/test_train_inference_python.sh
${
FILENAME
}
benchmark_train >
${
log_path
}
/
${
log_name
}
2>&1 "
echo
$cmd
job_bt
=
`
date
'+%Y%m%d%H%M%S'
`
eval
$cmd
job_et
=
`
date
'+%Y%m%d%H%M%S'
`
export
model_run_time
=
$((${
job_et
}
-
${
job_bt
}))
eval
"cat
${
log_path
}
/
${
log_name
}
"
# parser log
_model_name
=
"
${
model_name
}
_bs
${
batch_size
}
_
${
precision
}
_
${
run_mode
}
"
cmd
=
"
${
python
}
${
BENCHMARK_ROOT
}
/scripts/analysis.py --filename
${
log_path
}
/
${
log_name
}
\
--speed_log_file '
${
speed_log_path
}
/
${
speed_log_name
}
'
\
--model_name
${
_model_name
}
\
--base_batch_size
${
batch_size
}
\
--run_mode
${
run_mode
}
\
--fp_item
${
precision
}
\
--keyword ips:
\
--skip_steps 2
\
--device_num
${
device_num
}
\
--speed_unit samples/s
\
--convergence_key loss: "
echo
$cmd
eval
$cmd
last_status
=
${
PIPESTATUS
[0]
}
status_check
$last_status
"
${
cmd
}
"
"
${
status_log
}
"
else
IFS
=
";"
unset_env
=
`
unset
CUDA_VISIBLE_DEVICES
`
log_path
=
"
$SAVE_LOG
/train_log"
speed_log_path
=
"
$SAVE_LOG
/index"
mkdir
-p
$log_path
mkdir
-p
$speed_log_path
log_name
=
"
${
repo_name
}
_
${
model_name
}
_bs
${
batch_size
}
_
${
precision
}
_
${
run_mode
}
_
${
device_num
}
_
${
to_static
}
log"
speed_log_name
=
"
${
repo_name
}
_
${
model_name
}
_bs
${
batch_size
}
_
${
precision
}
_
${
run_mode
}
_
${
device_num
}
_
${
to_static
}
speed"
func_sed_params
"
$FILENAME
"
"
${
line_gpuid
}
"
"
$gpu_id
"
# sed used gpu_id
func_sed_params
"
$FILENAME
"
"
${
line_profile
}
"
"null"
# sed --profile_option as null
cmd
=
"bash test_tipc/test_train_inference_python.sh
${
FILENAME
}
benchmark_train >
${
log_path
}
/
${
log_name
}
2>&1 "
echo
$cmd
job_bt
=
`
date
'+%Y%m%d%H%M%S'
`
eval
$cmd
job_et
=
`
date
'+%Y%m%d%H%M%S'
`
export
model_run_time
=
$((${
job_et
}
-
${
job_bt
}))
eval
"cat
${
log_path
}
/
${
log_name
}
"
# parser log
_model_name
=
"
${
model_name
}
_bs
${
batch_size
}
_
${
precision
}
_
${
run_mode
}
"
cmd
=
"
${
python
}
${
BENCHMARK_ROOT
}
/scripts/analysis.py --filename
${
log_path
}
/
${
log_name
}
\
--speed_log_file '
${
speed_log_path
}
/
${
speed_log_name
}
'
\
--model_name
${
_model_name
}
\
--base_batch_size
${
batch_size
}
\
--run_mode
${
run_mode
}
\
--fp_item
${
precision
}
\
--keyword ips:
\
--skip_steps 2
\
--device_num
${
device_num
}
\
--speed_unit images/s
\
--convergence_key loss: "
echo
$cmd
eval
$cmd
last_status
=
${
PIPESTATUS
[0]
}
status_check
$last_status
"
${
cmd
}
"
"
${
status_log
}
"
fi
done
done
done
benchmark/PaddleOCR_DBNet/test_tipc/common_func.sh
0 → 100644
View file @
4824c25b
#!/bin/bash
# Print the key part (before the first ":") of a "key:value" string ($1).
function func_parser_key(){
    strs=$1
    IFS=":"
    array=(${strs})
    tmp=${array[0]}
    echo ${tmp}
}
# Print the value part (after the first ":") of a "key:value" string ($1).
function func_parser_value(){
    strs=$1
    IFS=":"
    array=(${strs})
    tmp=${array[1]}
    echo ${tmp}
}
# Turn a key ($1) / value ($2) pair into a "key=value" CLI fragment.
# Prints a single space when either side is "null"/empty so the output can
# be spliced into a command line unconditionally.
function func_set_params(){
    key=$1
    value=$2
    if [ ${key}x = "null"x ];then
        echo " "
    elif [[ ${value} = "null" ]] || [[ ${value} = " " ]] || [ ${#value} -le 0 ];then
        echo " "
    else
        echo "${key}=${value}"
    fi
}
# From a line "key:modeA=v1|modeB=v2|..." ($1) print the value whose mode
# equals $2; ends with an (empty) echo of res in every case.
function func_parser_params(){
    strs=$1
    MODE=$2
    IFS=":"
    array=(${strs})
    key=${array[0]}
    tmp=${array[1]}
    IFS="|"
    res=""
    for _params in ${tmp[*]}; do
        IFS="="
        array=(${_params})
        mode=${array[0]}
        value=${array[1]}
        if [[ ${mode} = ${MODE} ]]; then
            IFS="|"
            #echo $(func_set_params "${mode}" "${value}")
            echo $value
            break
        fi
        IFS="|"
    done
    echo ${res}
}
# Append a colored success/failure line for a finished command to a run log
# (and echo it to stdout via tee).
# $1 - exit code of the command; $2 - the command string; $3 - log file to
# append to; $4 - model name; $5 - path of the command's own log file.
function status_check(){
    local last_status=$1   # the exit code
    local run_command=$2
    local run_log=$3
    local model_name=$4
    local log_path=$5
    # "${run_log}" is now quoted so log paths containing spaces still work;
    # the message strings themselves are unchanged.
    if [ $last_status -eq 0 ]; then
        echo -e "\033[33m Run successfully with command - ${model_name} - ${run_command} - ${log_path} \033[0m" | tee -a "${run_log}"
    else
        echo -e "\033[33m Run failed with command - ${model_name} - ${run_command} - ${log_path} \033[0m" | tee -a "${run_log}"
    fi
}
\ No newline at end of file
benchmark/PaddleOCR_DBNet/test_tipc/configs/det_res50_db/train_infer_python.txt
0 → 100644
View file @
4824c25b
===========================train_params===========================
model_name:det_res50_db
python:python
gpu_list:0|0,1
trainer.use_gpu:True|True
amp:null
trainer.epochs:lite_train_lite_infer=1|whole_train_whole_infer=300
trainer.output_dir:./output/
dataset.train.loader.batch_size:lite_train_lite_infer=8|whole_train_whole_infer=8
trainer.finetune_checkpoint:null
train_model_name:checkpoint/model_latest.pth
train_infer_img_dir:imgs/paper/db.jpg
null:null
##
trainer:norm_train
norm_train:tools/train.py --config_file config/icdar2015_resnet50_FPN_DBhead_polyLR.yaml -o trainer.log_iter=1 trainer.enable_eval=False dataset.train.loader.shuffle=false arch.backbone.pretrained=False
quant_export:null
fpgm_export:null
distill_train:null
null:null
null:null
##
===========================eval_params===========================
eval:null
null:null
##
===========================infer_params===========================
trainer.output_dir:./output/
trainer.resume_checkpoint:
norm_export:tools/export_model.py --config_file config/icdar2015_resnet50_FPN_DBhead_polyLR.yaml -o
quant_export:null
fpgm_export:null
distill_export:null
export1:null
export2:null
##
train_model:./inference/det_r50_vd_db_v2.0_train/best_accuracy
infer_export:tools/export_model.py --config_file config/icdar2015_resnet50_FPN_DBhead_polyLR.yaml -o
infer_quant:False
inference:tools/infer.py
--use_gpu:True|False
--enable_mkldnn:False
--cpu_threads:6
--batch_size:1
--use_tensorrt:False
--precision:fp32
--model_dir:
--img_path:imgs/paper/db.jpg
--save_log_path:null
--benchmark:True
null:null
===========================infer_benchmark_params==========================
random_infer_input:[{float32,[3,640,640]}];[{float32,[3,960,960]}]
===========================train_benchmark_params==========================
batch_size:8
fp_items:fp32|fp16
epoch:2
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
===========================to_static_train_benchmark_params===========================
to_static_train:trainer.to_static=true
benchmark/PaddleOCR_DBNet/test_tipc/prepare.sh
0 → 100644
View file @
4824c25b
#!/bin/bash
# Prepare the environment for TIPC tests: install python requirements and,
# for the det_res50_db model, download the pretrained ResNet50 backbone and
# the benchmark dataset.
# Usage: bash test_tipc/prepare.sh <config_file> <MODE>
source test_tipc/common_func.sh

FILENAME=$1
# MODE be one of ['lite_train_lite_infer' 'lite_train_whole_infer' 'whole_train_whole_infer',
#                 'whole_infer', 'klquant_whole_infer',
#                 'cpp_infer', 'serving_infer']
MODE=$2

dataline=$(cat ${FILENAME})

# parser params: split the config file into one array entry per line.
IFS=$'\n'
lines=(${dataline})

# The training params (lines[] is 0-based; line 1 = "model_name:...",
# line 14 = "trainer:...").
model_name=$(func_parser_value "${lines[1]}")
trainer_list=$(func_parser_value "${lines[14]}")

# Shared setup used by both supported modes below: pip-install requirements
# and fetch model weights / dataset for det_res50_db. Deduplicates the two
# previously byte-identical MODE branches.
function prepare_env(){
    python_name_list=$(func_parser_value "${lines[2]}")
    array=(${python_name_list})
    python_name=${array[0]}
    ${python_name} -m pip install -r requirement.txt
    if [[ ${model_name} =~ "det_res50_db" ]]; then
        # Ensure the torch hub cache directory exists before wget -O tries
        # to write the checkpoint into it.
        mkdir -p /root/.cache/torch/hub/checkpoints
        wget -nc https://paddle-wheel.bj.bcebos.com/benchmark/resnet50-19c8e357.pth -O /root/.cache/torch/hub/checkpoints/resnet50-19c8e357.pth
        # Download the dataset and unpack it.
        rm -rf datasets
        wget -nc https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/benchmark_train/datasets.tar
        tar xf datasets.tar
    fi
}

if [ ${MODE} = "lite_train_lite_infer" ]; then
    prepare_env
elif [ ${MODE} = "benchmark_train" ]; then
    prepare_env
    # expand gt.txt 2 times (kept from the original script, still disabled)
    # cd ./train_data/icdar2015/text_localization
    # for i in `seq 2`;do cp train_icdar2015_label.txt dup$i.txt;done
    # cat dup* > train_icdar2015_label.txt && rm -rf dup*
    # cd ../../../
fi
\ No newline at end of file
benchmark/PaddleOCR_DBNet/test_tipc/test_train_inference_python.sh
0 → 100644
View file @
4824c25b
#!/bin/bash
# Driver script for TIPC train/eval/export/inference tests.
# Usage: bash test_tipc/test_train_inference_python.sh <config_file> <MODE> [GPUID]
source test_tipc/common_func.sh

FILENAME=$1
# MODE be one of ['lite_train_lite_infer' 'lite_train_whole_infer' 'whole_train_whole_infer', 'whole_infer']
MODE=$2

dataline=$(awk 'NR>=1{print}'  $FILENAME)

# parser params: split the whole config file into one array entry per line.
# lines[] is 0-based, so lines[N] is file line N+1.
IFS=$'\n'
lines=(${dataline})

# The training params (see configs/det_res50_db/train_infer_python.txt for
# the expected line layout).
model_name=$(func_parser_value "${lines[1]}")
python=$(func_parser_value "${lines[2]}")
gpu_list=$(func_parser_value "${lines[3]}")
train_use_gpu_key=$(func_parser_key "${lines[4]}")
train_use_gpu_value=$(func_parser_value "${lines[4]}")
autocast_list=$(func_parser_value "${lines[5]}")
autocast_key=$(func_parser_key "${lines[5]}")
epoch_key=$(func_parser_key "${lines[6]}")
# func_parser_params picks the MODE-specific value from "mode=value|..." lists.
epoch_num=$(func_parser_params "${lines[6]}" "${MODE}")
save_model_key=$(func_parser_key "${lines[7]}")
train_batch_key=$(func_parser_key "${lines[8]}")
train_batch_value=$(func_parser_params "${lines[8]}" "${MODE}")
pretrain_model_key=$(func_parser_key "${lines[9]}")
pretrain_model_value=$(func_parser_value "${lines[9]}")
train_model_name=$(func_parser_value "${lines[10]}")
train_infer_img_dir=$(func_parser_value "${lines[11]}")
train_param_key1=$(func_parser_key "${lines[12]}")
train_param_value1=$(func_parser_value "${lines[12]}")
# Trainer variants (normal / pact-quant / fpgm / distill / extra slots).
trainer_list=$(func_parser_value "${lines[14]}")
trainer_norm=$(func_parser_key "${lines[15]}")
norm_trainer=$(func_parser_value "${lines[15]}")
pact_key=$(func_parser_key "${lines[16]}")
pact_trainer=$(func_parser_value "${lines[16]}")
fpgm_key=$(func_parser_key "${lines[17]}")
fpgm_trainer=$(func_parser_value "${lines[17]}")
distill_key=$(func_parser_key "${lines[18]}")
distill_trainer=$(func_parser_value "${lines[18]}")
trainer_key1=$(func_parser_key "${lines[19]}")
trainer_value1=$(func_parser_value "${lines[19]}")
trainer_key2=$(func_parser_key "${lines[20]}")
trainer_value2=$(func_parser_value "${lines[20]}")
# Eval params.
eval_py=$(func_parser_value "${lines[23]}")
eval_key1=$(func_parser_key "${lines[24]}")
eval_value1=$(func_parser_value "${lines[24]}")
# Export params.
save_infer_key=$(func_parser_key "${lines[27]}")
export_weight=$(func_parser_key "${lines[28]}")
norm_export=$(func_parser_value "${lines[29]}")
pact_export=$(func_parser_value "${lines[30]}")
fpgm_export=$(func_parser_value "${lines[31]}")
distill_export=$(func_parser_value "${lines[32]}")
export_key1=$(func_parser_key "${lines[33]}")
export_value1=$(func_parser_value "${lines[33]}")
export_key2=$(func_parser_key "${lines[34]}")
export_value2=$(func_parser_value "${lines[34]}")
inference_dir=$(func_parser_value "${lines[35]}")

# parser inference model
infer_model_dir_list=$(func_parser_value "${lines[36]}")
infer_export_list=$(func_parser_value "${lines[37]}")
infer_is_quant=$(func_parser_value "${lines[38]}")
# parser inference
inference_py=$(func_parser_value "${lines[39]}")
use_gpu_key=$(func_parser_key "${lines[40]}")
use_gpu_list=$(func_parser_value "${lines[40]}")
use_mkldnn_key=$(func_parser_key "${lines[41]}")
use_mkldnn_list=$(func_parser_value "${lines[41]}")
cpu_threads_key=$(func_parser_key "${lines[42]}")
cpu_threads_list=$(func_parser_value "${lines[42]}")
batch_size_key=$(func_parser_key "${lines[43]}")
batch_size_list=$(func_parser_value "${lines[43]}")
use_trt_key=$(func_parser_key "${lines[44]}")
use_trt_list=$(func_parser_value "${lines[44]}")
precision_key=$(func_parser_key "${lines[45]}")
precision_list=$(func_parser_value "${lines[45]}")
infer_model_key=$(func_parser_key "${lines[46]}")
image_dir_key=$(func_parser_key "${lines[47]}")
infer_img_dir=$(func_parser_value "${lines[47]}")
save_log_key=$(func_parser_key "${lines[48]}")
benchmark_key=$(func_parser_key "${lines[49]}")
benchmark_value=$(func_parser_value "${lines[49]}")
infer_key1=$(func_parser_key "${lines[50]}")
infer_value1=$(func_parser_value "${lines[50]}")

LOG_PATH="./test_tipc/output/${model_name}/${MODE}"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results_python.log"

# grep -n reports a 1-based file line number; lines[] is 0-based, so
# lines[line_num] is the line AFTER the section header — i.e. the
# "to_static_train:..." entry itself. This offset is intentional.
line_num=`grep -n -w "to_static_train_benchmark_params" $FILENAME | cut -d ":" -f 1`
to_static_key=$(func_parser_key "${lines[line_num]}")
to_static_trainer=$(func_parser_value "${lines[line_num]}")
# Run the python inference script across the full grid of configured
# settings (CPU: mkldnn x threads x batch_size x precision; GPU: tensorrt x
# precision x batch_size), logging each combination to its own file and
# recording pass/fail via status_check.
# $1 - python executable; $2 - inference script; $3 - model directory;
# $4 - log directory; $5 - image dir; $6 - "True" for quantized models;
# $7 - gpu id string (used only in the log file name).
# Reads the *_key/*_list globals parsed at the top of this script.
function func_inference(){
    IFS='|'
    _python=$1
    _script=$2
    _model_dir=$3
    _log_path=$4
    _img_dir=$5
    _flag_quant=$6
    _gpu=$7
    # inference
    for use_gpu in ${use_gpu_list[*]}; do
        if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then
            # ---- CPU inference grid ----
            for use_mkldnn in ${use_mkldnn_list[*]}; do
                # if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
                #     continue
                # fi
                for threads in ${cpu_threads_list[*]}; do
                    for batch_size in ${batch_size_list[*]}; do
                        for precision in ${precision_list[*]}; do
                            # skip when fp16 is enabled but mkldnn is disabled
                            if [ ${use_mkldnn} = "False" ] && [ ${precision} = "fp16" ]; then
                                continue
                            fi
                            # skip when running a quant model but precision is not int8
                            if [ ${_flag_quant} = "True" ] && [ ${precision} != "int8" ]; then
                                continue
                            fi
                            set_precision=$(func_set_params "${precision_key}" "${precision}")
                            _save_log_path="${_log_path}/python_infer_cpu_gpus_${_gpu}_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
                            set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
                            set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
                            set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
                            set_mkldnn=$(func_set_params "${use_mkldnn_key}" "${use_mkldnn}")
                            set_cpu_threads=$(func_set_params "${cpu_threads_key}" "${threads}")
                            set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}")
                            # NOTE(review): save_log_value is never assigned in this
                            # script, so set_infer_params0 is always " " — confirm
                            # whether a save_log value was meant to be parsed.
                            set_infer_params0=$(func_set_params "${save_log_key}" "${save_log_value}")
                            set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}")
                            command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_params0} ${set_infer_data} ${set_benchmark} ${set_precision} ${set_infer_params1} > ${_save_log_path} 2>&1 "
                            eval $command
                            last_status=${PIPESTATUS[0]}
                            eval "cat ${_save_log_path}"
                            status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
                        done
                    done
                done
            done
        elif [ ${use_gpu} = "True" ] || [ ${use_gpu} = "gpu" ]; then
            # ---- GPU inference grid ----
            for use_trt in ${use_trt_list[*]}; do
                for precision in ${precision_list[*]}; do
                    # skip int8 precision for non-quantized models
                    if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then
                        continue
                    fi
                    # fp16/int8 require tensorrt to be enabled
                    if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then
                        continue
                    fi
                    # quant models need tensorrt for int8
                    if [[ ${use_trt} = "False" && ${precision} =~ "int8" ]] && [ ${_flag_quant} = "True" ]; then
                        continue
                    fi
                    for batch_size in ${batch_size_list[*]}; do
                        _save_log_path="${_log_path}/python_infer_gpu_gpus_${_gpu}_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
                        set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
                        set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
                        set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
                        set_tensorrt=$(func_set_params "${use_trt_key}" "${use_trt}")
                        set_precision=$(func_set_params "${precision_key}" "${precision}")
                        set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}")
                        # NOTE(review): see save_log_value note in the CPU branch.
                        set_infer_params0=$(func_set_params "${save_log_key}" "${save_log_value}")
                        set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}")
                        command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} ${set_infer_params0} > ${_save_log_path} 2>&1 "
                        eval $command
                        last_status=${PIPESTATUS[0]}
                        eval "cat ${_save_log_path}"
                        status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
                    done
                done
            done
        else
            echo "Does not support hardware other than CPU and GPU Currently!"
        fi
    done
}
# Main dispatch: "whole_infer" exports pre-trained models and runs inference
# only; every other MODE runs the full train -> eval -> export -> infer loop
# over the configured gpu / autocast / trainer grid.
if [ ${MODE} = "whole_infer" ]; then
    GPUID=$3
    if [ ${#GPUID} -le 0 ];then
        env=" "
    else
        env="export CUDA_VISIBLE_DEVICES=${GPUID}"
    fi
    # set CUDA_VISIBLE_DEVICES
    eval $env
    export Count=0
    gpu=0
    IFS="|"
    infer_run_exports=(${infer_export_list})
    infer_quant_flag=(${infer_is_quant})
    for infer_model in ${infer_model_dir_list[*]}; do
        # run export (skipped when the config marks the export step "null")
        if [ ${infer_run_exports[Count]} != "null" ];then
            save_infer_dir="${infer_model}"
            set_export_weight=$(func_set_params "${export_weight}" "${infer_model}")
            set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_dir}")
            export_log_path="${LOG_PATH}_export_${Count}.log"
            export_cmd="${python} ${infer_run_exports[Count]} ${set_export_weight} ${set_save_infer_key} > ${export_log_path} 2>&1 "
            echo ${infer_run_exports[Count]}
            echo $export_cmd
            eval $export_cmd
            status_export=$?
            status_check $status_export "${export_cmd}" "${status_log}" "${model_name}" "${export_log_path}"
        else
            save_infer_dir=${infer_model}
        fi
        #run inference
        is_quant=${infer_quant_flag[Count]}
        func_inference "${python}" "${inference_py}" "${save_infer_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant} "${gpu}"
        Count=$(($Count + 1))
    done
else
    # ---- training modes ----
    IFS="|"
    export Count=0
    USE_GPU_KEY=(${train_use_gpu_value})
    for gpu in ${gpu_list[*]}; do
        train_use_gpu=${USE_GPU_KEY[Count]}
        Count=$(($Count + 1))
        ips=""
        # Decode the gpu entry: "-1" = cpu; "0" = single gpu; "0,1" = multi
        # gpu on one machine; "ip1,ip2;0,1" = multi-machine (ips;gpus).
        if [ ${gpu} = "-1" ];then
            env=""
        elif [ ${#gpu} -le 1 ];then
            env="export CUDA_VISIBLE_DEVICES=${gpu}"
        elif [ ${#gpu} -le 15 ];then
            IFS=","
            array=(${gpu})
            env="export CUDA_VISIBLE_DEVICES=${array[0]}"
            IFS="|"
        else
            IFS=";"
            array=(${gpu})
            ips=${array[0]}
            gpu=${array[1]}
            IFS="|"
            env=" "
        fi
        for autocast in ${autocast_list[*]}; do
            if [ ${autocast} = "amp" ]; then
                set_amp_config="amp.scale_loss=1024.0 amp.use_dynamic_loss_scaling=True amp.amp_level=O2"
            else
                set_amp_config="amp=None"
            fi
            for trainer in ${trainer_list[*]}; do
                # Pick the train/export commands for this trainer variant.
                flag_quant=False
                if [ ${trainer} = ${pact_key} ]; then
                    run_train=${pact_trainer}
                    run_export=${pact_export}
                    flag_quant=True
                elif [ ${trainer} = "${fpgm_key}" ]; then
                    run_train=${fpgm_trainer}
                    run_export=${fpgm_export}
                elif [ ${trainer} = "${distill_key}" ]; then
                    run_train=${distill_trainer}
                    run_export=${distill_export}
                elif [ ${trainer} = "${to_static_key}" ]; then
                    # to_static reuses the normal trainer plus the to_static flag.
                    run_train="${norm_trainer} ${to_static_trainer}"
                    run_export=${norm_export}
                elif [[ ${trainer} = ${trainer_key2} ]]; then
                    run_train=${trainer_value2}
                    run_export=${export_value2}
                else
                    run_train=${norm_trainer}
                    run_export=${norm_export}
                fi

                if [ ${run_train} = "null" ]; then
                    continue
                fi
                set_epoch=$(func_set_params "${epoch_key}" "${epoch_num}")
                set_pretrain=$(func_set_params "${pretrain_model_key}" "${pretrain_model_value}")
                set_batchsize=$(func_set_params "${train_batch_key}" "${train_batch_value}")
                set_train_params1=$(func_set_params "${train_param_key1}" "${train_param_value1}")
                set_use_gpu=$(func_set_params "${train_use_gpu_key}" "${train_use_gpu}")
                # if length of ips >= 15, then it is seen as multi-machine
                # 15 is the min length of ips info for multi-machine: 0.0.0.0,0.0.0.0
                if [ ${#ips} -le 15 ];then
                    save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}"
                    nodes=1
                else
                    IFS=","
                    ips_array=(${ips})
                    IFS="|"
                    nodes=${#ips_array[@]}
                    save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}_nodes_${nodes}"
                fi

                set_save_model=$(func_set_params "${save_model_key}" "${save_log}")
                if [ ${#gpu} -le 2 ];then  # train with cpu or single gpu
                    cmd="${python} ${run_train} ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_batchsize} ${set_amp_config} ${set_train_params1}"
                elif [ ${#ips} -le 15 ];then  # train with multi-gpu
                    cmd="${python} -m paddle.distributed.launch --gpus=${gpu} ${run_train} ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_batchsize} ${set_amp_config} ${set_train_params1}"
                else  # train with multi-machine
                    cmd="${python} -m paddle.distributed.launch --ips=${ips} --gpus=${gpu} ${run_train} ${set_use_gpu} ${set_save_model} ${set_pretrain} ${set_epoch} ${set_batchsize} ${set_amp_config} ${set_train_params1}"
                fi
                # run train
                eval $cmd
                eval "cat ${save_log}/train.log >> ${save_log}.log"
                status_check $? "${cmd}" "${status_log}" "${model_name}" "${save_log}.log"

                set_eval_pretrain=$(func_set_params "${pretrain_model_key}" "${save_log}/${train_model_name}")

                # run eval
                if [ ${eval_py} != "null" ]; then
                    eval ${env}
                    set_eval_params1=$(func_set_params "${eval_key1}" "${eval_value1}")
                    eval_log_path="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}_nodes_${nodes}_eval.log"
                    eval_cmd="${python} ${eval_py} ${set_eval_pretrain} ${set_use_gpu} ${set_eval_params1} > ${eval_log_path} 2>&1 "
                    eval $eval_cmd
                    status_check $? "${eval_cmd}" "${status_log}" "${model_name}" "${eval_log_path}"
                fi
                # run export model
                if [ ${run_export} != "null" ]; then
                    # run export model
                    save_infer_path="${save_log}"
                    export_log_path="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}_nodes_${nodes}_export.log"
                    set_export_weight=$(func_set_params "${export_weight}" "${save_log}/${train_model_name}")
                    set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_path}")
                    export_cmd="${python} ${run_export} ${set_export_weight} ${set_save_infer_key} > ${export_log_path} 2>&1 "
                    eval $export_cmd
                    status_check $? "${export_cmd}" "${status_log}" "${model_name}" "${export_log_path}"

                    #run inference
                    eval $env
                    save_infer_path="${save_log}"
                    if [[ ${inference_dir} != "null" ]] && [[ ${inference_dir} != '##' ]]; then
                        infer_model_dir="${save_infer_path}/${inference_dir}"
                    else
                        infer_model_dir=${save_infer_path}
                    fi
                    func_inference "${python}" "${inference_py}" "${infer_model_dir}" "${LOG_PATH}" "${train_infer_img_dir}" "${flag_quant}" "${gpu}"

                    eval "unset CUDA_VISIBLE_DEVICES"
                fi
            done  # done with:    for trainer in ${trainer_list[*]}; do
        done      # done with:    for autocast in ${autocast_list[*]}; do
    done          # done with:    for gpu in ${gpu_list[*]}; do
fi  # end if [ ${MODE} = "infer" ]; then
\ No newline at end of file
Prev
1
…
7
8
9
10
11
12
13
14
15
…
20
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment