Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
wangsen
paddle_dbnet
Commits
31d48243
Commit
31d48243
authored
Nov 17, 2020
by
WenmuZhou
Browse files
添加方向分类器
parent
c8f7a683
Changes
2
Show whitespace changes
Inline
Side-by-side
Showing
2 changed files
with
189 additions
and
0 deletions
+189
-0
deploy/cpp_infer/include/ocr_cls.h
deploy/cpp_infer/include/ocr_cls.h
+81
-0
deploy/cpp_infer/src/ocr_cls.cpp
deploy/cpp_infer/src/ocr_cls.cpp
+108
-0
No files found.
deploy/cpp_infer/include/ocr_cls.h
0 → 100644
View file @
31d48243
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "opencv2/core.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"
#include "paddle_api.h"
#include "paddle_inference_api.h"
#include <chrono>
#include <iomanip>
#include <iostream>
#include <ostream>
#include <vector>
#include <cstring>
#include <fstream>
#include <numeric>
#include <include/preprocess_op.h>
#include <include/utility.h>
namespace PaddleOCR {

// Text-orientation classifier backed by a Paddle inference model.
// Given a cropped text image, Run() predicts whether it is upside-down and
// flips it back when the prediction is confident enough.
class Classifier {
public:
  explicit Classifier(const std::string &model_dir, const bool &use_gpu,
                      const int &gpu_id, const int &gpu_mem,
                      const int &cpu_math_library_num_threads,
                      const bool &use_mkldnn, const bool &use_zero_copy_run,
                      const double &cls_thresh)
      : use_gpu_(use_gpu), gpu_id_(gpu_id), gpu_mem_(gpu_mem),
        cpu_math_library_num_threads_(cpu_math_library_num_threads),
        use_mkldnn_(use_mkldnn), use_zero_copy_run_(use_zero_copy_run),
        cls_thresh(cls_thresh) {
    LoadModel(model_dir);
  }

  // Load the Paddle inference model stored under model_dir.
  void LoadModel(const std::string &model_dir);

  // Classify the orientation of img; returns the (possibly 180-degree
  // rotated) copy of the input image.
  cv::Mat Run(cv::Mat &img);

private:
  std::shared_ptr<PaddlePredictor> predictor_;

  bool use_gpu_ = false;
  int gpu_id_ = 0;
  int gpu_mem_ = 4000;
  int cpu_math_library_num_threads_ = 4;
  bool use_mkldnn_ = false;
  bool use_zero_copy_run_ = false;
  // Minimum softmax score required before the image is actually rotated.
  double cls_thresh = 0.5;

  // Per-channel normalization applied before inference.
  std::vector<float> mean_ = {0.5f, 0.5f, 0.5f};
  std::vector<float> scale_ = {1 / 0.5f, 1 / 0.5f, 1 / 0.5f};
  bool is_scale_ = true;

  // pre-process operators
  ClsResizeImg resize_op_;
  Normalize normalize_op_;
  Permute permute_op_;
}; // class Classifier

} // namespace PaddleOCR
deploy/cpp_infer/src/ocr_cls.cpp
0 → 100644
View file @
31d48243
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <include/ocr_cls.h>
namespace
PaddleOCR
{
// Predict the orientation of a cropped text image.
//
// The input is resized/normalized to the classifier's expected shape, run
// through the Paddle predictor, and the arg-max class is taken from the
// softmax output. When the winning label denotes "rotated 180 degrees"
// (odd label) and its score exceeds cls_thresh, the original-resolution
// image is rotated back before being returned.
//
// Fixes vs. previous revision: removed unused locals `index`, `wh_ratio`
// and `label_out`; replaced the magic rotate code `1` with the named
// constant cv::ROTATE_180 (same value).
cv::Mat Classifier::Run(cv::Mat &img) {
  // Keep an untouched copy: the caller gets back the original-resolution
  // image, rotated only if predicted upside-down.
  cv::Mat src_img;
  img.copyTo(src_img);
  cv::Mat resize_img;

  // Input layout expected by the cls model: CHW = 3 x 48 x 192.
  std::vector<int> cls_image_shape = {3, 48, 192};

  this->resize_op_.Run(img, resize_img, cls_image_shape);

  this->normalize_op_.Run(&resize_img, this->mean_, this->scale_,
                          this->is_scale_);

  // NCHW float buffer for a batch of one image.
  std::vector<float> input(1 * 3 * resize_img.rows * resize_img.cols, 0.0f);

  this->permute_op_.Run(&resize_img, input.data());

  // Inference.
  if (this->use_zero_copy_run_) {
    auto input_names = this->predictor_->GetInputNames();
    auto input_t = this->predictor_->GetInputTensor(input_names[0]);
    input_t->Reshape({1, 3, resize_img.rows, resize_img.cols});
    input_t->copy_from_cpu(input.data());
    this->predictor_->ZeroCopyRun();
  } else {
    paddle::PaddleTensor input_t;
    input_t.shape = {1, 3, resize_img.rows, resize_img.cols};
    input_t.data =
        paddle::PaddleBuf(input.data(), input.size() * sizeof(float));
    input_t.dtype = PaddleDType::FLOAT32;
    std::vector<paddle::PaddleTensor> outputs;
    this->predictor_->Run({input_t}, &outputs, 1);
  }

  std::vector<float> softmax_out;

  // NOTE(review): the output is fetched through the zero-copy tensor API in
  // both branches above; confirm this is valid when use_zero_copy_run_ is
  // false (the `outputs` vector from Run() is currently discarded).
  auto output_names = this->predictor_->GetOutputNames();
  auto softmax_out_t = this->predictor_->GetOutputTensor(output_names[0]);
  auto softmax_shape_out = softmax_out_t->shape();

  int softmax_out_num =
      std::accumulate(softmax_shape_out.begin(), softmax_shape_out.end(), 1,
                      std::multiplies<int>());

  softmax_out.resize(softmax_out_num);
  softmax_out_t->copy_to_cpu(softmax_out.data());

  // Arg-max over the class scores.
  float score = 0;
  int label = 0;
  for (int i = 0; i < softmax_out_num; i++) {
    if (softmax_out[i] > score) {
      score = softmax_out[i];
      label = i;
    }
  }

  // Odd labels mean "rotated 180 degrees"; only act on confident
  // predictions.
  if (label % 2 == 1 && score > this->cls_thresh) {
    cv::rotate(src_img, src_img, cv::ROTATE_180);
  }
  return src_img;
}
void
Classifier
::
LoadModel
(
const
std
::
string
&
model_dir
)
{
AnalysisConfig
config
;
config
.
SetModel
(
model_dir
+
"/model"
,
model_dir
+
"/params"
);
if
(
this
->
use_gpu_
)
{
config
.
EnableUseGpu
(
this
->
gpu_mem_
,
this
->
gpu_id_
);
}
else
{
config
.
DisableGpu
();
if
(
this
->
use_mkldnn_
)
{
config
.
EnableMKLDNN
();
}
config
.
SetCpuMathLibraryNumThreads
(
this
->
cpu_math_library_num_threads_
);
}
// false for zero copy tensor
config
.
SwitchUseFeedFetchOps
(
!
this
->
use_zero_copy_run_
);
// true for multiple input
config
.
SwitchSpecifyInputNames
(
true
);
config
.
SwitchIrOptim
(
true
);
config
.
EnableMemoryOptim
();
config
.
DisableGlogInfo
();
this
->
predictor_
=
CreatePaddlePredictor
(
config
);
}
}
// namespace PaddleOCR
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment