Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
wangsen
paddle_dbnet
Commits
efb1b412
Commit
efb1b412
authored
Aug 20, 2021
by
MissPenguin
Browse files
mv log to .h file
parent
3f7f8c37
Changes
2
Hide whitespace changes
Inline
Side-by-side
Showing
2 changed files
with
95 additions
and
33 deletions
+95
-33
deploy/cpp_infer/include/autolog.h
deploy/cpp_infer/include/autolog.h
+83
-0
deploy/cpp_infer/src/main.cpp
deploy/cpp_infer/src/main.cpp
+12
-33
No files found.
deploy/cpp_infer/include/autolog.h
0 → 100644
View file @
efb1b412
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <stdlib.h>

#include <chrono>
#include <iomanip>
#include <iostream>
#include <numeric>
#include <ostream>
#include <string>
#include <utility>
#include <vector>

#include <glog/logging.h>
// Collects benchmark configuration and per-stage timing statistics and
// reports them through glog in the PaddleOCR auto-log format.
class AutoLogger {
public:
  // model_name      label printed in the "Model info" section
  // use_gpu         whether inference ran on GPU (selects runtime_device)
  // enable_tensorrt whether TensorRT was enabled
  // enable_mkldnn   whether MKL-DNN was enabled
  // cpu_threads     CPU math-library thread count
  // batch_size      inference batch size
  // input_shape     textual input-shape description (e.g. "dynamic")
  // model_precision precision string (e.g. "fp32")
  // time_info       accumulated times in ms; must hold at least 3 entries:
  //                 [0] preprocess, [1] inference, [2] postprocess
  // img_num         number of images processed (used to average the times)
  AutoLogger(std::string model_name,
             bool use_gpu,
             bool enable_tensorrt,
             bool enable_mkldnn,
             int cpu_threads,
             int batch_size,
             std::string input_shape,
             std::string model_precision,
             std::vector<double> time_info,
             int img_num)
      : model_name_(std::move(model_name)),
        use_gpu_(use_gpu),
        enable_tensorrt_(enable_tensorrt),
        enable_mkldnn_(enable_mkldnn),
        cpu_threads_(cpu_threads),
        batch_size_(batch_size),
        input_shape_(std::move(input_shape)),
        model_precision_(std::move(model_precision)),
        time_info_(std::move(time_info)),
        img_num_(img_num) {}

  // Emits the Config / Data / Model / Perf sections via LOG(INFO).
  void report() const {
    LOG(INFO) << "----------------------- Config info -----------------------";
    LOG(INFO) << "runtime_device: " << (use_gpu_ ? "gpu" : "cpu");
    LOG(INFO) << "ir_optim: " << "True";
    LOG(INFO) << "enable_memory_optim: " << "True";
    LOG(INFO) << "enable_tensorrt: " << enable_tensorrt_;
    LOG(INFO) << "enable_mkldnn: " << (enable_mkldnn_ ? "True" : "False");
    LOG(INFO) << "cpu_math_library_num_threads: " << cpu_threads_;
    LOG(INFO) << "----------------------- Data info -----------------------";
    LOG(INFO) << "batch_size: " << batch_size_;
    LOG(INFO) << "input_shape: " << input_shape_;
    LOG(INFO) << "data_num: " << img_num_;
    LOG(INFO) << "----------------------- Model info -----------------------";
    LOG(INFO) << "model_name: " << model_name_;
    LOG(INFO) << "precision: " << model_precision_;
    LOG(INFO) << "----------------------- Perf info ------------------------";
    // BUG FIX: the initial value must be 0.0 — with an int 0,
    // std::accumulate sums the double durations into an int and truncates.
    LOG(INFO) << "Total time spent(ms): "
              << std::accumulate(time_info_.begin(), time_info_.end(), 0.0);
    // Guard the per-image averages against division by zero when no image
    // was processed.
    const double denom = img_num_ > 0 ? static_cast<double>(img_num_) : 1.0;
    LOG(INFO) << "preprocess_time(ms): " << time_info_[0] / denom
              << ", inference_time(ms): " << time_info_[1] / denom
              << ", postprocess_time(ms): " << time_info_[2] / denom;
  }

private:
  std::string model_name_;
  bool use_gpu_ = false;
  bool enable_tensorrt_ = false;
  bool enable_mkldnn_ = true;
  int cpu_threads_ = 10;
  int batch_size_ = 1;
  std::string input_shape_ = "dynamic";
  std::string model_precision_ = "fp32";
  std::vector<double> time_info_;  // totals in ms: [pre, infer, post]
  int img_num_ = 0;
};
deploy/cpp_infer/src/main.cpp
View file @
efb1b412
...
@@ -35,6 +35,7 @@
...
@@ -35,6 +35,7 @@
#include <sys/stat.h>
#include <sys/stat.h>
#include <gflags/gflags.h>
#include <gflags/gflags.h>
#include <include/autolog.h>
// Command-line flags (gflags).
// NOTE(review): each DEFINE appears twice in this capture — an artifact of
// the side-by-side diff rendering; a compilable file must contain each once.
// Whether to run inference on GPU (false = CPU).
DEFINE_bool(use_gpu, false, "Infering with GPU or CPU.");
DEFINE_bool(use_gpu, false, "Infering with GPU or CPU.");
// Which GPU device id to use when use_gpu is set.
DEFINE_int32(gpu_id, 0, "Device id of GPU to execute.");
DEFINE_int32(gpu_id, 0, "Device id of GPU to execute.");
...
@@ -69,34 +70,6 @@ using namespace cv;
...
@@ -69,34 +70,6 @@ using namespace cv;
using
namespace
PaddleOCR
;
using
namespace
PaddleOCR
;
// Prints a benchmark report (Config / Data / Model / Perf sections) for one
// pipeline stage via glog. Device/precision fields come from global gflags.
//   model_name  label printed in the "Model info" section
//   batch_size  inference batch size
//   input_shape textual input-shape description (e.g. "dynamic")
//   time_info   accumulated times in ms; must hold at least 3 entries:
//               [0] preprocess, [1] inference, [2] postprocess
//   img_num     number of images processed (used to average the times)
void PrintBenchmarkLog(std::string model_name,
                       int batch_size,
                       std::string input_shape,
                       std::vector<double> time_info,
                       int img_num) {
  LOG(INFO) << "----------------------- Config info -----------------------";
  LOG(INFO) << "runtime_device: " << (FLAGS_use_gpu ? "gpu" : "cpu");
  LOG(INFO) << "ir_optim: " << "True";
  LOG(INFO) << "enable_memory_optim: " << "True";
  LOG(INFO) << "enable_tensorrt: " << FLAGS_use_tensorrt;
  LOG(INFO) << "enable_mkldnn: " << (FLAGS_enable_mkldnn ? "True" : "False");
  LOG(INFO) << "cpu_math_library_num_threads: " << FLAGS_cpu_threads;
  LOG(INFO) << "----------------------- Data info -----------------------";
  LOG(INFO) << "batch_size: " << batch_size;
  LOG(INFO) << "input_shape: " << input_shape;
  LOG(INFO) << "data_num: " << img_num;
  LOG(INFO) << "----------------------- Model info -----------------------";
  LOG(INFO) << "model_name: " << model_name;
  LOG(INFO) << "precision: " << FLAGS_precision;
  LOG(INFO) << "----------------------- Perf info ------------------------";
  // BUG FIX: initial value 0.0 (not int 0) so std::accumulate sums the
  // double durations without truncating them to an int.
  LOG(INFO) << "Total time spent(ms): "
            << std::accumulate(time_info.begin(), time_info.end(), 0.0);
  // Guard the per-image averages against division by zero.
  const double denom = img_num > 0 ? static_cast<double>(img_num) : 1.0;
  LOG(INFO) << "preprocess_time(ms): " << time_info[0] / denom
            << ", inference_time(ms): " << time_info[1] / denom
            << ", postprocess_time(ms): " << time_info[2] / denom;
}
static
bool
PathExists
(
const
std
::
string
&
path
){
static
bool
PathExists
(
const
std
::
string
&
path
){
#ifdef _WIN32
#ifdef _WIN32
struct
_stat
buffer
;
struct
_stat
buffer
;
...
@@ -136,7 +109,17 @@ int main_det(std::vector<cv::String> cv_all_img_names) {
...
@@ -136,7 +109,17 @@ int main_det(std::vector<cv::String> cv_all_img_names) {
}
}
if
(
FLAGS_benchmark
)
{
if
(
FLAGS_benchmark
)
{
PrintBenchmarkLog
(
"det"
,
1
,
"dynamic"
,
time_info
,
cv_all_img_names
.
size
());
AutoLogger
autolog
(
"ocr_det"
,
FLAGS_use_gpu
,
FLAGS_use_tensorrt
,
FLAGS_enable_mkldnn
,
FLAGS_cpu_threads
,
1
,
"dynamic"
,
FLAGS_precision
,
time_info
,
cv_all_img_names
.
size
());
autolog
.
report
();
}
}
return
0
;
return
0
;
}
}
...
@@ -166,10 +149,6 @@ int main_rec(std::vector<cv::String> cv_all_img_names) {
...
@@ -166,10 +149,6 @@ int main_rec(std::vector<cv::String> cv_all_img_names) {
time_info
[
2
]
+=
rec_times
[
2
];
time_info
[
2
]
+=
rec_times
[
2
];
}
}
if
(
FLAGS_benchmark
)
{
PrintBenchmarkLog
(
"rec"
,
1
,
"dynamic"
,
time_info
,
cv_all_img_names
.
size
());
}
return
0
;
return
0
;
}
}
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment