wangsen / paddle_dbnet · Commit d79dd99b (unverified)
Authored Aug 19, 2020 by MissPenguin, committed by GitHub on Aug 19, 2020

Merge pull request #568 from wangjiawei04/pdserving_readme

pdserving add convert to serving model

Parents: d7cd666a, 0d8fe758
Showing 4 changed files with 34 additions and 7 deletions (+34 −7):

- deploy/pdserving/det_local_server.py (+4 −2)
- deploy/pdserving/ocr_local_server.py (+2 −1)
- deploy/pdserving/readme.md (+17 −0)
- deploy/pdserving/rec_local_server.py (+11 −4)
deploy/pdserving/det_local_server.py
@@ -23,7 +23,7 @@ from paddle_serving_app.reader import Div, Normalize, Transpose
 from paddle_serving_app.reader import DBPostProcess, FilterBoxes
 if sys.argv[1] == 'gpu':
     from paddle_serving_server_gpu.web_service import WebService
-elif sys.argv[1] == 'cpu'
+elif sys.argv[1] == 'cpu':
     from paddle_serving_server.web_service import WebService
 import time
 import re
@@ -67,11 +67,13 @@ class OCRService(WebService):
 ocr_service = OCRService(name="ocr")
 ocr_service.load_model_config("ocr_det_model")
+ocr_service.init_det()
 if sys.argv[1] == 'gpu':
     ocr_service.set_gpus("0")
     ocr_service.prepare_server(workdir="workdir", port=9292, device="gpu", gpuid=0)
     ocr_service.run_debugger_service(gpu=True)
 elif sys.argv[1] == 'cpu':
     ocr_service.prepare_server(workdir="workdir", port=9292)
     ocr_service.run_debugger_service()
-ocr_service.init_det()
-ocr_service.run_debugger_service()
 ocr_service.run_web_service()
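The det server above listens on port 9292 under the service name "ocr" (both taken from the script). As a rough, hypothetical client sketch that is not part of this commit, a request to the running service usually looks like the following; the /ocr/prediction route, the feed/fetch payload keys, the "res" fetch name, and the test_img.jpg filename are all assumptions to adapt to the actual service:

```python
# Hypothetical client sketch (not part of this commit).
# Start the server first, e.g.:  python det_local_server.py cpu
import base64
import requests

# test_img.jpg is a placeholder; use any local image.
with open("test_img.jpg", "rb") as f:
    image = base64.b64encode(f.read()).decode("utf8")

# "feed"/"fetch" follow the common paddle_serving web-service payload shape;
# "res" is a placeholder fetch key and may differ for this service.
payload = {"feed": [{"image": image}], "fetch": ["res"]}
resp = requests.post("http://127.0.0.1:9292/ocr/prediction", json=payload)
print(resp.json())
```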
deploy/pdserving/ocr_local_server.py
@@ -104,10 +104,11 @@ class OCRService(WebService):
 ocr_service = OCRService(name="ocr")
 ocr_service.load_model_config("ocr_rec_model")
-ocr_service.prepare_server(workdir="workdir", port=9292)
 ocr_service.init_det_debugger(det_model_config="ocr_det_model")
 if sys.argv[1] == 'gpu':
+    ocr_service.prepare_server(workdir="workdir", port=9292, device="gpu", gpuid=0)
     ocr_service.run_debugger_service(gpu=True)
 elif sys.argv[1] == 'cpu':
+    ocr_service.prepare_server(workdir="workdir", port=9292, device="cpu")
     ocr_service.run_debugger_service()
 ocr_service.run_web_service()
deploy/pdserving/readme.md
@@ -55,6 +55,23 @@ tar -xzvf ocr_det.tar.gz
```
Running the above command downloads the `db_crnn_mobile` model. If you want the larger `db_crnn_server` model instead, download and extract that inference model first, then refer to [How to convert a saved Paddle inference model into a model deployable with Paddle Serving](https://github.com/PaddlePaddle/Serving/blob/develop/doc/INFERENCE_TO_SERVING_CN.md).

Taking the `ch_rec_r34_vd_crnn` model as an example, the download link is:
```
wget --no-check-certificate https://paddleocr.bj.bcebos.com/ch_models/ch_rec_r34_vd_crnn_infer.tar
tar xf ch_rec_r34_vd_crnn_infer.tar
```
Then, following the Serving model conversion tutorial, run the following Python script.
```
from paddle_serving_client.io import inference_model_to_serving
inference_model_dir = "ch_rec_r34_vd_crnn"
serving_client_dir = "serving_client_dir"
serving_server_dir = "serving_server_dir"
feed_var_names, fetch_var_names = inference_model_to_serving(
    inference_model_dir, serving_client_dir, serving_server_dir, model_filename="model", params_filename="params")
```
This generates the client-side and server-side model configurations in `serving_client_dir` and `serving_server_dir` respectively.
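As a quick sanity check, not part of this commit, the snippet below can be appended to the conversion script above; it only echoes the returned feed/fetch variable names and lists the files generated in the two directories named in the example:

```python
# Appended to the conversion script above (assumes it ran in the current
# working directory, so both output directories exist).
import os

print("feed vars :", feed_var_names)    # returned by inference_model_to_serving above
print("fetch vars:", fetch_var_names)
for d in ("serving_server_dir", "serving_client_dir"):
    print(d, "->", sorted(os.listdir(d)))
```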
### 3. Start the service

Depending on your actual needs, you can start either the `standard` version or the `quick` version of the service; the two are compared in the table below:
deploy/pdserving/rec_local_server.py
@@ -22,7 +22,10 @@ from paddle_serving_client import Client
 from paddle_serving_app.reader import Sequential, URL2Image, ResizeByFactor
 from paddle_serving_app.reader import Div, Normalize, Transpose
 from paddle_serving_app.reader import DBPostProcess, FilterBoxes, GetRotateCropImage, SortedBoxes
-from paddle_serving_server_gpu.web_service import WebService
+if sys.argv[1] == 'gpu':
+    from paddle_serving_server_gpu.web_service import WebService
+elif sys.argv[1] == 'cpu':
+    from paddle_serving_server.web_service import WebService
 import time
 import re
 import base64
@@ -65,8 +68,12 @@ class OCRService(WebService):
 ocr_service = OCRService(name="ocr")
 ocr_service.load_model_config("ocr_rec_model")
-ocr_service.set_gpus("0")
 ocr_service.init_rec()
-ocr_service.prepare_server(workdir="workdir", port=9292, device="gpu", gpuid=0)
-ocr_service.run_debugger_service()
+if sys.argv[1] == 'gpu':
+    ocr_service.set_gpus("0")
+    ocr_service.prepare_server(workdir="workdir", port=9292, device="gpu", gpuid=0)
+    ocr_service.run_debugger_service(gpu=True)
+elif sys.argv[1] == 'cpu':
+    ocr_service.prepare_server(workdir="workdir", port=9292, device="cpu")
+    ocr_service.run_debugger_service()
 ocr_service.run_web_service()
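All three local servers touched by this commit pick the Serving backend from the first command-line argument, e.g. `python rec_local_server.py gpu` or `python rec_local_server.py cpu`. The guard below is only a sketch, not part of the commit; it shows the expected invocation and fails with a usage message instead of the IndexError you get when the argument is missing:

```python
# Sketch of the argv-based device selection used by det/ocr/rec_local_server.py.
import sys


def pick_device(argv=None):
    argv = sys.argv if argv is None else argv
    if len(argv) < 2 or argv[1] not in ("cpu", "gpu"):
        raise SystemExit("usage: python rec_local_server.py [cpu|gpu]")
    return argv[1]


if __name__ == "__main__":
    print("selected device:", pick_device())
```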