diff --git a/.clang_format.hook b/.clang_format.hook new file mode 100644 index 0000000000000000000000000000000000000000..1d928216867c0ba3897d71542fea44debf8d72a0 --- /dev/null +++ b/.clang_format.hook @@ -0,0 +1,15 @@ +#!/bin/bash +set -e + +readonly VERSION="3.8" + +version=$(clang-format -version) + +if ! [[ $version == *"$VERSION"* ]]; then + echo "clang-format version check failed." + echo "a version contains '$VERSION' is needed, but get '$version'" + echo "you can install the right version, and make an soft-link to '\$PATH' env" + exit -1 +fi + +clang-format $@ diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..1a2dd675e961f1804fa58e2e2e49118536b84ce9 --- /dev/null +++ b/.gitignore @@ -0,0 +1,23 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +.ipynb_checkpoints/ +*.py[cod] +*$py.class + +# C extensions +*.so + +inference/ +inference_results/ +output/ + +*.DS_Store +*.vs +*.user +*~ +*.vscode +*.idea + +*.log +.clang-format +.clang_format.hook diff --git a/README.md b/README.md index fa17693b69d64998232111b4d7931776164be042..1f0f172075bfd40ee9d4124764ced8ab9d155a9b 100644 --- a/README.md +++ b/README.md @@ -1,86 +1,129 @@ +[English](README_en.md) | 简体中文 ## 简介 PaddleOCR旨在打造一套丰富、领先、且实用的OCR工具库,助力使用者训练出更好的模型,并应用落地。 +**直播预告:2020年7月21日晚8点B站直播,PaddleOCR开源大礼包全面解读,直播地址当天更新** + +**近期更新** +- 2020.7.15 添加基于EasyEdge和Paddle-Lite的移动端DEMO,支持iOS和Android系统 +- 2020.7.15 完善预测部署,添加基于C++预测引擎推理、服务化部署和端侧部署方案,以及超轻量级中文OCR模型预测耗时Benchmark +- 2020.7.15 整理OCR相关数据集、常用数据标注以及合成工具 +- 2020.7.9 添加支持空格的识别模型,识别效果,预测及训练方式请参考快速开始和文本识别训练相关文档 +- 2020.7.9 添加数据增强、学习率衰减策略,具体参考[配置文件](./doc/doc_ch/config.md) +- [more](./doc/doc_ch/update.md) + + ## 特性 -- 超轻量级中文OCR,总模型仅8.6M +- 超轻量级中文OCR模型,总模型仅8.6M - 单模型支持中英文数字组合识别、竖排文本识别、长文本识别 - 检测模型DB(4.1M)+识别模型CRNN(4.5M) +- 实用通用中文OCR模型 +- 多种预测推理部署方案,包括服务部署和端侧部署 - 多种文本检测训练算法,EAST、DB - 多种文本识别训练算法,Rosetta、CRNN、STAR-Net、RARE +- 可运行于Linux、Windows、MacOS等多种系统 -## **超轻量级中文OCR体验** - - - 
-上图是超轻量级中文OCR模型效果展示,更多效果图请见文末[效果展示](#效果展示)。 - -#### 1.环境配置 - -请先参考[快速安装](./doc/installation.md)配置PaddleOCR运行环境。 - -#### 2.模型下载 +## 快速体验 -``` -# 下载inference模型文件包 -wget https://paddleocr.bj.bcebos.com/inference.tar -# inference模型文件包解压 -tar -xf inference.tar -``` +
+
+
+
+
+
+
+
+## REFERENCES
+```
+1. EAST:
+@inproceedings{zhou2017east,
+ title={EAST: an efficient and accurate scene text detector},
+ author={Zhou, Xinyu and Yao, Cong and Wen, He and Wang, Yuzhi and Zhou, Shuchang and He, Weiran and Liang, Jiajun},
+ booktitle={Proceedings of the IEEE conference on Computer Vision and Pattern Recognition},
+ pages={5551--5560},
+ year={2017}
+}
+
+2. DB:
+@article{liao2019real,
+ title={Real-time Scene Text Detection with Differentiable Binarization},
+ author={Liao, Minghui and Wan, Zhaoyi and Yao, Cong and Chen, Kai and Bai, Xiang},
+ journal={arXiv preprint arXiv:1911.08947},
+ year={2019}
+}
+
+3. DTRB:
+@inproceedings{baek2019wrong,
+ title={What is wrong with scene text recognition model comparisons? dataset and model analysis},
+ author={Baek, Jeonghun and Kim, Geewook and Lee, Junyeop and Park, Sungrae and Han, Dongyoon and Yun, Sangdoo and Oh, Seong Joon and Lee, Hwalsuk},
+ booktitle={Proceedings of the IEEE International Conference on Computer Vision},
+ pages={4715--4723},
+ year={2019}
+}
+
+4. SAST:
+@inproceedings{wang2019single,
+ title={A Single-Shot Arbitrarily-Shaped Text Detector based on Context Attended Multi-Task Learning},
+ author={Wang, Pengfei and Zhang, Chengquan and Qi, Fei and Huang, Zuming and En, Mengyi and Han, Junyu and Liu, Jingtuo and Ding, Errui and Shi, Guangming},
+ booktitle={Proceedings of the 27th ACM International Conference on Multimedia},
+ pages={1277--1285},
+ year={2019}
+}
+
+5. SRN:
+@article{yu2020towards,
+ title={Towards Accurate Scene Text Recognition with Semantic Reasoning Networks},
+ author={Yu, Deli and Li, Xuan and Zhang, Chengquan and Han, Junyu and Liu, Jingtuo and Ding, Errui},
+ journal={arXiv preprint arXiv:2003.12294},
+ year={2020}
+}
+
+6. end2end-psl:
+@inproceedings{sun2019chinese,
+ title={Chinese Street View Text: Large-scale Chinese Text Reading with Partially Supervised Learning},
+ author={Sun, Yipeng and Liu, Jiaming and Liu, Wei and Han, Junyu and Ding, Errui and Liu, Jingtuo},
+ booktitle={Proceedings of the IEEE International Conference on Computer Vision},
+ pages={9086--9095},
+ year={2019}
+}
+```
+
+## LICENSE
+This project is released under Apache 2.0 license
+
+## CONTRIBUTION
+We welcome all the contributions to PaddleOCR and appreciate your feedback very much.
+
+- Many thanks to [Khanh Tran](https://github.com/xxxpsyduck) for contributing the English documentation.
+- Many thanks to [zhangxin](https://github.com/ZhangXinNan) for contributing the new visualize function, adding .gitignore, and discarding the manual setting of PYTHONPATH.
+- Many thanks to [lyl120117](https://github.com/lyl120117) for contributing the code for printing the network structure.
diff --git a/configs/det/det_mv3_db.yml b/configs/det/det_mv3_db.yml
index 37e5dc596ea5ff50150199d4e705527af28b2822..caa7bd4fa09752cff8b4d596e80b5729cce175bf 100755
--- a/configs/det/det_mv3_db.yml
+++ b/configs/det/det_mv3_db.yml
@@ -6,7 +6,8 @@ Global:
print_batch_step: 2
save_model_dir: ./output/det_db/
save_epoch_step: 200
- eval_batch_step: 5000
+ # evaluation is run every 5000 iterations after the 4000th iteration
+ eval_batch_step: [4000, 5000]
train_batch_size_per_card: 16
test_batch_size_per_card: 16
image_shape: [3, 640, 640]
@@ -50,4 +51,4 @@ PostProcess:
thresh: 0.3
box_thresh: 0.7
max_candidates: 1000
- unclip_ratio: 1.5
\ No newline at end of file
+ unclip_ratio: 2.0
diff --git a/configs/det/det_mv3_east.yml b/configs/det/det_mv3_east.yml
index b6f37256291912757cd1d5b98d1f745d08452fd6..67b82fffff8c47e5ee5866ad22f238ece3822776 100755
--- a/configs/det/det_mv3_east.yml
+++ b/configs/det/det_mv3_east.yml
@@ -6,7 +6,7 @@ Global:
print_batch_step: 5
save_model_dir: ./output/det_east/
save_epoch_step: 200
- eval_batch_step: 5000
+ eval_batch_step: [5000, 5000]
train_batch_size_per_card: 16
test_batch_size_per_card: 16
image_shape: [3, 512, 512]
diff --git a/configs/det/det_r50_vd_db.yml b/configs/det/det_r50_vd_db.yml
index 6e3b3b9e264b29fcac2b2b9b20ee2f88d5c975f3..9a3b77e7cebce99f669d0b1be89ee56c84f41034 100755
--- a/configs/det/det_r50_vd_db.yml
+++ b/configs/det/det_r50_vd_db.yml
@@ -6,7 +6,7 @@ Global:
print_batch_step: 2
save_model_dir: ./output/det_db/
save_epoch_step: 200
- eval_batch_step: 5000
+ eval_batch_step: [5000, 5000]
train_batch_size_per_card: 8
test_batch_size_per_card: 16
image_shape: [3, 640, 640]
diff --git a/configs/det/det_r50_vd_east.yml b/configs/det/det_r50_vd_east.yml
index bb16f9fa12424db293ba498e78b00f279f1a7ff6..8d86819937c902e47dded38ae0238fb8254d8ff0 100755
--- a/configs/det/det_r50_vd_east.yml
+++ b/configs/det/det_r50_vd_east.yml
@@ -6,7 +6,7 @@ Global:
print_batch_step: 5
save_model_dir: ./output/det_east/
save_epoch_step: 200
- eval_batch_step: 5000
+ eval_batch_step: [5000, 5000]
train_batch_size_per_card: 8
test_batch_size_per_card: 16
image_shape: [3, 512, 512]
diff --git a/configs/rec/rec_benchmark_reader.yml b/configs/rec/rec_benchmark_reader.yml
index 44cc0567906f6ed29f8a868ea9a4b45814d9efb0..524f2f68bac92ff6ffe3ff3b34e461d2adc81e41 100755
--- a/configs/rec/rec_benchmark_reader.yml
+++ b/configs/rec/rec_benchmark_reader.yml
@@ -10,4 +10,3 @@ EvalReader:
TestReader:
reader_function: ppocr.data.rec.dataset_traversal,LMDBReader
lmdb_sets_dir: ./train_data/data_lmdb_release/evaluation/
- infer_img: ./infer_img
\ No newline at end of file
diff --git a/configs/rec/rec_chinese_common_train.yml b/configs/rec/rec_chinese_common_train.yml
new file mode 100644
index 0000000000000000000000000000000000000000..0d897459e0a631a4ac1fa10973f18e8640078c1b
--- /dev/null
+++ b/configs/rec/rec_chinese_common_train.yml
@@ -0,0 +1,45 @@
+Global:
+ algorithm: CRNN
+ use_gpu: true
+ epoch_num: 3000
+ log_smooth_window: 20
+ print_batch_step: 10
+ save_model_dir: ./output/rec_CRNN
+ save_epoch_step: 3
+ eval_batch_step: 2000
+ train_batch_size_per_card: 128
+ test_batch_size_per_card: 128
+ image_shape: [3, 32, 320]
+ max_text_length: 25
+ character_type: ch
+ character_dict_path: ./ppocr/utils/ppocr_keys_v1.txt
+ loss_type: ctc
+ distort: false
+ use_space_char: false
+ reader_yml: ./configs/rec/rec_chinese_reader.yml
+ pretrain_weights:
+ checkpoints:
+ save_inference_dir:
+ infer_img:
+
+Architecture:
+ function: ppocr.modeling.architectures.rec_model,RecModel
+
+Backbone:
+ function: ppocr.modeling.backbones.rec_resnet_vd,ResNet
+ layers: 34
+
+Head:
+ function: ppocr.modeling.heads.rec_ctc_head,CTCPredict
+ encoder_type: rnn
+ SeqRNN:
+ hidden_size: 256
+
+Loss:
+ function: ppocr.modeling.losses.rec_ctc_loss,CTCLoss
+
+Optimizer:
+ function: ppocr.optimizer,AdamDecay
+ base_lr: 0.0005
+ beta1: 0.9
+ beta2: 0.999
diff --git a/configs/rec/rec_chinese_lite_train.yml b/configs/rec/rec_chinese_lite_train.yml
index ec1b7a697d95d12f884fb8e1080a1493b9a30ad3..95a39a3b4d349973356594e15a23f951e27dc7c5 100755
--- a/configs/rec/rec_chinese_lite_train.yml
+++ b/configs/rec/rec_chinese_lite_train.yml
@@ -14,10 +14,14 @@ Global:
character_type: ch
character_dict_path: ./ppocr/utils/ppocr_keys_v1.txt
loss_type: ctc
+ distort: false
+ use_space_char: false
reader_yml: ./configs/rec/rec_chinese_reader.yml
- pretrain_weights:
+ pretrain_weights:
checkpoints:
save_inference_dir:
+ infer_img:
+
Architecture:
function: ppocr.modeling.architectures.rec_model,RecModel
diff --git a/configs/rec/rec_chinese_reader.yml b/configs/rec/rec_chinese_reader.yml
index f09a1ea72e6d929d0446fbbf51ca218e52ae5b3e..a44efd9911d4595cc519b660e868aa9a1e0f144b 100755
--- a/configs/rec/rec_chinese_reader.yml
+++ b/configs/rec/rec_chinese_reader.yml
@@ -11,4 +11,3 @@ EvalReader:
TestReader:
reader_function: ppocr.data.rec.dataset_traversal,SimpleReader
- infer_img: ./infer_img
diff --git a/configs/rec/rec_icdar15_reader.yml b/configs/rec/rec_icdar15_reader.yml
index 12facda1a2fd720765ccf8b39e21cff3a4d31129..322d5f25e0ef0fab167c0c39b38fa488a5546f1b 100755
--- a/configs/rec/rec_icdar15_reader.yml
+++ b/configs/rec/rec_icdar15_reader.yml
@@ -11,4 +11,3 @@ EvalReader:
TestReader:
reader_function: ppocr.data.rec.dataset_traversal,SimpleReader
- infer_img: ./infer_img
diff --git a/configs/rec/rec_icdar15_train.yml b/configs/rec/rec_icdar15_train.yml
index 6596fc339398af20a9c9ce74a264e24c0a0bdd35..98a38e7477f725c605c0cf017b6a7a4b469f7f3b 100755
--- a/configs/rec/rec_icdar15_train.yml
+++ b/configs/rec/rec_icdar15_train.yml
@@ -13,10 +13,14 @@ Global:
max_text_length: 25
character_type: en
loss_type: ctc
+ distort: true
+ debug: false
reader_yml: ./configs/rec/rec_icdar15_reader.yml
- pretrain_weights: ./pretrain_models/rec_mv3_none_bilstm_ctc/best_accuracy
+ pretrain_weights: ./pretrain_models/rec_mv3_none_bilstm_ctc/best_accuracy
checkpoints:
save_inference_dir:
+ infer_img:
+
Architecture:
function: ppocr.modeling.architectures.rec_model,RecModel
@@ -39,3 +43,7 @@ Optimizer:
base_lr: 0.0005
beta1: 0.9
beta2: 0.999
+ decay:
+ function: cosine_decay
+ step_each_epoch: 20
+ total_epoch: 1000
diff --git a/configs/rec/rec_mv3_none_bilstm_ctc.yml b/configs/rec/rec_mv3_none_bilstm_ctc.yml
index 11a09ee927492154c46f82add1bcfae7c2bb787e..d2e096fb1c51588a6bd2c7ca8321cf817d435f23 100755
--- a/configs/rec/rec_mv3_none_bilstm_ctc.yml
+++ b/configs/rec/rec_mv3_none_bilstm_ctc.yml
@@ -17,6 +17,7 @@ Global:
pretrain_weights:
checkpoints:
save_inference_dir:
+ infer_img:
Architecture:
function: ppocr.modeling.architectures.rec_model,RecModel
diff --git a/configs/rec/rec_mv3_none_none_ctc.yml b/configs/rec/rec_mv3_none_none_ctc.yml
index bbbb6d1fabacbebaf1481260f34ef0e2cfed97f6..ceec09ce6f3b6cb2238d6fb2e15f510cb31e0fd8 100755
--- a/configs/rec/rec_mv3_none_none_ctc.yml
+++ b/configs/rec/rec_mv3_none_none_ctc.yml
@@ -17,6 +17,7 @@ Global:
pretrain_weights:
checkpoints:
save_inference_dir:
+ infer_img:
Architecture:
function: ppocr.modeling.architectures.rec_model,RecModel
diff --git a/configs/rec/rec_mv3_tps_bilstm_attn.yml b/configs/rec/rec_mv3_tps_bilstm_attn.yml
index 03a2e901b4997a5cec0e01756b69b1fa0d04511b..7fc4f6799459bf9fbcd25e1609aeca5e3fd12a74 100755
--- a/configs/rec/rec_mv3_tps_bilstm_attn.yml
+++ b/configs/rec/rec_mv3_tps_bilstm_attn.yml
@@ -13,11 +13,14 @@ Global:
max_text_length: 25
character_type: en
loss_type: attention
+ tps: true
reader_yml: ./configs/rec/rec_benchmark_reader.yml
pretrain_weights:
checkpoints:
save_inference_dir:
-
+ infer_img:
+
+
Architecture:
function: ppocr.modeling.architectures.rec_model,RecModel
diff --git a/configs/rec/rec_mv3_tps_bilstm_ctc.yml b/configs/rec/rec_mv3_tps_bilstm_ctc.yml
index 47247b723a0cb3a145d6e87a3d76b1a8dcf1ea21..4b9660bcdec60989a6d9b9926c40814a83db6f39 100755
--- a/configs/rec/rec_mv3_tps_bilstm_ctc.yml
+++ b/configs/rec/rec_mv3_tps_bilstm_ctc.yml
@@ -13,10 +13,12 @@ Global:
max_text_length: 25
character_type: en
loss_type: ctc
+ tps: true
reader_yml: ./configs/rec/rec_benchmark_reader.yml
pretrain_weights:
checkpoints:
save_inference_dir:
+ infer_img:
Architecture:
diff --git a/configs/rec/rec_r34_vd_none_bilstm_ctc.yml b/configs/rec/rec_r34_vd_none_bilstm_ctc.yml
index 1018193611855dd22ad54fb8fbc70b7f47d89c33..b71e8feae7ac8f235bf471101efd4383c61bfab2 100755
--- a/configs/rec/rec_r34_vd_none_bilstm_ctc.yml
+++ b/configs/rec/rec_r34_vd_none_bilstm_ctc.yml
@@ -17,7 +17,9 @@ Global:
pretrain_weights:
checkpoints:
save_inference_dir:
-
+ infer_img:
+
+
Architecture:
function: ppocr.modeling.architectures.rec_model,RecModel
diff --git a/configs/rec/rec_r34_vd_none_none_ctc.yml b/configs/rec/rec_r34_vd_none_none_ctc.yml
index ff4c57634aa12e6bbd88905a038260c75489d8f3..d9c9458d6d8fcdb9df590b0093d54b71e3e53fcc 100755
--- a/configs/rec/rec_r34_vd_none_none_ctc.yml
+++ b/configs/rec/rec_r34_vd_none_none_ctc.yml
@@ -17,6 +17,7 @@ Global:
pretrain_weights:
checkpoints:
save_inference_dir:
+ infer_img:
Architecture:
function: ppocr.modeling.architectures.rec_model,RecModel
diff --git a/configs/rec/rec_r34_vd_tps_bilstm_attn.yml b/configs/rec/rec_r34_vd_tps_bilstm_attn.yml
index 4d96e9e72927e3822137bf95e89164cc33b41db7..dfcd97fad67081a7ed04ad1d67ff298c9f553b0c 100755
--- a/configs/rec/rec_r34_vd_tps_bilstm_attn.yml
+++ b/configs/rec/rec_r34_vd_tps_bilstm_attn.yml
@@ -13,10 +13,13 @@ Global:
max_text_length: 25
character_type: en
loss_type: attention
+ tps: true
reader_yml: ./configs/rec/rec_benchmark_reader.yml
pretrain_weights:
checkpoints:
save_inference_dir:
+ infer_img:
+
Architecture:
function: ppocr.modeling.architectures.rec_model,RecModel
diff --git a/configs/rec/rec_r34_vd_tps_bilstm_ctc.yml b/configs/rec/rec_r34_vd_tps_bilstm_ctc.yml
index 844721a2e44019382682e76d4f3f40954eaebc6b..574a088cc024541e086bedc0bc0a52082e9e7eb2 100755
--- a/configs/rec/rec_r34_vd_tps_bilstm_ctc.yml
+++ b/configs/rec/rec_r34_vd_tps_bilstm_ctc.yml
@@ -13,10 +13,13 @@ Global:
max_text_length: 25
character_type: en
loss_type: ctc
+ tps: true
reader_yml: ./configs/rec/rec_benchmark_reader.yml
pretrain_weights:
checkpoints:
save_inference_dir:
+ infer_img:
+
Architecture:
function: ppocr.modeling.architectures.rec_model,RecModel
diff --git a/deploy/android_demo/.gitignore b/deploy/android_demo/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..93dcb29353fb4c4f92e35aeb34b76910ead7519c
--- /dev/null
+++ b/deploy/android_demo/.gitignore
@@ -0,0 +1,9 @@
+*.iml
+.gradle
+/local.properties
+/.idea/*
+.DS_Store
+/build
+/captures
+.externalNativeBuild
+
diff --git a/deploy/android_demo/README.md b/deploy/android_demo/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..4d85dee99ab3616594b4ff3a17acb97a6267b12d
--- /dev/null
+++ b/deploy/android_demo/README.md
@@ -0,0 +1,19 @@
+# 如何快速测试
+### 1. 安装最新版本的Android Studio
+可以从https://developer.android.com/studio下载。本Demo使用4.0版本Android Studio编写。
+
+### 2. 安装NDK 20 以上版本
+Demo测试的时候使用的是NDK 20b版本,20版本以上均可以支持编译成功。
+
+如果您是初学者,可以用以下方式安装和测试NDK编译环境。
+点击 File -> New ->New Project, 新建 "Native C++" project
+
+### 3. 导入项目
+点击 File->New->Import Project..., 然后跟着Android Studio的引导导入
+
+
+# 获得更多支持
+前往[端计算模型生成平台EasyEdge](https://ai.baidu.com/easyedge/app/open_source_demo?referrerUrl=paddlelite),获得更多开发支持:
+
+- Demo APP:可使用手机扫码安装,方便手机端快速体验文字识别
+- SDK:模型被封装为适配不同芯片硬件和操作系统SDK,包括完善的接口,方便进行二次开发
diff --git a/deploy/android_demo/app/.gitignore b/deploy/android_demo/app/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..796b96d1c402326528b4ba3c12ee9d92d0e212e9
--- /dev/null
+++ b/deploy/android_demo/app/.gitignore
@@ -0,0 +1 @@
+/build
diff --git a/deploy/android_demo/app/build.gradle b/deploy/android_demo/app/build.gradle
new file mode 100644
index 0000000000000000000000000000000000000000..adf3968b40960b50bc62a7ba669ce28346afa362
--- /dev/null
+++ b/deploy/android_demo/app/build.gradle
@@ -0,0 +1,95 @@
+import java.security.MessageDigest
+
+apply plugin: 'com.android.application'
+
+android {
+ compileSdkVersion 28
+ defaultConfig {
+ applicationId "com.baidu.paddle.lite.demo.ocr"
+ minSdkVersion 15
+ targetSdkVersion 28
+ versionCode 1
+ versionName "1.0"
+ testInstrumentationRunner "android.support.test.runner.AndroidJUnitRunner"
+ externalNativeBuild {
+ cmake {
+ cppFlags "-std=c++11 -frtti -fexceptions -Wno-format"
+ arguments '-DANDROID_PLATFORM=android-23', '-DANDROID_STL=c++_shared' ,"-DANDROID_ARM_NEON=TRUE"
+ }
+ }
+ ndk {
+ // abiFilters "arm64-v8a", "armeabi-v7a"
+ abiFilters "arm64-v8a", "armeabi-v7a"
+ ldLibs "jnigraphics"
+ }
+ }
+ buildTypes {
+ release {
+ minifyEnabled false
+ proguardFiles getDefaultProguardFile('proguard-android-optimize.txt'), 'proguard-rules.pro'
+ }
+ }
+ externalNativeBuild {
+ cmake {
+ path "src/main/cpp/CMakeLists.txt"
+ version "3.10.2"
+ }
+ }
+}
+
+dependencies {
+ implementation fileTree(include: ['*.jar'], dir: 'libs')
+ implementation 'com.android.support:appcompat-v7:28.0.0'
+ implementation 'com.android.support.constraint:constraint-layout:1.1.3'
+ implementation 'com.android.support:design:28.0.0'
+ testImplementation 'junit:junit:4.12'
+ androidTestImplementation 'com.android.support.test:runner:1.0.2'
+ androidTestImplementation 'com.android.support.test.espresso:espresso-core:3.0.2'
+}
+
+def archives = [
+ [
+ 'src' : 'https://paddlelite-demo.bj.bcebos.com/libs/android/paddle_lite_libs_v2_6_1.tar.gz',
+ 'dest': 'PaddleLite'
+ ],
+ [
+ 'src' : 'https://paddlelite-demo.bj.bcebos.com/libs/android/opencv-4.2.0-android-sdk.tar.gz',
+ 'dest': 'OpenCV'
+ ],
+ [
+ 'src' : 'https://paddleocr.bj.bcebos.com/deploy/lite/ocr_v1_for_cpu.tar.gz',
+ 'dest' : 'src/main/assets/models/ocr_v1_for_cpu'
+ ]
+]
+
+task downloadAndExtractArchives(type: DefaultTask) {
+ doFirst {
+ println "Downloading and extracting archives including libs and models"
+ }
+ doLast {
+ // Prepare cache folder for archives
+ String cachePath = "cache"
+ if (!file("${cachePath}").exists()) {
+ mkdir "${cachePath}"
+ }
+ archives.eachWithIndex { archive, index ->
+ MessageDigest messageDigest = MessageDigest.getInstance('MD5')
+ messageDigest.update(archive.src.bytes)
+ String cacheName = new BigInteger(1, messageDigest.digest()).toString(32)
+ // Download the target archive if not exists
+ boolean copyFiles = !file("${archive.dest}").exists()
+ if (!file("${cachePath}/${cacheName}.tar.gz").exists()) {
+ ant.get(src: archive.src, dest: file("${cachePath}/${cacheName}.tar.gz"))
+ copyFiles = true; // force to copy files from the latest archive files
+ }
+ // Extract the target archive if its dest path does not exists
+ if (copyFiles) {
+ copy {
+ from tarTree("${cachePath}/${cacheName}.tar.gz")
+ into "${archive.dest}"
+ }
+ }
+ }
+ }
+}
+preBuild.dependsOn downloadAndExtractArchives
\ No newline at end of file
diff --git a/deploy/android_demo/app/proguard-rules.pro b/deploy/android_demo/app/proguard-rules.pro
new file mode 100644
index 0000000000000000000000000000000000000000..f1b424510da51fd82143bc74a0a801ae5a1e2fcd
--- /dev/null
+++ b/deploy/android_demo/app/proguard-rules.pro
@@ -0,0 +1,21 @@
+# Add project specific ProGuard rules here.
+# You can control the set of applied configuration files using the
+# proguardFiles setting in build.gradle.
+#
+# For more details, see
+# http://developer.android.com/guide/developing/tools/proguard.html
+
+# If your project uses WebView with JS, uncomment the following
+# and specify the fully qualified class name to the JavaScript interface
+# class:
+#-keepclassmembers class fqcn.of.javascript.interface.for.webview {
+# public *;
+#}
+
+# Uncomment this to preserve the line number information for
+# debugging stack traces.
+#-keepattributes SourceFile,LineNumberTable
+
+# If you keep the line number information, uncomment this to
+# hide the original source file name.
+#-renamesourcefileattribute SourceFile
diff --git a/deploy/android_demo/app/src/androidTest/java/com/baidu/paddle/lite/demo/ocr/ExampleInstrumentedTest.java b/deploy/android_demo/app/src/androidTest/java/com/baidu/paddle/lite/demo/ocr/ExampleInstrumentedTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..77b179da469ed685729f261296ee956c64f26905
--- /dev/null
+++ b/deploy/android_demo/app/src/androidTest/java/com/baidu/paddle/lite/demo/ocr/ExampleInstrumentedTest.java
@@ -0,0 +1,26 @@
+package com.baidu.paddle.lite.demo.ocr;
+
+import android.content.Context;
+import android.support.test.InstrumentationRegistry;
+import android.support.test.runner.AndroidJUnit4;
+
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+import static org.junit.Assert.*;
+
+/**
+ * Instrumented test, which will execute on an Android device.
+ *
+ * @see Testing documentation
+ */
+@RunWith(AndroidJUnit4.class)
+public class ExampleInstrumentedTest {
+ @Test
+ public void useAppContext() {
+ // Context of the app under test.
+ Context appContext = InstrumentationRegistry.getTargetContext();
+
+ assertEquals("com.baidu.paddle.lite.demo", appContext.getPackageName());
+ }
+}
diff --git a/deploy/android_demo/app/src/main/AndroidManifest.xml b/deploy/android_demo/app/src/main/AndroidManifest.xml
new file mode 100644
index 0000000000000000000000000000000000000000..ff1900d637a827998c4da52b9a2dda51b8ae89c8
--- /dev/null
+++ b/deploy/android_demo/app/src/main/AndroidManifest.xml
@@ -0,0 +1,30 @@
+
+
+ * This technique can be used with an {@link android.app.Activity} class, not just
+ * {@link PreferenceActivity}.
+ */
+public abstract class AppCompatPreferenceActivity extends PreferenceActivity {
+ private AppCompatDelegate mDelegate;
+
+ @Override
+ protected void onCreate(Bundle savedInstanceState) {
+ getDelegate().installViewFactory();
+ getDelegate().onCreate(savedInstanceState);
+ super.onCreate(savedInstanceState);
+ }
+
+ @Override
+ protected void onPostCreate(Bundle savedInstanceState) {
+ super.onPostCreate(savedInstanceState);
+ getDelegate().onPostCreate(savedInstanceState);
+ }
+
+ public ActionBar getSupportActionBar() {
+ return getDelegate().getSupportActionBar();
+ }
+
+ public void setSupportActionBar(@Nullable Toolbar toolbar) {
+ getDelegate().setSupportActionBar(toolbar);
+ }
+
+ @Override
+ public MenuInflater getMenuInflater() {
+ return getDelegate().getMenuInflater();
+ }
+
+ @Override
+ public void setContentView(@LayoutRes int layoutResID) {
+ getDelegate().setContentView(layoutResID);
+ }
+
+ @Override
+ public void setContentView(View view) {
+ getDelegate().setContentView(view);
+ }
+
+ @Override
+ public void setContentView(View view, ViewGroup.LayoutParams params) {
+ getDelegate().setContentView(view, params);
+ }
+
+ @Override
+ public void addContentView(View view, ViewGroup.LayoutParams params) {
+ getDelegate().addContentView(view, params);
+ }
+
+ @Override
+ protected void onPostResume() {
+ super.onPostResume();
+ getDelegate().onPostResume();
+ }
+
+ @Override
+ protected void onTitleChanged(CharSequence title, int color) {
+ super.onTitleChanged(title, color);
+ getDelegate().setTitle(title);
+ }
+
+ @Override
+ public void onConfigurationChanged(Configuration newConfig) {
+ super.onConfigurationChanged(newConfig);
+ getDelegate().onConfigurationChanged(newConfig);
+ }
+
+ @Override
+ protected void onStop() {
+ super.onStop();
+ getDelegate().onStop();
+ }
+
+ @Override
+ protected void onDestroy() {
+ super.onDestroy();
+ getDelegate().onDestroy();
+ }
+
+ public void invalidateOptionsMenu() {
+ getDelegate().invalidateOptionsMenu();
+ }
+
+ private AppCompatDelegate getDelegate() {
+ if (mDelegate == null) {
+ mDelegate = AppCompatDelegate.create(this, null);
+ }
+ return mDelegate;
+ }
+}
diff --git a/deploy/android_demo/app/src/main/java/com/baidu/paddle/lite/demo/ocr/MainActivity.java b/deploy/android_demo/app/src/main/java/com/baidu/paddle/lite/demo/ocr/MainActivity.java
new file mode 100644
index 0000000000000000000000000000000000000000..b72d72df47a3c6d769559230185c50823276fe85
--- /dev/null
+++ b/deploy/android_demo/app/src/main/java/com/baidu/paddle/lite/demo/ocr/MainActivity.java
@@ -0,0 +1,414 @@
+package com.baidu.paddle.lite.demo.ocr;
+
+import android.Manifest;
+import android.app.ProgressDialog;
+import android.content.ContentResolver;
+import android.content.Intent;
+import android.content.SharedPreferences;
+import android.content.pm.PackageManager;
+import android.database.Cursor;
+import android.graphics.Bitmap;
+import android.graphics.BitmapFactory;
+import android.net.Uri;
+import android.os.Bundle;
+import android.os.Handler;
+import android.os.HandlerThread;
+import android.os.Message;
+import android.preference.PreferenceManager;
+import android.provider.MediaStore;
+import android.support.annotation.NonNull;
+import android.support.v4.app.ActivityCompat;
+import android.support.v4.content.ContextCompat;
+import android.support.v7.app.AppCompatActivity;
+import android.text.method.ScrollingMovementMethod;
+import android.util.Log;
+import android.view.Menu;
+import android.view.MenuInflater;
+import android.view.MenuItem;
+import android.widget.ImageView;
+import android.widget.TextView;
+import android.widget.Toast;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+
+public class MainActivity extends AppCompatActivity {
+ private static final String TAG = MainActivity.class.getSimpleName();
+ public static final int OPEN_GALLERY_REQUEST_CODE = 0;
+ public static final int TAKE_PHOTO_REQUEST_CODE = 1;
+
+ public static final int REQUEST_LOAD_MODEL = 0;
+ public static final int REQUEST_RUN_MODEL = 1;
+ public static final int RESPONSE_LOAD_MODEL_SUCCESSED = 0;
+ public static final int RESPONSE_LOAD_MODEL_FAILED = 1;
+ public static final int RESPONSE_RUN_MODEL_SUCCESSED = 2;
+ public static final int RESPONSE_RUN_MODEL_FAILED = 3;
+
+ protected ProgressDialog pbLoadModel = null;
+ protected ProgressDialog pbRunModel = null;
+
+ protected Handler receiver = null; // Receive messages from worker thread
+ protected Handler sender = null; // Send command to worker thread
+ protected HandlerThread worker = null; // Worker thread to load&run model
+
+ // UI components of object detection
+ protected TextView tvInputSetting;
+ protected ImageView ivInputImage;
+ protected TextView tvOutputResult;
+ protected TextView tvInferenceTime;
+
+ // Model settings of object detection
+ protected String modelPath = "";
+ protected String labelPath = "";
+ protected String imagePath = "";
+ protected int cpuThreadNum = 1;
+ protected String cpuPowerMode = "";
+ protected String inputColorFormat = "";
+ protected long[] inputShape = new long[]{};
+ protected float[] inputMean = new float[]{};
+ protected float[] inputStd = new float[]{};
+ protected float scoreThreshold = 0.1f;
+
+ protected Predictor predictor = new Predictor();
+
+ @Override
+ protected void onCreate(Bundle savedInstanceState) {
+ super.onCreate(savedInstanceState);
+ setContentView(R.layout.activity_main);
+
+ // Clear all setting items to avoid app crashing due to the incorrect settings
+ SharedPreferences sharedPreferences = PreferenceManager.getDefaultSharedPreferences(this);
+ SharedPreferences.Editor editor = sharedPreferences.edit();
+ editor.clear();
+ editor.commit();
+
+ // Prepare the worker thread for mode loading and inference
+ receiver = new Handler() {
+ @Override
+ public void handleMessage(Message msg) {
+ switch (msg.what) {
+ case RESPONSE_LOAD_MODEL_SUCCESSED:
+ pbLoadModel.dismiss();
+ onLoadModelSuccessed();
+ break;
+ case RESPONSE_LOAD_MODEL_FAILED:
+ pbLoadModel.dismiss();
+ Toast.makeText(MainActivity.this, "Load model failed!", Toast.LENGTH_SHORT).show();
+ onLoadModelFailed();
+ break;
+ case RESPONSE_RUN_MODEL_SUCCESSED:
+ pbRunModel.dismiss();
+ onRunModelSuccessed();
+ break;
+ case RESPONSE_RUN_MODEL_FAILED:
+ pbRunModel.dismiss();
+ Toast.makeText(MainActivity.this, "Run model failed!", Toast.LENGTH_SHORT).show();
+ onRunModelFailed();
+ break;
+ default:
+ break;
+ }
+ }
+ };
+
+ worker = new HandlerThread("Predictor Worker");
+ worker.start();
+ sender = new Handler(worker.getLooper()) {
+ public void handleMessage(Message msg) {
+ switch (msg.what) {
+ case REQUEST_LOAD_MODEL:
+ // Load model and reload test image
+ if (onLoadModel()) {
+ receiver.sendEmptyMessage(RESPONSE_LOAD_MODEL_SUCCESSED);
+ } else {
+ receiver.sendEmptyMessage(RESPONSE_LOAD_MODEL_FAILED);
+ }
+ break;
+ case REQUEST_RUN_MODEL:
+ // Run model if model is loaded
+ if (onRunModel()) {
+ receiver.sendEmptyMessage(RESPONSE_RUN_MODEL_SUCCESSED);
+ } else {
+ receiver.sendEmptyMessage(RESPONSE_RUN_MODEL_FAILED);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ };
+
+ // Setup the UI components
+ tvInputSetting = findViewById(R.id.tv_input_setting);
+ ivInputImage = findViewById(R.id.iv_input_image);
+ tvInferenceTime = findViewById(R.id.tv_inference_time);
+ tvOutputResult = findViewById(R.id.tv_output_result);
+ tvInputSetting.setMovementMethod(ScrollingMovementMethod.getInstance());
+ tvOutputResult.setMovementMethod(ScrollingMovementMethod.getInstance());
+ }
+
+ @Override
+ protected void onResume() {
+ super.onResume();
+ SharedPreferences sharedPreferences = PreferenceManager.getDefaultSharedPreferences(this);
+ boolean settingsChanged = false;
+ String model_path = sharedPreferences.getString(getString(R.string.MODEL_PATH_KEY),
+ getString(R.string.MODEL_PATH_DEFAULT));
+ String label_path = sharedPreferences.getString(getString(R.string.LABEL_PATH_KEY),
+ getString(R.string.LABEL_PATH_DEFAULT));
+ String image_path = sharedPreferences.getString(getString(R.string.IMAGE_PATH_KEY),
+ getString(R.string.IMAGE_PATH_DEFAULT));
+ settingsChanged |= !model_path.equalsIgnoreCase(modelPath);
+ settingsChanged |= !label_path.equalsIgnoreCase(labelPath);
+ settingsChanged |= !image_path.equalsIgnoreCase(imagePath);
+ int cpu_thread_num = Integer.parseInt(sharedPreferences.getString(getString(R.string.CPU_THREAD_NUM_KEY),
+ getString(R.string.CPU_THREAD_NUM_DEFAULT)));
+ settingsChanged |= cpu_thread_num != cpuThreadNum;
+ String cpu_power_mode =
+ sharedPreferences.getString(getString(R.string.CPU_POWER_MODE_KEY),
+ getString(R.string.CPU_POWER_MODE_DEFAULT));
+ settingsChanged |= !cpu_power_mode.equalsIgnoreCase(cpuPowerMode);
+ String input_color_format =
+ sharedPreferences.getString(getString(R.string.INPUT_COLOR_FORMAT_KEY),
+ getString(R.string.INPUT_COLOR_FORMAT_DEFAULT));
+ settingsChanged |= !input_color_format.equalsIgnoreCase(inputColorFormat);
+ long[] input_shape =
+ Utils.parseLongsFromString(sharedPreferences.getString(getString(R.string.INPUT_SHAPE_KEY),
+ getString(R.string.INPUT_SHAPE_DEFAULT)), ",");
+ float[] input_mean =
+ Utils.parseFloatsFromString(sharedPreferences.getString(getString(R.string.INPUT_MEAN_KEY),
+ getString(R.string.INPUT_MEAN_DEFAULT)), ",");
+ float[] input_std =
+ Utils.parseFloatsFromString(sharedPreferences.getString(getString(R.string.INPUT_STD_KEY)
+ , getString(R.string.INPUT_STD_DEFAULT)), ",");
+ settingsChanged |= input_shape.length != inputShape.length;
+ settingsChanged |= input_mean.length != inputMean.length;
+ settingsChanged |= input_std.length != inputStd.length;
+ if (!settingsChanged) {
+ for (int i = 0; i < input_shape.length; i++) {
+ settingsChanged |= input_shape[i] != inputShape[i];
+ }
+ for (int i = 0; i < input_mean.length; i++) {
+ settingsChanged |= input_mean[i] != inputMean[i];
+ }
+ for (int i = 0; i < input_std.length; i++) {
+ settingsChanged |= input_std[i] != inputStd[i];
+ }
+ }
+ float score_threshold =
+ Float.parseFloat(sharedPreferences.getString(getString(R.string.SCORE_THRESHOLD_KEY),
+ getString(R.string.SCORE_THRESHOLD_DEFAULT)));
+ settingsChanged |= scoreThreshold != score_threshold;
+ if (settingsChanged) {
+ modelPath = model_path;
+ labelPath = label_path;
+ imagePath = image_path;
+ cpuThreadNum = cpu_thread_num;
+ cpuPowerMode = cpu_power_mode;
+ inputColorFormat = input_color_format;
+ inputShape = input_shape;
+ inputMean = input_mean;
+ inputStd = input_std;
+ scoreThreshold = score_threshold;
+ // Update UI
+ tvInputSetting.setText("Model: " + modelPath.substring(modelPath.lastIndexOf("/") + 1) + "\n" + "CPU" +
+ " Thread Num: " + Integer.toString(cpuThreadNum) + "\n" + "CPU Power Mode: " + cpuPowerMode);
+ tvInputSetting.scrollTo(0, 0);
+ // Reload model if configure has been changed
+ loadModel();
+ }
+ }
+
+    public void loadModel() {
+        // Show a blocking progress dialog, then hand the actual loading off to the
+        // background handler so the UI thread stays responsive.
+        pbLoadModel = ProgressDialog.show(this, "", "Loading model...", false, false);
+        sender.sendEmptyMessage(REQUEST_LOAD_MODEL);
+    }
+
+    public void runModel() {
+        // Show a blocking progress dialog, then ask the worker thread to run
+        // inference; the dialog is dismissed by the result handler.
+        pbRunModel = ProgressDialog.show(this, "", "Running model...", false, false);
+        sender.sendEmptyMessage(REQUEST_RUN_MODEL);
+    }
+
+    public boolean onLoadModel() {
+        // Worker-thread callback: initialize the predictor with the current
+        // settings and report whether initialization succeeded.
+        return predictor.init(MainActivity.this, modelPath, labelPath,
+                cpuThreadNum, cpuPowerMode, inputColorFormat,
+                inputShape, inputMean, inputStd, scoreThreshold);
+    }
+
+    public boolean onRunModel() {
+        // Refuse to run until a model has actually been loaded.
+        if (!predictor.isLoaded()) {
+            return false;
+        }
+        return predictor.runModel();
+    }
+
+    public void onLoadModelSuccessed() {
+        // Once the model is ready, decode the configured test image and kick off a
+        // first inference so the user immediately sees a result.
+        try {
+            if (imagePath.isEmpty()) {
+                return;
+            }
+            Bitmap image = null;
+            // An absolute path (leading '/') refers to a file on external storage;
+            // anything else is treated as a path inside the app's assets.
+            if (imagePath.startsWith("/")) {
+                if (!new File(imagePath).exists()) {
+                    return;
+                }
+                image = BitmapFactory.decodeFile(imagePath);
+            } else {
+                // try-with-resources closes the asset stream (the original leaked it).
+                try (InputStream imageStream = getAssets().open(imagePath)) {
+                    image = BitmapFactory.decodeStream(imageStream);
+                }
+            }
+            if (image != null && predictor.isLoaded()) {
+                predictor.setInputImage(image);
+                runModel();
+            }
+        } catch (IOException e) {
+            Toast.makeText(MainActivity.this, "Load image failed!", Toast.LENGTH_SHORT).show();
+            e.printStackTrace();
+        }
+    }
+
+    public void onLoadModelFailed() {
+        // Intentionally a no-op: presumably the handler that invokes this hook
+        // reports the failure to the user — NOTE(review): confirm against caller.
+    }
+
+    public void onRunModelSuccessed() {
+        // Inference finished: refresh timing, the (possibly annotated) output
+        // image, and the textual recognition result.
+        tvInferenceTime.setText("Inference time: " + predictor.inferenceTime() + " ms");
+        Bitmap rendered = predictor.outputImage();
+        if (rendered != null) {
+            ivInputImage.setImageBitmap(rendered);
+        }
+        String resultText = predictor.outputResult();
+        tvOutputResult.setText(resultText);
+        tvOutputResult.scrollTo(0, 0);
+    }
+
+    public void onRunModelFailed() {
+        // Intentionally a no-op: presumably the handler that invokes this hook
+        // reports the failure to the user — NOTE(review): confirm against caller.
+    }
+
+    public void onImageChanged(Bitmap image) {
+        // Re-run inference whenever the user picks a new image from the gallery
+        // or takes a photo; skip if there is no image or no loaded model.
+        if (image == null || !predictor.isLoaded()) {
+            return;
+        }
+        predictor.setInputImage(image);
+        runModel();
+    }
+
+ public void onSettingsClicked() {
+ startActivity(new Intent(MainActivity.this, SettingsActivity.class));
+ }
+
+    @Override
+    public boolean onCreateOptionsMenu(Menu menu) {
+        // Populate the action bar with the gallery / camera / settings actions.
+        getMenuInflater().inflate(R.menu.menu_action_options, menu);
+        return true;
+    }
+
+    @Override
+    public boolean onPrepareOptionsMenu(Menu menu) {
+        // Gallery and camera actions only make sense once a model is loaded.
+        // (@Override added for consistency with the other Activity overrides.)
+        boolean isLoaded = predictor.isLoaded();
+        menu.findItem(R.id.open_gallery).setEnabled(isLoaded);
+        menu.findItem(R.id.take_photo).setEnabled(isLoaded);
+        return super.onPrepareOptionsMenu(menu);
+    }
+
+ @Override
+ public boolean onOptionsItemSelected(MenuItem item) {
+ switch (item.getItemId()) {
+ case android.R.id.home:
+ finish();
+ break;
+ case R.id.open_gallery:
+ if (requestAllPermissions()) {
+ openGallery();
+ }
+ break;
+ case R.id.take_photo:
+ if (requestAllPermissions()) {
+ takePhoto();
+ }
+ break;
+ case R.id.settings:
+ if (requestAllPermissions()) {
+ // Make sure we have SDCard r&w permissions to load model from SDCard
+ onSettingsClicked();
+ }
+ break;
+ }
+ return super.onOptionsItemSelected(item);
+ }
+
+    @Override
+    public void onRequestPermissionsResult(int requestCode, @NonNull String[] permissions,
+                                           @NonNull int[] grantResults) {
+        super.onRequestPermissionsResult(requestCode, permissions, grantResults);
+        // grantResults is EMPTY when the permission request is cancelled (per the
+        // Android docs), so guard the length before indexing — the original code
+        // crashed with ArrayIndexOutOfBoundsException in that case.
+        boolean granted = grantResults.length >= 2
+                && grantResults[0] == PackageManager.PERMISSION_GRANTED
+                && grantResults[1] == PackageManager.PERMISSION_GRANTED;
+        if (!granted) {
+            Toast.makeText(this, "Permission Denied", Toast.LENGTH_SHORT).show();
+        }
+    }
+
+    private boolean requestAllPermissions() {
+        // Returns true when both storage and camera permissions are already held;
+        // otherwise requests them and returns false so the caller retries later.
+        boolean hasStorage = ContextCompat.checkSelfPermission(this,
+                Manifest.permission.WRITE_EXTERNAL_STORAGE) == PackageManager.PERMISSION_GRANTED;
+        boolean hasCamera = ContextCompat.checkSelfPermission(this,
+                Manifest.permission.CAMERA) == PackageManager.PERMISSION_GRANTED;
+        if (hasStorage && hasCamera) {
+            return true;
+        }
+        ActivityCompat.requestPermissions(this,
+                new String[]{Manifest.permission.WRITE_EXTERNAL_STORAGE, Manifest.permission.CAMERA},
+                0);
+        return false;
+    }
+
+    private void openGallery() {
+        // Launch the system image picker; the result arrives in onActivityResult.
+        Intent pickIntent = new Intent(Intent.ACTION_PICK, null);
+        pickIntent.setDataAndType(MediaStore.Images.Media.EXTERNAL_CONTENT_URI, "image/*");
+        startActivityForResult(pickIntent, OPEN_GALLERY_REQUEST_CODE);
+    }
+
+    private void takePhoto() {
+        // Launch the system camera if one is available; the captured thumbnail
+        // arrives in onActivityResult.
+        Intent captureIntent = new Intent(MediaStore.ACTION_IMAGE_CAPTURE);
+        if (captureIntent.resolveActivity(getPackageManager()) == null) {
+            return;
+        }
+        startActivityForResult(captureIntent, TAKE_PHOTO_REQUEST_CODE);
+    }
+
+    @Override
+    protected void onActivityResult(int requestCode, int resultCode, Intent data) {
+        super.onActivityResult(requestCode, resultCode, data);
+        if (resultCode != RESULT_OK || data == null) {
+            return;
+        }
+        switch (requestCode) {
+            case OPEN_GALLERY_REQUEST_CODE:
+                try {
+                    // Decode the picked image straight from its content URI.
+                    // The original also ran a deprecated managedQuery() whose Cursor
+                    // was never used nor closed (a resource leak) — removed.
+                    ContentResolver resolver = getContentResolver();
+                    Bitmap image = MediaStore.Images.Media.getBitmap(resolver, data.getData());
+                    onImageChanged(image);
+                } catch (IOException e) {
+                    Log.e(TAG, e.toString());
+                }
+                break;
+            case TAKE_PHOTO_REQUEST_CODE:
+                // The camera app returns a thumbnail Bitmap in the "data" extra.
+                Bundle extras = data.getExtras();
+                Bitmap photo = (Bitmap) extras.get("data");
+                onImageChanged(photo);
+                break;
+            default:
+                break;
+        }
+    }
+
+    @Override
+    protected void onDestroy() {
+        // Teardown order matters: release the native model first, then stop the
+        // worker thread that drives it, then let the framework tear the Activity down.
+        if (predictor != null) {
+            predictor.releaseModel();
+        }
+        worker.quit();
+        super.onDestroy();
+    }
+}
diff --git a/deploy/android_demo/app/src/main/java/com/baidu/paddle/lite/demo/ocr/OCRPredictorNative.java b/deploy/android_demo/app/src/main/java/com/baidu/paddle/lite/demo/ocr/OCRPredictorNative.java
new file mode 100644
index 0000000000000000000000000000000000000000..103d5d37aec3ddc026d48a202df17b140e3e4533
--- /dev/null
+++ b/deploy/android_demo/app/src/main/java/com/baidu/paddle/lite/demo/ocr/OCRPredictorNative.java
@@ -0,0 +1,100 @@
+package com.baidu.paddle.lite.demo.ocr;
+
+import android.graphics.Bitmap;
+import android.util.Log;
+
+import java.util.ArrayList;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+public class OCRPredictorNative {
+
+ private static final AtomicBoolean isSOLoaded = new AtomicBoolean();
+
+    public static void loadLibrary() throws RuntimeException {
+        // Load the JNI library at most once per process. The CAS alone is a
+        // sufficient guard (it only succeeds for the first caller), so the
+        // original's extra get() fast-path check is folded into it.
+        if (!isSOLoaded.compareAndSet(false, true)) {
+            return;
+        }
+        try {
+            System.loadLibrary("Native");
+        } catch (Throwable e) {
+            throw new RuntimeException(
+                    "Load libNative.so failed, please check it exists in apk file.", e);
+        }
+    }
+
+ private Config config;
+
+ private long nativePointer = 0;
+
+    public OCRPredictorNative(Config config) {
+        this.config = config;
+        // The JNI library must be in place before any native entry point is touched.
+        loadLibrary();
+        nativePointer = init(config.detModelFilename, config.recModelFilename,
+                config.cpuThreadNum, config.cpuPower);
+        Log.i("OCRPredictorNative", "load success " + nativePointer);
+    }
+
+    public void release() {
+        // Destroy the native predictor FIRST, then clear the handle. The original
+        // zeroed nativePointer before calling destory(), which always passed 0 to
+        // the native side and leaked the underlying native object.
+        if (nativePointer != 0) {
+            destory(nativePointer);
+            nativePointer = 0;
+        }
+    }
+
+ public ArrayList
+#include