Unverified Commit 5ce3af84 authored by Wei-JL, committed by GitHub

Merge branch 'PaddlePaddle:dygraph' into dygraph

parents d9f64d24 e903fb80
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import json
...@@ -31,7 +31,9 @@ def gen_det_label(root_path, input_dir, out_label):
        for label_file in os.listdir(input_dir):
            img_path = root_path + label_file[3:-4] + ".jpg"
            label = []
            # utf-8-sig transparently strips a leading UTF-8 BOM if one is present
            with open(
                    os.path.join(input_dir, label_file), 'r',
                    encoding='utf-8-sig') as f:
                for line in f.readlines():
                    tmp = line.strip("\n\r").replace("\xef\xbb\xbf",
                                                     "").split(',')
...
...@@ -2,8 +2,8 @@ include LICENSE
include README.md
recursive-include ppocr/utils *.txt utility.py logging.py network.py
recursive-include ppocr/data *.py
recursive-include ppocr/postprocess *.py
recursive-include tools/infer *.py
recursive-include ppstructure *.py
# PaddleStructure
PaddleStructure is an OCR toolkit for complex layout analysis. It splits image-form document data into five types of regions, **text, table, title, picture and list**, and exports each table region as an Excel file.
## 1. Quick start
### 1.1 Install

**Install layoutparser**
```sh
pip3 install https://paddleocr.bj.bcebos.com/whl/layoutparser-0.0.0-py3-none-any.whl
```
**Install paddlestructure**

Install from PyPI:
```bash
pip install paddlestructure
```
Or build the wheel package yourself and install it:
```bash
python3 setup.py bdist_wheel
pip3 install dist/paddlestructure-x.x.x-py3-none-any.whl # x.x.x is the version of paddlestructure
```
### 1.2 Use
#### 1.2.1 Command-line usage
```bash
paddlestructure --image_dir=../doc/table/1.png
```
#### 1.2.2 Python script usage
```python
import os
import cv2
from PIL import Image
from paddlestructure import PaddleStructure, draw_result, save_res

table_engine = PaddleStructure(show_log=True)

save_folder = './output/table'
img_path = '../doc/table/1.png'
img = cv2.imread(img_path)
result = table_engine(img)
save_res(result, save_folder, os.path.basename(img_path).split('.')[0])

for line in result:
    print(line)

font_path = '../doc/fonts/simfang.ttf'  # font file shipped with PaddleOCR
image = Image.open(img_path).convert('RGB')
im_show = draw_result(image, result, font_path=font_path)
im_show = Image.fromarray(im_show)
im_show.save('result.jpg')
```
#### 1.2.3 Parameter description

| Parameter | Description | Default value |
| --------------- | ---------------------------------------- | ------------------------------------------- |
| output | Directory where Excel files and recognition results are saved | ./output/table |
| table_max_len | Long side of the image is resized to this length for the table structure model | 488 |
| table_model_dir | Path to the table structure inference model | None |
| table_char_type | Path to the dictionary used by the table structure model | ../ppocr/utils/dict/table_structure_dict.txt |
Most parameters are consistent with the paddleocr whl package; see the [whl package documentation](../doc/doc_en/whl_en.md).

After running, each image gets a directory of the same name under the directory specified by `output`. Each table in the image is saved as an Excel file whose name is the table's coordinates in the image.
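
These parameters can also be passed as keyword arguments when constructing the engine in Python. The sketch below is a minimal example, assuming the keyword names match the parameter table above (as they do for the paddleocr whl package):

```python
from paddlestructure import PaddleStructure

# Minimal sketch; the keyword names are assumed to match the parameter table above.
table_engine = PaddleStructure(
    output='./output/table',   # where Excel files and recognition results are saved
    table_max_len=488,         # long side of the image is resized to this length
    show_log=True)
```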
## 2. PaddleStructure Pipeline
The pipeline is as follows:
![pipeline](../doc/table/pipeline_en.jpg)
In PaddleStructure, an image is first analyzed by layoutparser, which classifies its regions into five categories: **text, title, figure, list and table**. The first four region types are passed directly to PP-OCR for text detection and recognition, while each table region is converted by Table OCR into an Excel file with the same table structure.
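
Conceptually, the dispatch works as in the sketch below. This is an illustration only, not the actual implementation in `predict_system.py`; the helper names (`layout_analysis`, `ppocr_system`, `table_system`) are placeholders.

```python
# Illustrative sketch of the PaddleStructure dispatch; the helpers are placeholders,
# not the real API of ppstructure/predict_system.py.
def process_image(img, layout_analysis, ppocr_system, table_system):
    results = []
    for region in layout_analysis(img):       # each region has a type and a bounding box
        x1, y1, x2, y2 = region['bbox']
        crop = img[y1:y2, x1:x2]
        if region['type'] == 'Table':
            res = table_system(crop)          # table structure + cell contents -> Excel
        else:                                 # Text, Title, Figure, List
            res = ppocr_system(crop)          # standard PP-OCR detection + recognition
        results.append({'type': region['type'], 'bbox': region['bbox'], 'res': res})
    return results
```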
### 2.1 LayoutParser
Layout analysis classifies document images into regions. The linked document covers using the layout analysis tool from Python scripts, extracting detection boxes of specific categories, performance metrics, and training a custom layout analysis model. For details, please refer to the [layout documentation](layout/README.md).
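
As a rough illustration of what this step looks like in code, the sketch below follows the PubLayNet example in the layout documentation; the model identifier, argument names and label map are assumptions and may differ from the exact usage described there.

```python
# Rough sketch of layout analysis with layoutparser; the model identifier, argument
# names and label map are assumptions -- see layout/README.md for the exact usage.
import cv2
import layoutparser as lp

image = cv2.imread('../doc/table/1.png')
image = image[..., ::-1]  # BGR -> RGB

model = lp.PaddleDetectionLayoutModel(
    config_path='lp://PubLayNet/ppyolov2_r50vd_dcn_365e_publaynet/config',
    threshold=0.5,
    label_map={0: "Text", 1: "Title", 2: "List", 3: "Table", 4: "Figure"},
    enforce_cpu=False)

layout = model.detect(image)  # detected regions, each with a type and coordinates
for block in layout:
    print(block.type, block.coordinates)
```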
### 2.2 Table OCR
Table OCR converts a table image into an Excel document. It covers detection and recognition of the table text as well as prediction of the table structure and cell coordinates. For details, please refer to the [table documentation](table/README.md).
## 3. Inference with the prediction engine

Use the following command to run inference with the prediction engine:
```bash
python3 table/predict_system.py --det_model_dir=path/to/det_model_dir \
    --rec_model_dir=path/to/rec_model_dir \
    --table_model_dir=path/to/table_model_dir \
    --image_dir=../doc/table/1.png \
    --rec_char_dict_path=../ppocr/utils/dict/table_dict.txt \
    --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt \
    --rec_char_type=EN \
    --det_limit_side_len=736 \
    --det_limit_type=min \
    --output ../output/table
```
After running, each image gets a directory of the same name under the directory specified by `output`. Each table in the image is saved as an Excel file whose name is the table's coordinates in the image.
## 4. Model List
|model name|description|config|model size|download|
| --- | --- | --- | --- | --- |
|en_ppocr_mobile_v2.0_table_det|Text detection in English table scene|[ch_det_mv3_db_v2.0.yml](../configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml)| 4.7M |[inference model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_det_infer.tar) |
|en_ppocr_mobile_v2.0_table_rec|Text recognition in English table scene|[rec_chinese_lite_train_v2.0.yml](../configs/rec/rec_mv3_none_bilstm_ctc.yml)|6.9M|[inference model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_rec_infer.tar) |
|en_ppocr_mobile_v2.0_table_structure|Table structure prediction for English table scenarios|[table_mv3.yml](../configs/table/table_mv3.yml)|18.6M|[inference model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar) |
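
If you prefer to script the download, the minimal sketch below uses only the Python standard library; the extraction directory layout is an assumption (the extracted folder normally matches the tar file name).

```python
# Minimal sketch: download and unpack one of the inference models listed above, then
# point table_model_dir (or --table_model_dir) at the extracted directory.
import tarfile
import urllib.request

url = ('https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/'
       'en_ppocr_mobile_v2.0_table_structure_infer.tar')
tar_path = 'en_ppocr_mobile_v2.0_table_structure_infer.tar'

urllib.request.urlretrieve(url, tar_path)
with tarfile.open(tar_path) as tar:
    tar.extractall('./inference')  # extracted folder name usually matches the tar name
```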
# PaddleStructure

PaddleStructure is an OCR toolkit for complex layout analysis. It splits image-form document data into five types of regions, **text, table, title, picture and list**, and exports each table region as an Excel file.

## 1. Quick start

### 1.1 Install

**Install layoutparser**
```sh
pip3 install https://paddleocr.bj.bcebos.com/whl/layoutparser-0.0.0-py3-none-any.whl
```

**Install paddlestructure**

Install from PyPI:
```bash
pip install paddlestructure
```

Or build the wheel package yourself and install it:
```bash
python3 setup.py bdist_wheel
pip3 install dist/paddlestructure-x.x.x-py3-none-any.whl # x.x.x is the version of paddlestructure
```

### 1.2 Using the PaddleStructure whl package

#### 1.2.1 Command-line usage
```bash
paddlestructure --image_dir=../doc/table/1.png
```

#### 1.2.2 Python script usage
```python
import os
import cv2
...@@ -61,26 +51,57 @@ for line in result:
from PIL import Image

font_path = '../doc/fonts/simfang.ttf'  # font file shipped with PaddleOCR
image = Image.open(img_path).convert('RGB')
im_show = draw_result(image, result, font_path=font_path)
im_show = Image.fromarray(im_show)
im_show.save('result.jpg')
```

#### 1.2.3 Parameter description

| Parameter | Description | Default value |
| --------------- | ---------------------------------------- | ------------------------------------------- |
| output | Directory where Excel files and recognition results are saved | ./output/table |
| table_max_len | Long side of the image is resized to this length for the table structure model | 488 |
| table_model_dir | Path to the table structure inference model | None |
| table_char_type | Path to the dictionary used by the table structure model | ../ppocr/utils/dict/table_structure_dict.txt |

Most parameters are consistent with the paddleocr whl package; see the [whl package documentation](../doc/doc_ch/whl.md).

After running, each image gets a directory of the same name under the directory specified by `output`. Each table in the image is saved as an Excel file whose name is the table's coordinates in the image.

## 2. PaddleStructure Pipeline

The pipeline is as follows:

![pipeline](../doc/table/pipeline.jpg)

In PaddleStructure, an image is first analyzed by layoutparser, which classifies its regions into five categories: **text, title, figure, list and table**. The first four region types are passed directly to PP-OCR for text detection and recognition, while each table region is converted by Table OCR into an Excel file with the same table structure.

### 2.1 LayoutParser

Layout analysis classifies document images into regions. The linked document covers using the layout analysis tool from Python scripts, extracting detection boxes of specific categories, performance metrics, and training a custom layout analysis model. For details, please refer to the [layout documentation](layout/README.md).

### 2.2 Table OCR

Table OCR converts a table image into an Excel document. It covers detection and recognition of the table text as well as prediction of the table structure and cell coordinates. For details, please refer to the [table documentation](table/README_ch.md).

## 3. Inference with the prediction engine

Use the following command to run inference with the prediction engine:
```bash
python3 table/predict_system.py --det_model_dir=path/to/det_model_dir \
    --rec_model_dir=path/to/rec_model_dir \
    --table_model_dir=path/to/table_model_dir \
    --image_dir=../doc/table/1.png \
    --rec_char_dict_path=../ppocr/utils/dict/table_dict.txt \
    --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt \
    --rec_char_type=EN \
    --det_limit_side_len=736 \
    --det_limit_type=min \
    --output ../output/table
```
After running, each image gets a directory of the same name under the directory specified by `output`. Each table in the image is saved as an Excel file whose name is the table's coordinates in the image.

## 4. Model List

|Model name|Description|Config|Model size|Download|
| --- | --- | --- | --- | --- |
|en_ppocr_mobile_v2.0_table_det|Text detection for English table scenes|[ch_det_mv3_db_v2.0.yml](../configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml)| 4.7M |[inference model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_det_infer.tar) |
|en_ppocr_mobile_v2.0_table_rec|Text recognition for English table scenes|[rec_chinese_lite_train_v2.0.yml](../configs/rec/rec_mv3_none_bilstm_ctc.yml)|6.9M|[inference model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_rec_infer.tar) |
|en_ppocr_mobile_v2.0_table_structure|Table structure prediction for English table scenes|[table_mv3.yml](../configs/table/table_mv3.yml)|18.6M|[inference model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar) |
...@@ -12,6 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from .paddlestructure import PaddleStructure, draw_result, save_res

__all__ = ['PaddleStructure', 'draw_result', 'save_res']
...@@ -24,9 +24,8 @@ import numpy as np
from pathlib import Path

from ppocr.utils.logging import get_logger
from ppstructure.predict_system import OCRSystem, save_res
from ppstructure.utility import init_args, draw_result

logger = get_logger()
from ppocr.utils.utility import check_and_read_gif, get_image_file_list
...@@ -145,4 +144,4 @@ def main():
        for item in result:
            logger.info(item['res'])
        save_res(result, save_folder, img_name)
        logger.info('result save to {}'.format(os.path.join(save_folder, img_name)))
...@@ -31,8 +31,8 @@ import layoutparser as lp
from ppocr.utils.utility import get_image_file_list, check_and_read_gif
from ppocr.utils.logging import get_logger
from tools.infer.predict_system import TextSystem
from ppstructure.table.predict_table import TableSystem, to_excel
from ppstructure.utility import parse_args, draw_result

logger = get_logger()
...
...@@ -23,14 +23,14 @@ with open('../requirements.txt', encoding="utf-8-sig") as f:

def readme():
    with open('README_ch.md', encoding="utf-8-sig") as f:
        README = f.read()
    return README


shutil.copytree('./table', './ppstructure/table')
shutil.copyfile('./predict_system.py', './ppstructure/predict_system.py')
shutil.copyfile('./utility.py', './ppstructure/utility.py')
shutil.copytree('../ppocr', './ppocr')
shutil.copytree('../tools', './tools')
shutil.copyfile('../LICENSE', './LICENSE')
...@@ -66,5 +66,5 @@ setup(
shutil.rmtree('ppocr')
shutil.rmtree('tools')
shutil.rmtree('ppstructure')
os.remove('LICENSE')
...@@ -8,7 +8,7 @@ The ocr of the table mainly contains three models

The table OCR flow chart is as follows:

![tableocr_pipeline](../../doc/table/tableocr_pipeline_en.jpg)

1. The DB model detects the coordinates of each single-line text region, which is then sent to the recognition model to obtain the recognition result.
2. The RARE model predicts the table structure and cell coordinates; the detected text lines are then matched to these cells (see the sketch below).
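
A minimal sketch of the IoU-based matching idea is shown below. This is illustrative only; the actual logic lives in `ppstructure/table/matcher.py` (whose `compute_iou` and `distance` helpers are imported by `predict_table.py`).

```python
# Illustrative sketch of matching detected text boxes to predicted table cells by IoU.
# This is not the repo's matcher implementation; it only shows the idea.
def compute_iou(box1, box2):
    # boxes are [x1, y1, x2, y2]
    xa, ya = max(box1[0], box2[0]), max(box1[1], box2[1])
    xb, yb = min(box1[2], box2[2]), min(box1[3], box2[3])
    inter = max(0, xb - xa) * max(0, yb - ya)
    area1 = (box1[2] - box1[0]) * (box1[3] - box1[1])
    area2 = (box2[2] - box2[0]) * (box2[3] - box2[1])
    return inter / float(area1 + area2 - inter + 1e-6)

def match_texts_to_cells(text_boxes, cell_boxes):
    # Assign every detected text box to the cell it overlaps most.
    matches = {i: [] for i in range(len(cell_boxes))}
    for t_idx, t_box in enumerate(text_boxes):
        best_cell = max(range(len(cell_boxes)),
                        key=lambda c: compute_iou(t_box, cell_boxes[c]))
        matches[best_cell].append(t_idx)
    return matches
```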
...@@ -19,7 +19,34 @@ The table ocr flow chart is as follows

### 2.1 Train

In this section we only introduce the training of the table structure model; for training of the [text detection](../../doc/doc_en/detection_en.md) and [text recognition](../../doc/doc_en/recognition_en.md) models, please refer to the corresponding documents.

#### Data preparation

The training data uses the public [PubTabNet](https://arxiv.org/abs/1911.10683) dataset, which can be downloaded from the official [website](https://github.com/ibm-aur-nlp/PubTabNet). PubTabNet contains about 500,000 images together with annotations in HTML format.
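
For orientation, each annotation line is a JSON record describing one table image. The sketch below only peeks at the file; the jsonl file name and field names are assumptions based on the public PubTabNet release and should be checked against the downloaded data.

```python
# Minimal sketch for inspecting PubTabNet-style annotations.
# File name and field names are assumptions; verify them against the download.
import json

with open('PubTabNet_2.0.0.jsonl', 'r', encoding='utf-8') as f:
    for i, line in enumerate(f):
        ann = json.loads(line)
        # typical fields: image file name, train/val split, and an HTML table description
        print(ann.get('filename'), ann.get('split'))
        if i >= 4:  # only look at the first few records
            break
```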
#### Start training
*If you installed the CPU version of Paddle, set the `use_gpu` field in the configuration file to false.*
```shell
# single GPU training
python3 tools/train.py -c configs/table/table_mv3.yml
# multi-GPU training
# Set the GPU ID used by the '--gpus' parameter.
python3 -m paddle.distributed.launch --gpus '0,1,2,3' tools/train.py -c configs/table/table_mv3.yml
```
In the command above, `-c` selects the `configs/table/table_mv3.yml` configuration file for training.
For a detailed explanation of the configuration file, please refer to [config](../../doc/doc_en/config_en.md).

#### Load a trained model and resume training

To load a trained model and resume training, set the `Global.checkpoints` parameter to the path of the model to be loaded.
```shell
python3 tools/train.py -c configs/table/table_mv3.yml -o Global.checkpoints=./your/trained/model
```
**Note**: The priority of `Global.checkpoints` is higher than that of `Global.pretrain_weights`, that is, when two parameters are specified at the same time, the model specified by `Global.checkpoints` will be loaded first. If the model path specified by `Global.checkpoints` is wrong, the one specified by `Global.pretrain_weights` will be loaded.
### 2.2 Eval

First cd to the PaddleOCR/ppstructure directory
...
...@@ -8,7 +8,7 @@

The flow chart is as follows:

![tableocr_pipeline](../../doc/table/tableocr_pipeline.jpg)

1. The single-line text detection model detects the coordinates of each text line in the image, which is then sent to the recognition model to obtain the recognition result.
2. The table structure and cell coordinate prediction model produces the structure of the table and the coordinates of its cells.
...@@ -17,8 +17,9 @@

## 2. Usage

### 2.1 Train

In this section we only introduce the training of the table structure model; for training of the [text detection](../../doc/doc_ch/detection.md) and [text recognition](../../doc/doc_ch/recognition.md) models, please refer to the corresponding documents.

#### Data preparation

The training data uses the public [PubTabNet](https://arxiv.org/abs/1911.10683) dataset, which can be downloaded from the official [website](https://github.com/ibm-aur-nlp/PubTabNet). PubTabNet contains about 500,000 table images together with corresponding annotations in HTML format.
...@@ -31,7 +32,7 @@ python3 tools/train.py -c configs/table/table_mv3.yml

python3 -m paddle.distributed.launch --gpus '0,1,2,3' tools/train.py -c configs/table/table_mv3.yml
```
In the command above, `-c` selects the `configs/table/table_mv3.yml` configuration file for training. For a detailed explanation of the configuration file, please refer to the [config documentation](../../doc/doc_ch/config.md).

#### Resume training from a checkpoint
...
...@@ -20,9 +20,9 @@ sys.path.append(os.path.abspath(os.path.join(__dir__, '../..')))
import cv2
import json
from tqdm import tqdm
from ppstructure.table.table_metric import TEDS
from ppstructure.table.predict_table import TableSystem
from ppstructure.utility import init_args
from ppocr.utils.logging import get_logger

logger = get_logger()
...
...@@ -22,17 +22,14 @@ os.environ["FLAGS_allocator_strategy"] = 'auto_growth'
import cv2
import numpy as np
import time

import tools.infer.utility as utility
from ppocr.data import create_operators, transform
from ppocr.postprocess import build_post_process
from ppocr.utils.logging import get_logger
from ppocr.utils.utility import get_image_file_list, check_and_read_gif
from ppstructure.utility import parse_args

logger = get_logger()
...
...@@ -30,9 +30,9 @@ import tools.infer.predict_rec as predict_rec
import tools.infer.predict_det as predict_det
from ppocr.utils.utility import get_image_file_list, check_and_read_gif
from ppocr.utils.logging import get_logger
from ppstructure.table.matcher import distance, compute_iou
from ppstructure.utility import parse_args
import ppstructure.table.predict_structure as predict_strture

logger = get_logger()
...