# flake8: noqa
import os
import time

import cv2
import numpy as np
import torch
import yaml
from loguru import logger
from PIL import Image

os.environ['NO_ALBUMENTATIONS_UPDATE'] = '1'  # Disable albumentations' update check

try:
    import torchtext

    if torchtext.__version__ >= '0.18.0':
        torchtext.disable_torchtext_deprecation_warning()
except ImportError:
    pass

from magic_pdf.config.constants import *
from magic_pdf.model.model_list import AtomicModel
from magic_pdf.model.sub_modules.model_init import AtomModelSingleton
from magic_pdf.model.sub_modules.model_utils import (
    clean_vram, crop_img, get_res_list_from_layout_res)
from magic_pdf.model.sub_modules.ocr.paddleocr.ocr_utils import (
    get_adjusted_mfdetrec_res, get_ocr_result_list)


class CustomPEKModel:
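    """Single-page analysis model built from the PDF-Extract-Kit sub-models.

    Wraps layout detection, formula detection/recognition (MFD/MFR), OCR and
    table recognition behind one interface: ``__init__`` loads the configured
    sub-models and ``__call__`` runs the enabled stages on a page image.
    """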

    def __init__(self, ocr: bool = False, show_log: bool = False, **kwargs):
        """
        ======== model init ========
        """
        # Absolute path of the current file (pdf_extract_kit.py)
        current_file_path = os.path.abspath(__file__)
        # Directory containing the current file (model)
        current_dir = os.path.dirname(current_file_path)
        # Parent directory (magic_pdf)
        root_dir = os.path.dirname(current_dir)
        # model_config directory
        model_config_dir = os.path.join(root_dir, 'resources', 'model_config')
        # Full path to the model_configs.yaml file
        config_path = os.path.join(model_config_dir, 'model_configs.yaml')
        with open(config_path, 'r', encoding='utf-8') as f:
            self.configs = yaml.load(f, Loader=yaml.FullLoader)
        # Initialize parsing configuration

        # layout config
        self.layout_config = kwargs.get('layout_config')
        self.layout_model_name = self.layout_config.get(
            'model', MODEL_NAME.DocLayout_YOLO
        )

        # formula config
        self.formula_config = kwargs.get('formula_config')
        self.mfd_model_name = self.formula_config.get(
            'mfd_model', MODEL_NAME.YOLO_V8_MFD
        )
        self.mfr_model_name = self.formula_config.get(
            'mfr_model', MODEL_NAME.UniMerNet_v2_Small
        )
        self.apply_formula = self.formula_config.get('enable', True)

        # table config
        self.table_config = kwargs.get('table_config')
        self.apply_table = self.table_config.get('enable', False)
        self.table_max_time = self.table_config.get('max_time', TABLE_MAX_TIME_VALUE)
        self.table_model_name = self.table_config.get('model', MODEL_NAME.RAPID_TABLE)

        # ocr config
        self.apply_ocr = ocr
        self.lang = kwargs.get('lang', None)

        logger.info(
            'DocAnalysis init, this may take some time, layout_model: {}, apply_formula: {}, apply_ocr: {}, '
            'apply_table: {}, table_model: {}, lang: {}'.format(
                self.layout_model_name,
                self.apply_formula,
                self.apply_ocr,
                self.apply_table,
                self.table_model_name,
                self.lang,
            )
        )
        # Initialize the parsing pipeline
        self.device = kwargs.get('device', 'cpu')
        logger.info('using device: {}'.format(self.device))
        models_dir = kwargs.get(
            'models_dir', os.path.join(root_dir, 'resources', 'models')
        )
        logger.info('using models_dir: {}'.format(models_dir))

        atom_model_manager = AtomModelSingleton()

        # Initialize formula recognition
        if self.apply_formula:
            # Initialize the formula detection (MFD) model
            self.mfd_model = atom_model_manager.get_atom_model(
                atom_model_name=AtomicModel.MFD,
                mfd_weights=str(
                    os.path.join(
                        models_dir, self.configs['weights'][self.mfd_model_name]
                    )
                ),
                device=self.device,
            )

            # Initialize the formula recognition (MFR) model
            mfr_weight_dir = str(
                os.path.join(models_dir, self.configs['weights'][self.mfr_model_name])
            )
            mfr_cfg_path = str(os.path.join(model_config_dir, 'UniMERNet', 'demo.yaml'))
            self.mfr_model = atom_model_manager.get_atom_model(
                atom_model_name=AtomicModel.MFR,
                mfr_weight_dir=mfr_weight_dir,
                mfr_cfg_path=mfr_cfg_path,
                device=self.device,
            )

        # Initialize the layout model
        if self.layout_model_name == MODEL_NAME.LAYOUTLMv3:
            self.layout_model = atom_model_manager.get_atom_model(
                atom_model_name=AtomicModel.Layout,
                layout_model_name=MODEL_NAME.LAYOUTLMv3,
                layout_weights=str(
                    os.path.join(
                        models_dir, self.configs['weights'][self.layout_model_name]
                    )
                ),
                layout_config_file=str(
                    os.path.join(
                        model_config_dir, 'layoutlmv3', 'layoutlmv3_base_inference.yaml'
                    )
                ),
                device=self.device,
            )
        elif self.layout_model_name == MODEL_NAME.DocLayout_YOLO:
            self.layout_model = atom_model_manager.get_atom_model(
                atom_model_name=AtomicModel.Layout,
                layout_model_name=MODEL_NAME.DocLayout_YOLO,
                doclayout_yolo_weights=str(
                    os.path.join(
                        models_dir, self.configs['weights'][self.layout_model_name]
                    )
                ),
                device=self.device,
            )
        # Initialize OCR
        self.ocr_model = atom_model_manager.get_atom_model(
            atom_model_name=AtomicModel.OCR,
            ocr_show_log=show_log,
            det_db_box_thresh=0.3,
            lang=self.lang
        )
        # init table model
        if self.apply_table:
            table_model_dir = self.configs['weights'][self.table_model_name]
            self.table_model = atom_model_manager.get_atom_model(
                atom_model_name=AtomicModel.Table,
                table_model_name=self.table_model_name,
                table_model_path=str(os.path.join(models_dir, table_model_dir)),
                table_max_time=self.table_max_time,
                device=self.device,
            )

        logger.info('DocAnalysis init done!')

    def __call__(self, image):
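        """Analyze one page image and return the detected regions.

        `image` is expected to be an RGB numpy array of the rendered page; the
        return value is `layout_res`, the list of region dicts produced by layout
        detection, extended with formula, OCR and table results for the enabled
        stages.
        """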

        pil_img = Image.fromarray(image)
        width, height = pil_img.size
        # logger.info(f'width: {width}, height: {height}')

        # Layout detection
        layout_start = time.time()
        layout_res = []
        if self.layout_model_name == MODEL_NAME.LAYOUTLMv3:
            # layoutlmv3
            layout_res = self.layout_model(image, ignore_catids=[])
        elif self.layout_model_name == MODEL_NAME.DocLayout_YOLO:
            # doclayout_yolo
            if height > width:
                # Portrait pages: pad the page horizontally via crop_img before detection,
                # then map the predicted polygons back to the original page coordinates.
                input_res = {"poly": [0, 0, width, 0, width, height, 0, height]}
                new_image, useful_list = crop_img(input_res, pil_img, crop_paste_x=width // 2, crop_paste_y=0)
                paste_x, paste_y, xmin, ymin, xmax, ymax, new_width, new_height = useful_list
                layout_res = self.layout_model.predict(new_image)
                for res in layout_res:
                    p1, p2, p3, p4, p5, p6, p7, p8 = res['poly']
                    p1 = p1 - paste_x + xmin
                    p2 = p2 - paste_y + ymin
                    p3 = p3 - paste_x + xmin
                    p4 = p4 - paste_y + ymin
                    p5 = p5 - paste_x + xmin
                    p6 = p6 - paste_y + ymin
                    p7 = p7 - paste_x + xmin
                    p8 = p8 - paste_y + ymin
                    res['poly'] = [p1, p2, p3, p4, p5, p6, p7, p8]
            else:
                layout_res = self.layout_model.predict(image)

        layout_cost = round(time.time() - layout_start, 2)
        logger.info(f'layout detection time: {layout_cost}')

        if self.apply_formula:
            # Formula detection
            mfd_start = time.time()
            mfd_res = self.mfd_model.predict(image)
            logger.info(f'mfd time: {round(time.time() - mfd_start, 2)}')

            # Formula recognition
            mfr_start = time.time()
            formula_list = self.mfr_model.predict(mfd_res, image)
            layout_res.extend(formula_list)
            mfr_cost = round(time.time() - mfr_start, 2)
            logger.info(f'formula nums: {len(formula_list)}, mfr time: {mfr_cost}')

        # Clean up VRAM
        clean_vram(self.device, vram_threshold=8)

        # Get the OCR, table and formula regions from layout_res
        ocr_res_list, table_res_list, single_page_mfdetrec_res = (
            get_res_list_from_layout_res(layout_res)
        )

        # OCR recognition
        ocr_start = time.time()
        # Process each area that requires OCR processing
        for res in ocr_res_list:
            new_image, useful_list = crop_img(res, pil_img, crop_paste_x=50, crop_paste_y=50)
            adjusted_mfdetrec_res = get_adjusted_mfdetrec_res(single_page_mfdetrec_res, useful_list)

            # OCR recognition
            new_image = cv2.cvtColor(np.asarray(new_image), cv2.COLOR_RGB2BGR)

            if self.apply_ocr:
                ocr_res = self.ocr_model.ocr(new_image, mfd_res=adjusted_mfdetrec_res)[0]
            else:
                ocr_res = self.ocr_model.ocr(new_image, mfd_res=adjusted_mfdetrec_res, rec=False)[0]

            # Integrate the OCR results
            if ocr_res:
                ocr_result_list = get_ocr_result_list(ocr_res, useful_list)
                layout_res.extend(ocr_result_list)

        ocr_cost = round(time.time() - ocr_start, 2)
        if self.apply_ocr:
            logger.info(f"ocr time: {ocr_cost}")
        else:
            logger.info(f"det time: {ocr_cost}")

        # Table recognition
        if self.apply_table:
            table_start = time.time()
            for res in table_res_list:
                new_image, _ = crop_img(res, pil_img)
                single_table_start_time = time.time()
                html_code = None
                if self.table_model_name == MODEL_NAME.STRUCT_EQTABLE:
                    with torch.no_grad():
                        table_result = self.table_model.predict(new_image, 'html')
                        if len(table_result) > 0:
                            html_code = table_result[0]
                elif self.table_model_name == MODEL_NAME.TABLE_MASTER:
                    html_code = self.table_model.img2html(new_image)
                elif self.table_model_name == MODEL_NAME.RAPID_TABLE:
                    html_code, table_cell_bboxes, elapse = self.table_model.predict(
                        new_image
                    )
                run_time = time.time() - single_table_start_time
                if run_time > self.table_max_time:
                    logger.warning(
                        f'table recognition processing exceeds max time {self.table_max_time}s'
                    )
                # Check whether a valid result was returned
                if html_code:
                    expected_ending = html_code.strip().endswith(
                        '</html>'
                    ) or html_code.strip().endswith('</table>')
                    if expected_ending:
                        res['html'] = html_code
                    else:
                        logger.warning(
                            'table recognition failed: expected HTML table end tag not found'
                        )
                else:
                    logger.warning(
                        'table recognition failed: no HTML result returned'
                    )
            logger.info(f'table time: {round(time.time() - table_start, 2)}')

        return layout_res
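

# --- Minimal usage sketch (illustrative, not part of the upstream module) ---
# Assumes the weights referenced in model_configs.yaml have been downloaded to
# resources/models (or a custom `models_dir` is passed). The config dicts below
# simply mirror the keys read in __init__; 'page.png' is a placeholder path for
# a rendered PDF page image.
if __name__ == '__main__':
    model = CustomPEKModel(
        ocr=True,
        show_log=False,
        layout_config={'model': MODEL_NAME.DocLayout_YOLO},
        formula_config={'enable': True},
        table_config={'enable': False},
        device='cpu',
    )
    # __call__ expects an RGB numpy array of a single page
    page_bgr = cv2.imread('page.png')
    page_rgb = cv2.cvtColor(page_bgr, cv2.COLOR_BGR2RGB)
    results = model(page_rgb)
    logger.info(f'detected {len(results)} regions')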