"official/utils/logs/hooks_test.py" did not exist on "adfd5a3aca41638aa9fb297c5095f33d64446d8f"
pdf_extract_kit.py 18.2 KB
Newer Older
1
from loguru import logger
import os
import time

from magic_pdf.libs.Constants import *
from magic_pdf.model.model_list import AtomicModel

os.environ['NO_ALBUMENTATIONS_UPDATE'] = '1'  # disable albumentations update check
try:
    import cv2
    import yaml
    import argparse
    import numpy as np
    import torch
    import torchtext

    if torchtext.__version__ >= "0.18.0":
        torchtext.disable_torchtext_deprecation_warning()
    from PIL import Image
    from torchvision import transforms
    from torch.utils.data import Dataset, DataLoader
    from ultralytics import YOLO
    from unimernet.common.config import Config
    import unimernet.tasks as tasks
    from unimernet.processors import load_processor
except ImportError as e:
    logger.exception(e)
    logger.error(
        'Required dependency not installed, please install by \n'
        '"pip install magic-pdf[full] --extra-index-url https://myhloli.github.io/wheels/"')
    exit(1)

from magic_pdf.model.pek_sub_modules.layoutlmv3.model_init import Layoutlmv3_Predictor
from magic_pdf.model.pek_sub_modules.post_process import get_croped_image, latex_rm_whitespace
from magic_pdf.model.pek_sub_modules.self_modify import ModifiedPaddleOCR
from magic_pdf.model.pek_sub_modules.structeqtable.StructTableModel import StructTableModel
from magic_pdf.model.ppTableModel import ppTableModel


def table_model_init(table_model_type, model_path, max_time, _device_='cpu'):
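    """Build the table-recognition backend.

    STRUCT_EQTABLE selects StructTableModel (emits LaTeX); any other value falls
    back to ppTableModel (emits HTML). Output handling lives in CustomPEKModel.__call__.
    """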
    if table_model_type == STRUCT_EQTABLE:
        table_model = StructTableModel(model_path, max_time=max_time, device=_device_)
    else:
        config = {
            "model_dir": model_path,
            "device": _device_
        }
        table_model = ppTableModel(config)
    return table_model


def mfd_model_init(weight):
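    """Load the YOLO-based formula detection (MFD) model from the given weights."""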
    mfd_model = YOLO(weight)
    return mfd_model


def mfr_model_init(weight_dir, cfg_path, _device_='cpu'):
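    """Build the UniMERNet formula recognition (MFR) model.

    Returns [model, transform], where transform wraps the vis_processor declared
    in the UniMERNet config at cfg_path.
    """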
    args = argparse.Namespace(cfg_path=cfg_path, options=None)
    cfg = Config(args)
    cfg.config.model.pretrained = os.path.join(weight_dir, "pytorch_model.pth")
    cfg.config.model.model_config.model_name = weight_dir
    cfg.config.model.tokenizer_config.path = weight_dir
    task = tasks.setup_task(cfg)
    model = task.build_model(cfg)
    model = model.to(_device_)
    vis_processor = load_processor('formula_image_eval', cfg.config.datasets.formula_rec_eval.vis_processor.eval)
    mfr_transform = transforms.Compose([vis_processor, ])
    return [model, mfr_transform]


def layout_model_init(weight, config_file, device):
    model = Layoutlmv3_Predictor(weight, config_file, device)
    return model


def ocr_model_init(show_log: bool = False, det_db_box_thresh=0.3, lang=None):
    if lang is not None:
        model = ModifiedPaddleOCR(show_log=show_log, det_db_box_thresh=det_db_box_thresh, lang=lang)
    else:
        model = ModifiedPaddleOCR(show_log=show_log, det_db_box_thresh=det_db_box_thresh)
    return model


class MathDataset(Dataset):
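    """Dataset over formula crops (file paths or PIL images) for batched MFR inference."""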
    def __init__(self, image_paths, transform=None):
        self.image_paths = image_paths
        self.transform = transform

    def __len__(self):
        return len(self.image_paths)

    def __getitem__(self, idx):
        # if not pil image, then convert to pil image
        if isinstance(self.image_paths[idx], str):
            raw_image = Image.open(self.image_paths[idx])
        else:
            raw_image = self.image_paths[idx]
        if self.transform:
            image = self.transform(raw_image)
            return image
        return raw_image  # fall back to the untransformed image when no transform is set


class AtomModelSingleton:
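    """Lazily builds and caches one model instance per atom_model_name.

    Note: the cache is keyed only by name, so later calls with different kwargs
    still return the first-built instance.
    """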
    _instance = None
    _models = {}

    def __new__(cls, *args, **kwargs):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def get_atom_model(self, atom_model_name: str, **kwargs):
        if atom_model_name not in self._models:
            self._models[atom_model_name] = atom_model_init(model_name=atom_model_name, **kwargs)
        return self._models[atom_model_name]


def atom_model_init(model_name: str, **kwargs):

    if model_name == AtomicModel.Layout:
        atom_model = layout_model_init(
            kwargs.get("layout_weights"),
            kwargs.get("layout_config_file"),
            kwargs.get("device")
        )
    elif model_name == AtomicModel.MFD:
        atom_model = mfd_model_init(
            kwargs.get("mfd_weights")
        )
    elif model_name == AtomicModel.MFR:
        atom_model = mfr_model_init(
            kwargs.get("mfr_weight_dir"),
            kwargs.get("mfr_cfg_path"),
            kwargs.get("device")
        )
    elif model_name == AtomicModel.OCR:
        atom_model = ocr_model_init(
            kwargs.get("ocr_show_log"),
            kwargs.get("det_db_box_thresh"),
            kwargs.get("lang")
        )
    elif model_name == AtomicModel.Table:
        atom_model = table_model_init(
            kwargs.get("table_model_type"),
            kwargs.get("table_model_path"),
            kwargs.get("table_max_time"),
            kwargs.get("device")
        )
    else:
        logger.error("model name not allow")
        exit(1)

    return atom_model
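
# Illustrative usage (a sketch; argument values here are hypothetical):
#   ocr = AtomModelSingleton().get_atom_model(
#       atom_model_name=AtomicModel.OCR,
#       ocr_show_log=False, det_db_box_thresh=0.3, lang=None)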


class CustomPEKModel:
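    """Document analysis pipeline of PDF-Extract-Kit.

    Bundles layout detection (LayoutLMv3), optional formula detection/recognition
    (YOLO MFD + UniMERNet MFR), optional OCR (ModifiedPaddleOCR) and optional table
    recognition, all configured via model_configs.yaml plus constructor kwargs.
    """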

    def __init__(self, ocr: bool = False, show_log: bool = False, **kwargs):
        """
        ======== model init ========
        """
        # absolute path of this file (pdf_extract_kit.py)
        current_file_path = os.path.abspath(__file__)
        # directory containing this file (model)
        current_dir = os.path.dirname(current_file_path)
        # parent directory (magic_pdf)
        root_dir = os.path.dirname(current_dir)
        # model_config directory
        model_config_dir = os.path.join(root_dir, 'resources', 'model_config')
        # full path to the model_configs.yaml file
        config_path = os.path.join(model_config_dir, 'model_configs.yaml')
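        # Keys read from model_configs.yaml below (shape inferred from this file):
        #   config:  device, layout, formula, table_config {model, is_table_recog_enable, max_time}
        #   weights: layout, mfd, mfr, plus one entry per table model type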
        with open(config_path, "r", encoding='utf-8') as f:
            self.configs = yaml.load(f, Loader=yaml.FullLoader)
        # init parsing configuration
        self.apply_layout = kwargs.get("apply_layout", self.configs["config"]["layout"])
        self.apply_formula = kwargs.get("apply_formula", self.configs["config"]["formula"])
        # table config
        self.table_config = kwargs.get("table_config", self.configs["config"]["table_config"])
        self.apply_table = self.table_config.get("is_table_recog_enable", False)
        self.table_max_time = self.table_config.get("max_time", TABLE_MAX_TIME_VALUE)
        self.table_model_type = self.table_config.get("model", TABLE_MASTER)
        self.apply_ocr = ocr
        self.lang = kwargs.get("lang", None)
        logger.info(
            "DocAnalysis init, this may take some time. apply_layout: {}, apply_formula: {}, apply_ocr: {}, apply_table: {}, lang: {}".format(
                self.apply_layout, self.apply_formula, self.apply_ocr, self.apply_table, self.lang
            )
        )
        assert self.apply_layout, "DocAnalysis must contain layout model."
        # init parsing scheme
        self.device = kwargs.get("device", self.configs["config"]["device"])
        logger.info("using device: {}".format(self.device))
        models_dir = kwargs.get("models_dir", os.path.join(root_dir, "resources", "models"))
        logger.info("using models_dir: {}".format(models_dir))

        atom_model_manager = AtomModelSingleton()

        # init formula recognition
        if self.apply_formula:
            # init formula detection model (MFD)
            # self.mfd_model = mfd_model_init(str(os.path.join(models_dir, self.configs["weights"]["mfd"])))
            self.mfd_model = atom_model_manager.get_atom_model(
                atom_model_name=AtomicModel.MFD,
                mfd_weights=str(os.path.join(models_dir, self.configs["weights"]["mfd"]))
            )
            # init formula recognition model (MFR)
            mfr_weight_dir = str(os.path.join(models_dir, self.configs["weights"]["mfr"]))
            mfr_cfg_path = str(os.path.join(model_config_dir, "UniMERNet", "demo.yaml"))
            # self.mfr_model, mfr_vis_processors = mfr_model_init(mfr_weight_dir, mfr_cfg_path, _device_=self.device)
            # self.mfr_transform = transforms.Compose([mfr_vis_processors, ])
            self.mfr_model, self.mfr_transform = atom_model_manager.get_atom_model(
                atom_model_name=AtomicModel.MFR,
                mfr_weight_dir=mfr_weight_dir,
                mfr_cfg_path=mfr_cfg_path,
                device=self.device
            )

        # init layout model
        # self.layout_model = Layoutlmv3_Predictor(
        #     str(os.path.join(models_dir, self.configs['weights']['layout'])),
        #     str(os.path.join(model_config_dir, "layoutlmv3", "layoutlmv3_base_inference.yaml")),
        #     device=self.device
        # )
        self.layout_model = atom_model_manager.get_atom_model(
            atom_model_name=AtomicModel.Layout,
            layout_weights=str(os.path.join(models_dir, self.configs['weights']['layout'])),
            layout_config_file=str(os.path.join(model_config_dir, "layoutlmv3", "layoutlmv3_base_inference.yaml")),
            device=self.device
        )
        # init OCR
        if self.apply_ocr:

            # self.ocr_model = ModifiedPaddleOCR(show_log=show_log, det_db_box_thresh=0.3)
            self.ocr_model = atom_model_manager.get_atom_model(
                atom_model_name=AtomicModel.OCR,
                ocr_show_log=show_log,
                det_db_box_thresh=0.3,
                lang=self.lang
            )
        # init table model
        if self.apply_table:
            table_model_dir = self.configs["weights"][self.table_model_type]
            # self.table_model = table_model_init(self.table_model_type, str(os.path.join(models_dir, table_model_dir)),
            #                                     max_time=self.table_max_time, _device_=self.device)
            self.table_model = atom_model_manager.get_atom_model(
                atom_model_name=AtomicModel.Table,
                table_model_type=self.table_model_type,
                table_model_path=str(os.path.join(models_dir, table_model_dir)),
                table_max_time=self.table_max_time,
                device=self.device
            )

        logger.info('DocAnalysis init done!')

    def __call__(self, image):
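        """Analyse a single page image (expected as an RGB numpy array).

        Runs layout detection, then optionally formula detection + recognition,
        OCR, and table recognition. Returns layout_res: a list of dicts with
        'category_id', 'poly' (8-value quad) and 'score', plus 'latex', 'text'
        or 'html' where applicable.
        """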

        latex_filling_list = []
        mf_image_list = []

        # layout detection
        layout_start = time.time()
        layout_res = self.layout_model(image, ignore_catids=[])
        layout_cost = round(time.time() - layout_start, 2)
        logger.info(f"layout detection cost: {layout_cost}")

        if self.apply_formula:
            # formula detection
            mfd_res = self.mfd_model.predict(image, imgsz=1888, conf=0.25, iou=0.45, verbose=True)[0]
            for xyxy, conf, cla in zip(mfd_res.boxes.xyxy.cpu(), mfd_res.boxes.conf.cpu(), mfd_res.boxes.cls.cpu()):
                xmin, ymin, xmax, ymax = [int(p.item()) for p in xyxy]
                new_item = {
                    'category_id': 13 + int(cla.item()),
                    'poly': [xmin, ymin, xmax, ymin, xmax, ymax, xmin, ymax],
                    'score': round(float(conf.item()), 2),
                    'latex': '',
                }
                layout_res.append(new_item)
                latex_filling_list.append(new_item)
                bbox_img = get_croped_image(Image.fromarray(image), [xmin, ymin, xmax, ymax])
                mf_image_list.append(bbox_img)

            # formula recognition
            mfr_start = time.time()
            dataset = MathDataset(mf_image_list, transform=self.mfr_transform)
            dataloader = DataLoader(dataset, batch_size=64, num_workers=0)
            mfr_res = []
            for mf_img in dataloader:
                mf_img = mf_img.to(self.device)
                output = self.mfr_model.generate({'image': mf_img})
                mfr_res.extend(output['pred_str'])
            for res, latex in zip(latex_filling_list, mfr_res):
                res['latex'] = latex_rm_whitespace(latex)
            mfr_cost = round(time.time() - mfr_start, 2)
            logger.info(f"formula nums: {len(mf_image_list)}, mfr time: {mfr_cost}")

        # Select regions for OCR / formula regions / table regions
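        # category_id conventions used in this file: 13/14 = formula regions appended
        # by the MFD step above, 5 = table, 15 = OCR text lines added below;
        # 0/1/2/4/6/7 are layout regions that will be OCR'd.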
        ocr_res_list = []
        table_res_list = []
        single_page_mfdetrec_res = []
        for res in layout_res:
            if int(res['category_id']) in [13, 14]:
                single_page_mfdetrec_res.append({
                    "bbox": [int(res['poly'][0]), int(res['poly'][1]),
                             int(res['poly'][4]), int(res['poly'][5])],
                })
            elif int(res['category_id']) in [0, 1, 2, 4, 6, 7]:
                ocr_res_list.append(res)
            elif int(res['category_id']) in [5]:
                table_res_list.append(res)

        #  Unified crop img logic
        def crop_img(input_res, input_pil_img, crop_paste_x=0, crop_paste_y=0):
            crop_xmin, crop_ymin = int(input_res['poly'][0]), int(input_res['poly'][1])
            crop_xmax, crop_ymax = int(input_res['poly'][4]), int(input_res['poly'][5])
            # Pad the crop with a white border of crop_paste_x / crop_paste_y on each side
            crop_new_width = crop_xmax - crop_xmin + crop_paste_x * 2
            crop_new_height = crop_ymax - crop_ymin + crop_paste_y * 2
            return_image = Image.new('RGB', (crop_new_width, crop_new_height), 'white')

            # Crop image
            crop_box = (crop_xmin, crop_ymin, crop_xmax, crop_ymax)
            cropped_img = input_pil_img.crop(crop_box)
            return_image.paste(cropped_img, (crop_paste_x, crop_paste_y))
            return_list = [crop_paste_x, crop_paste_y, crop_xmin, crop_ymin, crop_xmax, crop_ymax, crop_new_width, crop_new_height]
            return return_image, return_list

        pil_img = Image.fromarray(image)

        # OCR recognition
        if self.apply_ocr:
            ocr_start = time.time()
            # Process each area that requires OCR processing
            for res in ocr_res_list:
                new_image, useful_list = crop_img(res, pil_img, crop_paste_x=50, crop_paste_y=50)
                paste_x, paste_y, xmin, ymin, xmax, ymax, new_width, new_height = useful_list
                # Adjust the coordinates of the formula area
                adjusted_mfdetrec_res = []
                for mf_res in single_page_mfdetrec_res:
                    mf_xmin, mf_ymin, mf_xmax, mf_ymax = mf_res["bbox"]
                    # Adjust the coordinates of the formula area to the coordinates relative to the cropping area
                    x0 = mf_xmin - xmin + paste_x
                    y0 = mf_ymin - ymin + paste_y
                    x1 = mf_xmax - xmin + paste_x
                    y1 = mf_ymax - ymin + paste_y
                    # Filter out formula blocks that fall outside the cropped image
                    if any([x1 < 0, y1 < 0]) or any([x0 > new_width, y0 > new_height]):
                        continue
                    else:
                        adjusted_mfdetrec_res.append({
                            "bbox": [x0, y0, x1, y1],
                        })

                # OCR recognition
                new_image = cv2.cvtColor(np.asarray(new_image), cv2.COLOR_RGB2BGR)
                ocr_res = self.ocr_model.ocr(new_image, mfd_res=adjusted_mfdetrec_res)[0]

                # Integrate OCR results back into layout_res
                if ocr_res:
                    for box_ocr_res in ocr_res:
                        p1, p2, p3, p4 = box_ocr_res[0]
                        text, score = box_ocr_res[1]

                        # Convert the coordinates back to the original coordinate system
                        p1 = [p1[0] - paste_x + xmin, p1[1] - paste_y + ymin]
                        p2 = [p2[0] - paste_x + xmin, p2[1] - paste_y + ymin]
                        p3 = [p3[0] - paste_x + xmin, p3[1] - paste_y + ymin]
                        p4 = [p4[0] - paste_x + xmin, p4[1] - paste_y + ymin]

                        layout_res.append({
                            'category_id': 15,
                            'poly': p1 + p2 + p3 + p4,
                            'score': round(score, 2),
                            'text': text,
                        })

            ocr_cost = round(time.time() - ocr_start, 2)
            logger.info(f"ocr cost: {ocr_cost}")

        # table recognition
        if self.apply_table:
            table_start = time.time()
            for res in table_res_list:
                new_image, _ = crop_img(res, pil_img)
                single_table_start_time = time.time()
                logger.info("------------------table recognition processing begins-----------------")
                latex_code = None
                html_code = None
                if self.table_model_type == STRUCT_EQTABLE:
                    with torch.no_grad():
                        latex_code = self.table_model.image2latex(new_image)[0]
                else:
                    html_code = self.table_model.img2html(new_image)

                run_time = time.time() - single_table_start_time
                logger.info(f"------------table recognition processing ends within {run_time}s-----")
                if run_time > self.table_max_time:
                    logger.warning(f"------------table recognition processing exceeds max time {self.table_max_time}s----------")
                # check whether the recognition result looks valid

                if latex_code:
                    expected_ending = latex_code.strip().endswith('end{tabular}') or latex_code.strip().endswith(
                        'end{table}')
                    if expected_ending:
                        res["latex"] = latex_code
                    else:
                        logger.warning(f"------------table recognition processing fails----------")
                elif html_code:
                    res["html"] = html_code
                else:
                    logger.warning(f"------------table recognition processing fails----------")
            table_cost = round(time.time() - table_start, 2)
            logger.info(f"table cost: {table_cost}")

        return layout_res