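"""Batched document page analysis.

Runs layout detection, optional formula detection/recognition (MFD/MFR),
OCR text detection/recognition, and optional table recognition over a
batch of page images, producing one result list per page.
"""
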
import time
import cv2
from loguru import logger
from tqdm import tqdm

from magic_pdf.config.constants import MODEL_NAME
from magic_pdf.model.sub_modules.model_init import AtomModelSingleton
from magic_pdf.model.sub_modules.model_utils import (
    clean_vram, crop_img, get_res_list_from_layout_res)
from magic_pdf.model.sub_modules.ocr.paddleocr2pytorch.ocr_utils import (
    get_adjusted_mfdetrec_res, get_ocr_result_list)

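# Per-stage base batch sizes. MFR scales its base size by `batch_ratio`;
# layout and MFD currently run at the base size (their scaled variants are
# left commented out below).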
YOLO_LAYOUT_BASE_BATCH_SIZE = 1
MFD_BASE_BATCH_SIZE = 1
MFR_BASE_BATCH_SIZE = 16


class BatchAnalyze:
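    """Run the full page-analysis pipeline over a batch of page images.

    A callable that chains layout detection, optional formula detection and
    recognition, OCR text detection and recognition, and optional table
    recognition, accumulating all results into a per-page result list.
    """
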
    def __init__(self, model_manager, batch_ratio: int, show_log, layout_model, formula_enable, table_enable):
        self.model_manager = model_manager
        self.batch_ratio = batch_ratio
        self.show_log = show_log
        self.layout_model = layout_model
        self.formula_enable = formula_enable
        self.table_enable = table_enable

    def __call__(self, images_with_extra_info: list) -> list:
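        """Analyze a batch of pages.

        `images_with_extra_info` is a list of `(np_image, ocr_enable, lang)`
        tuples, one per page. Returns one list of result dicts per page.
        """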
        if len(images_with_extra_info) == 0:
            return []
    
        images_layout_res = []
        layout_start_time = time.time()
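        # One model configuration is shared by the whole batch; it is taken
        # from the first page's (ocr_enable, lang) settings.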
        _, fst_ocr, fst_lang = images_with_extra_info[0]
        self.model = self.model_manager.get_model(
            fst_ocr, self.show_log, fst_lang, self.layout_model,
            self.formula_enable, self.table_enable
        )

        images = [image for image, _, _ in images_with_extra_info]

        if self.model.layout_model_name == MODEL_NAME.LAYOUTLMv3:
            # layoutlmv3
            for image in images:
                layout_res = self.model.layout_model(image, ignore_catids=[])
                images_layout_res.append(layout_res)
        elif self.model.layout_model_name == MODEL_NAME.DocLayout_YOLO:
            # doclayout_yolo
            layout_images = list(images)

            images_layout_res += self.model.layout_model.batch_predict(
                # layout_images, self.batch_ratio * YOLO_LAYOUT_BASE_BATCH_SIZE
                layout_images, YOLO_LAYOUT_BASE_BATCH_SIZE
            )

        # logger.info(
        #     f'layout time: {round(time.time() - layout_start_time, 2)}, image num: {len(images)}'
        # )

        if self.model.apply_formula:
            # Formula detection (MFD)
            mfd_start_time = time.time()
            images_mfd_res = self.model.mfd_model.batch_predict(
                # images, self.batch_ratio * MFD_BASE_BATCH_SIZE
                images, MFD_BASE_BATCH_SIZE
            )
            # logger.info(
            #     f'mfd time: {round(time.time() - mfd_start_time, 2)}, image num: {len(images)}'
            # )

            # Formula recognition (MFR)
            mfr_start_time = time.time()
            images_formula_list = self.model.mfr_model.batch_predict(
                images_mfd_res,
                images,
                batch_size=self.batch_ratio * MFR_BASE_BATCH_SIZE,
            )
            mfr_count = 0
            for image_index in range(len(images)):
                images_layout_res[image_index] += images_formula_list[image_index]
                mfr_count += len(images_formula_list[image_index])
            # logger.info(
            #     f'mfr time: {round(time.time() - mfr_start_time, 2)}, image num: {mfr_count}'
            # )

        # Clean up VRAM (currently disabled)
        # clean_vram(self.model.device, vram_threshold=8)

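        # Collect OCR-able regions and cropped table images from every page
        # before running the batched OCR-det and table-recognition stages below.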
        ocr_res_list_all_page = []
        table_res_list_all_page = []
        for index in range(len(images)):
            _, ocr_enable, _lang = images_with_extra_info[index]
            layout_res = images_layout_res[index]
            np_array_img = images[index]

            ocr_res_list, table_res_list, single_page_mfdetrec_res = (
                get_res_list_from_layout_res(layout_res)
            )

            ocr_res_list_all_page.append({
                'ocr_res_list': ocr_res_list,
                'lang': _lang,
                'ocr_enable': ocr_enable,
                'np_array_img': np_array_img,
                'single_page_mfdetrec_res': single_page_mfdetrec_res,
                'layout_res': layout_res,
            })

            for table_res in table_res_list:
                table_img, _ = crop_img(table_res, np_array_img)
                table_res_list_all_page.append({
                    'table_res': table_res,
                    'lang': _lang,
                    'table_img': table_img,
                })

        # Text-box detection (OCR-det)
        det_start = time.time()
        det_count = 0
        # for ocr_res_list_dict in ocr_res_list_all_page:
        for ocr_res_list_dict in tqdm(ocr_res_list_all_page, desc="OCR-det Predict"):
            # Process each region that requires OCR
            _lang = ocr_res_list_dict['lang']
            # Get an OCR detection model for this language
            atom_model_manager = AtomModelSingleton()
            ocr_model = atom_model_manager.get_atom_model(
                atom_model_name='ocr',
                ocr_show_log=False,
                det_db_box_thresh=0.3,
                lang=_lang
            )
            for res in ocr_res_list_dict['ocr_res_list']:
                new_image, useful_list = crop_img(
                    res, ocr_res_list_dict['np_array_img'], crop_paste_x=50, crop_paste_y=50
                )
                adjusted_mfdetrec_res = get_adjusted_mfdetrec_res(
                    ocr_res_list_dict['single_page_mfdetrec_res'], useful_list
                )

                # OCR-det
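                # convert the RGB page crop to the BGR order the OCR model expects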
                new_image = cv2.cvtColor(new_image, cv2.COLOR_RGB2BGR)
                ocr_res = ocr_model.ocr(
                    new_image, mfd_res=adjusted_mfdetrec_res, rec=False
                )[0]

                # Merge detected text boxes back into the page layout results
                if ocr_res:
                    ocr_result_list = get_ocr_result_list(
                        ocr_res, useful_list, ocr_res_list_dict['ocr_enable'], new_image, _lang
                    )
                    ocr_res_list_dict['layout_res'].extend(ocr_result_list)
            det_count += len(ocr_res_list_dict['ocr_res_list'])
        # logger.info(f'ocr-det time: {round(time.time()-det_start, 2)}, image num: {det_count}')


        # Table recognition
        if self.model.apply_table:
            table_start = time.time()
            table_count = 0
            # for table_res_list_dict in table_res_list_all_page:
            for table_res_dict in tqdm(table_res_list_all_page, desc="Table Predict"):
                _lang = table_res_dict['lang']
                atom_model_manager = AtomModelSingleton()
                ocr_engine = atom_model_manager.get_atom_model(
                    atom_model_name='ocr',
                    ocr_show_log=False,
                    det_db_box_thresh=0.5,
                    det_db_unclip_ratio=1.6,
                    lang=_lang
                )
                table_model = atom_model_manager.get_atom_model(
                    atom_model_name='table',
                    table_model_name='rapid_table',
                    table_model_path='',
                    table_max_time=400,
                    device='cpu',
                    ocr_engine=ocr_engine,
                    table_sub_model_name='slanet_plus'
                )
                html_code, table_cell_bboxes, logic_points, elapse = table_model.predict(
                    table_res_dict['table_img']
                )
                # Check whether the model returned a usable result
                if html_code:
                    expected_ending = html_code.strip().endswith(('</html>', '</table>'))
                    if expected_ending:
                        table_res_dict['table_res']['html'] = html_code
                    else:
                        logger.warning(
                            'table recognition processing failed: expected HTML table ending not found'
                        )
                else:
                    logger.warning(
                        'table recognition processing failed: no HTML returned'
                    )
            # logger.info(f'table time: {round(time.time() - table_start, 2)}, image num: {len(table_res_list_all_page)}')

        # Create dictionaries to store items by language
        need_ocr_lists_by_lang = {}  # Dict of lists for each language
        img_crop_lists_by_lang = {}  # Dict of lists for each language

        for layout_res in images_layout_res:
            for layout_res_item in layout_res:
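                # category_id 15: text spans from OCR-det that still await
                # text recognition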
                if layout_res_item['category_id'] in [15]:
                    if 'np_img' in layout_res_item and 'lang' in layout_res_item:
                        lang = layout_res_item['lang']

                        # Initialize lists for this language if not exist
                        if lang not in need_ocr_lists_by_lang:
                            need_ocr_lists_by_lang[lang] = []
                            img_crop_lists_by_lang[lang] = []

                        # Add to the appropriate language-specific lists
                        need_ocr_lists_by_lang[lang].append(layout_res_item)
                        img_crop_lists_by_lang[lang].append(layout_res_item['np_img'])

                        # Remove the fields after adding to lists
                        layout_res_item.pop('np_img')
                        layout_res_item.pop('lang')


        if len(img_crop_lists_by_lang) > 0:

            # Process OCR by language
            rec_time = 0
            rec_start = time.time()
            total_processed = 0

            # Process each language separately
            for lang, img_crop_list in img_crop_lists_by_lang.items():
                if len(img_crop_list) > 0:
                    # Get OCR results for this language's images
                    atom_model_manager = AtomModelSingleton()
                    ocr_model = atom_model_manager.get_atom_model(
                        atom_model_name='ocr',
                        ocr_show_log=False,
                        det_db_box_thresh=0.3,
                        lang=lang
                    )
                    ocr_res_list = ocr_model.ocr(img_crop_list, det=False, tqdm_enable=True)[0]

                    # Verify we have matching counts
                    assert len(ocr_res_list) == len(need_ocr_lists_by_lang[lang]), \
                        f'ocr_res_list: {len(ocr_res_list)}, need_ocr_list: {len(need_ocr_lists_by_lang[lang])} for lang: {lang}'

                    # Process OCR results for this language
                    for index, layout_res_item in enumerate(need_ocr_lists_by_lang[lang]):
                        ocr_text, ocr_score = ocr_res_list[index]
                        layout_res_item['text'] = ocr_text
                        layout_res_item['score'] = float(f"{ocr_score:.3f}")

                    total_processed += len(img_crop_list)

            rec_time += time.time() - rec_start
            # logger.info(f'ocr-rec time: {round(rec_time, 2)}, total images processed: {total_processed}')

        return images_layout_res
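

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only): assumes a model manager exposing
# `get_model(...)` with the signature used in `__call__` above, and pages
# already rasterized to RGB numpy arrays. All names below are hypothetical.
#
#   analyzer = BatchAnalyze(
#       model_manager, batch_ratio=1, show_log=False,
#       layout_model='doclayout_yolo', formula_enable=True, table_enable=True,
#   )
#   batch = [(page_img, True, 'en') for page_img in page_images]
#   results = analyzer(batch)  # one result list per page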