"examples/community/img2img_inpainting.py" did not exist on "ca749513230f5c425049d47c3dad9c099fb2a769"
pipeline.py 27.5 KB
Newer Older
赵小蒙's avatar
赵小蒙 committed
1
2
3
4
5
# coding=utf8
import sys
import time
from urllib.parse import quote

from magic_pdf.dict2md.ocr_mkcontent import ocr_mk_mm_markdown, ocr_mk_nlp_markdown_with_para, \
    ocr_mk_mm_markdown_with_para_and_pagination, ocr_mk_mm_markdown_with_para, ocr_mk_mm_standard_format, \
    make_standard_format_with_para
from magic_pdf.libs.commons import (
    read_file,
    join_path,
    parse_bucket_key,
    formatted_time,
    s3_image_save_path,
)
from magic_pdf.libs.drop_reason import DropReason
from magic_pdf.libs.json_compressor import JsonCompressor
from magic_pdf.dict2md.mkcontent import mk_nlp_markdown, mk_universal_format
from magic_pdf.pdf_parse_by_model import parse_pdf_by_model
from magic_pdf.filter.pdf_classify_by_type import classify
from magic_pdf.filter.pdf_meta_scan import pdf_meta_scan
from loguru import logger

from magic_pdf.pdf_parse_by_ocr import parse_pdf_by_ocr
from magic_pdf.pdf_parse_for_train import parse_pdf_for_train
from magic_pdf.spark.base import exception_handler, get_data_source
from magic_pdf.train_utils.convert_to_train_format import convert_to_train_format
from app.common.s3 import get_s3_config, get_s3_client



def get_data_type(jso: dict):
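    """Return the record's data type, falling back to "file_type" when "data_type" is absent."""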
    data_type = jso.get("data_type")
    if data_type is None:
        data_type = jso.get("file_type")
    return data_type


def get_bookid(jso: dict):
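    """Return the record's book id, falling back to "original_file_id" when "bookid" is absent."""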
    book_id = jso.get("bookid")
    if book_id is None:
        book_id = jso.get("original_file_id")
    return book_id


def meta_scan(jso: dict, doc_layout_check=True) -> dict:
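    """Read the PDF at jso["file_location"] from S3 and attach the pdf_meta_scan result.

    When doc_layout_check is enabled, records without "doc_layout_result" are marked
    with need_drop / MISS_DOC_LAYOUT_RESULT. Read and scan durations are recorded in
    "read_file_time" and "meta_scan_time".
    """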
    s3_pdf_path = jso.get("file_location")
    s3_config = get_s3_config(s3_pdf_path)
    if doc_layout_check:
        if (
            "doc_layout_result" not in jso
        ):  # check whether the json carries model layout data; if not, skip this pdf
            jso["need_drop"] = True
            jso["drop_reason"] = DropReason.MISS_DOC_LAYOUT_RESULT
            return jso
    try:
        data_source = get_data_source(jso)
        file_id = jso.get("file_id")
        book_name = f"{data_source}/{file_id}"

        # the first page has an excessive number of drawings
        # special_pdf_list = ['zlib/zlib_21822650']
        # if book_name in special_pdf_list:
        #     jso['need_drop'] = True
        #     jso['drop_reason'] = DropReason.SPECIAL_PDF
        #     return jso

        start_time = time.time()  # record start time
        logger.info(
            f"book_name is:{book_name},start_time is:{formatted_time(start_time)}",
            file=sys.stderr,
        )
        file_content = read_file(s3_pdf_path, s3_config)
        read_file_time = int(time.time() - start_time)  # compute elapsed time

        start_time = time.time()  # record start time
        res = pdf_meta_scan(s3_pdf_path, file_content)
        if res.get(
            "need_drop", False
        ):  # if the returned dict carries need_drop, take its drop_reason and skip this parse
            jso["need_drop"] = True
            jso["drop_reason"] = res["drop_reason"]
        else:  # normal return
            jso["pdf_meta"] = res
            jso["content"] = ""
            jso["remark"] = ""
            jso["data_url"] = ""
        end_time = time.time()  # record end time
        meta_scan_time = int(end_time - start_time)  # compute elapsed time
        logger.info(
            f"book_name is:{book_name},end_time is:{formatted_time(end_time)},read_file_time is:{read_file_time},meta_scan_time is:{meta_scan_time}",
            file=sys.stderr,
        )
        jso["read_file_time"] = read_file_time
        jso["meta_scan_time"] = meta_scan_time
    except Exception as e:
        jso = exception_handler(jso, e)
    return jso


def classify_by_type(jso: dict, debug_mode=False) -> dict:
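    """Classify the scanned PDF as text-based or not, using the metadata from meta_scan.

    Encrypted or password-protected PDFs are dropped with DropReason.ENCRYPTED, text PDFs
    whose language is outside the allow list are dropped with NOT_ALLOW_LANGUAGE, and
    non-text PDFs are dropped with NOT_IS_TEXT_PDF, recording the failed rules in "extra_info".
    """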
    # check debug switch
    if debug_mode:
        pass
    else:  # if debug is off, check for the need_drop flag
        if jso.get("need_drop", False):
            return jso
    # start of the main logic
    try:
        pdf_meta = jso.get("pdf_meta")
        data_source = get_data_source(jso)
        file_id = jso.get("file_id")
        book_name = f"{data_source}/{file_id}"
        total_page = pdf_meta["total_page"]
        page_width = pdf_meta["page_width_pts"]
        page_height = pdf_meta["page_height_pts"]
        img_sz_list = pdf_meta["image_info_per_page"]
        img_num_list = pdf_meta["imgs_per_page"]
        text_len_list = pdf_meta["text_len_per_page"]
        text_layout_list = pdf_meta["text_layout_per_page"]
        text_language = pdf_meta["text_language"]
        # allow_language = ['zh', 'en']  # allowed languages; currently only Simplified Chinese and English

        # if text_language not in allow_language:  # drop if the language is not in the allow list
        #     jso['need_drop'] = True
        #     jso['drop_reason'] = DropReason.NOT_ALLOW_LANGUAGE
        #     return jso
        pdf_path = pdf_meta["pdf_path"]
        is_encrypted = pdf_meta["is_encrypted"]
        is_needs_password = pdf_meta["is_needs_password"]
        if (
            is_encrypted or is_needs_password
        ):  # encrypted, password-protected, or page-less PDFs are not processed
            jso["need_drop"] = True
            jso["drop_reason"] = DropReason.ENCRYPTED
        else:
            start_time = time.time()  # record start time
            is_text_pdf, results = classify(
                pdf_path,
                total_page,
                page_width,
                page_height,
                img_sz_list,
                text_len_list,
                img_num_list,
                text_layout_list,
            )
            classify_time = int(time.time() - start_time)  # compute elapsed time
            if is_text_pdf:
                pdf_meta["is_text_pdf"] = is_text_pdf
                jso["pdf_meta"] = pdf_meta
                jso["classify_time"] = classify_time
                # print(json.dumps(pdf_meta, ensure_ascii=False))

                allow_language = ["zh", "en"]  # allowed languages; currently only Simplified Chinese and English
                if (
                    text_language not in allow_language
                ):  # drop if the language is not in the allow list
                    jso["need_drop"] = True
                    jso["drop_reason"] = DropReason.NOT_ALLOW_LANGUAGE
                    return jso
            else:
                # do not drop for now
                pdf_meta["is_text_pdf"] = is_text_pdf
                jso["pdf_meta"] = pdf_meta
                jso["classify_time"] = classify_time
                jso["need_drop"] = True
                jso["drop_reason"] = DropReason.NOT_IS_TEXT_PDF
                extra_info = {"classify_rules": []}
                for condition, result in results.items():
                    if not result:
                        extra_info["classify_rules"].append(condition)
                jso["extra_info"] = extra_info

    except Exception as e:
        jso = exception_handler(jso, e)
    return jso


def save_tables_to_s3(jso: dict, debug_mode=False) -> dict:
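    """Re-upload the table images of scihub records to the table_new prefix on S3.

    Each page's .jpg table crops are fetched through the S3 client and written back under
    a URL-encoded title, after which the bulky intermediate fields are cleared.
    """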

    if debug_mode:
        pass
    else:  # if debug is off, check for the need_drop flag
        if jso.get("need_drop", False):
            logger.info(
                f"book_name is:{get_data_source(jso)}/{jso['file_id']} need drop",
                file=sys.stderr,
            )
            jso["dropped"] = True
            return jso
    try:
        data_source = get_data_source(jso)
        file_id = jso.get("file_id")
        book_name = f"{data_source}/{file_id}"
        title = jso.get("title")
        url_encode_title = quote(title, safe="")
        if data_source != "scihub":
            return jso
        pdf_intermediate_dict = jso["pdf_intermediate_dict"]
        # decompress pdf_intermediate_dict
        pdf_intermediate_dict = JsonCompressor.decompress_json(pdf_intermediate_dict)
        i = 0
        for page in pdf_intermediate_dict.values():
            if page.get("tables"):
                if len(page["tables"]) > 0:
                    j = 0
                    for table in page["tables"]:
                        if debug_mode:
                            image_path = join_path(
                                "s3://mllm-raw-media/pdf2md_img/",
                                book_name,
                                table["image_path"],
                            )
                        else:
                            image_path = join_path(
                                "s3://mllm-raw-media/pdf2md_img/", table["image_path"]
                            )

                        if image_path.endswith(".jpg"):
                            j += 1
                            s3_client = get_s3_client(image_path)
                            bucket_name, bucket_key = parse_bucket_key(image_path)
                            # fetch the image into memory via s3_client
                            image_bytes = s3_client.get_object(
                                Bucket=bucket_name, Key=bucket_key
                            )["Body"].read()
                            # save the image to a new location
                            if debug_mode:
                                new_image_path = join_path(
                                    "s3://mllm-raw-media/pdf2md_img/table_new/",
                                    url_encode_title
                                    + "_"
                                    + table["image_path"].lstrip("tables/"),
                                )
                            else:
                                new_image_path = join_path(
                                    "s3://mllm-raw-media/pdf2md_img/table_new/",
                                    url_encode_title + f"_page{i}_{j}.jpg",
                                )

                            logger.info(new_image_path, file=sys.stderr)
                            bucket_name, bucket_key = parse_bucket_key(new_image_path)
                            s3_client.put_object(
                                Bucket=bucket_name, Key=bucket_key, Body=image_bytes
                            )
                        else:
                            continue
            i += 1

        # clear fields that are no longer needed
        jso["doc_layout_result"] = ""
        jso["pdf_intermediate_dict"] = ""
        jso["pdf_meta"] = ""
    except Exception as e:
        jso = exception_handler(jso, e)
    return jso


def drop_needdrop_pdf(jso: dict) -> dict:
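    """Mark a record flagged with need_drop as dropped and log it."""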
    if jso.get("need_drop", False):
        logger.info(
            f"book_name is:{get_data_source(jso)}/{jso['file_id']} need drop",
            file=sys.stderr,
        )
        jso["dropped"] = True
    return jso


def pdf_intermediate_dict_to_markdown(jso: dict, debug_mode=False) -> dict:
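    """Decompress the intermediate dict and convert it to the universal content list.

    The result is stored in jso["content_list"]; the bulky doc_layout_result,
    pdf_intermediate_dict and pdf_meta fields are cleared afterwards.
    """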

    if debug_mode:
        pass
    else:  # if debug is off, check for the need_drop flag
        if jso.get("need_drop", False):
            book_name = join_path(get_data_source(jso), jso["file_id"])
            logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
            jso["dropped"] = True
            return jso
    try:
        pdf_intermediate_dict = jso["pdf_intermediate_dict"]
        # decompress pdf_intermediate_dict
        pdf_intermediate_dict = JsonCompressor.decompress_json(pdf_intermediate_dict)
        # markdown_content = mk_nlp_markdown(pdf_intermediate_dict)
        jso["content_list"] = mk_universal_format(pdf_intermediate_dict)
        # jso["content"] = markdown_content
        logger.info(f"book_name is:{get_data_source(jso)}/{jso['file_id']}")
        # clear fields that are no longer needed
        jso["doc_layout_result"] = ""
        jso["pdf_intermediate_dict"] = ""
        jso["pdf_meta"] = ""
    except Exception as e:
        jso = exception_handler(jso, e)
    return jso


def parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
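    """Parse a text PDF with parse_pdf_by_model and store the compressed result.

    PDFs whose busiest page has more than 3000 SVGs are dropped with
    HIGH_COMPUTATIONAL_lOAD_BY_SVGS; otherwise the compressed pdf_info_dict is stored
    in "pdf_intermediate_dict" together with the parse time.
    """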
    # check debug switch
    if debug_mode:
        pass
    else:  # if debug is off, check for the need_drop flag
        if jso.get("need_drop", False):
            return jso
    # start of the main logic
    s3_pdf_path = jso.get("file_location")
    s3_config = get_s3_config(s3_pdf_path)
    model_output_json_list = jso.get("doc_layout_result")
    data_source = get_data_source(jso)
    file_id = jso.get("file_id")
    book_name = f"{data_source}/{file_id}"

    # fixed in 1.23.22
    # if debug_mode:
    #     pass
    # else:
    #     if book_name == "zlib/zlib_21929367":
    #         jso['need_drop'] = True
    #         jso['drop_reason'] = DropReason.SPECIAL_PDF
    #         return jso

    junk_img_bojids = jso["pdf_meta"]["junk_img_bojids"]
    # total_page = jso['pdf_meta']['total_page']

    # check max_svgs: drop the pdf if max_svgs exceeds 3000
    svgs_per_page_list = jso["pdf_meta"]["svgs_per_page"]
    max_svgs = max(svgs_per_page_list)
    if max_svgs > 3000:
329
        jso["need_drop"] = True
        jso["drop_reason"] = DropReason.HIGH_COMPUTATIONAL_lOAD_BY_SVGS
    # elif total_page > 1000:
    #     jso['need_drop'] = True
    #     jso['drop_reason'] = DropReason.HIGH_COMPUTATIONAL_lOAD_BY_TOTAL_PAGES
    else:
        try:
            save_path = s3_image_save_path
            image_s3_config = get_s3_config(save_path)
            start_time = time.time()  # record start time
            # log the book_name and the parse start time first
            logger.info(
                f"book_name is:{book_name},start_time is:{formatted_time(start_time)}",
                file=sys.stderr,
            )
            pdf_info_dict = parse_pdf_by_model(
                s3_pdf_path,
                s3_config,
                model_output_json_list,
                save_path,
                book_name,
                pdf_model_profile=None,
                image_s3_config=image_s3_config,
                start_page_id=start_page_id,
                junk_img_bojids=junk_img_bojids,
                debug_mode=debug_mode,
            )
            if pdf_info_dict.get(
                "need_drop", False
            ):  # if the returned dict carries need_drop, take its drop_reason and skip this parse
                jso["need_drop"] = True
                jso["drop_reason"] = pdf_info_dict["drop_reason"]
            else:  # normal return: compress and store pdf_info_dict
                pdf_info_dict = JsonCompressor.compress_json(pdf_info_dict)
                jso["pdf_intermediate_dict"] = pdf_info_dict
            end_time = time.time()  # record end time
            parse_time = int(end_time - start_time)  # compute elapsed time
            # after parsing, log the book_name and the elapsed time
            logger.info(
                f"book_name is:{book_name},end_time is:{formatted_time(end_time)},cost_time is:{parse_time}",
                file=sys.stderr,
            )
            jso["parse_time"] = parse_time
        except Exception as e:
            jso = exception_handler(jso, e)
    return jso


"""
Unified processing logic:
1. First call parse_pdf to handle text-based PDFs.
2. Then call ocr_dropped_parse_pdf to handle PDFs that were dropped earlier.
"""


def uni_parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
    jso = parse_pdf(jso, start_page_id=start_page_id, debug_mode=debug_mode)
    jso = ocr_dropped_parse_pdf(jso, start_page_id=start_page_id, debug_mode=debug_mode)
    return jso
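
# A minimal usage sketch (hypothetical record shape, for illustration only): each jso is
# one JSON object per PDF, produced upstream with at least "file_location", "file_id" and
# "doc_layout_result" set, and is passed through the stages in order, e.g.:
#
#     record = meta_scan(record)
#     record = classify_by_type(record)
#     record = uni_parse_pdf(record)
#     record = pdf_intermediate_dict_to_markdown(record)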

def parse_pdf_for_model_train(jso: dict, start_page_id=0, debug_mode=False) -> dict:
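    """Variant of parse_pdf that parses with parse_pdf_for_train for training-data export.

    On success the output of convert_to_train_format is stored in jso["parsed_results"]
    and the compressed pdf_info_dict in "pdf_intermediate_dict".
    """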
    # check debug switch
    if debug_mode:
        pass
    else:  # if debug is off, check for the need_drop flag
        if jso.get("need_drop", False):
            return jso
    # start of the main logic
    s3_pdf_path = jso.get("file_location")
    s3_config = get_s3_config(s3_pdf_path)
    model_output_json_list = jso.get("doc_layout_result")
    data_source = get_data_source(jso)
    file_id = jso.get("file_id")
    book_name = f"{data_source}/{file_id}"

    # fixed in 1.23.22
    # if debug_mode:
    #     pass
    # else:
    #     if book_name == "zlib/zlib_21929367":
    #         jso['need_drop'] = True
    #         jso['drop_reason'] = DropReason.SPECIAL_PDF
    #         return jso

    junk_img_bojids = jso["pdf_meta"]["junk_img_bojids"]
    # total_page = jso['pdf_meta']['total_page']

    # check max_svgs: drop the pdf if max_svgs exceeds 3000
    svgs_per_page_list = jso["pdf_meta"]["svgs_per_page"]
    max_svgs = max(svgs_per_page_list)
    if max_svgs > 3000:
        jso["need_drop"] = True
        jso["drop_reason"] = DropReason.HIGH_COMPUTATIONAL_lOAD_BY_SVGS
    # elif total_page > 1000:
    #     jso['need_drop'] = True
    #     jso['drop_reason'] = DropReason.HIGH_COMPUTATIONAL_lOAD_BY_TOTAL_PAGES
    else:
        try:
            save_path = s3_image_save_path
            image_s3_config = get_s3_config(save_path)
            start_time = time.time()  # record start time
            # log the book_name and the parse start time first
            logger.info(
                f"book_name is:{book_name},start_time is:{formatted_time(start_time)}",
                file=sys.stderr,
            )
            pdf_info_dict = parse_pdf_for_train(
                s3_pdf_path,
                s3_config,
                model_output_json_list,
                save_path,
                book_name,
                pdf_model_profile=None,
                image_s3_config=image_s3_config,
                start_page_id=start_page_id,
                junk_img_bojids=junk_img_bojids,
                debug_mode=debug_mode,
            )
            if pdf_info_dict.get(
                "need_drop", False
            ):  # if the returned dict carries need_drop, take its drop_reason and skip this parse
                jso["need_drop"] = True
                jso["drop_reason"] = pdf_info_dict["drop_reason"]
            else:  # normal return: compress and store pdf_info_dict
                jso["parsed_results"] = convert_to_train_format(pdf_info_dict)
                pdf_info_dict = JsonCompressor.compress_json(pdf_info_dict)
                jso["pdf_intermediate_dict"] = pdf_info_dict
            end_time = time.time()  # record end time
            parse_time = int(end_time - start_time)  # compute elapsed time
            # after parsing, log the book_name and the elapsed time
            logger.info(
                f"book_name is:{book_name},end_time is:{formatted_time(end_time)},cost_time is:{parse_time}",
                file=sys.stderr,
            )
            jso["parse_time"] = parse_time
        except Exception as e:
            jso = exception_handler(jso, e)
    return jso


# dedicated to re-running PDFs that were dropped; afterwards need_drop must be reset to False
def ocr_dropped_parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
    if not jso.get("need_drop", False):
        return jso
    else:
        jso = ocr_parse_pdf_core(
            jso, start_page_id=start_page_id, debug_mode=debug_mode
        )
        jso["need_drop"] = False
        return jso


def ocr_parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
    # check debug switch
    if debug_mode:
        pass
    else:  # if debug is off, check for the need_drop flag
        if jso.get("need_drop", False):
            return jso

    jso = ocr_parse_pdf_core(jso, start_page_id=start_page_id, debug_mode=debug_mode)
    return jso


def ocr_parse_pdf_core(jso: dict, start_page_id=0, debug_mode=False) -> dict:
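    """Parse the PDF with parse_pdf_by_ocr and store the compressed result.

    The compressed pdf_info_dict goes to jso["pdf_intermediate_dict"] and the elapsed
    time to jso["parse_time"].
    """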
    s3_pdf_path = jso.get("file_location")
    s3_config = get_s3_config(s3_pdf_path)
    model_output_json_list = jso.get("doc_layout_result")
    data_source = get_data_source(jso)
    file_id = jso.get("file_id")
    book_name = f"{data_source}/{file_id}"
    try:
        save_path = s3_image_save_path
        image_s3_config = get_s3_config(save_path)
        start_time = time.time()  # record start time
        # log the book_name and the parse start time first
        logger.info(
            f"book_name is:{book_name},start_time is:{formatted_time(start_time)}",
            file=sys.stderr,
        )
        pdf_info_dict = parse_pdf_by_ocr(
            s3_pdf_path,
            s3_config,
            model_output_json_list,
            save_path,
            book_name,
            pdf_model_profile=None,
            image_s3_config=image_s3_config,
            start_page_id=start_page_id,
            debug_mode=debug_mode,
        )
        pdf_info_dict = JsonCompressor.compress_json(pdf_info_dict)
        jso["pdf_intermediate_dict"] = pdf_info_dict
        end_time = time.time()  # record end time
        parse_time = int(end_time - start_time)  # compute elapsed time
        # after parsing, log the book_name and the elapsed time
        logger.info(
            f"book_name is:{book_name},end_time is:{formatted_time(end_time)},cost_time is:{parse_time}",
            file=sys.stderr,
        )
        jso["parse_time"] = parse_time
    except Exception as e:
        jso = exception_handler(jso, e)
    return jso


def ocr_pdf_intermediate_dict_to_markdown(jso: dict, debug_mode=False) -> dict:
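    """Decompress the intermediate dict and render it to multimodal markdown in jso["content"]."""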
    if debug_mode:
        pass
    else:  # if debug is off, check for the need_drop flag
        if jso.get("need_drop", False):
            book_name = join_path(get_data_source(jso), jso["file_id"])
            logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
            jso["dropped"] = True
            return jso
    try:
        pdf_intermediate_dict = jso["pdf_intermediate_dict"]
        # decompress pdf_intermediate_dict
        pdf_intermediate_dict = JsonCompressor.decompress_json(pdf_intermediate_dict)
        markdown_content = ocr_mk_mm_markdown(pdf_intermediate_dict)
        jso["content"] = markdown_content
        logger.info(
            f"book_name is:{get_data_source(jso)}/{jso['file_id']},markdown content length is {len(markdown_content)}",
            file=sys.stderr,
        )
        # clear fields that are no longer needed
        jso["doc_layout_result"] = ""
        jso["pdf_intermediate_dict"] = ""
        jso["pdf_meta"] = ""
    except Exception as e:
        jso = exception_handler(jso, e)
    return jso


def ocr_pdf_intermediate_dict_to_markdown_with_para(jso: dict, debug_mode=False) -> dict:
    if debug_mode:
        pass
    else:  # if debug is off, check for the need_drop flag
        if jso.get("need_drop", False):
            book_name = join_path(get_data_source(jso), jso["file_id"])
            logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
            jso["dropped"] = True
            return jso
    try:
        pdf_intermediate_dict = jso["pdf_intermediate_dict"]
        # decompress pdf_intermediate_dict
        pdf_intermediate_dict = JsonCompressor.decompress_json(pdf_intermediate_dict)
        # markdown_content = ocr_mk_mm_markdown_with_para(pdf_intermediate_dict)
        markdown_content = ocr_mk_nlp_markdown_with_para(pdf_intermediate_dict)
        jso["content"] = markdown_content
        logger.info(
            f"book_name is:{get_data_source(jso)}/{jso['file_id']},markdown content length is {len(markdown_content)}",
            file=sys.stderr,
        )
        # clear fields that are no longer needed
        jso["doc_layout_result"] = ""
        jso["pdf_intermediate_dict"] = ""
        jso["pdf_meta"] = ""
    except Exception as e:
        jso = exception_handler(jso, e)
    return jso


def ocr_pdf_intermediate_dict_to_markdown_with_para_and_pagination(jso: dict, debug_mode=False) -> dict:
    if debug_mode:
        pass
    else:  # if debug is off, check for the need_drop flag
        if jso.get("need_drop", False):
            book_name = join_path(get_data_source(jso), jso["file_id"])
            logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
            jso["dropped"] = True
            return jso
    try:
        pdf_intermediate_dict = jso["pdf_intermediate_dict"]
        # decompress pdf_intermediate_dict
        pdf_intermediate_dict = JsonCompressor.decompress_json(pdf_intermediate_dict)
        markdown_content = ocr_mk_mm_markdown_with_para_and_pagination(pdf_intermediate_dict)
        jso["content"] = markdown_content
        logger.info(
            f"book_name is:{get_data_source(jso)}/{jso['file_id']},markdown content length is {len(markdown_content)}",
            file=sys.stderr,
        )
        # clear fields that are no longer needed
        # jso["doc_layout_result"] = ""
        jso["pdf_intermediate_dict"] = ""
        # jso["pdf_meta"] = ""
    except Exception as e:
        jso = exception_handler(jso, e)
    return jso


def ocr_pdf_intermediate_dict_to_markdown_with_para_for_qa(
        jso: dict, debug_mode=False
) -> dict:
    if debug_mode:
        pass
    else:  # if debug is off, check for the need_drop flag
        if jso.get("need_drop", False):
            book_name = join_path(get_data_source(jso), jso["file_id"])
            logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
            jso["dropped"] = True
            return jso
    try:
        pdf_intermediate_dict = jso["pdf_intermediate_dict"]
        # decompress pdf_intermediate_dict
        pdf_intermediate_dict = JsonCompressor.decompress_json(pdf_intermediate_dict)
        markdown_content = ocr_mk_mm_markdown_with_para(pdf_intermediate_dict)
        jso["content_ocr"] = markdown_content
        logger.info(
            f"book_name is:{get_data_source(jso)}/{jso['file_id']},markdown content length is {len(markdown_content)}",
            file=sys.stderr,
        )
        # clear fields that are no longer needed
        jso["doc_layout_result"] = ""
        jso["pdf_intermediate_dict"] = ""
        jso["mid_json_ocr"] = pdf_intermediate_dict
        jso["pdf_meta"] = ""
    except Exception as e:
        jso = exception_handler(jso, e)
    return jso


def ocr_pdf_intermediate_dict_to_standard_format(jso: dict, debug_mode=False) -> dict:
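    """Decompress the intermediate dict and store the standard-format content list."""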
    if debug_mode:
        pass
    else:  # if debug is off, check for the need_drop flag
        if jso.get("need_drop", False):
            book_name = join_path(get_data_source(jso), jso["file_id"])
            logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
            jso["dropped"] = True
            return jso
    try:
        pdf_intermediate_dict = jso["pdf_intermediate_dict"]
        # decompress pdf_intermediate_dict
        pdf_intermediate_dict = JsonCompressor.decompress_json(pdf_intermediate_dict)
        standard_format = ocr_mk_mm_standard_format(pdf_intermediate_dict)
        jso["content_list"] = standard_format
        logger.info(
            f"book_name is:{get_data_source(jso)}/{jso['file_id']},content_list length is {len(standard_format)}",
            file=sys.stderr,
        )
        # clear fields that are no longer needed
        jso["doc_layout_result"] = ""
        jso["pdf_intermediate_dict"] = ""
        jso["pdf_meta"] = ""
    except Exception as e:
        jso = exception_handler(jso, e)
    return jso


def ocr_pdf_intermediate_dict_to_standard_format_with_para(jso: dict, debug_mode=False) -> dict:
    if debug_mode:
        pass
    else:  # if debug is off, check for the need_drop flag
        if jso.get("need_drop", False):
            book_name = join_path(get_data_source(jso), jso["file_id"])
            logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
            jso["dropped"] = True
            return jso
    try:
        pdf_intermediate_dict = jso["pdf_intermediate_dict"]
        # decompress pdf_intermediate_dict
        pdf_intermediate_dict = JsonCompressor.decompress_json(pdf_intermediate_dict)
        standard_format = make_standard_format_with_para(pdf_intermediate_dict)
        jso["content_list"] = standard_format
        logger.info(
            f"book_name is:{get_data_source(jso)}/{jso['file_id']},content_list length is {len(standard_format)}",
            file=sys.stderr,
        )
        # clear fields that are no longer needed
        jso["doc_layout_result"] = ""
        jso["pdf_intermediate_dict"] = ""
        jso["pdf_meta"] = ""
    except Exception as e:
        jso = exception_handler(jso, e)
    return jso


if __name__ == "__main__":
    pass