Commit 2df265c8 authored by zhougaofeng

Update magic_pdf: dict2md markdown builders and OCR client/server scripts (including vLLM variants), plus filter, layout, libs, model, para, parse, pipe, pre_proc, post_proc, rw, spark, tools, and utils modules, and config/resource files.
parent 826086d2
import math
from loguru import logger
from magic_pdf.libs.boxbase import find_bottom_nearest_text_bbox, find_top_nearest_text_bbox
from magic_pdf.libs.commons import join_path
from magic_pdf.libs.ocr_content_type import ContentType
TYPE_INLINE_EQUATION = ContentType.InlineEquation
TYPE_INTERLINE_EQUATION = ContentType.InterlineEquation
UNI_FORMAT_TEXT_TYPE = ['text', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6']
# Deprecated. The original applied @DeprecationWarning as a decorator, which
# replaces the function with an exception instance and makes it uncallable,
# so a comment marker is used instead.
def mk_nlp_markdown_1(para_dict: dict):
"""
Concatenate the content of the sorted bboxes.
"""
content_lst = []
for _, page_info in para_dict.items():
para_blocks = page_info.get("para_blocks")
if not para_blocks:
continue
for block in para_blocks:
item = block["paras"]
for _, p in item.items():
para_text = p["para_text"]
is_title = p["is_para_title"]
title_level = p['para_title_level']
md_title_prefix = "#"*title_level
if is_title:
content_lst.append(f"{md_title_prefix} {para_text}")
else:
content_lst.append(para_text)
content_text = "\n\n".join(content_lst)
return content_text
# Find the index of the target string within the paragraph
def __find_index(paragraph, target):
index = paragraph.find(target)
if index != -1:
return index
else:
return None
def __insert_string(paragraph, target, position):
new_paragraph = paragraph[:position] + target + paragraph[position:]
return new_paragraph
def __insert_after(content, image_content, target):
"""
在content中找到target,将image_content插入到target后面
"""
index = content.find(target)
if index != -1:
content = content[:index+len(target)] + "\n\n" + image_content + "\n\n" + content[index+len(target):]
else:
logger.error(f"Can't find the location of image {image_content} in the markdown file, search target is {target}")
return content
def __insert_before(content, image_content, target):
"""
在content中找到target,将image_content插入到target前面
"""
index = content.find(target)
if index != -1:
content = content[:index] + "\n\n" + image_content + "\n\n" + content[index:]
else:
logger.error(f"Can't find the location of image {image_content} in the markdown file, search target is {target}")
return content
# Deprecated; see the note on mk_nlp_markdown_1 about the @DeprecationWarning misuse.
def mk_mm_markdown_1(para_dict: dict):
"""Assemble multimodal markdown."""
content_lst = []
for _, page_info in para_dict.items():
page_lst = [] # list of paragraphs within one page
para_blocks = page_info.get("para_blocks")
pymu_raw_blocks = page_info.get("preproc_blocks")
all_page_images = []
all_page_images.extend(page_info.get("images",[]))
all_page_images.extend(page_info.get("image_backup", []) )
all_page_images.extend(page_info.get("tables",[]))
all_page_images.extend(page_info.get("table_backup",[]) )
if not para_blocks or not pymu_raw_blocks: # page that contains only images
for img in all_page_images:
page_lst.append(f"![]({img['image_path']})") # TODO 图片顺序
page_md = "\n\n".join(page_lst)
else:
for block in para_blocks:
item = block["paras"]
for _, p in item.items():
para_text = p["para_text"]
is_title = p["is_para_title"]
title_level = p['para_title_level']
md_title_prefix = "#"*title_level
if is_title:
page_lst.append(f"{md_title_prefix} {para_text}")
else:
page_lst.append(para_text)
"""拼装成一个页面的文本"""
page_md = "\n\n".join(page_lst)
"""插入图片"""
for img in all_page_images:
imgbox = img['bbox']
img_content = f"![]({img['image_path']})"
# First, find which block the image falls in
for block in pymu_raw_blocks:
bbox = block['bbox']
if bbox[0]-1 <= imgbox[0] < bbox[2]+1 and bbox[1]-1 <= imgbox[1] < bbox[3]+1: # inside this block
for l in block['lines']:
line_box = l['bbox']
if line_box[0]-1 <= imgbox[0] < line_box[2]+1 and line_box[1]-1 <= imgbox[1] < line_box[3]+1: # inside this line: insert before the line
line_txt = "".join([s['text'] for s in l['spans']])
page_md = __insert_before(page_md, img_content, line_txt)
break
break
else: # between lines
# find the line whose (x0, y0) is closest to the image's (x0, y0)
min_distance = 100000
min_line = None
for l in block['lines']:
line_box = l['bbox']
distance = math.sqrt((line_box[0] - imgbox[0])**2 + (line_box[1] - imgbox[1])**2)
if distance < min_distance:
min_distance = distance
min_line = l
if min_line:
line_txt = "".join([s['text'] for s in min_line['spans']])
img_h = imgbox[3] - imgbox[1]
if min_distance<img_h: # 文字在图片前面
page_md = __insert_after(page_md, img_content, line_txt)
else:
page_md = __insert_before(page_md, img_content, line_txt)
else:
logger.error(f"Can't find the location of image {img['image_path']} in the markdown file #1")
else: # should be between two blocks
# find the nearest block above; if there is none, find the nearest block below
top_txt_block = find_top_nearest_text_bbox(pymu_raw_blocks, imgbox)
if top_txt_block:
line_txt = "".join([s['text'] for s in top_txt_block['lines'][-1]['spans']])
page_md = __insert_after(page_md, img_content, line_txt)
else:
bottom_txt_block = find_bottom_nearest_text_bbox(pymu_raw_blocks, imgbox)
if bottom_txt_block:
line_txt = "".join([s['text'] for s in bottom_txt_block['lines'][0]['spans']])
page_md = __insert_before(page_md, img_content, line_txt)
else:
logger.error(f"Can't find the location of image {img['image_path']} in the markdown file #2")
content_lst.append(page_md)
"""拼装成全部页面的文本"""
content_text = "\n\n".join(content_lst)
return content_text
def __insert_after_para(text, type, element, content_list):
"""
在content_list中找到text,将image_path作为一个新的node插入到text后面
"""
for i, c in enumerate(content_list):
content_type = c.get("type")
if content_type in UNI_FORMAT_TEXT_TYPE and text in c.get("text", ''):
if type == "image":
content_node = {
"type": "image",
"img_path": element.get("image_path"),
"img_alt": "",
"img_title": "",
"img_caption": "",
}
elif type == "table":
content_node = {
"type": "table",
"img_path": element.get("image_path"),
"table_latex": element.get("text"),
"table_title": "",
"table_caption": "",
"table_quality": element.get("quality"),
}
content_list.insert(i+1, content_node)
break
else:
logger.error(f"Can't find the location of image {element.get('image_path')} in the markdown file, search target is {text}")
def __insert_before_para(text, type, element, content_list):
"""
在content_list中找到text,将image_path作为一个新的node插入到text前面
"""
for i, c in enumerate(content_list):
content_type = c.get("type")
if content_type in UNI_FORMAT_TEXT_TYPE and text in c.get("text", ''):
if type == "image":
content_node = {
"type": "image",
"img_path": element.get("image_path"),
"img_alt": "",
"img_title": "",
"img_caption": "",
}
elif type == "table":
content_node = {
"type": "table",
"img_path": element.get("image_path"),
"table_latex": element.get("text"),
"table_title": "",
"table_caption": "",
"table_quality": element.get("quality"),
}
content_list.insert(i, content_node)
break
else:
logger.error(f"Can't find the location of image {element.get('image_path')} in the markdown file, search target is {text}")
def mk_universal_format(pdf_info_list: list, img_buket_path):
"""
构造统一格式 https://aicarrier.feishu.cn/wiki/FqmMwcH69iIdCWkkyjvcDwNUnTY
"""
content_lst = []
for page_info in pdf_info_list:
page_lst = [] # list of paragraphs within one page
para_blocks = page_info.get("para_blocks")
pymu_raw_blocks = page_info.get("preproc_blocks")
all_page_images = []
all_page_images.extend(page_info.get("images",[]))
all_page_images.extend(page_info.get("image_backup", []) )
# all_page_images.extend(page_info.get("tables",[]))
# all_page_images.extend(page_info.get("table_backup",[]) )
all_page_tables = []
all_page_tables.extend(page_info.get("tables", []))
if not para_blocks or not pymu_raw_blocks: # page that contains only images
for img in all_page_images:
content_node = {
"type": "image",
"img_path": join_path(img_buket_path, img['image_path']),
"img_alt":"",
"img_title":"",
"img_caption":""
}
page_lst.append(content_node) # TODO: image ordering
for table in all_page_tables:
content_node = {
"type": "table",
"img_path": join_path(img_buket_path, table['image_path']),
"table_latex": table.get("text"),
"table_title": "",
"table_caption": "",
"table_quality": table.get("quality"),
}
page_lst.append(content_node) # TODO: table ordering
else:
for block in para_blocks:
item = block["paras"]
for _, p in item.items():
font_type = p['para_font_type'] # a paragraph is either plain text or an interline equation
if font_type == TYPE_INTERLINE_EQUATION:
content_node = {
"type": "equation",
"latex": p["para_text"]
}
page_lst.append(content_node)
else:
para_text = p["para_text"]
is_title = p["is_para_title"]
title_level = p['para_title_level']
if is_title:
content_node = {
"type": f"h{title_level}",
"text": para_text
}
page_lst.append(content_node)
else:
content_node = {
"type": "text",
"text": para_text
}
page_lst.append(content_node)
content_lst.extend(page_lst)
"""插入图片"""
for img in all_page_images:
insert_img_or_table("image", img, pymu_raw_blocks, content_lst)
"""插入表格"""
for table in all_page_tables:
insert_img_or_table("table", table, pymu_raw_blocks, content_lst)
# end for
return content_lst
def insert_img_or_table(type, element, pymu_raw_blocks, content_lst):
element_bbox = element['bbox']
# First, find which block the element falls in
for block in pymu_raw_blocks:
bbox = block['bbox']
if bbox[0] - 1 <= element_bbox[0] < bbox[2] + 1 and bbox[1] - 1 <= element_bbox[1] < bbox[3] + 1: # inside this block; compare distances line by line
for l in block['lines']:
line_box = l['bbox']
if line_box[0] - 1 <= element_bbox[0] < line_box[2] + 1 and line_box[1] - 1 <= element_bbox[1] < line_box[3] + 1: # inside this line: insert before the line
line_txt = "".join([s['text'] for s in l['spans']])
__insert_before_para(line_txt, type, element, content_lst)
break
break
else: # between lines
# find the line whose (x0, y0) is closest to the element's (x0, y0)
min_distance = 100000
min_line = None
for l in block['lines']:
line_box = l['bbox']
distance = math.sqrt((line_box[0] - element_bbox[0]) ** 2 + (line_box[1] - element_bbox[1]) ** 2)
if distance < min_distance:
min_distance = distance
min_line = l
if min_line:
line_txt = "".join([s['text'] for s in min_line['spans']])
img_h = element_bbox[3] - element_bbox[1]
if min_distance < img_h: # 文字在图片前面
__insert_after_para(line_txt, type, element, content_lst)
else:
__insert_before_para(line_txt, type, element, content_lst)
break
else:
logger.error(f"Can't find the location of image {element.get('image_path')} in the markdown file #1")
else: # should be between two blocks
# find the nearest block above; if there is none, find the nearest block below
top_txt_block = find_top_nearest_text_bbox(pymu_raw_blocks, element_bbox)
if top_txt_block:
line_txt = "".join([s['text'] for s in top_txt_block['lines'][-1]['spans']])
__insert_after_para(line_txt, type, element, content_lst)
else:
bottom_txt_block = find_bottom_nearest_text_bbox(pymu_raw_blocks, element_bbox)
if bottom_txt_block:
line_txt = "".join([s['text'] for s in bottom_txt_block['lines'][0]['spans']])
__insert_before_para(line_txt, type, element, content_lst)
else: # TODO: the image may occupy a full column, with no text above or below it
logger.error(f"Can't find the location of image {element.get('image_path')} in the markdown file #2")
def mk_mm_markdown(content_list):
"""
基于同一格式的内容列表,构造markdown,含图片
"""
content_md = []
for c in content_list:
content_type = c.get("type")
if content_type == "text":
content_md.append(c.get("text"))
elif content_type == "equation":
content = c.get("latex")
if content.startswith("$$") and content.endswith("$$"):
content_md.append(content)
else:
content_md.append(f"\n$$\n{c.get('latex')}\n$$\n")
elif content_type in UNI_FORMAT_TEXT_TYPE:
content_md.append(f"{'#'*int(content_type[1])} {c.get('text')}")
elif content_type == "image":
content_md.append(f"![]({c.get('img_path')})")
return "\n\n".join(content_md)
def mk_nlp_markdown(content_list):
"""
基于同一格式的内容列表,构造markdown,不含图片
"""
content_md = []
for c in content_list:
content_type = c.get("type")
if content_type == "text":
content_md.append(c.get("text"))
elif content_type == "equation":
content_md.append(f"$$\n{c.get('latex')}\n$$")
elif content_type == "table":
content_md.append(f"$$$\n{c.get('table_latex')}\n$$$")
elif content_type in UNI_FORMAT_TEXT_TYPE:
content_md.append(f"{'#'*int(content_type[1])} {c.get('text')}")
return "\n\n".join(content_md)
import configparser
import os
import json
import requests
from loguru import logger
import argparse
import time
from PIL import Image
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--config_path',
default='/home/practice/magic_pdf-main/magic_pdf/config.ini',
)
parser.add_argument(
'--image_path',
default='/home/wanglch/projects/Qwen2-VL/20240920-163701.png',
)
parser.add_argument(
'--text',
default="描述你在图片中看到的内容",
)
args = parser.parse_args()
return args
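# The client reads an INI file with a [server] section; a minimal example
# (illustrative values, not from the original repo):
#
# [server]
# ocr_server = http://127.0.0.1:8000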
def parse_text(text):
lines = text.split("\n")
lines = [line for line in lines if line.strip() != ""] # drop empty lines
count = 0
parsed_lines = []
for i, line in enumerate(lines):
if "```" in line:
count += 1
items = line.split("`")
if count % 2 == 1:
# start of a code block
parsed_lines.append(f'<pre><code class="language-{items[-1]}">')
else:
# end of a code block
parsed_lines.append("</code></pre>")
else:
if i > 0 and count % 2 == 1:
# escape special characters inside the code block
line = line.replace("`", r"\`")
line = line.replace("<", "&lt;")
line = line.replace(">", "&gt;")
line = line.replace(" ", "&nbsp;")
line = line.replace("*", "&ast;")
line = line.replace("_", "&lowbar;")
line = line.replace("-", "&#45;")
line = line.replace(".", "&#46;")
line = line.replace("!", "&#33;")
line = line.replace("(", "&#40;")
line = line.replace(")", "&#41;")
line = line.replace("$", "&#36;")
# join lines with a space
if parsed_lines:
parsed_lines[-1] += " " + line
else:
parsed_lines.append(line)
text = "".join(parsed_lines)
return text
def unparse_text(parsed_text):
in_code_block = False
lines = parsed_text.split("\n")
unparsed_lines = []
for line in lines:
if "<pre><code" in line:
in_code_block = True
# remove the opening tag
line = line.split(">", 1)[1]
elif "</code></pre>" in line:
in_code_block = False
# remove the closing tag
line = line.rsplit("<", 1)[0]
# reverse the HTML entities
line = line.replace("&lt;", "<")
line = line.replace("&gt;", ">")
line = line.replace("&nbsp;", " ")
line = line.replace("&ast;", "*")
line = line.replace("&lowbar;", "_")
line = line.replace("&#45;", "-")
line = line.replace("&#46;", ".")
line = line.replace("&#33;", "!")
line = line.replace("&#40;", "(")
line = line.replace("&#41;", ")")
line = line.replace("&#36;", "$")
# inside a code block: restore the backslash-escaped backticks
if in_code_block:
line = line.replace(r"\`", "`")
unparsed_lines.append(line)
# join all lines
unparsed_text = "\n".join(unparsed_lines)
return unparsed_text
def compress_image(image_path, max_size=(1024, 1024)):
img = Image.open(image_path)
width, height = img.size
aspect_ratio = width / height
if width > max_size[0] or height > max_size[1]:
if width > height:
new_width = max_size[0]
new_height = int(new_width / aspect_ratio)
else:
new_height = max_size[1]
new_width = int(new_height * aspect_ratio)
img = img.resize((new_width, new_height), Image.LANCZOS)
img.save(image_path, optimize=True, quality=80)
class PredictClient:
def __init__(self, api_url):
self.api_url = api_url
def check_health(self):
health_check_url = f'{self.api_url}/health'
try:
response = requests.get(health_check_url)
if response.status_code == 200:
logger.info("Server is healthy and ready to process requests.")
return True
else:
logger.error(f'Server health check failed with status code:{response.status_code}')
return False
except requests.exceptions.RequestException as e:
logger.error(f'Health check request failed:{e}')
return False
def predict(self, image_path: str, text: str):
payload = {
"image_path": image_path,
"text": text
}
headers = {'Content-Type': 'application/json'}
response = requests.post(f"{self.api_url}/predict", json=payload, headers=headers)
if response.status_code == 200:
result = response.json()
return result.get('Generated Text', '')
else:
raise Exception(f"Predict API request failed with status code {response.status_code}")
def main():
args = parse_args()
config = configparser.ConfigParser()
config.read(args.config_path)
ocr_server = config.get('server', 'ocr_server')
client = PredictClient(ocr_server)
try:
start_time = time.time() # record the start time
# optionally compress the image first
# compress_image(args.image_path)
generated_text = client.predict(args.image_path, parse_text(args.text))
end_time = time.time() # record the end time
elapsed_time = end_time - start_time # compute the elapsed time
if generated_text:
clean_text = unparse_text(generated_text) # post-process the generated text
logger.info(f"Image Path: {args.image_path}")
logger.info(f"Generated Text: {clean_text}")
logger.info(f"Elapsed time: {elapsed_time} s")
else:
logger.warning("Received empty generated text.")
except requests.exceptions.RequestException as e:
logger.error(f"Error while making request to predict service: {e}")
except Exception as e:
logger.error(f"Unexpected error occurred: {e}")
if __name__ == "__main__":
main()
import re
from loguru import logger
from magic_pdf.libs.commons import join_path
from magic_pdf.libs.language import detect_lang
from magic_pdf.libs.MakeContentConfig import DropMode, MakeMode
from magic_pdf.libs.markdown_utils import ocr_escape_special_markdown_char
from magic_pdf.libs.ocr_content_type import BlockType, ContentType
from magic_pdf.para.para_split_v3 import ListLineTag
def __is_hyphen_at_line_end(line):
"""
Check if a line ends with one or more letters followed by a hyphen.
Args:
line (str): The line of text to check.
Returns:
bool: True if the line ends with one or more letters followed by a hyphen, False otherwise.
"""
# Use regex to check if the line ends with one or more letters followed by a hyphen
return bool(re.search(r'[A-Za-z]+-\s*$', line))
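# For reference (illustrative):
# __is_hyphen_at_line_end('well-known-') -> True (letters then a trailing hyphen)
# __is_hyphen_at_line_end('covid-19') -> False (ends with digits, not a hyphen)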
def ocr_mk_mm_markdown_with_para_and_pagination(pdf_info_dict: list,
img_buket_path):
markdown_with_para_and_pagination = []
page_no = 0
for page_info in pdf_info_dict:
paras_of_layout = page_info.get('para_blocks')
if not paras_of_layout:
continue
page_markdown = ocr_mk_markdown_with_para_core_v2(
paras_of_layout, 'mm', img_buket_path)
markdown_with_para_and_pagination.append({
'page_no': page_no,
'md_content': '\n\n'.join(page_markdown)
})
page_no += 1
return markdown_with_para_and_pagination
def ocr_mk_markdown_with_para_core_v2(paras_of_layout,
mode,
img_buket_path='',
):
page_markdown = []
for para_block in paras_of_layout:
para_text = ''
para_type = para_block['type']
if para_type in [BlockType.Text, BlockType.List, BlockType.Index]:
para_text = merge_para_with_text(para_block)
elif para_type == BlockType.Title:
para_text = f'# {merge_para_with_text(para_block)}'
elif para_type == BlockType.InterlineEquation:
para_text = merge_para_with_text(para_block)
elif para_type == BlockType.Image:
if mode == 'nlp':
continue
elif mode == 'mm':
for block in para_block['blocks']: # 1st: assemble image_body
if block['type'] == BlockType.ImageBody:
for line in block['lines']:
for span in line['spans']:
if span['type'] == ContentType.Image:
if span.get('image_path', ''):
para_text += f"\n![]({join_path(img_buket_path, span['image_path'])})  \n"
for block in para_block['blocks']: # 2nd: assemble image_caption
if block['type'] == BlockType.ImageCaption:
para_text += merge_para_with_text(block) + ' \n'
for block in para_block['blocks']: # 3rd: assemble image_footnote
if block['type'] == BlockType.ImageFootnote:
para_text += merge_para_with_text(block) + ' \n'
elif para_type == BlockType.Table:
if mode == 'nlp':
continue
elif mode == 'mm':
for block in para_block['blocks']: # 1st: assemble table_caption
if block['type'] == BlockType.TableCaption:
para_text += merge_para_with_text(block) + ' \n'
for block in para_block['blocks']: # 2nd: assemble table_body
if block['type'] == BlockType.TableBody:
for line in block['lines']:
for span in line['spans']:
if span['type'] == ContentType.Table:
# if processed by table model
if span.get('latex', ''):
para_text += f"\n\n$\n {span['latex']}\n$\n\n"
elif span.get('html', ''):
para_text += f"\n\n{span['html']}\n\n"
elif span.get('image_path', ''):
para_text += span['image_path']
for block in para_block['blocks']: # 3rd: assemble table_footnote
if block['type'] == BlockType.TableFootnote:
para_text += merge_para_with_text(block) + ' \n'
if para_text.strip() == '':
continue
else:
page_markdown.append(para_text.strip() + ' ')
return page_markdown
def detect_language(text):
en_pattern = r'[a-zA-Z]+'
en_matches = re.findall(en_pattern, text)
en_length = sum(len(match) for match in en_matches)
if len(text) > 0:
if en_length / len(text) >= 0.5:
return 'en'
else:
return 'unknown'
else:
return 'empty'
def merge_para_with_text(para_block):
para_text = ''
for i, line in enumerate(para_block['lines']):
if i >= 1 and line.get(ListLineTag.IS_LIST_START_LINE, False):
para_text += ' \n'
line_text = ''
line_lang = ''
for span in line['spans']:
span_type = span['type']
if span_type == ContentType.Text:
line_text += span['content'].strip()
if line_text != '':
line_lang = detect_lang(line_text)
for span in line['spans']:
span_type = span['type']
content = ''
if span_type == ContentType.Text:
content = ocr_escape_special_markdown_char(span['content'])
elif span_type == ContentType.InlineEquation:
content = f" ${span['content']}$ "
elif span_type == ContentType.InterlineEquation:
content = f"\n$$\n{span['content']}\n$$\n"
if content != '':
langs = ['zh', 'ja', 'ko']
if line_lang in langs: # some documents put one character per span; per-character language detection is unreliable, so use the whole line
para_text += content # no spaces between content in Chinese/Japanese/Korean text
elif line_lang == 'en':
# if the content ends with a hyphen, do not append a trailing space
if __is_hyphen_at_line_end(content):
para_text += content[:-1]
else:
para_text += content + ' '
else:
para_text += content + ' ' # Western text separates content with spaces
return para_text
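# Shape assumed by merge_para_with_text (a sketch; field names as used above):
# para_block = {'lines': [{'spans': [
#     {'type': ContentType.Text, 'content': 'Hello'},
#     {'type': ContentType.InlineEquation, 'content': 'x^2'},
# ]}]}
# would yield roughly 'Hello  $x^2$  ' for an English line.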
def para_to_standard_format_v2(para_block, img_buket_path, page_idx, drop_reason=None):
para_type = para_block['type']
para_content = {}
if para_type in [BlockType.Text, BlockType.List, BlockType.Index]:
para_content = {
'type': 'text',
'text': merge_para_with_text(para_block),
}
elif para_type == BlockType.Title:
para_content = {
'type': 'text',
'text': merge_para_with_text(para_block),
'text_level': 1,
}
elif para_type == BlockType.InterlineEquation:
para_content = {
'type': 'equation',
'text': merge_para_with_text(para_block),
'text_format': 'latex',
}
elif para_type == BlockType.Image:
para_content = {'type': 'image', 'img_path': '', 'img_caption': [], 'img_footnote': []}
for block in para_block['blocks']:
if block['type'] == BlockType.ImageBody:
for line in block['lines']:
for span in line['spans']:
if span['type'] == ContentType.Image:
if span.get('image_path', ''):
para_content['img_path'] = join_path(img_buket_path, span['image_path'])
if block['type'] == BlockType.ImageCaption:
para_content['img_caption'].append(merge_para_with_text(block))
if block['type'] == BlockType.ImageFootnote:
para_content['img_footnote'].append(merge_para_with_text(block))
elif para_type == BlockType.Table:
para_content = {'type': 'table', 'img_path': '', 'table_caption': [], 'table_footnote': []}
for block in para_block['blocks']:
if block['type'] == BlockType.TableBody:
for line in block['lines']:
for span in line['spans']:
if span['type'] == ContentType.Table:
if span.get('latex', ''):
para_content['table_body'] = f"\n\n$\n {span['latex']}\n$\n\n"
elif span.get('html', ''):
para_content['table_body'] = f"\n\n{span['html']}\n\n"
if span.get('image_path', ''):
para_content['img_path'] = join_path(img_buket_path, span['image_path'])
if block['type'] == BlockType.TableCaption:
para_content['table_caption'].append(merge_para_with_text(block))
if block['type'] == BlockType.TableFootnote:
para_content['table_footnote'].append(merge_para_with_text(block))
para_content['page_idx'] = page_idx
if drop_reason is not None:
para_content['drop_reason'] = drop_reason
return para_content
def union_make(pdf_info_dict: list,
make_mode: str,
drop_mode: str,
img_buket_path: str = '',
):
output_content = []
for page_info in pdf_info_dict:
drop_reason_flag = False
drop_reason = None
if page_info.get('need_drop', False):
drop_reason = page_info.get('drop_reason')
if drop_mode == DropMode.NONE:
pass
elif drop_mode == DropMode.NONE_WITH_REASON:
drop_reason_flag = True
elif drop_mode == DropMode.WHOLE_PDF:
raise Exception(f'drop_mode is {DropMode.WHOLE_PDF}, '
f'drop_reason is {drop_reason}')
elif drop_mode == DropMode.SINGLE_PAGE:
logger.warning(f'drop_mode is {DropMode.SINGLE_PAGE}, '
f'drop_reason is {drop_reason}')
continue
else:
raise Exception('drop_mode cannot be null')
paras_of_layout = page_info.get('para_blocks')
page_idx = page_info.get('page_idx')
if not paras_of_layout:
continue
if make_mode == MakeMode.MM_MD:
page_markdown = ocr_mk_markdown_with_para_core_v2(
paras_of_layout, 'mm', img_buket_path)
output_content.extend(page_markdown)
elif make_mode == MakeMode.NLP_MD:
page_markdown = ocr_mk_markdown_with_para_core_v2(
paras_of_layout, 'nlp')
output_content.extend(page_markdown)
elif make_mode == MakeMode.STANDARD_FORMAT:
for para_block in paras_of_layout:
if drop_reason_flag:
para_content = para_to_standard_format_v2(
para_block, img_buket_path, page_idx, drop_reason)
else:
para_content = para_to_standard_format_v2(
para_block, img_buket_path, page_idx)
output_content.append(para_content)
if make_mode in [MakeMode.MM_MD, MakeMode.NLP_MD]:
return '\n\n'.join(output_content)
elif make_mode == MakeMode.STANDARD_FORMAT:
return output_content
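# Usage sketch (illustrative; pdf_info_dict comes from the parse pipeline):
# md = union_make(pdf_info_dict, MakeMode.MM_MD, DropMode.NONE, 's3://bucket/images')
# content_list = union_make(pdf_info_dict, MakeMode.STANDARD_FORMAT, DropMode.NONE)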
# Copyright (c) Alibaba Cloud.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree
import configparser
import copy
import re
import gc
import time
import torch
from argparse import ArgumentParser
from threading import Thread
from qwen_vl_utils import process_vision_info
from transformers import AutoProcessor, Qwen2VLForConditionalGeneration, TextIteratorStreamer
from fastapi import FastAPI
from pydantic import BaseModel
from typing import Optional
from loguru import logger
app = FastAPI()
DEFAULT_CKPT_PATH = '/home/practice/model/Qwen2-VL-7B-Instruct'
REVISION = 'v1.0.4'
BOX_TAG_PATTERN = r"<box>([\s\S]*?)</box>"
PUNCTUATION = "!?。"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏."
logger.add("parse.log", rotation="10 MB", level="INFO",
format="{time} {level} {message}", encoding='utf-8', enqueue=True)
def _get_args():
parser = ArgumentParser()
parser.add_argument('-c', '--checkpoint_path', type=str, default=DEFAULT_CKPT_PATH,
help='Checkpoint name or path, default to %(default)r')
parser.add_argument('--cpu_only', action='store_true', help='Run demo with CPU only')
parser.add_argument('--flash_attn2', action='store_true', default=False,
help='Enable flash_attention_2 when loading the model.')
parser.add_argument('--share', action='store_true', default=False,
help='Create a publicly shareable link for the interface.')
parser.add_argument('--inbrowser', action='store_true', default=False,
help='Automatically launch the interface in a new tab on the default browser.')
parser.add_argument('--dcu_id', type=str, default='0', help='Specify the GPU ID to load the model onto.')
parser.add_argument(
'--config_path',
default='/home/practice/magic_pdf-main/magic_pdf/config.ini',
)
args = parser.parse_args()
return args
def _load_model_processor(args):
if args.cpu_only:
device_map = 'cpu'
else:
if args.dcu_id is not None:
device_map = {'': f'cuda:{args.dcu_id}'}
print('Using DCU for inference:', f'cuda:{args.dcu_id}')
else:
device_map = 'auto'
if args.flash_attn2:
model = Qwen2VLForConditionalGeneration.from_pretrained(
args.checkpoint_path,
torch_dtype=torch.float16,
attn_implementation='flash_attention_2',
device_map=device_map
)
else:
model = Qwen2VLForConditionalGeneration.from_pretrained(
args.checkpoint_path,
torch_dtype=torch.float16,
device_map=device_map
)
processor = AutoProcessor.from_pretrained(args.checkpoint_path)
return model, processor
def _parse_text(text):
lines = text.split("\n")
lines = [line for line in lines if line.strip() != ""] # drop empty lines
count = 0
parsed_lines = []
for i, line in enumerate(lines):
if "```" in line:
count += 1
items = line.split("`")
if count % 2 == 1:
# start of a code block
parsed_lines.append(f'<pre><code class="language-{items[-1]}">')
else:
# end of a code block
parsed_lines.append("</code></pre>")
else:
if i > 0 and count % 2 == 1:
# escape special characters inside the code block
line = line.replace("`", r"\`")
line = line.replace("<", "&lt;")
line = line.replace(">", "&gt;")
line = line.replace(" ", "&nbsp;")
line = line.replace("*", "&ast;")
line = line.replace("_", "&lowbar;")
line = line.replace("-", "&#45;")
line = line.replace(".", "&#46;")
line = line.replace("!", "&#33;")
line = line.replace("(", "&#40;")
line = line.replace(")", "&#41;")
line = line.replace("$", "&#36;")
# join lines with a space
if parsed_lines:
parsed_lines[-1] += " " + line
else:
parsed_lines.append(line)
text = "".join(parsed_lines)
return text
def _remove_image_special(text):
text = text.replace('<ref>', '').replace('</ref>', '')
return re.sub(r'<box>.*?(</box>|$)', '', text)
def _is_video_file(filename):
video_extensions = ['.mp4', '.avi', '.mkv', '.mov', '.wmv', '.flv', '.webm', '.mpeg']
return any(filename.lower().endswith(ext) for ext in video_extensions)
def _transform_messages(original_messages):
transformed_messages = []
for message in original_messages:
new_content = []
for item in message['content']:
if 'image' in item:
new_item = {'type': 'image', 'image': item['image']}
elif 'text' in item:
new_item = {'type': 'text', 'text': item['text']}
elif 'video' in item:
new_item = {'type': 'video', 'video': item['video']}
else:
continue
new_content.append(new_item)
new_message = {'role': message['role'], 'content': new_content}
transformed_messages.append(new_message)
return transformed_messages
def _gc():
gc.collect()
if torch.cuda.is_available():
torch.cuda.empty_cache()
def call_local_model(model, processor, messages):
messages = _transform_messages(messages)
text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
image_inputs, video_inputs = process_vision_info(messages)
inputs = processor(text=[text], images=image_inputs, videos=video_inputs, padding=True, return_tensors='pt')
inputs = inputs.to(model.device)
tokenizer = processor.tokenizer
streamer = TextIteratorStreamer(tokenizer, timeout=20.0, skip_prompt=True, skip_special_tokens=True)
gen_kwargs = {'max_new_tokens': 512, 'streamer': streamer, **inputs}
thread = Thread(target=model.generate, kwargs=gen_kwargs)
thread.start()
generated_text = ''
for new_text in streamer:
generated_text += new_text
yield _parse_text(generated_text)
def create_predict_fn(model, processor):
def predict(_chatbot, task_history):
chat_query = _chatbot[-1][0]
query = task_history[-1][0]
if len(chat_query) == 0:
_chatbot.pop()
task_history.pop()
return _chatbot
print('User: ' + _parse_text(query))
history_cp = copy.deepcopy(task_history)
full_response = ''
messages = []
content = []
for q, a in history_cp:
if isinstance(q, (tuple, list)):
if _is_video_file(q[0]):
content.append({'video': f'file://{q[0]}'})
else:
content.append({'image': f'file://{q[0]}'})
else:
content.append({'text': q})
messages.append({'role': 'user', 'content': content})
messages.append({'role': 'assistant', 'content': [{'text': a}]})
content = []
messages.pop()
for response in call_local_model(model, processor, messages):
_chatbot[-1] = (_parse_text(chat_query), _remove_image_special(_parse_text(response)))
yield _chatbot
full_response = _parse_text(response)
task_history[-1] = (query, full_response)
print('Qwen-VL-Chat: ' + _parse_text(full_response))
yield _chatbot
return predict
# Load the model at startup
args = _get_args()
model, processor = _load_model_processor(args)
class Item(BaseModel):
image_path: str
text: str
@app.get("/health")
async def health_check():
return {"status": "healthy"}
@app.post("/predict")
async def predict(item: Item):
messages = [
{
'role': 'user',
'content': [
{'image': item.image_path},
{'text': item.text}
]
}
]
start = time.time()
generated_text = ''
for response in call_local_model(model, processor, messages):
generated_text = _parse_text(response)
_gc()
end = time.time()
logger.info(f'[{item.image_path}] parsed result: {generated_text}, elapsed: {end - start}s')
return {"Generated Text": generated_text}
if __name__ == "__main__":
import uvicorn
args = _get_args()
config = configparser.ConfigParser()
config.read(args.config_path)
# parse host and port from the scheme://host:port value of ocr_server
host, port = config.get('server', 'ocr_server').split('://')[1].split(':')
port = int(port)
uvicorn.run(app, host=host, port=port)
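# Launch sketch (illustrative; actual file name and paths may differ):
#   python <this_server>.py --dcu_id 0 --config_path /path/to/config.ini
# The [server] ocr_server entry (scheme://host:port) supplies the bind host and port.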
import os
import json
import requests
from loguru import logger
import argparse
import time
from PIL import Image
import configparser
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--config_path',
default='/home/practice/magic_pdf-main/magic_pdf/config.ini',
)
parser.add_argument(
'--image_path',
default='/path/to/your/image.png',
help='Path to the image file'
)
parser.add_argument(
'--text',
default="描述你在图片中看到的内容",
help='Text input for the model'
)
args = parser.parse_args()
return args
def parse_text(text):
lines = text.split("\n")
lines = [line for line in lines if line.strip() != ""] # drop empty lines
count = 0
parsed_lines = []
for i, line in enumerate(lines):
if "```" in line:
count += 1
items = line.split("`")
if count % 2 == 1:
# start of a code block
parsed_lines.append(f'<pre><code class="language-{items[-1]}">')
else:
# end of a code block
parsed_lines.append("</code></pre>")
else:
if i > 0 and count % 2 == 1:
# escape special characters inside the code block
line = line.replace("`", r"\`")
line = line.replace("<", "&lt;")
line = line.replace(">", "&gt;")
line = line.replace(" ", "&nbsp;")
line = line.replace("*", "&ast;")
line = line.replace("_", "&lowbar;")
line = line.replace("-", "&#45;")
line = line.replace(".", "&#46;")
line = line.replace("!", "&#33;")
line = line.replace("(", "&#40;")
line = line.replace(")", "&#41;")
line = line.replace("$", "&#36;")
# join lines with a space
if parsed_lines:
parsed_lines[-1] += " " + line
else:
parsed_lines.append(line)
text = "".join(parsed_lines)
return text
def unparse_text(parsed_text):
in_code_block = False
lines = parsed_text.split("\n")
unparsed_lines = []
for line in lines:
if "<pre><code" in line:
in_code_block = True
# remove the opening tag
line = line.split(">", 1)[1]
elif "</code></pre>" in line:
in_code_block = False
# remove the closing tag
line = line.rsplit("<", 1)[0]
# reverse the HTML entities
line = line.replace("&lt;", "<")
line = line.replace("&gt;", ">")
line = line.replace("&nbsp;", " ")
line = line.replace("&ast;", "*")
line = line.replace("&lowbar;", "_")
line = line.replace("&#45;", "-")
line = line.replace("&#46;", ".")
line = line.replace("&#33;", "!")
line = line.replace("&#40;", "(")
line = line.replace("&#41;", ")")
line = line.replace("&#36;", "$")
# inside a code block: restore the backslash-escaped backticks
if in_code_block:
line = line.replace(r"\`", "`")
unparsed_lines.append(line)
# join all lines
unparsed_text = "\n".join(unparsed_lines)
return unparsed_text
def compress_image(image_path, max_size=(512, 512)):
img = Image.open(image_path)
width, height = img.size
aspect_ratio = width / height
if width > max_size[0] or height > max_size[1]:
if width > height:
new_width = max_size[0]
new_height = int(new_width / aspect_ratio)
else:
new_height = max_size[1]
new_width = int(new_height * aspect_ratio)
img = img.resize((new_width, new_height), Image.LANCZOS)
img.save(image_path, optimize=True, quality=80)
class PredictClient:
def __init__(self, api_url):
self.api_url = api_url
def check_health(self):
health_check_url = f'{self.api_url}/health'
try:
response = requests.get(health_check_url)
if response.status_code == 200:
logger.info("Server is healthy and ready to process requests.")
return True
else:
logger.error(f'Server health check failed with status code:{response.status_code}')
return False
except requests.exceptions.RequestException as e:
logger.error(f'Health check request failed:{e}')
return False
def predict(self, image_path: str, text: str):
payload = {
"image_path": image_path,
"text": text
}
headers = {'Content-Type': 'application/json'}
response = requests.post(f"{self.api_url}/predict", json=payload, headers=headers)
if response.status_code == 200:
result = response.json()
return result.get('Generated Text', '')
else:
raise Exception(f"Predict API request failed with status code {response.status_code}")
def main():
args = parse_args()
config = configparser.ConfigParser()
config.read(args.config_path)
ocr_server = config.get('server', 'ocr_server')
client = PredictClient(ocr_server)
try:
start_time = time.time() # record the start time
# compress the image before sending
compress_image(args.image_path)
generated_text = client.predict(args.image_path, parse_text(args.text))
end_time = time.time() # record the end time
elapsed_time = end_time - start_time # compute the elapsed time
if generated_text:
clean_text = unparse_text(generated_text) # post-process the generated text
logger.info(f"Image Path: {args.image_path}")
logger.info(f"Generated Text: {clean_text}")
logger.info(f"Elapsed time: {elapsed_time} s")
else:
logger.warning("Received empty generated text.")
except requests.exceptions.RequestException as e:
logger.error(f"Error while making request to predict service: {e}")
except Exception as e:
logger.error(f"Unexpected error occurred: {e}")
if __name__ == "__main__":
main()
# Copyright (c) Alibaba Cloud.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree
import copy
import re
import gc
import torch
from argparse import ArgumentParser
from threading import Thread
from qwen_vl_utils import process_vision_info
from transformers import AutoProcessor
from vllm import LLM, SamplingParams
import os
import configparser
from fastapi import FastAPI
from pydantic import BaseModel
from typing import Optional
app = FastAPI()
DEFAULT_CKPT_PATH = '/home/practice/model/Qwen2-VL-7B-Instruct'
REVISION = 'v1.0.4'
BOX_TAG_PATTERN = r"<box>([\s\S]*?)</box>"
PUNCTUATION = "!?。"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏."
def get_args():
parser = ArgumentParser()
parser.add_argument('-c', '--checkpoint_path', type=str, default=DEFAULT_CKPT_PATH,
help='Checkpoint name or path, default to %(default)r')
parser.add_argument('--cpu_only', default=False, action='store_true', help='Run demo with CPU only')
parser.add_argument('--flash_attn2', action='store_true', default=False,
help='Enable flash_attention_2 when loading the model.')
parser.add_argument('--share', action='store_true', default=False,
help='Create a publicly shareable link for the interface.')
parser.add_argument('--inbrowser', action='store_true', default=False,
help='Automatically launch the interface in a new tab on the default browser.')
parser.add_argument('--gpu_nums', type=int, default=1, help='Number of GPUs to use for tensor parallelism.')
parser.add_argument('--dcu_id', type=str, default='0', help='Specify the GPU ID to load the model onto.')
parser.add_argument(
'--config_path',
default='/home/practice/magic_pdf-main/magic_pdf/config.ini',
)
args = parser.parse_args()
return args
def load_model_processor(args):
if args.cpu_only:
device = 'cpu'
else:
os.environ['CUDA_VISIBLE_DEVICES'] = args.dcu_id
print(f"Visible CUDA devices: {os.environ.get('CUDA_VISIBLE_DEVICES')}")
llm = LLM(
model=args.checkpoint_path,
limit_mm_per_prompt={"image": 10, "video": 10},
trust_remote_code=True,
tensor_parallel_size=args.gpu_nums, # adjust to the actual GPU count
gpu_memory_utilization=0.99,
dtype='float16', # or 'bfloat16'
)
processor = AutoProcessor.from_pretrained(args.checkpoint_path)
return llm, processor
def parse_text(text):
lines = text.split("\n")
lines = [line for line in lines if line.strip() != ""] # drop empty lines
count = 0
parsed_lines = []
for i, line in enumerate(lines):
if "```" in line:
count += 1
items = line.split("`")
if count % 2 == 1:
# start of a code block
parsed_lines.append(f'<pre><code class="language-{items[-1]}">')
else:
# end of a code block
parsed_lines.append("</code></pre>")
else:
if i > 0 and count % 2 == 1:
# escape special characters inside the code block
line = line.replace("`", r"\`")
line = line.replace("<", "&lt;")
line = line.replace(">", "&gt;")
line = line.replace(" ", "&nbsp;")
line = line.replace("*", "&ast;")
line = line.replace("_", "&lowbar;")
line = line.replace("-", "&#45;")
line = line.replace(".", "&#46;")
line = line.replace("!", "&#33;")
line = line.replace("(", "&#40;")
line = line.replace(")", "&#41;")
line = line.replace("$", "&#36;")
# join lines with a space
if parsed_lines:
parsed_lines[-1] += " " + line
else:
parsed_lines.append(line)
text = "".join(parsed_lines)
return text
def unparse_text(parsed_text):
in_code_block = False
lines = parsed_text.split("\n")
unparsed_lines = []
for line in lines:
if "<pre><code" in line:
in_code_block = True
# remove the opening tag
line = line.split(">", 1)[1]
elif "</code></pre>" in line:
in_code_block = False
# remove the closing tag
line = line.rsplit("<", 1)[0]
# reverse the HTML entities
line = line.replace("&lt;", "<")
line = line.replace("&gt;", ">")
line = line.replace("&nbsp;", " ")
line = line.replace("&ast;", "*")
line = line.replace("&lowbar;", "_")
line = line.replace("&#45;", "-")
line = line.replace("&#46;", ".")
line = line.replace("&#33;", "!")
line = line.replace("&#40;", "(")
line = line.replace("&#41;", ")")
line = line.replace("&#36;", "$")
# inside a code block: restore the backslash-escaped backticks
if in_code_block:
line = line.replace(r"\`", "`")
unparsed_lines.append(line)
# join all lines
unparsed_text = "\n".join(unparsed_lines)
return unparsed_text
def remove_image_special(text):
text = text.replace('<ref>', '').replace('</ref>', '')
return re.sub(r'<box>.*?(</box>|$)', '', text)
def is_video_file(filename):
video_extensions = ['.mp4', '.avi', '.mkv', '.mov', '.wmv', '.flv', '.webm', '.mpeg']
return any(filename.lower().endswith(ext) for ext in video_extensions)
def transform_messages(original_messages):
transformed_messages = []
for message in original_messages:
new_content = []
for item in message['content']:
if 'image' in item:
new_item = {'type': 'image', 'image': item['image']}
elif 'text' in item:
new_item = {'type': 'text', 'text': item['text']}
elif 'video' in item:
new_item = {'type': 'video', 'video': item['video']}
else:
continue
new_content.append(new_item)
new_message = {'role': message['role'], 'content': new_content}
transformed_messages.append(new_message)
return transformed_messages
def _gc():
gc.collect()
if torch.cuda.is_available():
torch.cuda.empty_cache()
def call_local_model(llm, processor, messages):
messages = transform_messages(messages)
text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
image_inputs, video_inputs = process_vision_info(messages)
mm_data = {}
if image_inputs is not None:
mm_data["image"] = image_inputs
if video_inputs is not None:
mm_data["video"] = video_inputs
llm_inputs = {
"prompt": text,
"multi_modal_data": mm_data,
}
sampling_params = SamplingParams(
temperature=0.1,
top_p=0.001,
repetition_penalty=1.05,
max_tokens=256,
stop_token_ids=[],
)
outputs = llm.generate([llm_inputs], sampling_params=sampling_params)
generated_text = outputs[0].outputs[0].text
yield parse_text(generated_text)
def create_predict_fn(llm, processor):
def predict(_chatbot, task_history):
chat_query = _chatbot[-1][0]
query = task_history[-1][0]
if len(chat_query) == 0:
_chatbot.pop()
task_history.pop()
return _chatbot
print('User: ' + parse_text(query))
history_cp = copy.deepcopy(task_history)
full_response = ''
messages = []
content = []
for q, a in history_cp:
if isinstance(q, (tuple, list)):
if is_video_file(q[0]):
content.append({'video': f'file://{q[0]}'})
else:
content.append({'image': f'file://{q[0]}'})
else:
content.append({'text': q})
messages.append({'role': 'user', 'content': content})
messages.append({'role': 'assistant', 'content': [{'text': a}]})
content = []
messages.pop()
for response in call_local_model(llm, processor, messages):
_chatbot[-1] = (parse_text(chat_query), remove_image_special(parse_text(response)))
yield _chatbot
full_response = parse_text(response)
task_history[-1] = (query, full_response)
print('Qwen-VL-Chat: ' + unparse_text(full_response))
yield _chatbot
return predict
# Load the model at startup
args = get_args()
llm, processor = load_model_processor(args)
class Item(BaseModel):
image_path: str
text: str
@app.get("/health")
async def health_check():
return {"status": "healthy"}
@app.post("/predict")
async def predict(item: Item):
messages = [
{
'role': 'user',
'content': [
{'image': item.image_path},
{'text': item.text}
]
}
]
generated_text = ''
for response in call_local_model(llm, processor, messages):
generated_text = unparse_text(response)
_gc()
return {"Generated Text": generated_text}
if __name__ == "__main__":
import uvicorn
args = get_args()
config = configparser.ConfigParser()
config.read(args.config_path)
# parse host and port from the scheme://host:port value of ocr_server
host, port = config.get('server', 'ocr_server').split('://')[1].split(':')
port = int(port)
uvicorn.run(app, host=host, port=port)
"""
根据利用meta_scan得到的结果,对pdf是否为文字版进行分类。
定义标准:
一、什么pdf会是文字pdf,只要满足以下任意一条
1. 随机抽取N页,如果有任何一页文字数目大于100
2. 只要存在一个页面,图片的数量为0
二、什么是扫描版pdf,只要满足以下任意一条
1. ~~80%页面上的最大图大小一样并且面积超过页面面积0.6~~
2. 大部分页面上文字的长度都是相等的。
"""
import json
import sys
from collections import Counter
import click
import numpy as np
from loguru import logger
from magic_pdf.libs.commons import mymax, get_top_percent_list
from magic_pdf.filter.pdf_meta_scan import scan_max_page, junk_limit_min
TEXT_LEN_THRESHOLD = 100
AVG_TEXT_LEN_THRESHOLD = 100
TEXT_LEN_SAMPLE_RATIO = 0.1 # sample 10% of the pages for text-length statistics
# A stitching scheme: merge the split images of certain scanned PDFs back into one full image
def merge_images(image_list, page_width, page_height, max_offset=5, max_gap=2):
# First, use a set to drop images with duplicated bboxes
image_list_result = []
for page_images in image_list:
page_result = []
dedup = set()
for img in page_images:
x0, y0, x1, y1, img_bojid = img
if (x0, y0, x1, y1) in dedup: # duplicated bboxes appear here; keep only one
continue
else:
dedup.add((x0, y0, x1, y1))
page_result.append([x0, y0, x1, y1, img_bojid])
image_list_result.append(page_result)
# Next, merge the images on the same page that can be stitched together
merged_images = []
for page_images in image_list_result:
if not page_images:
continue
# sort the page's images top-to-bottom, left-to-right
page_images.sort(key=lambda img: (img[1], img[0]))
merged = [page_images[0]]
for img in page_images[1:]:
x0, y0, x1, y1, imgid = img
last_img = merged[-1]
last_x0, last_y0, last_x1, last_y1, last_imgid = last_img
# precondition for stitching: a single image covers at least 90% of the page width or height
full_width = abs(x1 - x0) >= page_width * 0.9
full_height = abs(y1 - y0) >= page_height * 0.9
# if the width qualifies, check for vertical stitching
if full_width:
# vertical stitching: the left and right edges may each shift by at most max_offset, and the gap between the first image's bottom edge and the second image's top edge is at most max_gap
close1 = (last_x0 - max_offset) <= x0 <= (last_x0 + max_offset) and (last_x1 - max_offset) <= x1 <= (
last_x1 + max_offset) and (last_y1 - max_gap) <= y0 <= (last_y1 + max_gap)
# if the height qualifies, check for horizontal stitching
if full_height:
# horizontal stitching: the top and bottom edges may each shift by at most max_offset, and the gap between the first image's right edge and the second image's left edge is at most max_gap
close2 = (last_y0 - max_offset) <= y0 <= (last_y0 + max_offset) and (last_y1 - max_offset) <= y1 <= (
last_y1 + max_offset) and (last_x1 - max_gap) <= x0 <= (last_x1 + max_gap)
# Check if the image can be merged with the last image
if (full_width and close1) or (full_height and close2):
# Merge the image with the last image
merged[-1] = [min(x0, last_x0), min(y0, last_y0),
max(x1, last_x1), max(y1, last_y1), imgid]
else:
# Add the image as a new image
merged.append(img)
merged_images.append(merged)
return merged_images
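# Worked example (illustrative): two full-width strips stacked vertically on a
# 100x100 page merge into one image; the merged bbox keeps the later objid.
# merge_images([[(0, 0, 100, 50, 1), (0, 50, 100, 100, 2)]], 100, 100)
# -> [[[0, 0, 100, 100, 2]]]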
def classify_by_area(total_page: int, page_width, page_height, img_sz_list, text_len_list: list):
"""
80%页面上的最大图大小一样并且面积超过页面面积0.6则返回False,否则返回True
:param pdf_path:
:param total_page:
:param page_width:
:param page_height:
:param img_sz_list:
:return:
"""
# # Any page without images would suggest a text PDF, but only if that page also has text;
# # some scanned PDFs contain blank pages with neither images nor text.
# if any([len(img_sz) == 0 for img_sz in img_sz_list]): # there are pages without images
# # find the indexes of those pages
# empty_page_index = [i for i, img_sz in enumerate(img_sz_list) if len(img_sz) == 0]
# # then check whether those pages carry text
# text_len_at_page_idx = [text_len for i, text_len in enumerate(text_len_list) if i in empty_page_index and text_len > 0]
# if len(text_len_at_page_idx) > TEXT_LEN_THRESHOLD: # no images but enough text suggests a text PDF; with no text we cannot decide here and defer to the next step, requiring the page's text length to exceed the threshold
# return True
# Use objid to drop images repeated more than 10 times; they are hidden transparent layers whose ids are all identical
# first, count the occurrences of each id
objid_cnt = Counter([objid for page_img_sz in img_sz_list for _, _, _, _, objid in page_img_sz])
# then drop the ids that repeat too often
if total_page >= scan_max_page: # 新的meta_scan只扫描前 scan_max_page 页,页数大于 scan_max_page 当total_page为 scan_max_page
total_page = scan_max_page
repeat_threshold = 2 # 把bad_image的阈值设为2
# repeat_threshold = min(2, total_page) # 当total_page为1时,repeat_threshold为1,会产生误判导致所有img变成bad_img
bad_image_objid = set([objid for objid, cnt in objid_cnt.items() if cnt >= repeat_threshold])
# bad_image_page_idx = [i for i, page_img_sz in enumerate(img_sz_list) if any([objid in bad_image_objid for _, _, _, _, objid in page_img_sz])]
# text_len_at_bad_image_page_idx = [text_len for i, text_len in enumerate(text_len_list) if i in bad_image_page_idx and text_len > 0]
# Special case: a text PDF whose every page is covered by one huge transparent image, where huge means more than 90% of the page area
# fake_image_ids = [objid for objid in bad_image_objid if
# any([abs((x1 - x0) * (y1 - y0) / page_width * page_height) > 0.9 for images in img_sz_list for
# x0, y0, x1, y1, _ in images])] # the original code; the any() here was always true, reason unknown
# fake_image_ids = [objid for objid in bad_image_objid for images in img_sz_list for x0, y0, x1, y1, img_id in images
# if img_id == objid and abs((x1 - x0) * (y1 - y0)) / (page_width * page_height) > 0.9]
# if len(fake_image_ids) > 0 and any([l > TEXT_LEN_THRESHOLD for l in text_len_at_bad_image_page_idx]): # the pages carrying these transparent images have text above the threshold
# return True
img_sz_list = [[img_sz for img_sz in page_img_sz if img_sz[-1] not in bad_image_objid] for page_img_sz in
img_sz_list] # filter out the repeated images
# Some scanned PDFs split one page into many image pieces; stitch them back together before computing areas
img_sz_list = merge_images(img_sz_list, page_width, page_height)
# Compute the area of the largest image on each page as a fraction of the page area
max_image_area_per_page = [mymax([(x1 - x0) * (y1 - y0) for x0, y0, x1, y1, _ in page_img_sz]) for page_img_sz in
img_sz_list]
page_area = page_width * page_height
max_image_area_per_page = [area / page_area for area in max_image_area_per_page]
max_image_area_per_page = [area for area in max_image_area_per_page if area > 0.5]
if len(max_image_area_per_page) >= 0.5 * total_page: # threshold lowered from 0.8 to 0.5 to cover 2-of-3 and 1-of-2 page cases
# this holds only because the repeated images, hidden transparent layers sharing one id, were removed above
return False
else:
return True
def classify_by_text_len(text_len_list: list, total_page: int):
"""
随机抽取10%的页面,如果少于5个页面,那么就取全部页面。
查看页面上的文字长度,如果有任何一个页面的文字长度大于TEXT_LEN_THRESHOLD,那么就是文字pdf
:param total_page:
:param text_len_list:
:return:
"""
select_page_cnt = int(total_page * TEXT_LEN_SAMPLE_RATIO) # 选取10%的页面
if select_page_cnt < 5:
select_page_cnt = total_page
    # # skip the first and last 10 pages
    # if total_page > 20:  # when there are more than 20 pages
    #     page_range = list(range(10, total_page - 10))  # pick from page 11 through the 11th-from-last
    # else:
    #     page_range = list(range(total_page))  # otherwise use all pages
    # page_num = np.random.choice(page_range, min(select_page_cnt, len(page_range)), replace=False)
    # Skipping 10 pages at each end breaks down for 21- or 22-page pdfs: the one or two middle
    # pages may happen to be empty and cause a misclassification. With the avg_words rule in
    # place, this rule can be ignored.
page_num = np.random.choice(total_page, select_page_cnt, replace=False)
text_len_lst = [text_len_list[i] for i in page_num]
is_text_pdf = any([text_len > TEXT_LEN_THRESHOLD for text_len in text_len_lst])
return is_text_pdf
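# Deterministic illustration (assuming TEXT_LEN_THRESHOLD == 200, defined elsewhere):
# for a 30-page pdf, 10% is 3 pages, fewer than 5, so every page is checked:
#   classify_by_text_len([0] * 29 + [999], 30)  # -> True, one page clears the threshold
# Randomness only affects the result once total_page * TEXT_LEN_SAMPLE_RATIO reaches 5.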
def classify_by_avg_words(text_len_list: list):
"""
补充规则,如果平均每页字数少于 AVG_TEXT_LEN_THRESHOLD,就不是文字pdf
主要是各种图集
:param text_len_list:
:return:
"""
sum_words = sum(text_len_list)
count_of_numbers = len(text_len_list)
if count_of_numbers == 0:
is_text_pdf = False
else:
avg_words = round(sum_words / count_of_numbers)
if avg_words > AVG_TEXT_LEN_THRESHOLD:
is_text_pdf = True
else:
is_text_pdf = False
return is_text_pdf
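# Illustration (assuming AVG_TEXT_LEN_THRESHOLD == 100, defined elsewhere):
#   classify_by_avg_words([10, 20, 30])    # avg 20  -> False, reads like an image album
#   classify_by_avg_words([500, 800, 20])  # avg 440 -> True
#   classify_by_avg_words([])              # no pages -> False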
def classify_by_img_num(img_sz_list: list, img_num_list: list):
"""
补充规则,有一种扫描版本的PDF,每一页都会放所有的扫描页进去,在 metascan 时会被去重,
这种pdf的 metasca 扫描结果的特点是 img_sz_list 内全是空元素,img_num_list中每一页的数量都很大且相同
:param img_sz_list:
:param img_num_list:
:return:
"""
    # count the non-empty elements of img_sz_list
    count_img_sz_list_not_none = sum(1 for item in img_sz_list if item)
    # take the top 80% of the elements
    top_eighty_percent = get_top_percent_list(img_num_list, 0.8)
    # at most one non-empty element in img_sz_list, the top 80% of counts all equal, and the maximum at least junk_limit_min
    if count_img_sz_list_not_none <= 1 and len(set(top_eighty_percent)) == 1 and max(img_num_list) >= junk_limit_min:
        # comparing max and min was the old way of testing whether all values in the list are equal
        # min_imgs = min(img_num_list)
        # max_imgs = max(img_num_list)
        #
        # if count_img_sz_list_not_none == 0 and max_imgs == min_imgs and max_imgs >= junk_limit_min:
        return False  # when all three conditions hold, this is definitely not a text pdf
else:
        return True  # otherwise it may still be a text pdf; let the other rules decide
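# Illustration (with junk_limit_min defined at module level): a scan that embeds
# every scanned page into each page is emptied out by meta-scan deduplication:
#   classify_by_img_num([[], [], []], [50, 50, 50])  # -> False, certainly not a text pdf
# Any other shape returns True and defers the decision to the remaining rules.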
def classify_by_text_layout(text_layout_per_page: list):
"""
判断文本布局是否以竖排为主。
Args:
text_layout_per_page (list): 文本布局列表,列表中的每个元素表示一页的文本布局,
值为'vertical'表示竖排,值为'horizontal'表示横排。
Returns:
bool: 若文本布局以竖排为主,则返回False;否则返回True。
"""
# 统计text_layout_per_page中竖排的个数
count_vertical = sum(1 for item in text_layout_per_page if item == 'vertical')
# 统计text_layout_per_page中横排的个数
count_horizontal = sum(1 for item in text_layout_per_page if item == 'horizontal')
# 计算text_layout_per_page中竖排的占比
known_layout_cnt = count_vertical + count_horizontal
if known_layout_cnt != 0:
ratio = count_vertical / known_layout_cnt
if ratio >= 0.5: # 阈值设为0.5,适配3页里面有2页和两页里有一页的情况
return False # 文本布局以竖排为主,认为不是文字版pdf
else:
return True # 文本布局以横排为主,认为是文字版pdf
else:
return False # 文本布局未知,默认认为不是文字版pdf
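# Illustration:
#   classify_by_text_layout(['vertical', 'vertical', 'horizontal'])  # 2/3 vertical -> False
#   classify_by_text_layout(['horizontal', 'horizontal', 'unknow'])  # 0/2 vertical -> True
#   classify_by_text_layout(['unknow', 'unknow'])                    # nothing known -> False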
def classify_by_img_narrow_strips(page_width, page_height, img_sz_list):
"""
判断一页是否由细长条组成,有两个条件:
1. 图片的宽或高达到页面宽或高的90%,且长边需要是窄边长度的数倍以上
2. 整个页面所有的图片有80%以上满足条件1
Args:
page_width (float): 页面宽度
page_height (float): 页面高度
img_sz_list (list): 图片尺寸列表,每个元素为一个元组,表示图片的矩形区域和尺寸,形如(x0, y0, x1, y1, size),其中(x0, y0)为矩形区域的左上角坐标,(x1, y1)为矩形区域的右下角坐标,size为图片的尺寸
Returns:
bool: 如果满足条件的页面的比例小于0.5,返回True,否则返回False
"""
    def is_narrow_strip(img):
        x0, y0, x1, y1, _ = img
        width, height = x1 - x0, y1 - y0
        return any([
            # image width is at least 90% of the page width and at least 4x its height
            width >= page_width * 0.9 and width >= height * 4,
            # image height is at least 90% of the page height and at least 4x its width
            height >= page_height * 0.9 and height >= width * 4,
        ])
    # number of pages that satisfy the condition
    narrow_strip_pages_count = 0
    # walk through every page
    for page_img_list in img_sz_list:
        # skip empty pages
        if not page_img_list:
            continue
        # total number of images on the page
        total_images = len(page_img_list)
        # number of narrow-strip images on the page
        narrow_strip_images_count = 0
        for img in page_img_list:
            if is_narrow_strip(img):
                narrow_strip_images_count += 1
        # skip pages with fewer than 5 narrow-strip images
        if narrow_strip_images_count < 5:
            continue
        else:
            # count the page when narrow strips make up at least 80% of its images
            if narrow_strip_images_count / total_images >= 0.8:
                narrow_strip_pages_count += 1
    # ratio of qualifying pages
    narrow_strip_pages_ratio = narrow_strip_pages_count / len(img_sz_list)
return narrow_strip_pages_ratio < 0.5
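# Illustration (hypothetical numbers): a 600x800 pt page tiled with six 600x80
# strips qualifies, since each strip spans >= 90% of the page width, is >= 4x as
# wide as it is tall, there are at least 5 of them, and they make up >= 80% of
# the page's images; once half of all pages look like that, the function returns False.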
def classify(total_page: int, page_width, page_height, img_sz_list: list, text_len_list: list, img_num_list: list,
text_layout_list: list, invalid_chars: bool):
"""
这里的图片和页面长度单位是pts
:param total_page:
:param text_len_list:
:param page_width:
:param page_height:
:param img_sz_list:
:param pdf_path:
:return:
"""
results = {
'by_image_area': classify_by_area(total_page, page_width, page_height, img_sz_list, text_len_list),
'by_text_len': classify_by_text_len(text_len_list, total_page),
'by_avg_words': classify_by_avg_words(text_len_list),
'by_img_num': classify_by_img_num(img_sz_list, img_num_list),
'by_text_layout': classify_by_text_layout(text_layout_list),
'by_img_narrow_strips': classify_by_img_narrow_strips(page_width, page_height, img_sz_list),
'by_invalid_chars': invalid_chars,
}
if all(results.values()):
return True, results
elif not any(results.values()):
return False, results
else:
        logger.warning(
            f"pdf classification rules disagree, by_image_area: {results['by_image_area']},"
            f" by_text_len: {results['by_text_len']}, by_avg_words: {results['by_avg_words']}, by_img_num: {results['by_img_num']},"
            f" by_text_layout: {results['by_text_layout']}, by_img_narrow_strips: {results['by_img_narrow_strips']},"
            f" by_invalid_chars: {results['by_invalid_chars']}")  # these warnings make unusual pdfs easy to find, so the classifier can be tuned against them
return False, results
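# The vote above is deliberately strict: only a unanimous True yields
# (True, results) and only a unanimous False yields (False, results); any split
# vote is logged for inspection and conservatively falls back to (False, results).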
@click.command()
@click.option("--json-file", type=str, help="pdf信息")
def main(json_file):
if json_file is None:
print("json_file is None", file=sys.stderr)
exit(0)
try:
with open(json_file, "r") as f:
for l in f:
if l.strip() == "":
continue
o = json.loads(l)
total_page = o["total_page"]
page_width = o["page_width_pts"]
page_height = o["page_height_pts"]
img_sz_list = o["image_info_per_page"]
text_len_list = o['text_len_per_page']
text_layout_list = o['text_layout_per_page']
pdf_path = o['pdf_path']
is_encrypted = o['is_encrypted']
is_needs_password = o['is_needs_password']
                if is_encrypted or total_page == 0 or is_needs_password:  # skip encrypted pdfs, password-protected pdfs and pdfs without pages
                    continue
                # img_num_list and invalid_chars come straight from the meta scan output
                tag, _ = classify(total_page, page_width, page_height, img_sz_list, text_len_list,
                                  o['imgs_per_page'], text_layout_list, o['invalid_chars'])
                o['is_text_pdf'] = tag
print(json.dumps(o, ensure_ascii=False))
except Exception as e:
print("ERROR: ", e, file=sys.stderr)
if __name__ == "__main__":
main()
"""
输入: s3路径,每行一个
输出: pdf文件元信息,包括每一页上的所有图片的长宽高,bbox位置
"""
import sys
import click
from magic_pdf.libs.commons import read_file, mymax, get_top_percent_list
from magic_pdf.libs.commons import fitz
from loguru import logger
from collections import Counter
from magic_pdf.libs.drop_reason import DropReason
from magic_pdf.libs.language import detect_lang
from magic_pdf.libs.pdf_check import detect_invalid_chars
scan_max_page = 50
junk_limit_min = 10
def calculate_max_image_area_per_page(result: list, page_width_pts, page_height_pts):
max_image_area_per_page = [mymax([(x1 - x0) * (y1 - y0) for x0, y0, x1, y1, _ in page_img_sz]) for page_img_sz in
result]
page_area = int(page_width_pts) * int(page_height_pts)
max_image_area_per_page = [area / page_area for area in max_image_area_per_page]
max_image_area_per_page = [area for area in max_image_area_per_page if area > 0.6]
return max_image_area_per_page
def process_image(page, junk_img_bojids=[]):
    page_result = []  # holds this page's image records [x0, y0, x1, y1, objid]
    items = page.get_images()
    dedup = set()
    for img in items:
        # page.get_images() returns a list of tuples; each tuple's first element is the image's xref (object id)
        img_bojid = img[0]  # globally unique within the pdf; an image that recurs throughout the file is likely junk such as a watermark, header or footer
        if img_bojid in junk_img_bojids:  # skip junk images
            continue
        recs = page.get_image_rects(img, transform=True)
        if recs:
            rec = recs[0][0]
            x0, y0, x1, y1 = map(int, rec)
            width = x1 - x0
            height = y1 - y0
            if (x0, y0, x1, y1, img_bojid) in dedup:  # duplicate bboxes show up here and only need to be kept once
                continue
            if not all([width, height]):  # neither side may be 0, otherwise the image is invisible and meaningless
                continue
            dedup.add((x0, y0, x1, y1, img_bojid))
            page_result.append([x0, y0, x1, y1, img_bojid])
    return page_result
def get_image_info(doc: fitz.Document, page_width_pts, page_height_pts) -> list:
"""
返回每个页面里的图片的四元组,每个页面多个图片。
:param doc:
:return:
"""
# 使用 Counter 计数 img_bojid 的出现次数
img_bojid_counter = Counter(img[0] for page in doc for img in page.get_images())
# 找出出现次数超过 len(doc) 半数的 img_bojid
junk_limit = max(len(doc) * 0.5, junk_limit_min) # 对一些页数比较少的进行豁免
junk_img_bojids = [img_bojid for img_bojid, count in img_bojid_counter.items() if count >= junk_limit]
#todo 加个判断,用前十页就行,这些垃圾图片需要满足两个条件,不止出现的次数要足够多,而且图片占书页面积的比例要足够大,且图与图大小都差不多
#有两种扫描版,一种文字版,这里可能会有误判
#扫描版1:每页都有所有扫描页图片,特点是图占比大,每页展示1张
#扫描版2,每页存储的扫描页图片数量递增,特点是图占比大,每页展示1张,需要清空junklist跑前50页图片信息用于分类判断
#文字版1.每页存储所有图片,特点是图片占页面比例不大,每页展示可能为0也可能不止1张 这种pdf需要拿前10页抽样检测img大小和个数,如果符合需要清空junklist
imgs_len_list = [len(page.get_images()) for page in doc]
special_limit_pages = 10
# 统一用前十页结果做判断
result = []
break_loop = False
for i, page in enumerate(doc):
if break_loop:
break
if i >= special_limit_pages:
break
        page_result = process_image(page)  # junk_img_bojids is deliberately not passed: gather every image of the first ten pages for the analysis below
        result.append(page_result)
    for item in result:
        if not any(item):  # a page without images: this is a text pdf, so check whether it is the special kind
            if max(imgs_len_list) == min(imgs_len_list) and max(
                    imgs_len_list) >= junk_limit_min:  # the special kind: clear the junk list and break
                junk_img_bojids = []
            else:  # an ordinary text pdf that still contains junk images: keep the junk list
                pass
            break_loop = True
            break
    if not break_loop:
        # take the top 80% of the elements
        top_eighty_percent = get_top_percent_list(imgs_len_list, 0.8)
        # check whether the top 80% are all equal
        if len(set(top_eighty_percent)) == 1 and max(imgs_len_list) >= junk_limit_min:
            # # if all of the first 10 pages carry images, decide from the per-page counts whether to clear the junk list
            # if max(imgs_len_list) == min(imgs_len_list) and max(imgs_len_list) >= junk_limit_min:
            # the first 10 pages all carry images with matching counts; measure how much of each page the images cover to decide whether to clear the junk list
            max_image_area_per_page = calculate_max_image_area_per_page(result, page_width_pts, page_height_pts)
            if len(max_image_area_per_page) < 0.8 * special_limit_pages:  # the first 10 pages are not all dominated by large images, so this may be a text pdf: clear the junk list
                junk_img_bojids = []
            else:  # the first 10 pages all have images, 80% of them large, with equal and high counts per page: scan type 1, keep the junk list
                pass
        else:  # per-page image counts differ: clear the junk list and rescan the first 50 pages in full
            junk_img_bojids = []
    # now actually collect the image info of the first 50 pages
result = []
for i, page in enumerate(doc):
if i >= scan_max_page:
break
page_result = process_image(page, junk_img_bojids)
# logger.info(f"page {i} img_len: {len(page_result)}")
result.append(page_result)
return result, junk_img_bojids
def get_pdf_page_size_pts(doc: fitz.Document):
page_cnt = len(doc)
l: int = min(page_cnt, 50)
    # collect every width and height into two lists and take the median of each
    # (one pdf tucked landscape pages inside a portrait document, which swapped width and height)
page_width_list = []
page_height_list = []
for i in range(l):
page = doc[i]
page_rect = page.rect
page_width_list.append(page_rect.width)
page_height_list.append(page_rect.height)
page_width_list.sort()
page_height_list.sort()
median_width = page_width_list[len(page_width_list) // 2]
median_height = page_height_list[len(page_height_list) // 2]
return median_width, median_height
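# Illustration: with the first pages measuring
#   widths  [595, 842, 595] -> sorted [595, 595, 842], median 595
#   heights [842, 595, 842] -> sorted [595, 842, 842], median 842
# the function reports 595 x 842 pts, so a single rotated page cannot swap the
# document's reported width and height.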
def get_pdf_textlen_per_page(doc: fitz.Document):
text_len_lst = []
for page in doc:
        # all blocks, including both img and text
        # text_block = page.get_text("blocks")
        # text blocks only
        # text_block = page.get_text("words")
        # text_block_len = sum([len(t[4]) for t in text_block])
        # the full text as one str
text_block = page.get_text("text")
text_block_len = len(text_block)
# logger.info(f"page {page.number} text_block_len: {text_block_len}")
text_len_lst.append(text_block_len)
return text_len_lst
def get_pdf_text_layout_per_page(doc: fitz.Document):
"""
根据PDF文档的每一页文本布局,判断该页的文本布局是横向、纵向还是未知。
Args:
doc (fitz.Document): PDF文档对象。
Returns:
List[str]: 每一页的文本布局(横向、纵向、未知)。
"""
text_layout_list = []
for page_id, page in enumerate(doc):
if page_id >= scan_max_page:
break
        # per-page counters for vertical and horizontal text lines
        vertical_count = 0
        horizontal_count = 0
        text_dict = page.get_text("dict")
        if "blocks" in text_dict:
            for block in text_dict["blocks"]:
                if 'lines' in block:
                    for line in block["lines"]:
                        # corner coordinates of the line's bbox
                        x0, y0, x1, y1 = line['bbox']
                        # width and height of the bbox
                        width = x1 - x0
                        height = y1 - y0
                        # area of the bbox
                        area = width * height
                        font_sizes = []
                        for span in line['spans']:
                            if 'size' in span:
                                font_sizes.append(span['size'])
                        if len(font_sizes) > 0:
                            average_font_size = sum(font_sizes) / len(font_sizes)
                        else:
                            average_font_size = 10  # some lines expose no font size; fall back to a default of 10
                        if area <= average_font_size ** 2:  # a bbox no bigger than the square of the average font size holds a single character, whose direction cannot be judged
                            continue
                        else:
                            if 'wmode' in line:  # use wmode to tell the text direction
                                if line['wmode'] == 1:  # vertical text
                                    vertical_count += 1
                                elif line['wmode'] == 0:  # horizontal text
                                    horizontal_count += 1
                            # if 'dir' in line:  # infer the text direction from the rotation angle
                            #     # the line's "dir" value
                            #     dir_value = line['dir']
                            #     cosine, sine = dir_value
                            #     # compute the angle
                            #     angle = math.degrees(math.acos(cosine))
                            #
                            #     # horizontal text?
                            #     if abs(angle - 0) < 0.01 or abs(angle - 180) < 0.01:
                            #         # line_text = ' '.join(span['text'] for span in line['spans'])
                            #         # print('This line is horizontal:', line_text)
                            #         horizontal_count += 1
                            #     # vertical text?
                            #     elif abs(angle - 90) < 0.01 or abs(angle - 270) < 0.01:
                            #         # line_text = ' '.join(span['text'] for span in line['spans'])
                            #         # print('This line is vertical:', line_text)
                            #         vertical_count += 1
        # print(f"page_id: {page_id}, vertical_count: {vertical_count}, horizontal_count: {horizontal_count}")
        # decide the layout of this page
        if vertical_count == 0 and horizontal_count == 0:  # the page has no text, so its layout cannot be judged
            text_layout_list.append("unknow")
            continue
else:
            if vertical_count > horizontal_count:  # more vertical than horizontal lines on this page
                text_layout_list.append("vertical")
            else:  # more horizontal than vertical lines on this page
                text_layout_list.append("horizontal")
# logger.info(f"page_id: {page_id}, vertical_count: {vertical_count}, horizontal_count: {horizontal_count}")
return text_layout_list
'''Custom exception raised for pdfs that have too many SVG drawings on a single page'''
class PageSvgsTooManyError(Exception):
def __init__(self, message="Page SVGs are too many"):
self.message = message
super().__init__(self.message)
def get_svgs_per_page(doc: fitz.Document):
svgs_len_list = []
for page_id, page in enumerate(doc):
# svgs = page.get_drawings()
        svgs = page.get_cdrawings()  # switched to get_cdrawings for better performance
len_svgs = len(svgs)
if len_svgs >= 3000:
raise PageSvgsTooManyError()
else:
svgs_len_list.append(len_svgs)
# logger.info(f"page_id: {page_id}, svgs_len: {len(svgs)}")
return svgs_len_list
def get_imgs_per_page(doc: fitz.Document):
imgs_len_list = []
for page_id, page in enumerate(doc):
imgs = page.get_images()
imgs_len_list.append(len(imgs))
# logger.info(f"page_id: {page}, imgs_len: {len(imgs)}")
return imgs_len_list
def get_language(doc: fitz.Document):
"""
获取PDF文档的语言。
Args:
doc (fitz.Document): PDF文档对象。
Returns:
str: 文档语言,如 "en-US"。
"""
language_lst = []
for page_id, page in enumerate(doc):
if page_id >= scan_max_page:
break
        # the full text as one str
        text_block = page.get_text("text")
        page_language = detect_lang(text_block)
        language_lst.append(page_language)
        # logger.info(f"page_id: {page_id}, page_language: {page_language}")
    # count how many pages use each language
    count_dict = Counter(language_lst)
    # report the most frequent language
    language = max(count_dict, key=count_dict.get)
return language
def check_invalid_chars(pdf_bytes):
"""
乱码检测
"""
return detect_invalid_chars(pdf_bytes)
def pdf_meta_scan(pdf_bytes: bytes):
"""
:param s3_pdf_path:
:param pdf_bytes: pdf文件的二进制数据
几个维度来评价:是否加密,是否需要密码,纸张大小,总页数,是否文字可提取
"""
doc = fitz.open("pdf", pdf_bytes)
is_needs_password = doc.needs_pass
is_encrypted = doc.is_encrypted
total_page = len(doc)
if total_page == 0:
logger.warning(f"drop this pdf, drop_reason: {DropReason.EMPTY_PDF}")
result = {"_need_drop": True, "_drop_reason": DropReason.EMPTY_PDF}
return result
else:
page_width_pts, page_height_pts = get_pdf_page_size_pts(doc)
# logger.info(f"page_width_pts: {page_width_pts}, page_height_pts: {page_height_pts}")
# svgs_per_page = get_svgs_per_page(doc)
# logger.info(f"svgs_per_page: {svgs_per_page}")
imgs_per_page = get_imgs_per_page(doc)
# logger.info(f"imgs_per_page: {imgs_per_page}")
image_info_per_page, junk_img_bojids = get_image_info(doc, page_width_pts, page_height_pts)
# logger.info(f"image_info_per_page: {image_info_per_page}, junk_img_bojids: {junk_img_bojids}")
text_len_per_page = get_pdf_textlen_per_page(doc)
# logger.info(f"text_len_per_page: {text_len_per_page}")
text_layout_per_page = get_pdf_text_layout_per_page(doc)
# logger.info(f"text_layout_per_page: {text_layout_per_page}")
text_language = get_language(doc)
# logger.info(f"text_language: {text_language}")
invalid_chars = check_invalid_chars(pdf_bytes)
# logger.info(f"invalid_chars: {invalid_chars}")
        # finally emit one json record
res = {
"is_needs_password": is_needs_password,
"is_encrypted": is_encrypted,
"total_page": total_page,
"page_width_pts": int(page_width_pts),
"page_height_pts": int(page_height_pts),
"image_info_per_page": image_info_per_page,
"text_len_per_page": text_len_per_page,
"text_layout_per_page": text_layout_per_page,
"text_language": text_language,
# "svgs_per_page": svgs_per_page,
"imgs_per_page": imgs_per_page, # 增加每页img数量list
"junk_img_bojids": junk_img_bojids, # 增加垃圾图片的bojid list
"invalid_chars": invalid_chars,
"metadata": doc.metadata
}
# logger.info(json.dumps(res, ensure_ascii=False))
return res
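# Minimal local usage sketch (hypothetical path; requires `import json` for the dump):
#   with open('/path/to/some.pdf', 'rb') as f:
#       meta = pdf_meta_scan(f.read())
#   print(json.dumps(meta, ensure_ascii=False))
# The resulting dict matches the JSONL rows consumed by the classifier in
# pdf_classify_by_type.py.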
@click.command()
@click.option('--s3-pdf-path', help='path of the pdf file on s3')
@click.option('--s3-profile', help='s3 profile to use')
def main(s3_pdf_path: str, s3_profile: str):
    """Read a pdf from s3 and run the meta scan on it."""
try:
file_content = read_file(s3_pdf_path, s3_profile)
pdf_meta_scan(file_content)
except Exception as e:
print(f"ERROR: {s3_pdf_path}, {e}", file=sys.stderr)
logger.exception(e)
if __name__ == '__main__':
main()
# "D:\project/20231108code-clean\pdf_cost_time\竖排例子\净空法师-大乘无量寿.pdf"
# "D:\project/20231108code-clean\pdf_cost_time\竖排例子\三国演义_繁体竖排版.pdf"
# "D:\project/20231108code-clean\pdf_cost_time\scihub\scihub_86800000\libgen.scimag86880000-86880999.zip_10.1021/acsami.1c03109.s002.pdf"
# "D:/project/20231108code-clean/pdf_cost_time/scihub/scihub_18600000/libgen.scimag18645000-18645999.zip_10.1021/om3006239.pdf"
# file_content = read_file("D:/project/20231108code-clean/pdf_cost_time/scihub/scihub_31000000/libgen.scimag31098000-31098999.zip_10.1109/isit.2006.261791.pdf","")
# file_content = read_file("D:\project/20231108code-clean\pdf_cost_time\竖排例子\净空法师_大乘无量寿.pdf","")
# doc = fitz.open("pdf", file_content)
# text_layout_lst = get_pdf_text_layout_per_page(doc)
# print(text_layout_lst)
import os
from pathlib import Path
from loguru import logger
from magic_pdf.integrations.rag.type import (ElementRelation, LayoutElements,
Node)
from magic_pdf.integrations.rag.utils import inference
class RagPageReader:
def __init__(self, pagedata: LayoutElements):
self.o = [
Node(
category_type=v.category_type,
text=v.text,
image_path=v.image_path,
anno_id=v.anno_id,
latex=v.latex,
html=v.html,
) for v in pagedata.layout_dets
]
self.pagedata = pagedata
def __iter__(self):
return iter(self.o)
def get_rel_map(self) -> list[ElementRelation]:
return self.pagedata.extra.element_relation
class RagDocumentReader:
def __init__(self, ragdata: list[LayoutElements]):
self.o = [RagPageReader(v) for v in ragdata]
def __iter__(self):
return iter(self.o)
class DataReader:
def __init__(self, path_or_directory: str, method: str, output_dir: str):
self.path_or_directory = path_or_directory
self.method = method
self.output_dir = output_dir
self.pdfs = []
if os.path.isdir(path_or_directory):
for doc_path in Path(path_or_directory).glob('*.pdf'):
self.pdfs.append(doc_path)
else:
assert path_or_directory.endswith('.pdf')
self.pdfs.append(Path(path_or_directory))
def get_documents_count(self) -> int:
"""Returns the number of documents in the directory."""
return len(self.pdfs)
def get_document_result(self, idx: int) -> RagDocumentReader | None:
"""
Args:
idx (int): the index of documents under the
directory path_or_directory
Returns:
RagDocumentReader | None: RagDocumentReader is an iterable object,
more details @RagDocumentReader
"""
if idx >= self.get_documents_count() or idx < 0:
logger.error(f'invalid idx: {idx}')
return None
res = inference(str(self.pdfs[idx]), self.output_dir, self.method)
if res is None:
logger.warning(f'failed to inference pdf {self.pdfs[idx]}')
return None
return RagDocumentReader(res)
def get_document_filename(self, idx: int) -> Path:
"""get the filename of the document."""
return self.pdfs[idx]
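# Usage sketch (hypothetical paths):
#   reader = DataReader('/data/pdfs', 'ocr', '/data/output')
#   for i in range(reader.get_documents_count()):
#       doc = reader.get_document_result(i)  # RagDocumentReader | None
#       if doc is None:
#           continue
#       for page in doc:          # each page is a RagPageReader
#           for node in page:     # each node is a Node
#               print(node.category_type, (node.text or '')[:40])
#           print(page.get_rel_map())  # sibling relations on this page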
from enum import Enum
from pydantic import BaseModel, Field
# rag
class CategoryType(Enum):  # py3.10 does not support StrEnum
text = 'text'
title = 'title'
interline_equation = 'interline_equation'
image = 'image'
image_body = 'image_body'
image_caption = 'image_caption'
table = 'table'
table_body = 'table_body'
table_caption = 'table_caption'
table_footnote = 'table_footnote'
class ElementRelType(Enum):
sibling = 'sibling'
class PageInfo(BaseModel):
page_no: int = Field(description='the index of page, start from zero',
ge=0)
height: int = Field(description='the height of page', gt=0)
width: int = Field(description='the width of page', ge=0)
image_path: str | None = Field(description='the image of this page',
default=None)
class ContentObject(BaseModel):
    category_type: CategoryType = Field(description='category')
poly: list[float] = Field(
description=('Coordinates, need to convert back to PDF coordinates,'
' order is top-left, top-right, bottom-right, bottom-left'
' x,y coordinates'))
ignore: bool = Field(description='whether ignore this object',
default=False)
text: str | None = Field(description='text content of the object',
default=None)
image_path: str | None = Field(description='path of embedded image',
default=None)
order: int = Field(description='the order of this object within a page',
default=-1)
anno_id: int = Field(description='unique id', default=-1)
latex: str | None = Field(description='latex result', default=None)
html: str | None = Field(description='html result', default=None)
class ElementRelation(BaseModel):
source_anno_id: int = Field(description='unique id of the source object',
default=-1)
target_anno_id: int = Field(description='unique id of the target object',
default=-1)
relation: ElementRelType = Field(
description='the relation between source and target element')
class LayoutElementsExtra(BaseModel):
element_relation: list[ElementRelation] = Field(
description='the relation between source and target element')
class LayoutElements(BaseModel):
layout_dets: list[ContentObject] = Field(
description='layout element details')
page_info: PageInfo = Field(description='page info')
extra: LayoutElementsExtra = Field(description='extra information')
# iter data format
class Node(BaseModel):
    category_type: CategoryType = Field(description='category')
text: str | None = Field(description='text content of the object',
default=None)
image_path: str | None = Field(description='path of embedded image',
default=None)
anno_id: int = Field(description='unique id', default=-1)
latex: str | None = Field(description='latex result', default=None)
html: str | None = Field(description='html result', default=None)
import json
import os
from pathlib import Path
from loguru import logger
import magic_pdf.model as model_config
from magic_pdf.dict2md.ocr_mkcontent import merge_para_with_text
from magic_pdf.integrations.rag.type import (CategoryType, ContentObject,
ElementRelation, ElementRelType,
LayoutElements,
LayoutElementsExtra, PageInfo)
from magic_pdf.libs.ocr_content_type import BlockType, ContentType
from magic_pdf.rw.AbsReaderWriter import AbsReaderWriter
from magic_pdf.rw.DiskReaderWriter import DiskReaderWriter
from magic_pdf.tools.common import do_parse, prepare_env
def convert_middle_json_to_layout_elements(
json_data: dict,
output_dir: str,
) -> list[LayoutElements]:
uniq_anno_id = 0
res: list[LayoutElements] = []
for page_no, page_data in enumerate(json_data['pdf_info']):
order_id = 0
page_info = PageInfo(
height=int(page_data['page_size'][1]),
width=int(page_data['page_size'][0]),
page_no=page_no,
)
layout_dets: list[ContentObject] = []
extra_element_relation: list[ElementRelation] = []
for para_block in page_data['para_blocks']:
para_text = ''
para_type = para_block['type']
if para_type == BlockType.Text:
para_text = merge_para_with_text(para_block)
x0, y0, x1, y1 = para_block['bbox']
content = ContentObject(
anno_id=uniq_anno_id,
category_type=CategoryType.text,
text=para_text,
order=order_id,
poly=[x0, y0, x1, y0, x1, y1, x0, y1],
)
uniq_anno_id += 1
order_id += 1
layout_dets.append(content)
elif para_type == BlockType.Title:
para_text = merge_para_with_text(para_block)
x0, y0, x1, y1 = para_block['bbox']
content = ContentObject(
anno_id=uniq_anno_id,
category_type=CategoryType.title,
text=para_text,
order=order_id,
poly=[x0, y0, x1, y0, x1, y1, x0, y1],
)
uniq_anno_id += 1
order_id += 1
layout_dets.append(content)
elif para_type == BlockType.InterlineEquation:
para_text = merge_para_with_text(para_block)
x0, y0, x1, y1 = para_block['bbox']
content = ContentObject(
anno_id=uniq_anno_id,
category_type=CategoryType.interline_equation,
text=para_text,
order=order_id,
poly=[x0, y0, x1, y0, x1, y1, x0, y1],
)
uniq_anno_id += 1
order_id += 1
layout_dets.append(content)
elif para_type == BlockType.Image:
body_anno_id = -1
caption_anno_id = -1
for block in para_block['blocks']:
if block['type'] == BlockType.ImageBody:
for line in block['lines']:
for span in line['spans']:
if span['type'] == ContentType.Image:
x0, y0, x1, y1 = block['bbox']
content = ContentObject(
anno_id=uniq_anno_id,
category_type=CategoryType.image_body,
image_path=os.path.join(
output_dir, span['image_path']),
order=order_id,
poly=[x0, y0, x1, y0, x1, y1, x0, y1],
)
body_anno_id = uniq_anno_id
uniq_anno_id += 1
order_id += 1
layout_dets.append(content)
for block in para_block['blocks']:
if block['type'] == BlockType.ImageCaption:
para_text += merge_para_with_text(block)
x0, y0, x1, y1 = block['bbox']
content = ContentObject(
anno_id=uniq_anno_id,
category_type=CategoryType.image_caption,
text=para_text,
order=order_id,
poly=[x0, y0, x1, y0, x1, y1, x0, y1],
)
caption_anno_id = uniq_anno_id
uniq_anno_id += 1
order_id += 1
layout_dets.append(content)
                if body_anno_id != -1 and caption_anno_id != -1:  # consistent with the table branch below; anno ids start at 0, so 0 is a valid id
element_relation = ElementRelation(
relation=ElementRelType.sibling,
source_anno_id=body_anno_id,
target_anno_id=caption_anno_id,
)
extra_element_relation.append(element_relation)
elif para_type == BlockType.Table:
body_anno_id, caption_anno_id, footnote_anno_id = -1, -1, -1
for block in para_block['blocks']:
if block['type'] == BlockType.TableCaption:
para_text += merge_para_with_text(block)
x0, y0, x1, y1 = block['bbox']
content = ContentObject(
anno_id=uniq_anno_id,
category_type=CategoryType.table_caption,
text=para_text,
order=order_id,
poly=[x0, y0, x1, y0, x1, y1, x0, y1],
)
caption_anno_id = uniq_anno_id
uniq_anno_id += 1
order_id += 1
layout_dets.append(content)
for block in para_block['blocks']:
if block['type'] == BlockType.TableBody:
for line in block['lines']:
for span in line['spans']:
if span['type'] == ContentType.Table:
x0, y0, x1, y1 = para_block['bbox']
content = ContentObject(
anno_id=uniq_anno_id,
category_type=CategoryType.table_body,
order=order_id,
poly=[x0, y0, x1, y0, x1, y1, x0, y1],
)
body_anno_id = uniq_anno_id
uniq_anno_id += 1
order_id += 1
# if processed by table model
if span.get('latex', ''):
content.latex = span['latex']
else:
content.image_path = os.path.join(
output_dir, span['image_path'])
layout_dets.append(content)
for block in para_block['blocks']:
if block['type'] == BlockType.TableFootnote:
para_text += merge_para_with_text(block)
x0, y0, x1, y1 = block['bbox']
content = ContentObject(
anno_id=uniq_anno_id,
category_type=CategoryType.table_footnote,
text=para_text,
order=order_id,
poly=[x0, y0, x1, y0, x1, y1, x0, y1],
)
footnote_anno_id = uniq_anno_id
uniq_anno_id += 1
order_id += 1
layout_dets.append(content)
if caption_anno_id != -1 and body_anno_id != -1:
element_relation = ElementRelation(
relation=ElementRelType.sibling,
source_anno_id=body_anno_id,
target_anno_id=caption_anno_id,
)
extra_element_relation.append(element_relation)
if footnote_anno_id != -1 and body_anno_id != -1:
element_relation = ElementRelation(
relation=ElementRelType.sibling,
source_anno_id=body_anno_id,
target_anno_id=footnote_anno_id,
)
extra_element_relation.append(element_relation)
res.append(
LayoutElements(
page_info=page_info,
layout_dets=layout_dets,
extra=LayoutElementsExtra(
element_relation=extra_element_relation),
))
return res
def inference(path, output_dir, method):
model_config.__use_inside_model__ = True
model_config.__model_mode__ = 'full'
if output_dir == '':
if os.path.isdir(path):
output_dir = os.path.join(path, 'output')
else:
output_dir = os.path.join(os.path.dirname(path), 'output')
local_image_dir, local_md_dir = prepare_env(output_dir,
str(Path(path).stem), method)
def read_fn(path):
disk_rw = DiskReaderWriter(os.path.dirname(path))
return disk_rw.read(os.path.basename(path), AbsReaderWriter.MODE_BIN)
def parse_doc(doc_path: str):
try:
file_name = str(Path(doc_path).stem)
pdf_data = read_fn(doc_path)
do_parse(
output_dir,
file_name,
pdf_data,
[],
method,
False,
f_draw_span_bbox=False,
f_draw_layout_bbox=False,
f_dump_md=False,
f_dump_middle_json=True,
f_dump_model_json=False,
f_dump_orig_pdf=False,
f_dump_content_list=False,
f_draw_model_bbox=False,
)
middle_json_fn = os.path.join(local_md_dir,
f'{file_name}_middle.json')
with open(middle_json_fn) as fd:
jso = json.load(fd)
os.remove(middle_json_fn)
return convert_middle_json_to_layout_elements(jso, local_image_dir)
except Exception as e:
logger.exception(e)
return parse_doc(path)
if __name__ == '__main__':
import pprint
base_dir = '/opt/data/pdf/resources/samples/'
if 0:
with open(base_dir + 'json_outputs/middle.json') as f:
d = json.load(f)
result = convert_middle_json_to_layout_elements(d, '/tmp')
pprint.pp(result)
if 0:
with open(base_dir + 'json_outputs/middle.3.json') as f:
d = json.load(f)
result = convert_middle_json_to_layout_elements(d, '/tmp')
pprint.pp(result)
if 1:
res = inference(
base_dir + 'samples/pdf/one_page_with_table_image.pdf',
'/tmp/output',
'ocr',
)
pprint.pp(res)
# The bbox here is a list [x0, y0, x1, y1, block_content, idx_x, idx_y, content_type, ext_x0, ext_y0, ext_x1, ext_y1]; idx_x and idx_y start out as None.
# (x0, y0) is the top-left corner and (x1, y1) the bottom-right corner, with the origin at the top left.
from magic_pdf.layout.layout_spiler_recog import get_spilter_of_page
from magic_pdf.libs.boxbase import _is_in, _is_in_or_part_overlap, _is_vertical_full_overlap
from magic_pdf.libs.commons import mymax
X0_IDX = 0
Y0_IDX = 1
X1_IDX = 2
Y1_IDX = 3
CONTENT_IDX = 4
IDX_X = 5
IDX_Y = 6
CONTENT_TYPE_IDX = 7
X0_EXT_IDX = 8
Y0_EXT_IDX = 9
X1_EXT_IDX = 10
Y1_EXT_IDX = 11
def prepare_bboxes_for_layout_split(image_info, image_backup_info, table_info, inline_eq_info, interline_eq_info, text_raw_blocks: dict, page_boundry, page):
"""
text_raw_blocks:结构参考test/assets/papre/pymu_textblocks.json
把bbox重新组装成一个list,每个元素[x0, y0, x1, y1, block_content, idx_x, idx_y, content_type, ext_x0, ext_y0, ext_x1, ext_y1], 初始时候idx_x, idx_y都是None. 对于图片、公式来说,block_content是图片的地址, 对于段落来说,block_content是pymupdf里的block结构
"""
all_bboxes = []
for image in image_info:
box = image['bbox']
        # Horizontal column splitting is not implemented yet, so small images are filtered
        # out here: they can distort the layout and make the split inaccurate, e.g.
        # scihub_76500000/libgen.scimag76570000-76570999.zip_10.1186/s13287-019-1355-1
        # drop images whose width and height are both under 50
if abs(box[0]-box[2]) < 50 and abs(box[1]-box[3]) < 50:
continue
all_bboxes.append([box[0], box[1], box[2], box[3], None, None, None, 'image', None, None, None, None])
for table in table_info:
box = table['bbox']
all_bboxes.append([box[0], box[1], box[2], box[3], None, None, None, 'table', None, None, None, None])
"""由于公式与段落混合,因此公式不再参与layout划分,无需加入all_bboxes"""
# 加入文本block
text_block_temp = []
for block in text_raw_blocks:
bbox = block['bbox']
text_block_temp.append([bbox[0], bbox[1], bbox[2], bbox[3], None, None, None, 'text', None, None, None, None])
text_block_new = resolve_bbox_overlap_for_layout_det(text_block_temp)
text_block_new = filter_lines_bbox(text_block_new) # 去掉线条bbox,有可能让layout探测陷入无限循环
"""找出会影响layout的色块、横向分割线"""
spilter_bboxes = get_spilter_of_page(page, [b['bbox'] for b in image_info]+[b['bbox'] for b in image_backup_info], [b['bbox'] for b in table_info], )
# 还要去掉存在于spilter_bboxes里的text_block
if len(spilter_bboxes) > 0:
text_block_new = [box for box in text_block_new if not any([_is_in_or_part_overlap(box[:4], spilter_bbox) for spilter_bbox in spilter_bboxes])]
for bbox in text_block_new:
all_bboxes.append([bbox[0], bbox[1], bbox[2], bbox[3], None, None, None, 'text', None, None, None, None])
for bbox in spilter_bboxes:
all_bboxes.append([bbox[0], bbox[1], bbox[2], bbox[3], None, None, None, 'spilter', None, None, None, None])
return all_bboxes
def resolve_bbox_overlap_for_layout_det(bboxes:list):
    """
    1. drop bboxes that are contained in another bbox
    2. when boxes overlap vertically, grow the larger box until it covers the smaller one
    """
    def _is_in_other_bbox(i:int):
        """
        check whether box i is contained in any other box
        """
        for j in range(0, len(bboxes)):
            if j!=i and _is_in(bboxes[i][:4], bboxes[j][:4]):
                return True
            # elif j!=i and _is_bottom_full_overlap(bboxes[i][:4], bboxes[j][:4]):
            #     return True
        return False
    # first drop the contained bboxes
new_bbox_1 = []
for i in range(0, len(bboxes)):
if not _is_in_other_bbox(i):
new_bbox_1.append(bboxes[i])
    # Then expand the larger boxes by merging vertical overlaps
new_box = []
new_bbox_2 = []
len_1 = len(new_bbox_2)
while True:
merged_idx = []
for i in range(0, len(new_bbox_1)):
if i in merged_idx:
continue
for j in range(i+1, len(new_bbox_1)):
if j in merged_idx:
continue
bx1 = new_bbox_1[i]
bx2 = new_bbox_1[j]
if i!=j and _is_vertical_full_overlap(bx1[:4], bx2[:4]):
merged_box = min([bx1[0], bx2[0]]), min([bx1[1], bx2[1]]), max([bx1[2], bx2[2]]), max([bx1[3], bx2[3]])
new_bbox_2.append(merged_box)
merged_idx.append(i)
merged_idx.append(j)
        for i in range(0, len(new_bbox_1)):  # carry over the boxes that were not merged
if i not in merged_idx:
new_bbox_2.append(new_bbox_1[i])
if len(new_bbox_2)==0 or len_1==len(new_bbox_2):
break
else:
len_1 = len(new_bbox_2)
new_box = new_bbox_2
new_bbox_1, new_bbox_2 = new_bbox_2, []
return new_box
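# Illustrative sketch (hypothetical demo, not called by the pipeline): the inner
# box is contained in the outer one, so only the outer box should survive.
def _demo_resolve_bbox_overlap():
    outer = [0, 0, 100, 100, None, None, None, 'text', None, None, None, None]
    inner = [10, 10, 20, 20, None, None, None, 'text', None, None, None, None]
    return resolve_bbox_overlap_for_layout_det([outer, inner])  # -> [outer]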
def filter_lines_bbox(bboxes: list):
"""
过滤掉bbox为空的行
"""
new_box = []
for box in bboxes:
x0, y0, x1, y1 = box[0], box[1], box[2], box[3]
if abs(x0-x1)<=1 or abs(y0-y1)<=1:
continue
else:
new_box.append(box)
return new_box
################################################################################
# Sorting algorithm #1
# Based on occlusion along extension lines
#
################################################################################
def find_all_left_bbox(this_bbox, all_bboxes) -> list:
"""
寻找this_bbox左边的所有bbox
"""
left_boxes = [box for box in all_bboxes if box[X1_IDX] <= this_bbox[X0_IDX]]
return left_boxes
def find_all_top_bbox(this_bbox, all_bboxes) -> list:
"""
寻找this_bbox上面的所有bbox
"""
top_boxes = [box for box in all_bboxes if box[Y1_IDX] <= this_bbox[Y0_IDX]]
return top_boxes
def get_and_set_idx_x(this_bbox, all_bboxes) -> int:
"""
寻找this_bbox在all_bboxes中的遮挡深度 idx_x
"""
if this_bbox[IDX_X] is not None:
return this_bbox[IDX_X]
else:
all_left_bboxes = find_all_left_bbox(this_bbox, all_bboxes)
if len(all_left_bboxes) == 0:
this_bbox[IDX_X] = 0
else:
all_left_bboxes_idx = [get_and_set_idx_x(bbox, all_bboxes) for bbox in all_left_bboxes]
max_idx_x = mymax(all_left_bboxes_idx)
this_bbox[IDX_X] = max_idx_x + 1
return this_bbox[IDX_X]
def get_and_set_idx_y(this_bbox, all_bboxes) -> int:
"""
寻找this_bbox在all_bboxes中y方向的遮挡深度 idx_y
"""
if this_bbox[IDX_Y] is not None:
return this_bbox[IDX_Y]
else:
all_top_bboxes = find_all_top_bbox(this_bbox, all_bboxes)
if len(all_top_bboxes) == 0:
this_bbox[IDX_Y] = 0
else:
all_top_bboxes_idx = [get_and_set_idx_y(bbox, all_bboxes) for bbox in all_top_bboxes]
max_idx_y = mymax(all_top_bboxes_idx)
this_bbox[IDX_Y] = max_idx_y + 1
return this_bbox[IDX_Y]
def bbox_sort(all_bboxes: list):
"""
排序
"""
all_bboxes_idx_x = [get_and_set_idx_x(bbox, all_bboxes) for bbox in all_bboxes]
all_bboxes_idx_y = [get_and_set_idx_y(bbox, all_bboxes) for bbox in all_bboxes]
all_bboxes_idx = [(idx_x, idx_y) for idx_x, idx_y in zip(all_bboxes_idx_x, all_bboxes_idx_y)]
    all_bboxes_idx = [idx_x_y[0] * 100000 + idx_x_y[1] for idx_x_y in all_bboxes_idx]  # collapse (idx_x, idx_y) into one key so boxes sort by X first, then by Y on ties
all_bboxes_idx = list(zip(all_bboxes_idx, all_bboxes))
all_bboxes_idx.sort(key=lambda x: x[0])
sorted_bboxes = [bbox for idx, bbox in all_bboxes_idx]
return sorted_bboxes
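# Illustrative sketch (hypothetical demo): two side-by-side boxes with a
# full-width box below them. The composite key sorts by idx_x first, so the
# extension-line sort reads column-major: a, then c (same idx_x as a), then b.
def _demo_bbox_sort():
    a = [10, 10, 100, 50, None, None, None, 'text', None, None, None, None]
    b = [120, 10, 200, 50, None, None, None, 'text', None, None, None, None]
    c = [10, 60, 200, 100, None, None, None, 'text', None, None, None, None]
    return bbox_sort([a, b, c])  # -> [a, c, b]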
################################################################################
# Sorting algorithm #2
# When computing idx_x and idx_y, this one ignores extension lines and only considers direct occlusion of the actual width or height
#
################################################################################
def find_left_nearest_bbox(this_bbox, all_bboxes) -> list:
"""
在all_bboxes里找到所有右侧高度和this_bbox有重叠的bbox
"""
left_boxes = [box for box in all_bboxes if box[X1_IDX] <= this_bbox[X0_IDX] and any([
box[Y0_IDX] < this_bbox[Y0_IDX] < box[Y1_IDX], box[Y0_IDX] < this_bbox[Y1_IDX] < box[Y1_IDX],
this_bbox[Y0_IDX] < box[Y0_IDX] < this_bbox[Y1_IDX], this_bbox[Y0_IDX] < box[Y1_IDX] < this_bbox[Y1_IDX],
box[Y0_IDX]==this_bbox[Y0_IDX] and box[Y1_IDX]==this_bbox[Y1_IDX]])]
    # Then keep only the horizontally nearest one
if len(left_boxes) > 0:
left_boxes.sort(key=lambda x: x[X1_IDX], reverse=True)
left_boxes = [left_boxes[0]]
else:
left_boxes = []
return left_boxes
def get_and_set_idx_x_2(this_bbox, all_bboxes):
"""
寻找this_bbox在all_bboxes中的被直接遮挡的深度 idx_x
这个遮挡深度不考虑延长线,而是被实际的长或者宽遮挡的情况
"""
if this_bbox[IDX_X] is not None:
return this_bbox[IDX_X]
else:
left_nearest_bbox = find_left_nearest_bbox(this_bbox, all_bboxes)
if len(left_nearest_bbox) == 0:
this_bbox[IDX_X] = 0
else:
left_idx_x = get_and_set_idx_x_2(left_nearest_bbox[0], all_bboxes)
this_bbox[IDX_X] = left_idx_x + 1
return this_bbox[IDX_X]
def find_top_nearest_bbox(this_bbox, all_bboxes) -> list:
"""
在all_bboxes里找到所有下侧宽度和this_bbox有重叠的bbox
"""
top_boxes = [box for box in all_bboxes if box[Y1_IDX] <= this_bbox[Y0_IDX] and any([
box[X0_IDX] < this_bbox[X0_IDX] < box[X1_IDX], box[X0_IDX] < this_bbox[X1_IDX] < box[X1_IDX],
this_bbox[X0_IDX] < box[X0_IDX] < this_bbox[X1_IDX], this_bbox[X0_IDX] < box[X1_IDX] < this_bbox[X1_IDX],
box[X0_IDX]==this_bbox[X0_IDX] and box[X1_IDX]==this_bbox[X1_IDX]])]
    # Then keep only the vertically nearest one
if len(top_boxes) > 0:
top_boxes.sort(key=lambda x: x[Y1_IDX], reverse=True)
top_boxes = [top_boxes[0]]
else:
top_boxes = []
return top_boxes
def get_and_set_idx_y_2(this_bbox, all_bboxes):
"""
寻找this_bbox在all_bboxes中的被直接遮挡的深度 idx_y
这个遮挡深度不考虑延长线,而是被实际的长或者宽遮挡的情况
"""
if this_bbox[IDX_Y] is not None:
return this_bbox[IDX_Y]
else:
top_nearest_bbox = find_top_nearest_bbox(this_bbox, all_bboxes)
if len(top_nearest_bbox) == 0:
this_bbox[IDX_Y] = 0
else:
top_idx_y = get_and_set_idx_y_2(top_nearest_bbox[0], all_bboxes)
this_bbox[IDX_Y] = top_idx_y + 1
return this_bbox[IDX_Y]
def paper_bbox_sort(all_bboxes: list, page_width, page_height):
all_bboxes_idx_x = [get_and_set_idx_x_2(bbox, all_bboxes) for bbox in all_bboxes]
all_bboxes_idx_y = [get_and_set_idx_y_2(bbox, all_bboxes) for bbox in all_bboxes]
all_bboxes_idx = [(idx_x, idx_y) for idx_x, idx_y in zip(all_bboxes_idx_x, all_bboxes_idx_y)]
    all_bboxes_idx = [idx_x_y[0] * 100000 + idx_x_y[1] for idx_x_y in all_bboxes_idx]  # collapse (idx_x, idx_y) into one key so boxes sort by X first, then by Y on ties
all_bboxes_idx = list(zip(all_bboxes_idx, all_bboxes))
all_bboxes_idx.sort(key=lambda x: x[0])
sorted_bboxes = [bbox for idx, bbox in all_bboxes_idx]
return sorted_bboxes
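# Illustrative sketch (hypothetical demo): the same toy page as above, sorted by
# direct occlusion instead of extension lines; idx_x is still the primary key.
def _demo_paper_bbox_sort():
    a = [10, 10, 100, 50, None, None, None, 'text', None, None, None, None]
    b = [120, 10, 200, 50, None, None, None, 'text', None, None, None, None]
    c = [10, 60, 200, 100, None, None, None, 'text', None, None, None, None]
    return paper_bbox_sort([a, b, c], page_width=210, page_height=110)  # -> [a, c, b]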
################################################################################
"""
第三种排序算法, 假设page的最左侧为X0,最右侧为X1,最上侧为Y0,最下侧为Y1
这个排序算法在第二种算法基础上增加对bbox的预处理步骤。预处理思路如下:
1. 首先在水平方向上对bbox进行扩展。扩展方法是:
- 对每个bbox,找到其左边最近的bbox(也就是y方向有重叠),然后将其左边界扩展到左边最近bbox的右边界(x1+1),这里加1是为了避免重叠。如果没有左边的bbox,那么就将其左边界扩展到page的最左侧X0。
- 对每个bbox,找到其右边最近的bbox(也就是y方向有重叠),然后将其右边界扩展到右边最近bbox的左边界(x0-1),这里减1是为了避免重叠。如果没有右边的bbox,那么就将其右边界扩展到page的最右侧X1。
- 经过上面2个步骤,bbox扩展到了水平方向的最大范围。[左最近bbox.x1+1, 右最近bbox.x0-1]
2. 合并所有的连续水平方向的bbox, 合并方法是:
- 对bbox进行y方向排序,然后从上到下遍历所有bbox,如果当前bbox和下一个bbox的x0, x1等于X0, X1,那么就合并这两个bbox。
3. 然后在垂直方向上对bbox进行扩展。扩展方法是:
- 首先从page上切割掉合并后的水平bbox, 得到几个新的block
针对每个block
- x0: 扎到位于左侧x=x0延长线的左侧所有的bboxes, 找到最大的x1,让x0=x1+1。如果没有,则x0=X0
- x1: 找到位于右侧x=x1延长线右侧所有的bboxes, 找到最小的x0, 让x1=x0-1。如果没有,则x1=X1
随后在垂直方向上合并所有的连续的block,方法如下:
- 对block进行x方向排序,然后从左到右遍历所有block,如果当前block和下一个block的x0, x1相等,那么就合并这两个block。
如果垂直切分后所有小bbox都被分配到了一个block, 那么分割就完成了。这些合并后的block打上标签'GOOD_LAYOUT’
如果在某个垂直方向上无法被完全分割到一个block,那么就将这个block打上标签'BAD_LAYOUT'。
至此完成,一个页面的预处理,天然的block要么属于'GOOD_LAYOUT',要么属于'BAD_LAYOUT'。针对含有'BAD_LAYOUT'的页面,可以先按照自上而下,自左到右进行天然排序,也可以先过滤掉这种书籍。
(完成条件下次加强:进行水平方向切分,把混乱的layout部分尽可能切割出去)
"""
################################################################################
def find_left_neighbor_bboxes(this_bbox, all_bboxes) -> list:
"""
在all_bboxes里找到所有右侧高度和this_bbox有重叠的bbox
这里使用扩展之后的bbox
"""
left_boxes = [box for box in all_bboxes if box[X1_EXT_IDX] <= this_bbox[X0_EXT_IDX] and any([
box[Y0_EXT_IDX] < this_bbox[Y0_EXT_IDX] < box[Y1_EXT_IDX], box[Y0_EXT_IDX] < this_bbox[Y1_EXT_IDX] < box[Y1_EXT_IDX],
this_bbox[Y0_EXT_IDX] < box[Y0_EXT_IDX] < this_bbox[Y1_EXT_IDX], this_bbox[Y0_EXT_IDX] < box[Y1_EXT_IDX] < this_bbox[Y1_EXT_IDX],
box[Y0_EXT_IDX]==this_bbox[Y0_EXT_IDX] and box[Y1_EXT_IDX]==this_bbox[Y1_EXT_IDX]])]
    # Sort so the horizontally nearest ones come first; all overlapping neighbors are returned
    if len(left_boxes) > 0:
        left_boxes.sort(key=lambda x: x[X1_EXT_IDX], reverse=True)
return left_boxes
def find_top_neighbor_bboxes(this_bbox, all_bboxes) -> list:
"""
在all_bboxes里找到所有下侧宽度和this_bbox有重叠的bbox
这里使用扩展之后的bbox
"""
top_boxes = [box for box in all_bboxes if box[Y1_EXT_IDX] <= this_bbox[Y0_EXT_IDX] and any([
box[X0_EXT_IDX] < this_bbox[X0_EXT_IDX] < box[X1_EXT_IDX], box[X0_EXT_IDX] < this_bbox[X1_EXT_IDX] < box[X1_EXT_IDX],
this_bbox[X0_EXT_IDX] < box[X0_EXT_IDX] < this_bbox[X1_EXT_IDX], this_bbox[X0_EXT_IDX] < box[X1_EXT_IDX] < this_bbox[X1_EXT_IDX],
box[X0_EXT_IDX]==this_bbox[X0_EXT_IDX] and box[X1_EXT_IDX]==this_bbox[X1_EXT_IDX]])]
    # Sort so the vertically nearest ones come first; all overlapping neighbors are returned
    if len(top_boxes) > 0:
        top_boxes.sort(key=lambda x: x[Y1_EXT_IDX], reverse=True)
return top_boxes
def get_and_set_idx_x_2_ext(this_bbox, all_bboxes):
"""
寻找this_bbox在all_bboxes中的被直接遮挡的深度 idx_x
这个遮挡深度不考虑延长线,而是被实际的长或者宽遮挡的情况
"""
if this_bbox[IDX_X] is not None:
return this_bbox[IDX_X]
else:
left_nearest_bbox = find_left_neighbor_bboxes(this_bbox, all_bboxes)
if len(left_nearest_bbox) == 0:
this_bbox[IDX_X] = 0
else:
            left_idx_x = [get_and_set_idx_x_2_ext(b, all_bboxes) for b in left_nearest_bbox]  # recurse with the ext variant, mirroring get_and_set_idx_y_2_ext
this_bbox[IDX_X] = mymax(left_idx_x) + 1
return this_bbox[IDX_X]
def get_and_set_idx_y_2_ext(this_bbox, all_bboxes):
"""
寻找this_bbox在all_bboxes中的被直接遮挡的深度 idx_y
这个遮挡深度不考虑延长线,而是被实际的长或者宽遮挡的情况
"""
if this_bbox[IDX_Y] is not None:
return this_bbox[IDX_Y]
else:
top_nearest_bbox = find_top_neighbor_bboxes(this_bbox, all_bboxes)
if len(top_nearest_bbox) == 0:
this_bbox[IDX_Y] = 0
else:
top_idx_y = [get_and_set_idx_y_2_ext(b, all_bboxes) for b in top_nearest_bbox]
this_bbox[IDX_Y] = mymax(top_idx_y) + 1
return this_bbox[IDX_Y]
def _paper_bbox_sort_ext(all_bboxes: list):
all_bboxes_idx_x = [get_and_set_idx_x_2_ext(bbox, all_bboxes) for bbox in all_bboxes]
all_bboxes_idx_y = [get_and_set_idx_y_2_ext(bbox, all_bboxes) for bbox in all_bboxes]
all_bboxes_idx = [(idx_x, idx_y) for idx_x, idx_y in zip(all_bboxes_idx_x, all_bboxes_idx_y)]
    all_bboxes_idx = [idx_x_y[0] * 100000 + idx_x_y[1] for idx_x_y in all_bboxes_idx]  # collapse (idx_x, idx_y) into one key so boxes sort by X first, then by Y on ties
all_bboxes_idx = list(zip(all_bboxes_idx, all_bboxes))
all_bboxes_idx.sort(key=lambda x: x[0])
sorted_bboxes = [bbox for idx, bbox in all_bboxes_idx]
return sorted_bboxes
# ===============================================================================================
def find_left_bbox_ext_line(this_bbox, all_bboxes) -> list:
"""
寻找this_bbox左边的所有bbox, 使用延长线
"""
left_boxes = [box for box in all_bboxes if box[X1_IDX] <= this_bbox[X0_IDX]]
if len(left_boxes):
left_boxes.sort(key=lambda x: x[X1_IDX], reverse=True)
left_boxes = left_boxes[0]
else:
left_boxes = None
return left_boxes
def find_right_bbox_ext_line(this_bbox, all_bboxes) -> list:
"""
寻找this_bbox右边的所有bbox, 使用延长线
"""
right_boxes = [box for box in all_bboxes if box[X0_IDX] >= this_bbox[X1_IDX]]
if len(right_boxes):
right_boxes.sort(key=lambda x: x[X0_IDX])
right_boxes = right_boxes[0]
else:
right_boxes = None
return right_boxes
# =============================================================================================
def find_left_nearest_bbox_direct(this_bbox, all_bboxes) -> list:
"""
在all_bboxes里找到所有右侧高度和this_bbox有重叠的bbox, 不用延长线并且不能像
"""
left_boxes = [box for box in all_bboxes if box[X1_IDX] <= this_bbox[X0_IDX] and any([
box[Y0_IDX] < this_bbox[Y0_IDX] < box[Y1_IDX], box[Y0_IDX] < this_bbox[Y1_IDX] < box[Y1_IDX],
this_bbox[Y0_IDX] < box[Y0_IDX] < this_bbox[Y1_IDX], this_bbox[Y0_IDX] < box[Y1_IDX] < this_bbox[Y1_IDX],
box[Y0_IDX]==this_bbox[Y0_IDX] and box[Y1_IDX]==this_bbox[Y1_IDX]])]
    # Then keep only the horizontally nearest one, i.e. the one with the largest x1
if len(left_boxes) > 0:
left_boxes.sort(key=lambda x: x[X1_EXT_IDX] if x[X1_EXT_IDX] else x[X1_IDX], reverse=True)
left_boxes = left_boxes[0]
else:
left_boxes = None
return left_boxes
def find_right_nearst_bbox_direct(this_bbox, all_bboxes) -> list:
"""
找到在this_bbox右侧且距离this_bbox距离最近的bbox.必须是直接遮挡的那种
"""
right_bboxes = [box for box in all_bboxes if box[X0_IDX] >= this_bbox[X1_IDX] and any([
this_bbox[Y0_IDX] < box[Y0_IDX] < this_bbox[Y1_IDX], this_bbox[Y0_IDX] < box[Y1_IDX] < this_bbox[Y1_IDX],
box[Y0_IDX] < this_bbox[Y0_IDX] < box[Y1_IDX], box[Y0_IDX] < this_bbox[Y1_IDX] < box[Y1_IDX],
box[Y0_IDX]==this_bbox[Y0_IDX] and box[Y1_IDX]==this_bbox[Y1_IDX]])]
if len(right_bboxes)>0:
right_bboxes.sort(key=lambda x: x[X0_EXT_IDX] if x[X0_EXT_IDX] else x[X0_IDX])
right_bboxes = right_bboxes[0]
else:
right_bboxes = None
return right_bboxes
def reset_idx_x_y(all_boxes:list)->list:
for box in all_boxes:
box[IDX_X] = None
box[IDX_Y] = None
return all_boxes
# ===================================================================================================
def find_top_nearest_bbox_direct(this_bbox, bboxes_collection) -> list:
"""
找到在this_bbox上方且距离this_bbox距离最近的bbox.必须是直接遮挡的那种
"""
top_bboxes = [box for box in bboxes_collection if box[Y1_IDX] <= this_bbox[Y0_IDX] and any([
box[X0_IDX] < this_bbox[X0_IDX] < box[X1_IDX], box[X0_IDX] < this_bbox[X1_IDX] < box[X1_IDX],
this_bbox[X0_IDX] < box[X0_IDX] < this_bbox[X1_IDX], this_bbox[X0_IDX] < box[X1_IDX] < this_bbox[X1_IDX],
box[X0_IDX]==this_bbox[X0_IDX] and box[X1_IDX]==this_bbox[X1_IDX]])]
    # Then keep only the vertically nearest one above
if len(top_bboxes) > 0:
top_bboxes.sort(key=lambda x: x[Y1_IDX], reverse=True)
top_bboxes = top_bboxes[0]
else:
top_bboxes = None
return top_bboxes
def find_bottom_nearest_bbox_direct(this_bbox, bboxes_collection) -> list:
"""
找到在this_bbox下方且距离this_bbox距离最近的bbox.必须是直接遮挡的那种
"""
bottom_bboxes = [box for box in bboxes_collection if box[Y0_IDX] >= this_bbox[Y1_IDX] and any([
box[X0_IDX] < this_bbox[X0_IDX] < box[X1_IDX], box[X0_IDX] < this_bbox[X1_IDX] < box[X1_IDX],
this_bbox[X0_IDX] < box[X0_IDX] < this_bbox[X1_IDX], this_bbox[X0_IDX] < box[X1_IDX] < this_bbox[X1_IDX],
box[X0_IDX]==this_bbox[X0_IDX] and box[X1_IDX]==this_bbox[X1_IDX]])]
    # Then keep only the vertically nearest one below
if len(bottom_bboxes) > 0:
bottom_bboxes.sort(key=lambda x: x[Y0_IDX])
bottom_bboxes = bottom_bboxes[0]
else:
bottom_bboxes = None
return bottom_bboxes
def find_boundry_bboxes(bboxes:list) -> tuple:
"""
找到bboxes的边界——找到所有bbox里最小的(x0, y0), 最大的(x1, y1)
"""
x0, y0, x1, y1 = bboxes[0][X0_IDX], bboxes[0][Y0_IDX], bboxes[0][X1_IDX], bboxes[0][Y1_IDX]
for box in bboxes:
x0 = min(box[X0_IDX], x0)
y0 = min(box[Y0_IDX], y0)
x1 = max(box[X1_IDX], x1)
y1 = max(box[Y1_IDX], y1)
return x0, y0, x1, y1
def extend_bbox_vertical(bboxes:list, boundry_x0, boundry_y0, boundry_x1, boundry_y1) -> list:
"""
在垂直方向上扩展能够直接垂直打通的bbox,也就是那些上下都没有其他box的bbox
"""
for box in bboxes:
top_nearest_bbox = find_top_nearest_bbox_direct(box, bboxes)
bottom_nearest_bbox = find_bottom_nearest_bbox_direct(box, bboxes)
        if top_nearest_bbox is None and bottom_nearest_bbox is None:  # occupies a column by itself
box[X0_EXT_IDX] = box[X0_IDX]
box[Y0_EXT_IDX] = boundry_y0
box[X1_EXT_IDX] = box[X1_IDX]
box[Y1_EXT_IDX] = boundry_y1
# else:
# if top_nearest_bbox is None:
# box[Y0_EXT_IDX] = boundry_y0
# else:
# box[Y0_EXT_IDX] = top_nearest_bbox[Y1_IDX] + 1
# if bottom_nearest_bbox is None:
# box[Y1_EXT_IDX] = boundry_y1
# else:
# box[Y1_EXT_IDX] = bottom_nearest_bbox[Y0_IDX] - 1
# box[X0_EXT_IDX] = box[X0_IDX]
# box[X1_EXT_IDX] = box[X1_IDX]
return bboxes
# ===================================================================================================
def paper_bbox_sort_v2(all_bboxes: list, page_width:int, page_height:int):
"""
增加预处理行为的排序:
return:
[
{
"layout_bbox": [x0, y0, x1, y1],
"layout_label":"GOOD_LAYOUT/BAD_LAYOUT",
"content_bboxes": [] #每个元素都是[x0, y0, x1, y1, block_content, idx_x, idx_y, content_type, ext_x0, ext_y0, ext_x1, ext_y1], 并且顺序就是阅读顺序
}
]
"""
sorted_layouts = [] # 最后的返回结果
page_x0, page_y0, page_x1, page_y1 = 1, 1, page_width-1, page_height-1
all_bboxes = paper_bbox_sort(all_bboxes) # 大致拍下序
# 首先在水平方向上扩展独占一行的bbox
for bbox in all_bboxes:
left_nearest_bbox = find_left_nearest_bbox_direct(bbox, all_bboxes) # 非扩展线
right_nearest_bbox = find_right_nearst_bbox_direct(bbox, all_bboxes)
        if left_nearest_bbox is None and right_nearest_bbox is None:  # occupies a whole row by itself
bbox[X0_EXT_IDX] = page_x0
bbox[Y0_EXT_IDX] = bbox[Y0_IDX]
bbox[X1_EXT_IDX] = page_x1
bbox[Y1_EXT_IDX] = bbox[Y1_IDX]
    # The full-row bboxes are now extended to the page boundary; use this to merge consecutive bboxes into groups
if len(all_bboxes)==1:
return [{"layout_bbox": [page_x0, page_y0, page_x1, page_y1], "layout_label":"GOOD_LAYOUT", "content_bboxes": all_bboxes}]
if len(all_bboxes)==0:
return []
"""
然后合并所有连续水平方向的bbox.
"""
all_bboxes.sort(key=lambda x: x[Y0_IDX])
h_bboxes = []
h_bbox_group = []
v_boxes = []
for bbox in all_bboxes:
if bbox[X0_IDX] == page_x0 and bbox[X1_IDX] == page_x1:
h_bbox_group.append(bbox)
else:
if len(h_bbox_group)>0:
h_bboxes.append(h_bbox_group)
h_bbox_group = []
    # the last group
if len(h_bbox_group)>0:
h_bboxes.append(h_bbox_group)
"""
现在h_bboxes里面是所有的group了,每个group都是一个list
对h_bboxes里的每个group进行计算放回到sorted_layouts里
"""
for gp in h_bboxes:
gp.sort(key=lambda x: x[Y0_IDX])
block_info = {"layout_label":"GOOD_LAYOUT", "content_bboxes": gp}
        # Compute the group's layout_bbox: the smallest x0, y0 and the largest x1, y1
x0, y0, x1, y1 = gp[0][X0_EXT_IDX], gp[0][Y0_EXT_IDX], gp[-1][X1_EXT_IDX], gp[-1][Y1_EXT_IDX]
block_info["layout_bbox"] = [x0, y0, x1, y1]
sorted_layouts.append(block_info)
    # Next use the y0, y1 of these consecutive horizontal groups to split the remaining boxes into horizontal bands
    h_split_lines = [page_y0]
    for gp in h_bboxes:
        y0, y1 = gp[0][Y0_EXT_IDX], gp[-1][Y1_EXT_IDX]  # vertical extent of the group (gp is a list of bboxes)
h_split_lines.append(y0)
h_split_lines.append(y1)
h_split_lines.append(page_y1)
unsplited_bboxes = []
for i in range(0, len(h_split_lines), 2):
start_y0, start_y1 = h_split_lines[i:i+2]
        # Collect the other bboxes lying between [start_y0, start_y1]; they form one unsplit block
bboxes_in_block = [bbox for bbox in all_bboxes if bbox[Y0_IDX]>=start_y0 and bbox[Y1_IDX]<=start_y1]
unsplited_bboxes.append(bboxes_in_block)
    # ================== horizontal bands now split and sorted ====================================
    """
    Next, split each non-horizontal part vertically.
    Only the bboxes that cannot be fully separated horizontally remain. For these, extend vertically first, then split vertically.
    In 4 steps:
    1. Isolate the bboxes that can be cut straight through vertically into their own layouts
    2. Split the rest vertically first
    3. After the vertical split, try splitting horizontally again
    4. Whatever still cannot be split becomes a single layout
    """
    # Split each part vertically
    for bboxes_in_block in unsplited_bboxes:
        # First find the boundary of this block's bboxes
        boundry_x0, boundry_y0, boundry_x1, boundry_y1 = find_boundry_bboxes(bboxes_in_block)
        # Extend vertically
        extended_vertical_bboxes = extend_bbox_vertical(bboxes_in_block, boundry_x0, boundry_y0, boundry_x1, boundry_y1)
        # Then split this block vertically
        extended_vertical_bboxes.sort(key=lambda x: x[X0_IDX])  # ascending x, i.e. reading left to right
v_boxes_group = []
for bbox in extended_vertical_bboxes:
if bbox[Y0_IDX]==boundry_y0 and bbox[Y1_IDX]==boundry_y1:
v_boxes_group.append(bbox)
else:
if len(v_boxes_group)>0:
v_boxes.append(v_boxes_group)
v_boxes_group = []
if len(v_boxes_group)>0:
v_boxes.append(v_boxes_group)
        # Append the consecutive vertical parts to sorted_layouts; at this point they are already consecutive thanks to the step above
for gp in v_boxes:
gp.sort(key=lambda x: x[X0_IDX])
block_info = {"layout_label":"GOOD_LAYOUT", "content_bboxes": gp}
            # Compute the group's layout_bbox: the smallest x0, y0 and the largest x1, y1
x0, y0, x1, y1 = gp[0][X0_EXT_IDX], gp[0][Y0_EXT_IDX], gp[-1][X1_EXT_IDX], gp[-1][Y1_EXT_IDX]
block_info["layout_bbox"] = [x0, y0, x1, y1]
sorted_layouts.append(block_info)
        # Split into sub-blocks vertically, using the through-going vertical lines. The resulting blocks can very likely be split vertically again; if not, try horizontally; failing both, treat the block as one layout
        v_split_lines = [boundry_x0]
        for gp in v_boxes:
            x0, x1 = gp[0][X0_EXT_IDX], gp[-1][X1_EXT_IDX]  # horizontal extent of the group (gp is a list of bboxes)
v_split_lines.append(x0)
v_split_lines.append(x1)
v_split_lines.append(boundry_x1)
reset_idx_x_y(all_bboxes)
all_boxes = _paper_bbox_sort_ext(all_bboxes)
return all_boxes
from magic_pdf.layout.bbox_sort import X0_EXT_IDX, X0_IDX, X1_EXT_IDX, X1_IDX, Y0_IDX, Y1_EXT_IDX, Y1_IDX
from magic_pdf.libs.boxbase import _is_bottom_full_overlap, _left_intersect, _right_intersect
def find_all_left_bbox_direct(this_bbox, all_bboxes) -> list:
"""
在all_bboxes里找到所有右侧垂直方向上和this_bbox有重叠的bbox, 不用延长线
并且要考虑两个box左右相交的情况,如果相交了,那么右侧的box就不算最左侧。
"""
left_boxes = [box for box in all_bboxes if box[X1_IDX] <= this_bbox[X0_IDX]
and any([
box[Y0_IDX] < this_bbox[Y0_IDX] < box[Y1_IDX], box[Y0_IDX] < this_bbox[Y1_IDX] < box[Y1_IDX],
this_bbox[Y0_IDX] < box[Y0_IDX] < this_bbox[Y1_IDX], this_bbox[Y0_IDX] < box[Y1_IDX] < this_bbox[Y1_IDX],
box[Y0_IDX]==this_bbox[Y0_IDX] and box[Y1_IDX]==this_bbox[Y1_IDX]]) or _left_intersect(box[:4], this_bbox[:4])]
    # Then keep only the horizontally nearest one, i.e. the one with the largest x1
if len(left_boxes) > 0:
left_boxes.sort(key=lambda x: x[X1_EXT_IDX] if x[X1_EXT_IDX] else x[X1_IDX], reverse=True)
left_boxes = left_boxes[0]
else:
left_boxes = None
return left_boxes
def find_all_right_bbox_direct(this_bbox, all_bboxes) -> list:
"""
找到在this_bbox右侧且距离this_bbox距离最近的bbox.必须是直接遮挡的那种
"""
right_bboxes = [box for box in all_bboxes if box[X0_IDX] >= this_bbox[X1_IDX]
and any([
this_bbox[Y0_IDX] < box[Y0_IDX] < this_bbox[Y1_IDX], this_bbox[Y0_IDX] < box[Y1_IDX] < this_bbox[Y1_IDX],
box[Y0_IDX] < this_bbox[Y0_IDX] < box[Y1_IDX], box[Y0_IDX] < this_bbox[Y1_IDX] < box[Y1_IDX],
box[Y0_IDX]==this_bbox[Y0_IDX] and box[Y1_IDX]==this_bbox[Y1_IDX]]) or _right_intersect(this_bbox[:4], box[:4])]
if len(right_bboxes)>0:
right_bboxes.sort(key=lambda x: x[X0_EXT_IDX] if x[X0_EXT_IDX] else x[X0_IDX])
right_bboxes = right_bboxes[0]
else:
right_bboxes = None
return right_bboxes
def find_all_top_bbox_direct(this_bbox, all_bboxes) -> list:
"""
找到在this_bbox上侧且距离this_bbox距离最近的bbox.必须是直接遮挡的那种
"""
top_bboxes = [box for box in all_bboxes if box[Y1_IDX] <= this_bbox[Y0_IDX] and any([
box[X0_IDX] < this_bbox[X0_IDX] < box[X1_IDX], box[X0_IDX] < this_bbox[X1_IDX] < box[X1_IDX],
this_bbox[X0_IDX] < box[X0_IDX] < this_bbox[X1_IDX], this_bbox[X0_IDX] < box[X1_IDX] < this_bbox[X1_IDX],
box[X0_IDX]==this_bbox[X0_IDX] and box[X1_IDX]==this_bbox[X1_IDX]])]
if len(top_bboxes)>0:
top_bboxes.sort(key=lambda x: x[Y1_EXT_IDX] if x[Y1_EXT_IDX] else x[Y1_IDX], reverse=True)
top_bboxes = top_bboxes[0]
else:
top_bboxes = None
return top_bboxes
def find_all_bottom_bbox_direct(this_bbox, all_bboxes) -> list:
"""
找到在this_bbox下侧且距离this_bbox距离最近的bbox.必须是直接遮挡的那种
"""
bottom_bboxes = [box for box in all_bboxes if box[Y0_IDX] >= this_bbox[Y1_IDX] and any([
this_bbox[X0_IDX] < box[X0_IDX] < this_bbox[X1_IDX], this_bbox[X0_IDX] < box[X1_IDX] < this_bbox[X1_IDX],
box[X0_IDX] < this_bbox[X0_IDX] < box[X1_IDX], box[X0_IDX] < this_bbox[X1_IDX] < box[X1_IDX],
box[X0_IDX]==this_bbox[X0_IDX] and box[X1_IDX]==this_bbox[X1_IDX]])]
if len(bottom_bboxes)>0:
bottom_bboxes.sort(key=lambda x: x[Y0_IDX])
bottom_bboxes = bottom_bboxes[0]
else:
bottom_bboxes = None
return bottom_bboxes
# ===================================================================================================================
def find_bottom_bbox_direct_from_right_edge(this_bbox, all_bboxes) -> list:
"""
找到在this_bbox下侧且距离this_bbox距离最近的bbox.必须是直接遮挡的那种
"""
bottom_bboxes = [box for box in all_bboxes if box[Y0_IDX] >= this_bbox[Y1_IDX] and any([
this_bbox[X0_IDX] < box[X0_IDX] < this_bbox[X1_IDX], this_bbox[X0_IDX] < box[X1_IDX] < this_bbox[X1_IDX],
box[X0_IDX] < this_bbox[X0_IDX] < box[X1_IDX], box[X0_IDX] < this_bbox[X1_IDX] < box[X1_IDX],
box[X0_IDX]==this_bbox[X0_IDX] and box[X1_IDX]==this_bbox[X1_IDX]])]
if len(bottom_bboxes)>0:
        # the one with the smallest y0 and the largest x1: the box whose top edge is closest to this_bbox, and furthest to the right
        bottom_bboxes.sort(key=lambda x: x[Y0_IDX])
        bottom_bboxes = [box for box in bottom_bboxes if box[Y0_IDX]==bottom_bboxes[0][Y0_IDX]]
        # among those with equal y0, take the one with the largest x1
bottom_bboxes.sort(key=lambda x: x[X1_IDX], reverse=True)
bottom_bboxes = bottom_bboxes[0]
else:
bottom_bboxes = None
return bottom_bboxes
def find_bottom_bbox_direct_from_left_edge(this_bbox, all_bboxes) -> list:
"""
找到在this_bbox下侧且距离this_bbox距离最近的bbox.必须是直接遮挡的那种
"""
bottom_bboxes = [box for box in all_bboxes if box[Y0_IDX] >= this_bbox[Y1_IDX] and any([
this_bbox[X0_IDX] < box[X0_IDX] < this_bbox[X1_IDX], this_bbox[X0_IDX] < box[X1_IDX] < this_bbox[X1_IDX],
box[X0_IDX] < this_bbox[X0_IDX] < box[X1_IDX], box[X0_IDX] < this_bbox[X1_IDX] < box[X1_IDX],
box[X0_IDX]==this_bbox[X0_IDX] and box[X1_IDX]==this_bbox[X1_IDX]])]
if len(bottom_bboxes)>0:
        # the one with the smallest y0 and the smallest x0
        bottom_bboxes.sort(key=lambda x: x[Y0_IDX])
        bottom_bboxes = [box for box in bottom_bboxes if box[Y0_IDX]==bottom_bboxes[0][Y0_IDX]]
        # among those with equal y0, take the one with the smallest x0
bottom_bboxes.sort(key=lambda x: x[X0_IDX])
bottom_bboxes = bottom_bboxes[0]
else:
bottom_bboxes = None
return bottom_bboxes
def find_top_bbox_direct_from_left_edge(this_bbox, all_bboxes) -> list:
"""
找到在this_bbox上侧且距离this_bbox距离最近的bbox.必须是直接遮挡的那种
"""
top_bboxes = [box for box in all_bboxes if box[Y1_IDX] <= this_bbox[Y0_IDX] and any([
box[X0_IDX] < this_bbox[X0_IDX] < box[X1_IDX], box[X0_IDX] < this_bbox[X1_IDX] < box[X1_IDX],
this_bbox[X0_IDX] < box[X0_IDX] < this_bbox[X1_IDX], this_bbox[X0_IDX] < box[X1_IDX] < this_bbox[X1_IDX],
box[X0_IDX]==this_bbox[X0_IDX] and box[X1_IDX]==this_bbox[X1_IDX]])]
if len(top_bboxes)>0:
        # the one with the largest y1 and the smallest x0
        top_bboxes.sort(key=lambda x: x[Y1_IDX], reverse=True)
        top_bboxes = [box for box in top_bboxes if box[Y1_IDX]==top_bboxes[0][Y1_IDX]]
        # among those with equal y1, take the one with the smallest x0
top_bboxes.sort(key=lambda x: x[X0_IDX])
top_bboxes = top_bboxes[0]
else:
top_bboxes = None
return top_bboxes
def find_top_bbox_direct_from_right_edge(this_bbox, all_bboxes) -> list:
"""
找到在this_bbox上侧且距离this_bbox距离最近的bbox.必须是直接遮挡的那种
"""
top_bboxes = [box for box in all_bboxes if box[Y1_IDX] <= this_bbox[Y0_IDX] and any([
box[X0_IDX] < this_bbox[X0_IDX] < box[X1_IDX], box[X0_IDX] < this_bbox[X1_IDX] < box[X1_IDX],
this_bbox[X0_IDX] < box[X0_IDX] < this_bbox[X1_IDX], this_bbox[X0_IDX] < box[X1_IDX] < this_bbox[X1_IDX],
box[X0_IDX]==this_bbox[X0_IDX] and box[X1_IDX]==this_bbox[X1_IDX]])]
if len(top_bboxes)>0:
        # the one with the largest y1 and the largest x1
        top_bboxes.sort(key=lambda x: x[Y1_IDX], reverse=True)
        top_bboxes = [box for box in top_bboxes if box[Y1_IDX]==top_bboxes[0][Y1_IDX]]
        # among those with equal y1, take the one with the largest x1
top_bboxes.sort(key=lambda x: x[X1_IDX], reverse=True)
top_bboxes = top_bboxes[0]
else:
top_bboxes = None
return top_bboxes
# ===================================================================================================================
def get_left_edge_bboxes(all_bboxes) -> list:
"""
返回最左边的bbox
"""
left_bboxes = [box for box in all_bboxes if find_all_left_bbox_direct(box, all_bboxes) is None]
return left_bboxes
def get_right_edge_bboxes(all_bboxes) -> list:
"""
返回最右边的bbox
"""
right_bboxes = [box for box in all_bboxes if find_all_right_bbox_direct(box, all_bboxes) is None]
return right_bboxes
def fix_vertical_bbox_pos(bboxes:list):
"""
检查这批bbox在垂直方向是否有轻微的重叠,如果重叠了,就把重叠的bbox往下移动一点
在x方向上必须一个包含或者被包含,或者完全重叠,不能只有部分重叠
"""
bboxes.sort(key=lambda x: x[Y0_IDX]) # 从上向下排列
for i in range(0, len(bboxes)):
for j in range(i+1, len(bboxes)):
if _is_bottom_full_overlap(bboxes[i][:4], bboxes[j][:4]):
# 如果两个bbox有部分重叠,那么就把下面的bbox往下移动一点
bboxes[j][Y0_IDX] = bboxes[i][Y1_IDX] + 2 # 2是个经验值
break
return bboxes
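# Illustrative sketch (hypothetical demo): two stacked boxes with the same
# x-range that overlap slightly in y; if _is_bottom_full_overlap fires, the
# lower box is nudged to start 2 units below the upper one.
def _demo_fix_vertical_bbox_pos():
    upper = [0, 0, 100, 50, None, None, None, 'text', None, None, None, None]
    lower = [0, 48, 100, 90, None, None, None, 'text', None, None, None, None]
    return fix_vertical_bbox_pos([upper, lower])  # lower[Y0_IDX] would become 52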
"""对pdf上的box进行layout识别,并对内部组成的box进行排序."""
from loguru import logger
from magic_pdf.layout.bbox_sort import (CONTENT_IDX, CONTENT_TYPE_IDX,
X0_EXT_IDX, X0_IDX, X1_EXT_IDX, X1_IDX,
Y0_EXT_IDX, Y0_IDX, Y1_EXT_IDX, Y1_IDX,
paper_bbox_sort)
from magic_pdf.layout.layout_det_utils import (
find_all_bottom_bbox_direct, find_all_left_bbox_direct,
find_all_right_bbox_direct, find_all_top_bbox_direct,
find_bottom_bbox_direct_from_left_edge,
find_bottom_bbox_direct_from_right_edge,
find_top_bbox_direct_from_left_edge, find_top_bbox_direct_from_right_edge,
get_left_edge_bboxes, get_right_edge_bboxes)
from magic_pdf.libs.boxbase import get_bbox_in_boundary
LAYOUT_V = 'V'
LAYOUT_H = 'H'
LAYOUT_UNPROC = 'U'
LAYOUT_BAD = 'B'
def _is_single_line_text(bbox):
"""检查bbox里面的文字是否只有一行."""
return True # TODO
box_type = bbox[CONTENT_TYPE_IDX]
if box_type != 'text':
return False
paras = bbox[CONTENT_IDX]['paras']
text_content = ''
    for para_id, para in paras.items():  # stitch together the paragraph text
is_title = para['is_title']
if is_title != 0:
text_content += f"## {para['text']}"
else:
text_content += para['text']
text_content += '\n\n'
return bbox[CONTENT_TYPE_IDX] == 'text' and len(text_content.split('\n\n')) <= 1
def _horizontal_split(bboxes: list, boundary: tuple, avg_font_size=20) -> list:
"""
对bboxes进行水平切割
方法是:找到左侧和右侧都没有被直接遮挡的box,然后进行扩展,之后进行切割
return:
返回几个大的Layout区域 [[x0, y0, x1, y1, "h|u|v"], ], h代表水平,u代表未探测的,v代表垂直布局
"""
sorted_layout_blocks = [] # 这是要最终返回的值
bound_x0, bound_y0, bound_x1, bound_y1 = boundary
all_bboxes = get_bbox_in_boundary(bboxes, boundary)
# all_bboxes = paper_bbox_sort(all_bboxes, abs(bound_x1-bound_x0), abs(bound_y1-bound_x0)) # 大致拍下序, 这个是基于直接遮挡的。
"""
首先在水平方向上扩展独占一行的bbox
"""
last_h_split_line_y1 = bound_y0 # 记录下上次的水平分割线
for i, bbox in enumerate(all_bboxes):
        left_nearest_bbox = find_all_left_bbox_direct(bbox, all_bboxes)  # no extension lines
        right_nearest_bbox = find_all_right_bbox_direct(bbox, all_bboxes)
        if left_nearest_bbox is None and right_nearest_bbox is None:  # occupies a whole row by itself
            """
            However, an isolated single line of text only qualifies if it also satisfies one of:
            1. the bbox crosses the page's center line, or
            2. there is another full-row bbox of the same kind above or below it, or
            3. TODO stronger condition: if the bbox sits inside the same column above and below, it cannot count as a full row
            """
            # First check whether this bbox contains only one line of text
            # is_single_line = _is_single_line_text(bbox)
            """
            One thing to note: when the page content is not centered, the first call passes the page boundary, and mid_x is then not the true center line.
            So compute the tightest boundary here first, and derive mid_x from that.
            """
boundary_real_x0, boundary_real_x1 = min(
[bbox[X0_IDX] for bbox in all_bboxes]
), max([bbox[X1_IDX] for bbox in all_bboxes])
mid_x = (boundary_real_x0 + boundary_real_x1) / 2
            # Check whether this box's content crosses the center line
            # it must extend at least 2 character widths past it
is_cross_boundary_mid_line = (
min(mid_x - bbox[X0_IDX], bbox[X1_IDX] - mid_x) > avg_font_size * 2
)
"""
检查条件2
"""
is_belong_to_col = False
"""
检查是否能被上方col吸收,方法是:
1. 上方非空且不是独占一行的,并且
2. 从上个水平分割的最大y=y1开始到当前bbox,最左侧的bbox的[min_x0, max_x1],能够覆盖当前box的[x0, x1]
"""
"""
以迭代的方式向上找,查找范围是[bound_x0, last_h_sp, bound_x1, bbox[Y0_IDX]]
"""
# 先确定上方的y0, y0
b_y0, b_y1 = last_h_split_line_y1, bbox[Y0_IDX]
# 然后从box开始逐个向上找到所有与box在x上有交集的box
box_to_check = [bound_x0, b_y0, bound_x1, b_y1]
bbox_in_bound_check = get_bbox_in_boundary(all_bboxes, box_to_check)
bboxes_on_top = []
virtual_box = bbox
while True:
b_on_top = find_all_top_bbox_direct(virtual_box, bbox_in_bound_check)
if b_on_top is not None:
bboxes_on_top.append(b_on_top)
virtual_box = [
min([virtual_box[X0_IDX], b_on_top[X0_IDX]]),
min(virtual_box[Y0_IDX], b_on_top[Y0_IDX]),
max([virtual_box[X1_IDX], b_on_top[X1_IDX]]),
b_y1,
]
else:
break
            # Then determine the min x0 and max x1 of these boxes
            if len(bboxes_on_top) > 0 and len(bboxes_on_top) != len(
                bbox_in_bound_check
            ):  # virtual_box may have swollen to fill the whole region; then it cannot belong to a single column
                min_x0, max_x1 = virtual_box[X0_IDX], virtual_box[X1_IDX]
                # Then a rather coarse test: does [min_x0, max_x1] touch any box inside [bound_x0, last_h_sp, bound_x1, bbox[Y0_IDX]]?
if not any(
[
b[X0_IDX] <= min_x0 - 1 <= b[X1_IDX]
or b[X0_IDX] <= max_x1 + 1 <= b[X1_IDX]
for b in bbox_in_bound_check
]
):
                    # Neither above nor below may extend into a full row; for now only the top is checked TODO
top_nearest_bbox = find_all_top_bbox_direct(bbox, bboxes)
bottom_nearest_bbox = find_all_bottom_bbox_direct(bbox, bboxes)
if not any(
[
top_nearest_bbox is not None
and (
find_all_left_bbox_direct(top_nearest_bbox, bboxes)
is None
and find_all_right_bbox_direct(top_nearest_bbox, bboxes)
is None
),
bottom_nearest_bbox is not None
and (
find_all_left_bbox_direct(bottom_nearest_bbox, bboxes)
is None
and find_all_right_bbox_direct(
bottom_nearest_bbox, bboxes
)
is None
),
top_nearest_bbox is None or bottom_nearest_bbox is None,
]
):
is_belong_to_col = True
            # Check whether the box can be absorbed by the column below TODO
            """
            Why is there no is_cross_boundary_mid_line condition here?
            Some journals do have two columns of unequal width.
            """
if not is_belong_to_col or is_cross_boundary_mid_line:
bbox[X0_EXT_IDX] = bound_x0
bbox[Y0_EXT_IDX] = bbox[Y0_IDX]
bbox[X1_EXT_IDX] = bound_x1
bbox[Y1_EXT_IDX] = bbox[Y1_IDX]
                last_h_split_line_y1 = bbox[Y1_IDX]  # update this split line
else:
continue
"""
此时独占一行的被成功扩展到指定的边界上,这个时候利用边界条件合并连续的bbox,成为一个group
然后合并所有连续水平方向的bbox.
"""
all_bboxes.sort(key=lambda x: x[Y0_IDX])
h_bboxes = []
h_bbox_group = []
for bbox in all_bboxes:
if bbox[X0_EXT_IDX] == bound_x0 and bbox[X1_EXT_IDX] == bound_x1:
h_bbox_group.append(bbox)
else:
if len(h_bbox_group) > 0:
h_bboxes.append(h_bbox_group)
h_bbox_group = []
    # the last group
if len(h_bbox_group) > 0:
h_bboxes.append(h_bbox_group)
"""
现在h_bboxes里面是所有的group了,每个group都是一个list
对h_bboxes里的每个group进行计算放回到sorted_layouts里
"""
h_layouts = []
for gp in h_bboxes:
gp.sort(key=lambda x: x[Y0_IDX])
        # Compute the group's layout_bbox: the smallest x0, y0 and the largest x1, y1
x0, y0, x1, y1 = (
gp[0][X0_EXT_IDX],
gp[0][Y0_EXT_IDX],
gp[-1][X1_EXT_IDX],
gp[-1][Y1_EXT_IDX],
)
        h_layouts.append([x0, y0, x1, y1, LAYOUT_H])  # horizontal layout
    """
    Next use the y0, y1 of these consecutive horizontal groups' layout_bboxes to split the remaining boxes into horizontal bands
    """
h_split_lines = [bound_y0]
    for gp in h_bboxes:  # gp is a list[bbox_list]
y0, y1 = gp[0][1], gp[-1][3]
h_split_lines.append(y0)
h_split_lines.append(y1)
h_split_lines.append(bound_y1)
unsplited_bboxes = []
for i in range(0, len(h_split_lines), 2):
start_y0, start_y1 = h_split_lines[i : i + 2]
        # Collect the other bboxes lying between [start_y0, start_y1]; they form one unsplit block
bboxes_in_block = [
bbox
for bbox in all_bboxes
if bbox[Y0_IDX] >= start_y0 and bbox[Y1_IDX] <= start_y1
]
unsplited_bboxes.append(bboxes_in_block)
    # Then append the unprocessed parts to h_layouts
for bboxes_in_block in unsplited_bboxes:
if len(bboxes_in_block) == 0:
continue
x0, y0, x1, y1 = (
bound_x0,
min([bbox[Y0_IDX] for bbox in bboxes_in_block]),
bound_x1,
max([bbox[Y1_IDX] for bbox in bboxes_in_block]),
)
h_layouts.append([x0, y0, x1, y1, LAYOUT_UNPROC])
    h_layouts.sort(key=lambda x: x[1])  # sort by y0, i.e. top to bottom
    """
    Convert to the following format and return
    """
for layout in h_layouts:
sorted_layout_blocks.append(
{
'layout_bbox': layout[:4],
'layout_label': layout[4],
'sub_layout': [],
}
)
return sorted_layout_blocks
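# Illustrative sketch (hypothetical demo): a full-width title above two columns.
# The title row should come back labeled 'H' and the column area 'U'
# (unprocessed), to be split vertically by a later pass.
def _demo_horizontal_split():
    title = [10, 10, 590, 40, None, None, None, 'text', None, None, None, None]
    left = [10, 60, 290, 700, None, None, None, 'text', None, None, None, None]
    right = [310, 60, 590, 700, None, None, None, 'text', None, None, None, None]
    return _horizontal_split([title, left, right], (0, 0, 600, 800))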
###############################################################################################
#
# Vertical processing
#
#
###############################################################################################
def _vertical_align_split_v1(bboxes: list, boundary: tuple) -> list:
"""
计算垂直方向上的对齐, 并分割bboxes成layout。负责对一列多行的进行列维度分割。
如果不能完全分割,剩余部分作为layout_lable为u的layout返回
-----------------------
| | |
| | |
| | |
| | |
-------------------------
此函数会将:以上布局将会切分出来2列
"""
    sorted_layout_blocks = []  # the final return value
    new_boundary = [boundary[0], boundary[1], boundary[2], boundary[3]]
    v_blocks = []
    """
    First split from left to right
    """
while True:
all_bboxes = get_bbox_in_boundary(bboxes, new_boundary)
left_edge_bboxes = get_left_edge_bboxes(all_bboxes)
if len(left_edge_bboxes) == 0:
break
right_split_line_x1 = max([bbox[X1_IDX] for bbox in left_edge_bboxes]) + 1
        # Then check that this line does not intersect or touch the left edge of any other bbox
        if any(
            [bbox[X0_IDX] <= right_split_line_x1 <= bbox[X1_IDX] for bbox in all_bboxes]
        ):
            # The vertical split line intersects some boxes, so a full vertical split is impossible.
            break
        else:  # successfully split off one column
            # Use the leftmost bbox's left edge as the layout's x0
            layout_x0 = min(
                [bbox[X0_IDX] for bbox in left_edge_bboxes]
            )  # mainly so the drawing leaves a bit of spacing
v_blocks.append(
[
layout_x0,
new_boundary[1],
right_split_line_x1,
new_boundary[3],
LAYOUT_V,
]
)
        new_boundary[0] = right_split_line_x1  # update the boundary
    """
    Now split from right to left; whatever still cannot be split is returned as a layout with layout_label 'u'
    """
unsplited_block = []
while True:
all_bboxes = get_bbox_in_boundary(bboxes, new_boundary)
right_edge_bboxes = get_right_edge_bboxes(all_bboxes)
if len(right_edge_bboxes) == 0:
break
left_split_line_x0 = min([bbox[X0_IDX] for bbox in right_edge_bboxes]) - 1
        # Then check that this line does not intersect or touch the left edge of any other bbox
        if any(
            [bbox[X0_IDX] <= left_split_line_x0 <= bbox[X1_IDX] for bbox in all_bboxes]
        ):
            # this is the remainder
unsplited_block.append(
[
new_boundary[0],
new_boundary[1],
new_boundary[2],
new_boundary[3],
LAYOUT_UNPROC,
]
)
break
else:
            # Use the rightmost bbox's right edge as the layout's x1
layout_x1 = max([bbox[X1_IDX] for bbox in right_edge_bboxes])
v_blocks.append(
[
left_split_line_x0,
new_boundary[1],
layout_x1,
new_boundary[3],
LAYOUT_V,
]
)
        new_boundary[2] = left_split_line_x0  # update the right boundary
    """
    Finally assemble into the layout format and return
    """
for block in v_blocks:
sorted_layout_blocks.append(
{
'layout_bbox': block[:4],
'layout_label': block[4],
'sub_layout': [],
}
)
for block in unsplited_block:
sorted_layout_blocks.append(
{
'layout_bbox': block[:4],
'layout_label': block[4],
'sub_layout': [],
}
)
    # sort by x0
sorted_layout_blocks.sort(key=lambda x: x['layout_bbox'][0])
return sorted_layout_blocks
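# Illustrative sketch (hypothetical demo): two clean columns inside the boundary
# should come back as two 'V' layouts, left column first.
def _demo_vertical_align_split_v1():
    left = [10, 10, 290, 700, None, None, None, 'text', None, None, None, None]
    right = [310, 10, 590, 700, None, None, None, 'text', None, None, None, None]
    return _vertical_align_split_v1([left, right], (0, 0, 600, 800))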
def _vertical_align_split_v2(bboxes: list, boundary: tuple) -> list:
"""改进的
_vertical_align_split算法,原算法会因为第二列的box由于左侧没有遮挡被认为是左侧的一部分,导致整个layout多列被识别为一列。
利用从左上角的box开始向下看的方法,不断扩展w_x0, w_x1,直到不能继续向下扩展,或者到达边界下边界。"""
sorted_layout_blocks = [] # 这是要最终返回的值
new_boundary = [boundary[0], boundary[1], boundary[2], boundary[3]]
bad_boxes = [] # 被割中的box
v_blocks = []
while True:
all_bboxes = get_bbox_in_boundary(bboxes, new_boundary)
if len(all_bboxes) == 0:
break
left_top_box = min(
all_bboxes, key=lambda x: (x[X0_IDX], x[Y0_IDX])
        )  # should be strengthened: verify it really is in the first column TODO
start_box = [
left_top_box[X0_IDX],
left_top_box[Y0_IDX],
left_top_box[X1_IDX],
left_top_box[Y1_IDX],
]
w_x0, w_x1 = left_top_box[X0_IDX], left_top_box[X1_IDX]
"""
然后沿着这个box线向下找最近的那个box, 然后扩展w_x0, w_x1
扩展之后,宽度会增加,随后用x=w_x1来检测在边界内是否有box与相交,如果相交,那么就说明不能再扩展了。
当不能扩展的时候就要看是否到达下边界:
1. 达到,那么更新左边界继续分下一个列
2. 没有达到,那么此时开始从右侧切分进入下面的循环里
"""
        while left_top_box is not None:  # search downward
virtual_box = [w_x0, left_top_box[Y0_IDX], w_x1, left_top_box[Y1_IDX]]
left_top_box = find_bottom_bbox_direct_from_left_edge(
virtual_box, all_bboxes
)
if left_top_box:
w_x0, w_x1 = min(virtual_box[X0_IDX], left_top_box[X0_IDX]), max(
[virtual_box[X1_IDX], left_top_box[X1_IDX]]
)
        # In case the initial box sat in the middle of the column, also look upward
        start_box = [
            w_x0,
            start_box[Y0_IDX],
            w_x1,
            start_box[Y1_IDX],
        ]  # widen it first, for robustness
left_top_box = find_top_bbox_direct_from_left_edge(start_box, all_bboxes)
        while left_top_box is not None:  # search upward
virtual_box = [w_x0, left_top_box[Y0_IDX], w_x1, left_top_box[Y1_IDX]]
left_top_box = find_top_bbox_direct_from_left_edge(virtual_box, all_bboxes)
if left_top_box:
w_x0, w_x1 = min(virtual_box[X0_IDX], left_top_box[X0_IDX]), max(
[virtual_box[X1_IDX], left_top_box[X1_IDX]]
)
        # check for intersections
if any([bbox[X0_IDX] <= w_x1 + 1 <= bbox[X1_IDX] for bbox in all_bboxes]):
for b in all_bboxes:
if b[X0_IDX] <= w_x1 + 1 <= b[X1_IDX]:
bad_boxes.append([b[X0_IDX], b[Y0_IDX], b[X1_IDX], b[Y1_IDX]])
break
        else:  # successfully split off one column
            v_blocks.append([w_x0, new_boundary[1], w_x1, new_boundary[3], LAYOUT_V])
            new_boundary[0] = w_x1  # update the boundary
    """
    Next, scan starting from the top-right box
    """
w_x0, w_x1 = 0, 0
unsplited_block = []
while True:
all_bboxes = get_bbox_in_boundary(bboxes, new_boundary)
if len(all_bboxes) == 0:
break
        # First find the box with the largest X1
bbox_list_sorted = sorted(
all_bboxes, key=lambda bbox: bbox[X1_IDX], reverse=True
)
# Then, find the boxes with the smallest Y0 value
bigest_x1 = bbox_list_sorted[0][X1_IDX]
boxes_with_bigest_x1 = [
bbox for bbox in bbox_list_sorted if bbox[X1_IDX] == bigest_x1
        ]  # i.e. the rightmost ones
        right_top_box = min(
            boxes_with_bigest_x1, key=lambda bbox: bbox[Y0_IDX]
        )  # the one with the smallest y0
start_box = [
right_top_box[X0_IDX],
right_top_box[Y0_IDX],
right_top_box[X1_IDX],
right_top_box[Y1_IDX],
]
w_x0, w_x1 = right_top_box[X0_IDX], right_top_box[X1_IDX]
while right_top_box is not None:
virtual_box = [w_x0, right_top_box[Y0_IDX], w_x1, right_top_box[Y1_IDX]]
right_top_box = find_bottom_bbox_direct_from_right_edge(
virtual_box, all_bboxes
)
if right_top_box:
w_x0, w_x1 = min([w_x0, right_top_box[X0_IDX]]), max(
[w_x1, right_top_box[X1_IDX]]
)
        # Then scan upward as well
        start_box = [
            w_x0,
            start_box[Y0_IDX],
            w_x1,
            start_box[Y1_IDX],
        ]  # widen it first, for robustness
right_top_box = find_top_bbox_direct_from_right_edge(start_box, all_bboxes)
while right_top_box is not None:
virtual_box = [w_x0, right_top_box[Y0_IDX], w_x1, right_top_box[Y1_IDX]]
right_top_box = find_top_bbox_direct_from_right_edge(
virtual_box, all_bboxes
)
if right_top_box:
w_x0, w_x1 = min([w_x0, right_top_box[X0_IDX]]), max(
[w_x1, right_top_box[X1_IDX]]
)
        # Check for intersections with other boxes: if the vertical split line cuts a box, a full vertical split is impossible.
if any([bbox[X0_IDX] <= w_x0 - 1 <= bbox[X1_IDX] for bbox in all_bboxes]):
unsplited_block.append(
[
new_boundary[0],
new_boundary[1],
new_boundary[2],
new_boundary[3],
LAYOUT_UNPROC,
]
)
for b in all_bboxes:
if b[X0_IDX] <= w_x0 - 1 <= b[X1_IDX]:
bad_boxes.append([b[X0_IDX], b[Y0_IDX], b[X1_IDX], b[Y1_IDX]])
break
        else:  # successfully split off one column
            v_blocks.append([w_x0, new_boundary[1], w_x1, new_boundary[3], LAYOUT_V])
            new_boundary[2] = w_x0
    """Convert the data structure"""
for block in v_blocks:
sorted_layout_blocks.append(
{
'layout_bbox': block[:4],
'layout_label': block[4],
'sub_layout': [],
}
)
for block in unsplited_block:
sorted_layout_blocks.append(
{
'layout_bbox': block[:4],
'layout_label': block[4],
'sub_layout': [],
                'bad_boxes': bad_boxes,  # record the boxes that were cut through
}
)
    # sort by x0
sorted_layout_blocks.sort(key=lambda x: x['layout_bbox'][0])
return sorted_layout_blocks
def _try_horizontal_mult_column_split(bboxes: list, boundary: tuple) -> list:
"""
尝试水平切分,如果切分不动,那就当一个BAD_LAYOUT返回
------------------
| | |
------------------
| | | | <- 这里是此函数要切分的场景
------------------
| | |
| | |
"""
pass
def _vertical_split(bboxes: list, boundary: tuple) -> list:
"""
从垂直方向进行切割,分block
这个版本里,如果垂直切分不动,那就当一个BAD_LAYOUT返回
--------------------------
| | |
| | |
| |
这种列是此函数要切分的 -> | |
| |
| | |
| | |
-------------------------
"""
sorted_layout_blocks = [] # 这是要最终返回的值
bound_x0, bound_y0, bound_x1, bound_y1 = boundary
all_bboxes = get_bbox_in_boundary(bboxes, boundary)
"""
all_bboxes = fix_vertical_bbox_pos(all_bboxes) # 垂直方向解覆盖
all_bboxes = fix_hor_bbox_pos(all_bboxes) # 水平解覆盖
这两行代码目前先不执行,因为公式检测,表格检测还不是很成熟,导致非常多的textblock参与了运算,时间消耗太大。
这两行代码的作用是:
如果遇到互相重叠的bbox, 那么会把面积较小的box进行压缩,从而避免重叠。对布局切分来说带来正反馈。
"""
# all_bboxes = paper_bbox_sort(all_bboxes, abs(bound_x1-bound_x0), abs(bound_y1-bound_x0)) # 大致拍下序, 这个是基于直接遮挡的。
"""
首先在垂直方向上扩展独占一行的bbox
"""
for bbox in all_bboxes:
        top_nearest_bbox = find_all_top_bbox_direct(bbox, all_bboxes)  # no extension lines
bottom_nearest_bbox = find_all_bottom_bbox_direct(bbox, all_bboxes)
if (
top_nearest_bbox is None
and bottom_nearest_bbox is None
and not any(
[
b[X0_IDX] < bbox[X1_IDX] < b[X1_IDX]
or b[X0_IDX] < bbox[X0_IDX] < b[X1_IDX]
for b in all_bboxes
]
)
        ):  # occupies a column by itself and overlaps nothing else
bbox[X0_EXT_IDX] = bbox[X0_IDX]
bbox[Y0_EXT_IDX] = bound_y0
bbox[X1_EXT_IDX] = bbox[X1_IDX]
bbox[Y1_EXT_IDX] = bound_y1
"""
此时独占一列的被成功扩展到指定的边界上,这个时候利用边界条件合并连续的bbox,成为一个group
然后合并所有连续垂直方向的bbox.
"""
all_bboxes.sort(key=lambda x: x[X0_IDX])
# fix: 这里水平方向的列不要合并成一个行,因为需要保证返回给下游的最小block,总是可以无脑从上到下阅读文字。
v_bboxes = []
for box in all_bboxes:
if box[Y0_EXT_IDX] == bound_y0 and box[Y1_EXT_IDX] == bound_y1:
v_bboxes.append(box)
"""
现在v_bboxes里面是所有的group了,每个group都是一个list
对v_bboxes里的每个group进行计算放回到sorted_layouts里
"""
v_layouts = []
for vbox in v_bboxes:
# gp.sort(key=lambda x: x[X0_IDX])
        # The layout_bbox is the box's extended coordinates
x0, y0, x1, y1 = (
vbox[X0_EXT_IDX],
vbox[Y0_EXT_IDX],
vbox[X1_EXT_IDX],
vbox[Y1_EXT_IDX],
)
        v_layouts.append([x0, y0, x1, y1, LAYOUT_V])  # vertical layout
    """
    Next use the x0, x1 of these consecutive vertical bboxes to split the remaining boxes into vertical bands
    """
v_split_lines = [bound_x0]
for gp in v_bboxes:
x0, x1 = gp[X0_IDX], gp[X1_IDX]
v_split_lines.append(x0)
v_split_lines.append(x1)
v_split_lines.append(bound_x1)
unsplited_bboxes = []
for i in range(0, len(v_split_lines), 2):
start_x0, start_x1 = v_split_lines[i : i + 2]
        # Collect the other bboxes lying between [start_x0, start_x1]; they form one unsplit block
bboxes_in_block = [
bbox
for bbox in all_bboxes
if bbox[X0_IDX] >= start_x0 and bbox[X1_IDX] <= start_x1
]
unsplited_bboxes.append(bboxes_in_block)
    # Then append the unprocessed parts to v_layouts
for bboxes_in_block in unsplited_bboxes:
if len(bboxes_in_block) == 0:
continue
x0, y0, x1, y1 = (
min([bbox[X0_IDX] for bbox in bboxes_in_block]),
bound_y0,
max([bbox[X1_IDX] for bbox in bboxes_in_block]),
bound_y1,
)
        v_layouts.append(
            [x0, y0, x1, y1, LAYOUT_UNPROC]
        )  # this region could not be analyzed into a reliable layout
    v_layouts.sort(key=lambda x: x[0])  # sort by x0, i.e. left to right
for layout in v_layouts:
sorted_layout_blocks.append(
{
'layout_bbox': layout[:4],
'layout_label': layout[4],
'sub_layout': [],
}
)
"""
至此,垂直方向切成了2种类型,其一是独占一列的,其二是未处理的。
下面对这些未处理的进行垂直方向切分,这个切分要切出来类似“吕”这种类型的垂直方向的布局
"""
for i, layout in enumerate(sorted_layout_blocks):
if layout['layout_label'] == LAYOUT_UNPROC:
x0, y0, x1, y1 = layout['layout_bbox']
v_split_layouts = _vertical_align_split_v2(bboxes, [x0, y0, x1, y1])
sorted_layout_blocks[i] = {
'layout_bbox': [x0, y0, x1, y1],
'layout_label': LAYOUT_H,
'sub_layout': v_split_layouts,
}
            layout['layout_label'] = LAYOUT_H  # the vertical lines split it into a horizontal arrangement
return sorted_layout_blocks
def split_layout(bboxes: list, boundary: tuple, page_num: int) -> list:
"""
    Split the bboxes into layouts
    return:
    [
        {
            "layout_bbox": [x0,y0,x1,y1],
            "layout_label":"u|v|h|b", unprocessed|vertical|horizontal|BAD_LAYOUT
            "sub_layout":[] # each element is [
                x0,y0,
                x1,y1,
                block_content,
                idx_x,idx_y,
                content_type,
                ext_x0,ext_y0,
                ext_x1,ext_y1
            ], in reading order
}
]
example:
[
{
"layout_bbox": [0, 0, 100, 100],
"layout_label":"u|v|h|b",
"sub_layout":[
]
},
{
"layout_bbox": [0, 0, 100, 100],
"layout_label":"u|v|h|b",
"sub_layout":[
{
"layout_bbox": [0, 0, 100, 100],
"layout_label":"u|v|h|b",
"content_bboxes":[
[],
[],
[]
]
},
{
"layout_bbox": [0, 0, 100, 100],
"layout_label":"u|v|h|b",
"sub_layout":[
]
}
}
]
"""
    sorted_layouts = []  # the final return value
boundary_x0, boundary_y0, boundary_x1, boundary_y1 = boundary
if len(bboxes) <= 1:
return [
{
'layout_bbox': [boundary_x0, boundary_y0, boundary_x1, boundary_y1],
'layout_label': LAYOUT_V,
'sub_layout': [],
}
]
"""
接下来按照先水平后垂直的顺序进行切分
"""
bboxes = paper_bbox_sort(
bboxes, boundary_x1 - boundary_x0, boundary_y1 - boundary_y0
)
    sorted_layouts = _horizontal_split(bboxes, boundary)  # layouts produced by the horizontal split
for i, layout in enumerate(sorted_layouts):
x0, y0, x1, y1 = layout['layout_bbox']
layout_type = layout['layout_label']
        if layout_type == LAYOUT_UNPROC:  # not a full row by itself, so it needs a vertical split
            v_split_layouts = _vertical_split(bboxes, [x0, y0, x1, y1])
            """
            One subtlety at the end: if this call separates out only a single column layout, the split exceeded the algorithm's ability.
            We assume the incoming boxes already had every full row stripped, so multiple columns must remain here. If only one layout
            comes back, holding multiple boxes, it cannot be split and is marked LAYOUT_UNPROC.
            """
            layout_label = LAYOUT_V
            if len(v_split_layouts) == 1:
                if len(v_split_layouts[0]['sub_layout']) == 0:
                    layout_label = LAYOUT_UNPROC
                    # logger.warning(f"WARNING: pageno={page_num}, un-splittable layout: ", v_split_layouts)
            """
            Assemble the final layout
            """
sorted_layouts[i] = {
'layout_bbox': [x0, y0, x1, y1],
'layout_label': layout_label,
'sub_layout': v_split_layouts,
}
layout['layout_label'] = LAYOUT_H
"""
水平和垂直方向都切分完毕了。此时还有一些未处理的,这些未处理的可能是因为水平和垂直方向都无法切分。
这些最后调用_try_horizontal_mult_block_split做一次水平多个block的联合切分,如果也不能切分最终就当做BAD_LAYOUT返回
"""
# TODO
return sorted_layouts
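# Illustrative sketch (hypothetical demo): the same toy page as in
# _demo_horizontal_split; split_layout should return a tree whose first node is
# the full-width title row and whose second node carries the two columns as
# sub-layouts.
def _demo_split_layout():
    title = [10, 10, 590, 40, None, None, None, 'text', None, None, None, None]
    left = [10, 60, 290, 700, None, None, None, 'text', None, None, None, None]
    right = [310, 60, 590, 700, None, None, None, 'text', None, None, None, None]
    return split_layout([title, left, right], (0, 0, 600, 800), page_num=0)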
def get_bboxes_layout(all_boxes: list, boundary: tuple, page_id: int):
"""
对利用layout排序之后的box,进行排序
return:
[
{
"layout_bbox": [x0, y0, x1, y1],
"layout_label":"u|v|h|b", 未处理|垂直|水平|BAD_LAYOUT
},
]
"""
def _preorder_traversal(layout):
"""对sorted_layouts的叶子节点,也就是len(sub_layout)==0的节点进行排序。排序按照前序遍历的顺序,也就是从上到
下,从左到右的顺序."""
sorted_layout_blocks = []
for layout in layout:
sub_layout = layout['sub_layout']
if len(sub_layout) == 0:
sorted_layout_blocks.append(layout)
else:
s = _preorder_traversal(sub_layout)
sorted_layout_blocks.extend(s)
return sorted_layout_blocks
# -------------------------------------------------------------------------------------------------------------------------
    sorted_layouts = split_layout(
        all_boxes, boundary, page_id
    )  # first split into layouts, producing a tree
total_sorted_layout_blocks = _preorder_traversal(sorted_layouts)
return total_sorted_layout_blocks, sorted_layouts
def get_columns_cnt_of_layout(layout_tree):
"""获取一个layout的宽度."""
max_width_list = [0] # 初始化一个元素,防止max,min函数报错
for items in layout_tree: # 针对每一层(横切)计算列数,横着的算一列
layout_type = items['layout_label']
sub_layouts = items['sub_layout']
if len(sub_layouts) == 0:
max_width_list.append(1)
else:
if layout_type == LAYOUT_H:
max_width_list.append(1)
else:
width = 0
for sub_layout in sub_layouts:
if len(sub_layout['sub_layout']) == 0:
width += 1
else:
for lay in sub_layout['sub_layout']:
width += get_columns_cnt_of_layout([lay])
max_width_list.append(width)
return max(max_width_list)
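# Illustrative sketch (hypothetical demo): a vertical node holding two leaf
# sub-layouts counts as two columns.
def _demo_get_columns_cnt():
    tree = [{
        'layout_label': LAYOUT_V,
        'sub_layout': [
            {'layout_label': LAYOUT_V, 'sub_layout': []},
            {'layout_label': LAYOUT_V, 'sub_layout': []},
        ],
    }]
    return get_columns_cnt_of_layout(tree)  # -> 2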
def sort_with_layout(bboxes: list, page_width, page_height) -> (list, list):
"""输入是一个bbox的list.
获取到输入之后,先进行layout切分,然后对这些bbox进行排序。返回排序后的bboxes
"""
new_bboxes = []
for box in bboxes:
# new_bboxes.append([box[0], box[1], box[2], box[3], None, None, None, 'text', None, None, None, None])
new_bboxes.append(
[
box[0],
box[1],
box[2],
box[3],
None,
None,
None,
'text',
None,
None,
None,
None,
box[4],
]
)
layout_bboxes, _ = get_bboxes_layout(
new_bboxes, tuple([0, 0, page_width, page_height]), 0
)
if any([lay['layout_label'] == LAYOUT_UNPROC for lay in layout_bboxes]):
        logger.warning('drop this pdf, reason: complex layout')
return None, None
sorted_bboxes = []
    # Use each layout bbox to frame a set of boxes, then sort them
for layout in layout_bboxes:
lbox = layout['layout_bbox']
bbox_in_layout = get_bbox_in_boundary(new_bboxes, lbox)
sorted_bbox = paper_bbox_sort(
bbox_in_layout, lbox[2] - lbox[0], lbox[3] - lbox[1]
)
sorted_bboxes.extend(sorted_bbox)
return sorted_bboxes, layout_bboxes
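# Illustrative sketch (hypothetical demo): each input box is
# [x0, y0, x1, y1, payload]; the payload rides along as a 13th slot of the
# internal record and the boxes come back in reading order.
def _demo_sort_with_layout():
    boxes = [
        [10, 60, 290, 700, 'left column'],
        [310, 60, 590, 700, 'right column'],
        [10, 10, 590, 40, 'title'],
    ]
    sorted_bboxes, layouts = sort_with_layout(boxes, page_width=600, page_height=800)
    return sorted_bboxes  # expected reading order: title, left column, right column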
def sort_text_block(text_block, layout_bboxes):
"""对一页的text_block进行排序."""
sorted_text_bbox = []
all_text_bbox = []
    # Build a box => text mapping
box_to_text = {}
for blk in text_block:
box = blk['bbox']
box_to_text[(box[0], box[1], box[2], box[3])] = blk
all_text_bbox.append(box)
# text_blocks_to_sort = []
# for box in box_to_text.keys():
# text_blocks_to_sort.append([box[0], box[1], box[2], box[3], None, None, None, 'text', None, None, None, None])
    # Sort the text_blocks following the order of layout_bboxes
for layout in layout_bboxes:
layout_box = layout['layout_bbox']
text_bbox_in_layout = get_bbox_in_boundary(
all_text_bbox,
[
layout_box[0] - 1,
layout_box[1] - 1,
layout_box[2] + 1,
layout_box[3] + 1,
],
)
# sorted_bbox = paper_bbox_sort(text_bbox_in_layout, layout_box[2]-layout_box[0], layout_box[3]-layout_box[1])
        text_bbox_in_layout.sort(
            key=lambda x: x[1]
        )  # within one layout, sort the boxes top to bottom by y0
# sorted_bbox = [[b] for b in text_blocks_to_sort]
for sb in text_bbox_in_layout:
sorted_text_bbox.append(box_to_text[(sb[0], sb[1], sb[2], sb[3])])
return sorted_text_bbox
"""
找到能分割布局的水平的横线、色块
"""
import os
from magic_pdf.libs.commons import fitz
from magic_pdf.libs.boxbase import _is_in_or_part_overlap
def __rect_filter_by_width(rect, page_w, page_h):
mid_x = page_w/2
if rect[0]< mid_x < rect[2]:
return True
return False
def __rect_filter_by_pos(rect, image_bboxes, table_bboxes):
"""
不能出现在table和image的位置
"""
for box in image_bboxes:
if _is_in_or_part_overlap(rect, box):
return False
for box in table_bboxes:
if _is_in_or_part_overlap(rect, box):
return False
return True
def __debug_show_page(page, bboxes1: list,bboxes2: list,bboxes3: list,):
save_path = "./tmp/debug.pdf"
if os.path.exists(save_path):
        # remove the existing file
        os.remove(save_path)
    # create a new blank PDF file
doc = fitz.open('')
width = page.rect.width
height = page.rect.height
new_page = doc.new_page(width=width, height=height)
shape = new_page.new_shape()
for bbox in bboxes1:
        # draw the original box
        rect = fitz.Rect(*bbox[0:4])
        shape = new_page.new_shape()
        shape.draw_rect(rect)
        shape.finish(color=fitz.pdfcolor['red'], fill=fitz.pdfcolor['blue'], fill_opacity=0.2)
        shape.commit()
for bbox in bboxes2:
        # draw the original box
        rect = fitz.Rect(*bbox[0:4])
        shape = new_page.new_shape()
        shape.draw_rect(rect)
        shape.finish(color=None, fill=fitz.pdfcolor['yellow'], fill_opacity=0.2)
        shape.commit()
for bbox in bboxes3:
        # draw the original box
        rect = fitz.Rect(*bbox[0:4])
        shape = new_page.new_shape()
        shape.draw_rect(rect)
        shape.finish(color=fitz.pdfcolor['red'], fill=None)
        shape.commit()
parent_dir = os.path.dirname(save_path)
if not os.path.exists(parent_dir):
os.makedirs(parent_dir)
doc.save(save_path)
doc.close()
def get_spilter_of_page(page, image_bboxes, table_bboxes):
"""
获取到色块和横线
"""
cdrawings = page.get_cdrawings()
spilter_bbox = []
for block in cdrawings:
if 'fill' in block:
fill = block['fill']
if 'fill' in block and block['fill'] and block['fill']!=(1.0,1.0,1.0):
rect = block['rect']
if __rect_filter_by_width(rect, page.rect.width, page.rect.height) and __rect_filter_by_pos(rect, image_bboxes, table_bboxes):
spilter_bbox.append(list(rect))
"""过滤、修正一下这些box。因为有时候会有一些矩形,高度为0或者为负数,造成layout计算无限循环。如果是负高度或者0高度,统一修正为高度为1"""
for box in spilter_bbox:
if box[3]-box[1] <= 0:
box[3] = box[1] + 1
#__debug_show_page(page, spilter_bbox, [], [])
return spilter_bbox
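# Illustrative usage sketch ('demo.pdf' is a hypothetical local file):
#   doc = fitz.open('demo.pdf')
#   spilter_bboxes = get_spilter_of_page(doc[0], image_bboxes=[], table_bboxes=[])
#   # spilter_bboxes is a list of [x0, y0, x1, y1] rects for the non-white
#   # filled blocks that cross the horizontal midline of the page.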
"""
This is an advanced PyMuPDF utility for detecting multi-column pages.
It can be used in a shell script, or its main function can be imported and
invoked as described below.
Features
---------
- Identify text belonging to (a variable number of) columns on the page.
- Text with different background color is handled separately, allowing for
easier treatment of side remarks, comment boxes, etc.
- Uses text block detection capability to identify text blocks and
uses the block bboxes as primary structuring principle.
- Supports ignoring footers via a footer margin parameter.
- Returns re-created text boundary boxes (integer coordinates), sorted ascending
by the top, then by the left coordinates.
Restrictions
-------------
- Only supporting horizontal, left-to-right text
- Returns a list of text boundary boxes - not the text itself. The caller is
expected to extract text from within the returned boxes.
- Text written above images is ignored altogether (option).
- This utility works as expected in most cases. The following situation cannot
be handled correctly:
* overlapping (non-disjoint) text blocks
* image captions are not recognized and are handled like normal text
Usage
------
- As a CLI shell command use
python multi_column.py input.pdf footer_margin
Where footer margin is the height of the bottom stripe to ignore on each page.
This code is intended to be modified according to your need.
- Use in a Python script as follows:
----------------------------------------------------------------------------------
from multi_column import column_boxes
# for each page execute
bboxes = column_boxes(page, footer_margin=50, no_image_text=True)
# bboxes is a list of fitz.IRect objects, sorted ascending by their y0,
# then x0 coordinates. Their text content can be extracted by all PyMuPDF
# get_text() variants, like for instance the following:
for rect in bboxes:
print(page.get_text(clip=rect, sort=True))
----------------------------------------------------------------------------------
"""
import sys
from magic_pdf.libs.commons import fitz
def column_boxes(page, footer_margin=50, header_margin=50, no_image_text=True):
"""Determine bboxes which wrap a column."""
paths = page.get_drawings()
bboxes = []
# path rectangles
path_rects = []
# image bboxes
img_bboxes = []
# bboxes of non-horizontal text
# avoid when expanding horizontal text boxes
vert_bboxes = []
# compute relevant page area
clip = +page.rect
clip.y1 -= footer_margin # Remove footer area
clip.y0 += header_margin # Remove header area
def can_extend(temp, bb, bboxlist):
"""Determines whether rectangle 'temp' can be extended by 'bb'
without intersecting any of the rectangles contained in 'bboxlist'.
Items of bboxlist may be None if they have been removed.
Returns:
True if 'temp' has no intersections with items of 'bboxlist'.
"""
for b in bboxlist:
if not intersects_bboxes(temp, vert_bboxes) and (
b == None or b == bb or (temp & b).is_empty
):
continue
return False
return True
def in_bbox(bb, bboxes):
"""Return 1-based number if a bbox contains bb, else return 0."""
for i, bbox in enumerate(bboxes):
if bb in bbox:
return i + 1
return 0
def intersects_bboxes(bb, bboxes):
"""Return True if a bbox intersects bb, else return False."""
for bbox in bboxes:
if not (bb & bbox).is_empty:
return True
return False
def extend_right(bboxes, width, path_bboxes, vert_bboxes, img_bboxes):
"""Extend a bbox to the right page border.
Whenever there is no text to the right of a bbox, enlarge it up
to the right page border.
Args:
bboxes: (list[IRect]) bboxes to check
width: (int) page width
path_bboxes: (list[IRect]) bboxes with a background color
vert_bboxes: (list[IRect]) bboxes with vertical text
img_bboxes: (list[IRect]) bboxes of images
Returns:
Potentially modified bboxes.
"""
for i, bb in enumerate(bboxes):
# do not extend text with background color
if in_bbox(bb, path_bboxes):
continue
# do not extend text in images
if in_bbox(bb, img_bboxes):
continue
# temp extends bb to the right page border
temp = +bb
temp.x1 = width
# do not cut through colored background or images
if intersects_bboxes(temp, path_bboxes + vert_bboxes + img_bboxes):
continue
# also, do not intersect other text bboxes
check = can_extend(temp, bb, bboxes)
if check:
bboxes[i] = temp # replace with enlarged bbox
return [b for b in bboxes if b != None]
def clean_nblocks(nblocks):
"""Do some elementary cleaning."""
# 1. remove any duplicate blocks.
blen = len(nblocks)
if blen < 2:
return nblocks
start = blen - 1
for i in range(start, -1, -1):
bb1 = nblocks[i]
bb0 = nblocks[i - 1]
if bb0 == bb1:
del nblocks[i]
# 2. repair sequence in special cases:
# consecutive bboxes with almost same bottom value are sorted ascending
# by x-coordinate.
y1 = nblocks[0].y1 # first bottom coordinate
i0 = 0 # its index
i1 = -1 # index of last bbox with same bottom
# Iterate over bboxes, identifying segments with approx. same bottom value.
# Replace every segment by its sorted version.
for i in range(1, len(nblocks)):
b1 = nblocks[i]
if abs(b1.y1 - y1) > 10: # different bottom
if i1 > i0: # segment length > 1? Sort it!
nblocks[i0 : i1 + 1] = sorted(
nblocks[i0 : i1 + 1], key=lambda b: b.x0
)
y1 = b1.y1 # store new bottom value
i0 = i # store its start index
i1 = i # store current index
if i1 > i0: # segment waiting to be sorted
nblocks[i0 : i1 + 1] = sorted(nblocks[i0 : i1 + 1], key=lambda b: b.x0)
return nblocks
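    # Illustrative example: bottoms within 10 units count as one visual row,
    # so [IRect(300, 0, 400, 50), IRect(0, 5, 100, 55)] is reordered to
    # [IRect(0, 5, 100, 55), IRect(300, 0, 400, 50)] (ascending x0).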
# extract vector graphics
for p in paths:
path_rects.append(p["rect"].irect)
path_bboxes = path_rects
# sort path bboxes by ascending top, then left coordinates
path_bboxes.sort(key=lambda b: (b.y0, b.x0))
# bboxes of images on page, no need to sort them
for item in page.get_images():
img_bboxes.extend(page.get_image_rects(item[0]))
# blocks of text on page
blocks = page.get_text(
"dict",
flags=fitz.TEXTFLAGS_TEXT,
clip=clip,
)["blocks"]
# Make block rectangles, ignoring non-horizontal text
for b in blocks:
bbox = fitz.IRect(b["bbox"]) # bbox of the block
# ignore text written upon images
if no_image_text and in_bbox(bbox, img_bboxes):
continue
        # require the first line to be horizontal
line0 = b["lines"][0] # get first line
if line0["dir"] != (1, 0): # only accept horizontal text
vert_bboxes.append(bbox)
continue
srect = fitz.EMPTY_IRECT()
for line in b["lines"]:
lbbox = fitz.IRect(line["bbox"])
text = "".join([s["text"].strip() for s in line["spans"]])
if len(text) > 1:
srect |= lbbox
bbox = +srect
if not bbox.is_empty:
bboxes.append(bbox)
# Sort text bboxes by ascending background, top, then left coordinates
bboxes.sort(key=lambda k: (in_bbox(k, path_bboxes), k.y0, k.x0))
# Extend bboxes to the right where possible
bboxes = extend_right(
bboxes, int(page.rect.width), path_bboxes, vert_bboxes, img_bboxes
)
    # immediately return if no text was found
    if not bboxes:
return []
# --------------------------------------------------------------------
# Join bboxes to establish some column structure
# --------------------------------------------------------------------
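    # Strategy: greedily try to merge each remaining bbox into an existing
    # new block; a merge is rejected if the two boxes lie in different
    # columns, sit on different background colors, or the union would
    # swallow some other block. Unmergeable boxes start a new block.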
# the final block bboxes on page
nblocks = [bboxes[0]] # pre-fill with first bbox
bboxes = bboxes[1:] # remaining old bboxes
for i, bb in enumerate(bboxes): # iterate old bboxes
check = False # indicates unwanted joins
# check if bb can extend one of the new blocks
for j in range(len(nblocks)):
nbb = nblocks[j] # a new block
# never join across columns
            if bb is None or nbb.x1 < bb.x0 or bb.x1 < nbb.x0:
continue
# never join across different background colors
if in_bbox(nbb, path_bboxes) != in_bbox(bb, path_bboxes):
continue
temp = bb | nbb # temporary extension of new block
check = can_extend(temp, nbb, nblocks)
            if check:
break
if not check: # bb cannot be used to extend any of the new bboxes
nblocks.append(bb) # so add it to the list
j = len(nblocks) - 1 # index of it
temp = nblocks[j] # new bbox added
# check if some remaining bbox is contained in temp
check = can_extend(temp, bb, bboxes)
        if not check:
nblocks.append(bb)
else:
nblocks[j] = temp
bboxes[i] = None
# do some elementary cleaning
nblocks = clean_nblocks(nblocks)
# return identified text bboxes
return nblocks
if __name__ == "__main__":
"""Only for debugging purposes, currently.
Draw red borders around the returned text bboxes and insert
the bbox number.
Then save the file under the name "input-blocks.pdf".
"""
# get the file name
filename = sys.argv[1]
# check if footer margin is given
if len(sys.argv) > 2:
footer_margin = int(sys.argv[2])
    else:  # use default value
footer_margin = 50
# check if header margin is given
if len(sys.argv) > 3:
header_margin = int(sys.argv[3])
    else:  # use default value
header_margin = 50
# open document
doc = fitz.open(filename)
# iterate over the pages
for page in doc:
# remove any geometry issues
page.wrap_contents()
# get the text bboxes
bboxes = column_boxes(page, footer_margin=footer_margin, header_margin=header_margin)
# prepare a canvas to draw rectangles and text
shape = page.new_shape()
# iterate over the bboxes
for i, rect in enumerate(bboxes):
shape.draw_rect(rect) # draw a border
# write sequence number
shape.insert_text(rect.tl + (5, 15), str(i), color=fitz.pdfcolor["red"])
# finish drawing / text with color red
shape.finish(color=fitz.pdfcolor["red"])
shape.commit() # store to the page
# save document with text bboxes
doc.ez_save(filename.replace(".pdf", "-blocks.pdf"))