Commit 9ce72d78 authored by myhloli

Merge remote-tracking branch 'origin/dev' into dev

parents 59435d88 27281c92
from .configuration_unimer_swin import UnimerSwinConfig
from .modeling_unimer_swin import UnimerSwinModel
from .image_processing_unimer_swin import UnimerSwinImageProcessor
__all__ = [
    "UnimerSwinConfig",
    "UnimerSwinModel",
    "UnimerSwinImageProcessor",
]
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Donut Swin Transformer model configuration"""
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging
logger = logging.get_logger(__name__)
class UnimerSwinConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`UnimerSwinModel`]. It is used to instantiate a
    UnimerSwin model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a configuration similar to that of the Donut
    [naver-clova-ix/donut-base](https://huggingface.co/naver-clova-ix/donut-base) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 4):
            The size (resolution) of each patch.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        embed_dim (`int`, *optional*, defaults to 96):
            Dimensionality of the patch embedding.
        depths (`list(int)`, *optional*, defaults to `[2, 2, 6, 2]`):
            Depth of each layer in the Transformer encoder.
        num_heads (`list(int)`, *optional*, defaults to `[3, 6, 12, 24]`):
            Number of attention heads in each layer of the Transformer encoder.
        window_size (`int`, *optional*, defaults to 7):
            Size of the attention windows.
        mlp_ratio (`float`, *optional*, defaults to 4.0):
            Ratio of MLP hidden dimensionality to embedding dimensionality.
        qkv_bias (`bool`, *optional*, defaults to `True`):
            Whether or not a learnable bias should be added to the queries, keys and values.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings and encoder.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        drop_path_rate (`float`, *optional*, defaults to 0.1):
            Stochastic depth rate.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`,
            `"selu"` and `"gelu_new"` are supported.
        use_absolute_embeddings (`bool`, *optional*, defaults to `False`):
            Whether or not to add absolute position embeddings to the patch embeddings.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.

    Example:

    ```python
    >>> from transformers import UnimerSwinConfig, UnimerSwinModel

    >>> # Initializing a Donut naver-clova-ix/donut-base style configuration
    >>> configuration = UnimerSwinConfig()

    >>> # Randomly initializing a model from the naver-clova-ix/donut-base style configuration
    >>> model = UnimerSwinModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "unimer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
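As a quick, hedged sanity check of the derived attributes with the defaults (the flat import below is an assumption; inside the package it would be the relative import shown in the `__init__.py` above):

```python
from configuration_unimer_swin import UnimerSwinConfig  # assumed flat import; the real module path may differ

config = UnimerSwinConfig()
# embed_dim * 2 ** (len(depths) - 1) = 96 * 2 ** 3
assert config.hidden_size == 768
assert config.num_layers == len(config.depths) == 4
```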
from transformers.image_processing_utils import BaseImageProcessor
import numpy as np
import cv2
import albumentations as alb
from albumentations.pytorch import ToTensorV2
# TODO: dereference cv2 if possible
class UnimerSwinImageProcessor(BaseImageProcessor):
    def __init__(
        self,
        image_size=(192, 672),
    ):
        super().__init__()  # initialize BaseImageProcessor state
        self.input_size = [int(_) for _ in image_size]
        assert len(self.input_size) == 2

        self.transform = alb.Compose(
            [
                alb.ToGray(always_apply=True),
                alb.Normalize((0.7931, 0.7931, 0.7931), (0.1738, 0.1738, 0.1738)),
                # alb.Sharpen()
                ToTensorV2(),
            ]
        )

    def __call__(self, item):
        image = self.prepare_input(item)
        # ToGray leaves three identical channels; keep just the first one
        return self.transform(image=image)['image'][:1]
    @staticmethod
    def crop_margin_numpy(img: np.ndarray) -> np.ndarray:
        """Crop margins of image using NumPy operations"""
        # Convert to grayscale if it's a color image
        if len(img.shape) == 3 and img.shape[2] == 3:
            gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        else:
            gray = img.copy()

        # Normalize and threshold; a uniform image has no margins to crop
        if gray.max() == gray.min():
            return img
        normalized = (((gray - gray.min()) / (gray.max() - gray.min())) * 255).astype(np.uint8)
        binary = 255 * (normalized < 200).astype(np.uint8)

        # Find bounding box
        coords = cv2.findNonZero(binary)  # Find all non-zero points (text)
        x, y, w, h = cv2.boundingRect(coords)  # Find minimum spanning bounding box

        # Return cropped image
        return img[y:y + h, x:x + w]
    def prepare_input(self, img, random_padding: bool = False):
        """
        Convert a PIL Image or numpy array to a properly sized and padded image by:
        - cropping the margins
        - resizing while maintaining the aspect ratio
        - padding to the target size
        """
        if img is None:
            return None

        try:
            img = self.crop_margin_numpy(img)
        except Exception:
            # might throw an error for broken files
            return None

        if img.shape[0] == 0 or img.shape[1] == 0:
            return None

        # Resize while preserving aspect ratio
        h, w = img.shape[:2]
        scale = min(self.input_size[0] / h, self.input_size[1] / w)
        new_h, new_w = int(h * scale), int(w * scale)
        resized_img = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_AREA)

        # Calculate padding
        pad_width, pad_height = self._get_padding_values(new_w, new_h, random_padding)

        # Create a white canvas and paste the resized image into it
        channels = 3 if len(img.shape) == 3 else 1
        padded_img = np.full((self.input_size[0], self.input_size[1], channels), 255, dtype=np.uint8)
        if resized_img.ndim == 2:
            # promote grayscale to (H, W, 1) so the slice assignment below broadcasts
            resized_img = resized_img[:, :, np.newaxis]
        padded_img[pad_height:pad_height + new_h, pad_width:pad_width + new_w] = resized_img

        return padded_img
    def _calculate_padding(self, new_w, new_h, random_padding):
        """Calculate padding values for PIL images"""
        delta_width = self.input_size[1] - new_w
        delta_height = self.input_size[0] - new_h
        pad_width, pad_height = self._get_padding_values(new_w, new_h, random_padding)

        return (
            pad_width,
            pad_height,
            delta_width - pad_width,
            delta_height - pad_height,
        )

    def _get_padding_values(self, new_w, new_h, random_padding):
        """Get padding values based on image dimensions and padding strategy"""
        delta_width = self.input_size[1] - new_w
        delta_height = self.input_size[0] - new_h
        if random_padding:
            pad_width = np.random.randint(low=0, high=delta_width + 1)
            pad_height = np.random.randint(low=0, high=delta_height + 1)
        else:
            pad_width = delta_width // 2
            pad_height = delta_height // 2
        return pad_width, pad_height
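A minimal usage sketch of the processor above (the module path and input file are placeholders, not part of the commit; the expected shape follows from `image_size=(192, 672)` and the single-channel slice in `__call__`):

```python
import cv2
from image_processing_unimer_swin import UnimerSwinImageProcessor  # assumed flat import

processor = UnimerSwinImageProcessor()
img = cv2.cvtColor(cv2.imread("formula.png"), cv2.COLOR_BGR2RGB)  # "formula.png" is a placeholder
tensor = processor(img)
print(tensor.shape)  # expected torch.Size([1, 192, 672]): one grayscale channel on a padded canvas
```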
@@ -5,12 +5,13 @@ from magic_pdf.config.constants import MODEL_NAME
 from magic_pdf.model.model_list import AtomicModel
 from magic_pdf.model.sub_modules.language_detection.yolov11.YOLOv11 import YOLOv11LangDetModel
 from magic_pdf.model.sub_modules.layout.doclayout_yolo.DocLayoutYOLO import DocLayoutYOLOModel
-from magic_pdf.model.sub_modules.layout.layoutlmv3.model_init import Layoutlmv3_Predictor
 from magic_pdf.model.sub_modules.mfd.yolov8.YOLOv8 import YOLOv8MFDModel
 from magic_pdf.model.sub_modules.mfr.unimernet.Unimernet import UnimernetModel

 try:
-    from magic_pdf_ascend_plugin.libs.license_verifier import load_license, LicenseFormatError, LicenseSignatureError, LicenseExpiredError
+    from magic_pdf_ascend_plugin.libs.license_verifier import (
+        LicenseExpiredError, LicenseFormatError, LicenseSignatureError,
+        load_license)
     from magic_pdf_ascend_plugin.model_plugin.ocr.paddleocr.ppocr_273_npu import ModifiedPaddleOCR
     from magic_pdf_ascend_plugin.model_plugin.table.rapidtable.rapid_table_npu import RapidTableModel
     license_key = load_license()
@@ -20,26 +21,26 @@ except Exception as e:
     if isinstance(e, ImportError):
         pass
     elif isinstance(e, LicenseFormatError):
-        logger.error("Ascend Plugin: Invalid license format. Please check the license file.")
+        logger.error('Ascend Plugin: Invalid license format. Please check the license file.')
     elif isinstance(e, LicenseSignatureError):
-        logger.error("Ascend Plugin: Invalid signature. The license may be tampered with.")
+        logger.error('Ascend Plugin: Invalid signature. The license may be tampered with.')
     elif isinstance(e, LicenseExpiredError):
-        logger.error("Ascend Plugin: License has expired. Please renew your license.")
+        logger.error('Ascend Plugin: License has expired. Please renew your license.')
     elif isinstance(e, FileNotFoundError):
-        logger.error("Ascend Plugin: Not found License file.")
+        logger.error('Ascend Plugin: Not found License file.')
     else:
-        logger.error(f"Ascend Plugin: {e}")
+        logger.error(f'Ascend Plugin: {e}')
     from magic_pdf.model.sub_modules.ocr.paddleocr.ppocr_273_mod import ModifiedPaddleOCR
     # from magic_pdf.model.sub_modules.ocr.paddleocr.ppocr_291_mod import ModifiedPaddleOCR
     from magic_pdf.model.sub_modules.table.rapidtable.rapid_table import RapidTableModel

-from magic_pdf.model.sub_modules.table.structeqtable.struct_eqtable import StructTableModel
-from magic_pdf.model.sub_modules.table.tablemaster.tablemaster_paddle import TableMasterPaddleModel

 def table_model_init(table_model_type, model_path, max_time, _device_='cpu', ocr_engine=None, table_sub_model_name=None):
     if table_model_type == MODEL_NAME.STRUCT_EQTABLE:
+        from magic_pdf.model.sub_modules.table.structeqtable.struct_eqtable import StructTableModel
         table_model = StructTableModel(model_path, max_new_tokens=2048, max_time=max_time)
     elif table_model_type == MODEL_NAME.TABLE_MASTER:
+        from magic_pdf.model.sub_modules.table.tablemaster.tablemaster_paddle import TableMasterPaddleModel
         config = {
             'model_dir': model_path,
             'device': _device_
@@ -55,7 +56,7 @@ def table_model_init(table_model_type, model_path, max_time, _device_='cpu', ocr
 def mfd_model_init(weight, device='cpu'):
-    if str(device).startswith("npu"):
+    if str(device).startswith('npu'):
         device = torch.device(device)
     mfd_model = YOLOv8MFDModel(weight, device)
     return mfd_model
@@ -67,19 +68,20 @@ def mfr_model_init(weight_dir, cfg_path, device='cpu'):
 def layout_model_init(weight, config_file, device):
+    from magic_pdf.model.sub_modules.layout.layoutlmv3.model_init import Layoutlmv3_Predictor
     model = Layoutlmv3_Predictor(weight, config_file, device)
     return model


 def doclayout_yolo_model_init(weight, device='cpu'):
-    if str(device).startswith("npu"):
+    if str(device).startswith('npu'):
         device = torch.device(device)
     model = DocLayoutYOLOModel(weight, device)
     return model


 def langdetect_model_init(langdetect_model_weight, device='cpu'):
-    if str(device).startswith("npu"):
+    if str(device).startswith('npu'):
         device = torch.device(device)
     model = YOLOv11LangDetModel(langdetect_model_weight, device)
     return model
...
 import time

 import torch
-from PIL import Image
 from loguru import logger
+import numpy as np

 from magic_pdf.libs.clean_memory import clean_memory


-def crop_img(input_res, input_pil_img, crop_paste_x=0, crop_paste_y=0):
+def crop_img(input_res, input_np_img, crop_paste_x=0, crop_paste_y=0):
     crop_xmin, crop_ymin = int(input_res['poly'][0]), int(input_res['poly'][1])
     crop_xmax, crop_ymax = int(input_res['poly'][4]), int(input_res['poly'][5])
-    # Create a white background with an additional width and height of 50
+    # Calculate new dimensions
     crop_new_width = crop_xmax - crop_xmin + crop_paste_x * 2
     crop_new_height = crop_ymax - crop_ymin + crop_paste_y * 2
-    return_image = Image.new('RGB', (crop_new_width, crop_new_height), 'white')

-    # Crop image
-    crop_box = (crop_xmin, crop_ymin, crop_xmax, crop_ymax)
-    cropped_img = input_pil_img.crop(crop_box)
-    return_image.paste(cropped_img, (crop_paste_x, crop_paste_y))
-    return_list = [crop_paste_x, crop_paste_y, crop_xmin, crop_ymin, crop_xmax, crop_ymax, crop_new_width, crop_new_height]
+    # Create a white background array
+    return_image = np.ones((crop_new_height, crop_new_width, 3), dtype=np.uint8) * 255
+
+    # Crop the original image using numpy slicing
+    cropped_img = input_np_img[crop_ymin:crop_ymax, crop_xmin:crop_xmax]
+
+    # Paste the cropped image onto the white background
+    return_image[crop_paste_y:crop_paste_y + (crop_ymax - crop_ymin),
+                 crop_paste_x:crop_paste_x + (crop_xmax - crop_xmin)] = cropped_img
+
+    return_list = [crop_paste_x, crop_paste_y, crop_xmin, crop_ymin, crop_xmax, crop_ymax, crop_new_width,
+                   crop_new_height]
     return return_image, return_list
...
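To make the numpy-based `crop_img` above concrete, a small hedged sketch; the `poly` layout `[x0, y0, x1, y0, x1, y1, x0, y1]` is inferred from the indices 0, 1, 4 and 5 used in the function, and the page array is a stand-in:

```python
import numpy as np

# a 100x50 region on a blank page, padded by 25 px of white on each side
input_res = {'poly': [10, 20, 110, 20, 110, 70, 10, 70]}
page = np.zeros((200, 300, 3), dtype=np.uint8)

cropped, meta = crop_img(input_res, page, crop_paste_x=25, crop_paste_y=25)
print(cropped.shape)  # (100, 150, 3): (70-20) + 2*25 high, (110-10) + 2*25 wide
print(meta[:2])       # paste offsets [25, 25]
```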
@@ -5,6 +5,7 @@ import cv2
 import numpy as np
 import torch
 from paddleocr import PaddleOCR
 from ppocr.utils.logging import get_logger
 from ppocr.utils.utility import alpha_to_color, binarize_img
...
@@ -2,6 +2,7 @@ import os
 import cv2
 import numpy as np
+from paddleocr import PaddleOCR
 from ppstructure.table.predict_table import TableSystem
 from ppstructure.utility import init_args
 from PIL import Image
...
@@ -492,7 +492,7 @@ def insert_lines_into_block(block_bbox, line_height, page_w, page_h):
     else:
         return [[x0, y0, x1, y1]]

-# @measure_time
+
 def sort_lines_by_model(fix_blocks, page_w, page_h, line_height):
     page_line_list = []
...
@@ -2,7 +2,7 @@ weights:
   layoutlmv3: Layout/LayoutLMv3/model_final.pth
   doclayout_yolo: Layout/YOLO/doclayout_yolo_docstructbench_imgsz1280_2501.pt
   yolo_v8_mfd: MFD/YOLO/yolo_v8_ft.pt
-  unimernet_small: MFR/unimernet_small_2501
+  unimernet_small: MFR/unimernet_hf_small_2503
   struct_eqtable: TabRec/StructEqTable
   tablemaster: TabRec/TableMaster
   rapid_table: TabRec/RapidTable
\ No newline at end of file
 import os
 import shutil
 import tempfile
+from pathlib import Path

 import click
 import fitz
 from loguru import logger
-from pathlib import Path

 import magic_pdf.model as model_config
+from magic_pdf.data.batch_build_dataset import batch_build_dataset
 from magic_pdf.data.data_reader_writer import FileBasedDataReader
+from magic_pdf.data.dataset import Dataset
 from magic_pdf.libs.version import __version__
-from magic_pdf.tools.common import do_parse, parse_pdf_methods
+from magic_pdf.tools.common import batch_do_parse, do_parse, parse_pdf_methods
 from magic_pdf.utils.office_to_pdf import convert_file_to_pdf

 pdf_suffixes = ['.pdf']
@@ -94,30 +97,33 @@ def cli(path, output_dir, method, lang, debug_able, start_page_id, end_page_id):
     def read_fn(path: Path):
         if path.suffix in ms_office_suffixes:
             convert_file_to_pdf(str(path), temp_dir)
-            fn = os.path.join(temp_dir, f"{path.stem}.pdf")
+            fn = os.path.join(temp_dir, f'{path.stem}.pdf')
         elif path.suffix in image_suffixes:
             with open(str(path), 'rb') as f:
                 bits = f.read()
             pdf_bytes = fitz.open(stream=bits).convert_to_pdf()
-            fn = os.path.join(temp_dir, f"{path.stem}.pdf")
+            fn = os.path.join(temp_dir, f'{path.stem}.pdf')
             with open(fn, 'wb') as f:
                 f.write(pdf_bytes)
         elif path.suffix in pdf_suffixes:
             fn = str(path)
         else:
-            raise Exception(f"Unknown file suffix: {path.suffix}")
+            raise Exception(f'Unknown file suffix: {path.suffix}')

         disk_rw = FileBasedDataReader(os.path.dirname(fn))
         return disk_rw.read(os.path.basename(fn))

-    def parse_doc(doc_path: Path):
+    def parse_doc(doc_path: Path, dataset: Dataset | None = None):
         try:
             file_name = str(Path(doc_path).stem)
-            pdf_data = read_fn(doc_path)
+            if dataset is None:
+                pdf_data_or_dataset = read_fn(doc_path)
+            else:
+                pdf_data_or_dataset = dataset
             do_parse(
                 output_dir,
                 file_name,
-                pdf_data,
+                pdf_data_or_dataset,
                 [],
                 method,
                 debug_able,
@@ -130,9 +136,12 @@ def cli(path, output_dir, method, lang, debug_able, start_page_id, end_page_id):
             logger.exception(e)

     if os.path.isdir(path):
+        doc_paths = []
         for doc_path in Path(path).glob('*'):
             if doc_path.suffix in pdf_suffixes + image_suffixes + ms_office_suffixes:
-                parse_doc(doc_path)
+                doc_paths.append(doc_path)
+        datasets = batch_build_dataset(doc_paths, 4, lang)
+        batch_do_parse(output_dir, [str(doc_path.stem) for doc_path in doc_paths], datasets, method, debug_able, lang=lang)
     else:
         parse_doc(Path(path))
...
@@ -8,10 +8,10 @@ import magic_pdf.model as model_config
 from magic_pdf.config.enums import SupportedPdfParseMethod
 from magic_pdf.config.make_content_config import DropMode, MakeMode
 from magic_pdf.data.data_reader_writer import FileBasedDataWriter
-from magic_pdf.data.dataset import PymuDocDataset
+from magic_pdf.data.dataset import Dataset, PymuDocDataset
 from magic_pdf.libs.draw_bbox import draw_char_bbox
-from magic_pdf.model.doc_analyze_by_custom_model import doc_analyze
-from magic_pdf.operators.models import InferenceResult
+from magic_pdf.model.doc_analyze_by_custom_model import (batch_doc_analyze,
+                                                         doc_analyze)

 # from io import BytesIO
 # from pypdf import PdfReader, PdfWriter
@@ -67,10 +67,10 @@ def convert_pdf_bytes_to_bytes_by_pymupdf(pdf_bytes, start_page_id=0, end_page_i
     return output_bytes


-def do_parse(
+def _do_parse(
     output_dir,
     pdf_file_name,
-    pdf_bytes,
+    pdf_bytes_or_dataset,
     model_list,
     parse_method,
     debug_able,
@@ -92,16 +92,21 @@ def do_parse(
     formula_enable=None,
     table_enable=None,
 ):
+    from magic_pdf.operators.models import InferenceResult
     if debug_able:
         logger.warning('debug mode is on')
         f_draw_model_bbox = True
         f_draw_line_sort_bbox = True
         # f_draw_char_bbox = True

-    pdf_bytes = convert_pdf_bytes_to_bytes_by_pymupdf(
-        pdf_bytes, start_page_id, end_page_id
-    )
+    if isinstance(pdf_bytes_or_dataset, bytes):
+        pdf_bytes = convert_pdf_bytes_to_bytes_by_pymupdf(
+            pdf_bytes_or_dataset, start_page_id, end_page_id
+        )
+        ds = PymuDocDataset(pdf_bytes, lang=lang)
+    else:
+        ds = pdf_bytes_or_dataset
+        pdf_bytes = ds._raw_data

     local_image_dir, local_md_dir = prepare_env(output_dir, pdf_file_name, parse_method)

     image_writer, md_writer = FileBasedDataWriter(local_image_dir), FileBasedDataWriter(
@@ -109,8 +114,6 @@ def do_parse(
     )
     image_dir = str(os.path.basename(local_image_dir))

-    ds = PymuDocDataset(pdf_bytes, lang=lang)
-
     if len(model_list) == 0:
         if model_config.__use_inside_model__:
             if parse_method == 'auto':
@@ -241,5 +244,79 @@ def do_parse(
     logger.info(f'local output dir is {local_md_dir}')
+def do_parse(
+    output_dir,
+    pdf_file_name,
+    pdf_bytes_or_dataset,
+    model_list,
+    parse_method,
+    debug_able,
+    f_draw_span_bbox=True,
+    f_draw_layout_bbox=True,
+    f_dump_md=True,
+    f_dump_middle_json=True,
+    f_dump_model_json=True,
+    f_dump_orig_pdf=True,
+    f_dump_content_list=True,
+    f_make_md_mode=MakeMode.MM_MD,
+    f_draw_model_bbox=False,
+    f_draw_line_sort_bbox=False,
+    f_draw_char_bbox=False,
+    start_page_id=0,
+    end_page_id=None,
+    lang=None,
+    layout_model=None,
+    formula_enable=None,
+    table_enable=None,
+):
+    parallel_count = 1
+    if os.environ.get('MINERU_PARALLEL_INFERENCE_COUNT'):
+        parallel_count = int(os.environ['MINERU_PARALLEL_INFERENCE_COUNT'])
+
+    if parallel_count > 1:
+        if isinstance(pdf_bytes_or_dataset, bytes):
+            pdf_bytes = convert_pdf_bytes_to_bytes_by_pymupdf(
+                pdf_bytes_or_dataset, start_page_id, end_page_id
+            )
+            ds = PymuDocDataset(pdf_bytes, lang=lang)
+        else:
+            ds = pdf_bytes_or_dataset
+        batch_do_parse(output_dir, [pdf_file_name], [ds], parse_method, debug_able, f_draw_span_bbox=f_draw_span_bbox, f_draw_layout_bbox=f_draw_layout_bbox, f_dump_md=f_dump_md, f_dump_middle_json=f_dump_middle_json, f_dump_model_json=f_dump_model_json, f_dump_orig_pdf=f_dump_orig_pdf, f_dump_content_list=f_dump_content_list, f_make_md_mode=f_make_md_mode, f_draw_model_bbox=f_draw_model_bbox, f_draw_line_sort_bbox=f_draw_line_sort_bbox, f_draw_char_bbox=f_draw_char_bbox)
+    else:
+        _do_parse(output_dir, pdf_file_name, pdf_bytes_or_dataset, model_list, parse_method, debug_able, start_page_id=start_page_id, end_page_id=end_page_id, lang=lang, layout_model=layout_model, formula_enable=formula_enable, table_enable=table_enable, f_draw_span_bbox=f_draw_span_bbox, f_draw_layout_bbox=f_draw_layout_bbox, f_dump_md=f_dump_md, f_dump_middle_json=f_dump_middle_json, f_dump_model_json=f_dump_model_json, f_dump_orig_pdf=f_dump_orig_pdf, f_dump_content_list=f_dump_content_list, f_make_md_mode=f_make_md_mode, f_draw_model_bbox=f_draw_model_bbox, f_draw_line_sort_bbox=f_draw_line_sort_bbox, f_draw_char_bbox=f_draw_char_bbox)
+
+
+def batch_do_parse(
+    output_dir,
+    pdf_file_names: list[str],
+    pdf_bytes_or_datasets: list[bytes | Dataset],
+    parse_method,
+    debug_able,
+    f_draw_span_bbox=True,
+    f_draw_layout_bbox=True,
+    f_dump_md=True,
+    f_dump_middle_json=True,
+    f_dump_model_json=True,
+    f_dump_orig_pdf=True,
+    f_dump_content_list=True,
+    f_make_md_mode=MakeMode.MM_MD,
+    f_draw_model_bbox=False,
+    f_draw_line_sort_bbox=False,
+    f_draw_char_bbox=False,
+    lang=None,
+    layout_model=None,
+    formula_enable=None,
+    table_enable=None,
+):
+    dss = []
+    for v in pdf_bytes_or_datasets:
+        if isinstance(v, bytes):
+            dss.append(PymuDocDataset(v, lang=lang))
+        else:
+            dss.append(v)
+    infer_results = batch_doc_analyze(dss, lang=lang, layout_model=layout_model, formula_enable=formula_enable, table_enable=table_enable)
+    for idx, infer_result in enumerate(infer_results):
+        _do_parse(output_dir, pdf_file_names[idx], dss[idx], infer_result.get_infer_res(), parse_method, debug_able, f_draw_span_bbox=f_draw_span_bbox, f_draw_layout_bbox=f_draw_layout_bbox, f_dump_md=f_dump_md, f_dump_middle_json=f_dump_middle_json, f_dump_model_json=f_dump_model_json, f_dump_orig_pdf=f_dump_orig_pdf, f_dump_content_list=f_dump_content_list, f_make_md_mode=f_make_md_mode, f_draw_model_bbox=f_draw_model_bbox, f_draw_line_sort_bbox=f_draw_line_sort_bbox, f_draw_char_bbox=f_draw_char_bbox)
 parse_pdf_methods = click.Choice(['ocr', 'txt', 'auto'])
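As a hedged usage sketch of the dispatch added above: setting `MINERU_PARALLEL_INFERENCE_COUNT` above 1 routes a plain `do_parse()` call through `batch_do_parse()`; otherwise it falls through to `_do_parse()` directly ('demo.pdf' and the output directory are placeholders):

```python
import os

os.environ['MINERU_PARALLEL_INFERENCE_COUNT'] = '2'  # any value > 1 selects the batch path

with open('demo.pdf', 'rb') as f:  # 'demo.pdf' is a placeholder
    do_parse('output', 'demo', f.read(), [], 'auto', False)
```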
@@ -3,6 +3,7 @@ import os
 from base64 import b64encode
 from glob import glob
 from io import StringIO
+import tempfile
 from typing import Tuple, Union

 import uvicorn
@@ -10,11 +11,12 @@ from fastapi import FastAPI, HTTPException, UploadFile
 from fastapi.responses import JSONResponse
 from loguru import logger
+from magic_pdf.data.read_api import read_local_images, read_local_office

 import magic_pdf.model as model_config
 from magic_pdf.config.enums import SupportedPdfParseMethod
 from magic_pdf.data.data_reader_writer import DataWriter, FileBasedDataWriter
 from magic_pdf.data.data_reader_writer.s3 import S3DataReader, S3DataWriter
-from magic_pdf.data.dataset import PymuDocDataset
+from magic_pdf.data.dataset import ImageDataset, PymuDocDataset
 from magic_pdf.libs.config_reader import get_bucket_name, get_s3_config
 from magic_pdf.model.doc_analyze_by_custom_model import doc_analyze
 from magic_pdf.operators.models import InferenceResult
@@ -24,6 +26,9 @@ model_config.__use_inside_model__ = True

 app = FastAPI()

+pdf_extensions = [".pdf"]
+office_extensions = [".ppt", ".pptx", ".doc", ".docx"]
+image_extensions = [".png", ".jpg"]
 class MemoryDataWriter(DataWriter):
     def __init__(self):
@@ -46,8 +51,8 @@ class MemoryDataWriter(DataWriter):


 def init_writers(
-    pdf_path: str = None,
-    pdf_file: UploadFile = None,
+    file_path: str = None,
+    file: UploadFile = None,
     output_path: str = None,
     output_image_path: str = None,
 ) -> Tuple[
@@ -59,19 +64,19 @@ def init_writers(
     Initialize writers based on path type

     Args:
-        pdf_path: PDF file path (local path or S3 path)
-        pdf_file: Uploaded PDF file object
+        file_path: file path (local path or S3 path)
+        file: Uploaded file object
         output_path: Output directory path
         output_image_path: Image output directory path

     Returns:
-        Tuple[writer, image_writer, pdf_bytes]: Returns initialized writer tuple and PDF
-            file content
+        Tuple[writer, image_writer, file_bytes]: Returns initialized writer tuple and file content
     """
-    if pdf_path:
-        is_s3_path = pdf_path.startswith("s3://")
+    file_extension: str = None
+    if file_path:
+        is_s3_path = file_path.startswith("s3://")
         if is_s3_path:
-            bucket = get_bucket_name(pdf_path)
+            bucket = get_bucket_name(file_path)
             ak, sk, endpoint = get_s3_config(bucket)

             writer = S3DataWriter(
@@ -84,25 +89,29 @@ def init_writers(
             temp_reader = S3DataReader(
                 "", bucket=bucket, ak=ak, sk=sk, endpoint_url=endpoint
             )
-            pdf_bytes = temp_reader.read(pdf_path)
+            file_bytes = temp_reader.read(file_path)
+            file_extension = os.path.splitext(file_path)[1]
         else:
             writer = FileBasedDataWriter(output_path)
             image_writer = FileBasedDataWriter(output_image_path)
             os.makedirs(output_image_path, exist_ok=True)
-            with open(pdf_path, "rb") as f:
-                pdf_bytes = f.read()
+            with open(file_path, "rb") as f:
+                file_bytes = f.read()
+            file_extension = os.path.splitext(file_path)[1]
     else:
         # Handle the uploaded file
-        pdf_bytes = pdf_file.file.read()
+        file_bytes = file.file.read()
+        file_extension = os.path.splitext(file.filename)[1]
         writer = FileBasedDataWriter(output_path)
         image_writer = FileBasedDataWriter(output_image_path)
         os.makedirs(output_image_path, exist_ok=True)

-    return writer, image_writer, pdf_bytes
+    return writer, image_writer, file_bytes, file_extension


-def process_pdf(
-    pdf_bytes: bytes,
+def process_file(
+    file_bytes: bytes,
+    file_extension: str,
     parse_method: str,
     image_writer: Union[S3DataWriter, FileBasedDataWriter],
 ) -> Tuple[InferenceResult, PipeResult]:
@@ -110,14 +119,30 @@ def process_pdf(
     Process PDF file content

     Args:
-        pdf_bytes: Binary content of PDF file
+        file_bytes: Binary content of the file
+        file_extension: file extension
         parse_method: Parse method ('ocr', 'txt', 'auto')
         image_writer: Image writer

     Returns:
         Tuple[InferenceResult, PipeResult]: Returns inference result and pipeline result
     """
-    ds = PymuDocDataset(pdf_bytes)
+    ds: Union[PymuDocDataset, ImageDataset]
+    if file_extension in pdf_extensions:
+        ds = PymuDocDataset(file_bytes)
+    elif file_extension in office_extensions:
+        # Office files need an office-based conversion before parsing
+        temp_dir = tempfile.mkdtemp()
+        with open(os.path.join(temp_dir, f"temp_file{file_extension}"), "wb") as f:
+            f.write(file_bytes)
+        ds = read_local_office(temp_dir)[0]
+    elif file_extension in image_extensions:
+        # Images are parsed via OCR
+        temp_dir = tempfile.mkdtemp()
+        with open(os.path.join(temp_dir, f"temp_file{file_extension}"), "wb") as f:
+            f.write(file_bytes)
+        ds = read_local_images(temp_dir)[0]

     infer_result: InferenceResult = None
     pipe_result: PipeResult = None
@@ -145,13 +170,13 @@ def encode_image(image_path: str) -> str:

 @app.post(
-    "/pdf_parse",
+    "/file_parse",
     tags=["projects"],
-    summary="Parse PDF files (supports local files and S3)",
+    summary="Parse files (supports local files and S3)",
 )
-async def pdf_parse(
-    pdf_file: UploadFile = None,
-    pdf_path: str = None,
+async def file_parse(
+    file: UploadFile = None,
+    file_path: str = None,
     parse_method: str = "auto",
     is_json_md_dump: bool = False,
     output_dir: str = "output",
@@ -165,10 +190,10 @@ async def pdf_parse(
     to the specified directory.

     Args:
-        pdf_file: The PDF file to be parsed. Must not be specified together with
-            `pdf_path`
-        pdf_path: The path to the PDF file to be parsed. Must not be specified together
-            with `pdf_file`
+        file: The PDF file to be parsed. Must not be specified together with
+            `file_path`
+        file_path: The path to the PDF file to be parsed. Must not be specified together
+            with `file`
         parse_method: Parsing method, can be auto, ocr, or txt. Default is auto. If
             results are not satisfactory, try ocr
         is_json_md_dump: Whether to write parsed data to .json and .md files. Default
@@ -181,31 +206,31 @@ async def pdf_parse(
         return_content_list: Whether to return parsed PDF content list. Default to False
     """
     try:
-        if (pdf_file is None and pdf_path is None) or (
-            pdf_file is not None and pdf_path is not None
+        if (file is None and file_path is None) or (
+            file is not None and file_path is not None
         ):
             return JSONResponse(
-                content={"error": "Must provide either pdf_file or pdf_path"},
+                content={"error": "Must provide either file or file_path"},
                 status_code=400,
             )

         # Get PDF filename
-        pdf_name = os.path.basename(pdf_path if pdf_path else pdf_file.filename).split(
+        file_name = os.path.basename(file_path if file_path else file.filename).split(
             "."
         )[0]
-        output_path = f"{output_dir}/{pdf_name}"
+        output_path = f"{output_dir}/{file_name}"
         output_image_path = f"{output_path}/images"

         # Initialize readers/writers and get PDF content
-        writer, image_writer, pdf_bytes = init_writers(
-            pdf_path=pdf_path,
-            pdf_file=pdf_file,
+        writer, image_writer, file_bytes, file_extension = init_writers(
+            file_path=file_path,
+            file=file,
             output_path=output_path,
             output_image_path=output_image_path,
         )

         # Process PDF
-        infer_result, pipe_result = process_pdf(pdf_bytes, parse_method, image_writer)
+        infer_result, pipe_result = process_file(file_bytes, file_extension, parse_method, image_writer)

         # Use MemoryDataWriter to get results
         content_list_writer = MemoryDataWriter()
@@ -226,23 +251,23 @@ async def pdf_parse(
         # If results need to be saved
         if is_json_md_dump:
             writer.write_string(
-                f"{pdf_name}_content_list.json", content_list_writer.get_value()
+                f"{file_name}_content_list.json", content_list_writer.get_value()
             )
-            writer.write_string(f"{pdf_name}.md", md_content)
+            writer.write_string(f"{file_name}.md", md_content)
             writer.write_string(
-                f"{pdf_name}_middle.json", middle_json_writer.get_value()
+                f"{file_name}_middle.json", middle_json_writer.get_value()
             )
             writer.write_string(
-                f"{pdf_name}_model.json",
+                f"{file_name}_model.json",
                 json.dumps(model_json, indent=4, ensure_ascii=False),
             )

         # Save visualization results
-        pipe_result.draw_layout(os.path.join(output_path, f"{pdf_name}_layout.pdf"))
-        pipe_result.draw_span(os.path.join(output_path, f"{pdf_name}_spans.pdf"))
+        pipe_result.draw_layout(os.path.join(output_path, f"{file_name}_layout.pdf"))
+        pipe_result.draw_span(os.path.join(output_path, f"{file_name}_spans.pdf"))
         pipe_result.draw_line_sort(
-            os.path.join(output_path, f"{pdf_name}_line_sort.pdf")
+            os.path.join(output_path, f"{file_name}_line_sort.pdf")
         )
-        infer_result.draw_model(os.path.join(output_path, f"{pdf_name}_model.pdf"))
+        infer_result.draw_model(os.path.join(output_path, f"{file_name}_model.pdf"))

         # Build return data
         data = {}
...
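A hedged client sketch for the renamed endpoint (host, port and the input file are placeholders; the parameter names mirror the handler above, and the response keys depend on the collapsed part of the diff):

```python
import requests

with open("paper.pdf", "rb") as f:  # "paper.pdf" is a placeholder
    resp = requests.post(
        "http://127.0.0.1:8000/file_parse",  # assumed local dev server
        files={"file": ("paper.pdf", f, "application/pdf")},
        params={"parse_method": "auto", "is_json_md_dump": "true"},
    )
print(resp.status_code, list(resp.json().keys()))
```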
@@ -7,7 +7,8 @@ numpy>=1.21.6,<2.0.0
 pydantic>=2.7.2
 PyMuPDF>=1.24.9,<=1.24.14
 scikit-learn>=1.0.2
-torch>=2.2.2
-transformers
+torch>=2.2.2,!=2.5.0,!=2.5.1,<=2.6.0
+torchvision
+transformers>=4.49.0
 pdfminer.six==20231228
 # The requirements.txt must ensure that only necessary external dependencies are introduced. If there are new dependencies to add, please contact the project administrator.
 import json
+import shutil
 import os

 import requests
@@ -16,7 +17,7 @@ def download_and_modify_json(url, local_filename, modifications):
     if os.path.exists(local_filename):
         data = json.load(open(local_filename))
         config_version = data.get('config_version', '0.0.0')
-        if config_version < '1.1.1':
+        if config_version < '1.2.0':
             data = download_json(url)
     else:
         data = download_json(url)
@@ -32,12 +33,13 @@ def download_and_modify_json(url, local_filename, modifications):
 if __name__ == '__main__':

     mineru_patterns = [
-        "models/Layout/LayoutLMv3/*",
+        # "models/Layout/LayoutLMv3/*",
         "models/Layout/YOLO/*",
         "models/MFD/YOLO/*",
-        "models/MFR/unimernet_small_2501/*",
-        "models/TabRec/TableMaster/*",
-        "models/TabRec/StructEqTable/*",
+        "models/MFR/unimernet_hf_small_2503/*",
+        "models/OCR/paddleocr/*",
+        # "models/TabRec/TableMaster/*",
+        # "models/TabRec/StructEqTable/*",
     ]
     model_dir = snapshot_download('opendatalab/PDF-Extract-Kit-1.0', allow_patterns=mineru_patterns)
     layoutreader_model_dir = snapshot_download('ppaanngggg/layoutreader')
@@ -45,6 +47,12 @@ if __name__ == '__main__':
     print(f'model_dir is: {model_dir}')
     print(f'layoutreader_model_dir is: {layoutreader_model_dir}')

+    paddleocr_model_dir = model_dir + '/OCR/paddleocr'
+    user_paddleocr_dir = os.path.expanduser('~/.paddleocr')
+    if os.path.exists(user_paddleocr_dir):
+        shutil.rmtree(user_paddleocr_dir)
+    shutil.copytree(paddleocr_model_dir, user_paddleocr_dir)
+
 json_url = 'https://gcore.jsdelivr.net/gh/opendatalab/MinerU@master/magic-pdf.template.json'
 config_file_name = 'magic-pdf.json'
 home_dir = os.path.expanduser('~')
...
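One caveat on the version gate in this hunk: comparing version strings lexicographically happens to work for '1.1.1' versus '1.2.0', but breaks once any component reaches two digits. A small sketch of the failure mode and a numeric alternative, using `packaging` (not a dependency of this script):

```python
from packaging.version import Version

print('1.10.0' < '1.2.0')                    # True  -- lexicographic comparison, wrong
print(Version('1.10.0') < Version('1.2.0'))  # False -- numeric comparison, correct
```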