Commit 9ce72d78 authored by myhloli

Merge remote-tracking branch 'origin/dev' into dev

parents 59435d88 27281c92
from .configuration_unimer_swin import UnimerSwinConfig
from .modeling_unimer_swin import UnimerSwinModel
from .image_processing_unimer_swin import UnimerSwinImageProcessor
__all__ = [
"UnimerSwinConfig",
"UnimerSwinModel",
"UnimerSwinImageProcessor",
]
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Donut Swin Transformer model configuration"""
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging
logger = logging.get_logger(__name__)
class UnimerSwinConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`UnimerSwinModel`]. It is used to instantiate a
Donut model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the Donut
[naver-clova-ix/donut-base](https://huggingface.co/naver-clova-ix/donut-base) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 4):
The size (resolution) of each patch.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
embed_dim (`int`, *optional*, defaults to 96):
Dimensionality of patch embedding.
depths (`list(int)`, *optional*, defaults to `[2, 2, 6, 2]`):
Depth of each layer in the Transformer encoder.
num_heads (`list(int)`, *optional*, defaults to `[3, 6, 12, 24]`):
Number of attention heads in each layer of the Transformer encoder.
window_size (`int`, *optional*, defaults to 7):
Size of windows.
mlp_ratio (`float`, *optional*, defaults to 4.0):
Ratio of MLP hidden dimensionality to embedding dimensionality.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether or not a learnable bias should be added to the queries, keys and values.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings and encoder.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
drop_path_rate (`float`, *optional*, defaults to 0.1):
Stochastic depth rate.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`,
`"selu"` and `"gelu_new"` are supported.
        use_absolute_embeddings (`bool`, *optional*, defaults to `False`):
            Whether or not to add absolute position embeddings to the patch embeddings.
        use_2d_embeddings (`bool`, *optional*, defaults to `False`):
            Whether or not to add separate row and column position embeddings to the patch embeddings.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
Example:
```python
>>> from transformers import UnimerSwinConfig, UnimerSwinModel
>>> # Initializing a Donut naver-clova-ix/donut-base style configuration
>>> configuration = UnimerSwinConfig()
>>> # Randomly initializing a model from the naver-clova-ix/donut-base style configuration
>>> model = UnimerSwinModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "unimer-swin"
attribute_map = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__(
self,
image_size=224,
patch_size=4,
num_channels=3,
embed_dim=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4.0,
qkv_bias=True,
hidden_dropout_prob=0.0,
attention_probs_dropout_prob=0.0,
drop_path_rate=0.1,
hidden_act="gelu",
        use_absolute_embeddings=False,
        use_2d_embeddings=False,
initializer_range=0.02,
layer_norm_eps=1e-5,
**kwargs,
):
super().__init__(**kwargs)
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.embed_dim = embed_dim
self.depths = depths
self.num_layers = len(depths)
self.num_heads = num_heads
self.window_size = window_size
self.mlp_ratio = mlp_ratio
self.qkv_bias = qkv_bias
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.drop_path_rate = drop_path_rate
self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        # required by UnimerSwinEmbeddings, which reads config.use_2d_embeddings
        self.use_2d_embeddings = use_2d_embeddings
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
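

# Illustrative sketch (not part of the original file): the derived attributes
# follow from `embed_dim` and `depths`, since channels double after each of the
# `len(depths) - 1` patch-merging stages.
def _demo_config_hidden_size():
    config = UnimerSwinConfig(embed_dim=96, depths=[2, 2, 6, 2])
    assert config.num_layers == 4
    assert config.hidden_size == 96 * 2 ** 3  # == 768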
from transformers.image_processing_utils import BaseImageProcessor
import numpy as np
import cv2
import albumentations as alb
from albumentations.pytorch import ToTensorV2
# TODO: drop the cv2 dependency if possible
class UnimerSwinImageProcessor(BaseImageProcessor):
    def __init__(
        self,
        image_size=(192, 672),
    ):
        super().__init__()
        self.input_size = [int(side) for side in image_size]
        assert len(self.input_size) == 2, "image_size must be (height, width)"
self.transform = alb.Compose(
[
alb.ToGray(always_apply=True),
alb.Normalize((0.7931, 0.7931, 0.7931), (0.1738, 0.1738, 0.1738)),
# alb.Sharpen()
ToTensorV2(),
]
)
def __call__(self, item):
image = self.prepare_input(item)
return self.transform(image=image)['image'][:1]
@staticmethod
def crop_margin_numpy(img: np.ndarray) -> np.ndarray:
"""Crop margins of image using NumPy operations"""
# Convert to grayscale if it's a color image
if len(img.shape) == 3 and img.shape[2] == 3:
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
else:
gray = img.copy()
# Normalize and threshold
if gray.max() == gray.min():
return img
normalized = (((gray - gray.min()) / (gray.max() - gray.min())) * 255).astype(np.uint8)
binary = 255 * (normalized < 200).astype(np.uint8)
# Find bounding box
coords = cv2.findNonZero(binary) # Find all non-zero points (text)
x, y, w, h = cv2.boundingRect(coords) # Find minimum spanning bounding box
# Return cropped image
return img[y:y + h, x:x + w]
def prepare_input(self, img, random_padding: bool = False):
"""
Convert PIL Image or numpy array to properly sized and padded image after:
- crop margins
- resize while maintaining aspect ratio
- pad to target size
"""
if img is None:
return None
try:
img = self.crop_margin_numpy(img)
except Exception:
# might throw an error for broken files
return None
if img.shape[0] == 0 or img.shape[1] == 0:
return None
# Resize while preserving aspect ratio
h, w = img.shape[:2]
scale = min(self.input_size[0] / h, self.input_size[1] / w)
new_h, new_w = int(h * scale), int(w * scale)
resized_img = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_AREA)
# Calculate padding
pad_width, pad_height = self._get_padding_values(new_w, new_h, random_padding)
# Create and apply padding
        channels = 3 if len(img.shape) == 3 else 1
        if channels == 1:
            # grayscale: add an explicit channel axis so the paste below matches shapes
            resized_img = resized_img[:, :, None]
        padded_img = np.full((self.input_size[0], self.input_size[1], channels), 255, dtype=np.uint8)
        padded_img[pad_height:pad_height + new_h, pad_width:pad_width + new_w] = resized_img
return padded_img
def _calculate_padding(self, new_w, new_h, random_padding):
"""Calculate padding values for PIL images"""
delta_width = self.input_size[1] - new_w
delta_height = self.input_size[0] - new_h
pad_width, pad_height = self._get_padding_values(new_w, new_h, random_padding)
return (
pad_width,
pad_height,
delta_width - pad_width,
delta_height - pad_height,
)
def _get_padding_values(self, new_w, new_h, random_padding):
"""Get padding values based on image dimensions and padding strategy"""
delta_width = self.input_size[1] - new_w
delta_height = self.input_size[0] - new_h
if random_padding:
pad_width = np.random.randint(low=0, high=delta_width + 1)
pad_height = np.random.randint(low=0, high=delta_height + 1)
else:
pad_width = delta_width // 2
pad_height = delta_height // 2
return pad_width, pad_height
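

# Illustrative usage sketch (not part of the original file; assumes
# albumentations and OpenCV are installed): the processor crops margins,
# resizes with the aspect ratio preserved, pads to (192, 672) on white, and
# returns a single-channel tensor.
def _demo_image_processor():
    processor = UnimerSwinImageProcessor()
    dummy = np.full((100, 400, 3), 255, dtype=np.uint8)
    dummy[20:80, 50:350] = 0  # a dark region so crop_margin_numpy finds a bbox
    tensor = processor(dummy)
    assert tuple(tensor.shape) == (1, 192, 672)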
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch UnimerSwin Transformer model.
This implementation is identical to a regular Swin Transformer, without final layer norm on top of the final hidden
states."""
import collections.abc
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from transformers.activations import ACT2FN
from transformers.modeling_utils import PreTrainedModel
from transformers.pytorch_utils import find_pruneable_heads_and_indices, meshgrid, prune_linear_layer
from transformers.utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
torch_int,
)
from .configuration_unimer_swin import UnimerSwinConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "UnimerSwinConfig"
# Base docstring
_CHECKPOINT_FOR_DOC = "https://huggingface.co/naver-clova-ix/donut-base"
_EXPECTED_OUTPUT_SHAPE = [1, 49, 768]
@dataclass
# Copied from transformers.models.swin.modeling_swin.SwinEncoderOutput with Swin->UnimerSwin
class UnimerSwinEncoderOutput(ModelOutput):
"""
UnimerSwin encoder's outputs, with potential hidden states and attentions.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
"""
last_hidden_state: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
@dataclass
# Copied from transformers.models.swin.modeling_swin.SwinModelOutput with Swin->UnimerSwin
class UnimerSwinModelOutput(ModelOutput):
"""
UnimerSwin model's outputs that also contains a pooling of the last hidden states.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed):
Average pooling of the last layer hidden-state.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
"""
last_hidden_state: torch.FloatTensor = None
pooler_output: Optional[torch.FloatTensor] = None
hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
# Copied from transformers.models.swin.modeling_swin.window_partition
def window_partition(input_feature, window_size):
"""
Partitions the given input into windows.
"""
batch_size, height, width, num_channels = input_feature.shape
input_feature = input_feature.view(
batch_size, height // window_size, window_size, width // window_size, window_size, num_channels
)
windows = input_feature.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels)
return windows
# Copied from transformers.models.swin.modeling_swin.window_reverse
def window_reverse(windows, window_size, height, width):
"""
Merges windows to produce higher resolution features.
"""
num_channels = windows.shape[-1]
windows = windows.view(-1, height // window_size, width // window_size, window_size, window_size, num_channels)
windows = windows.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, height, width, num_channels)
return windows
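

# Illustrative sketch (not part of the original file): window_partition and
# window_reverse are exact inverses when height and width are multiples of the
# window size.
def _demo_window_roundtrip():
    x = torch.randn(2, 14, 14, 96)  # (batch, height, width, channels)
    windows = window_partition(x, window_size=7)
    assert windows.shape == (2 * 2 * 2, 7, 7, 96)
    restored = window_reverse(windows, 7, height=14, width=14)
    assert torch.equal(restored, x)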
# Copied from transformers.models.swin.modeling_swin.SwinEmbeddings with Swin->UnimerSwin
class UnimerSwinEmbeddings(nn.Module):
"""
Construct the patch and position embeddings. Optionally, also the mask token.
"""
def __init__(self, config, use_mask_token=False):
super().__init__()
self.patch_embeddings = UnimerSwinPatchEmbeddings(config)
num_patches = self.patch_embeddings.num_patches
self.patch_grid = self.patch_embeddings.grid_size
self.mask_token = nn.Parameter(torch.zeros(1, 1, config.embed_dim)) if use_mask_token else None
if config.use_absolute_embeddings:
self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.embed_dim))
else:
self.position_embeddings = None
### code added. ###
if config.use_2d_embeddings:
self.row_embeddings = nn.Parameter(torch.zeros(1, self.patch_grid[0] + 1, config.embed_dim))
self.column_embeddings = nn.Parameter(torch.zeros(1, self.patch_grid[1] + 1, config.embed_dim))
else:
self.row_embeddings = None
self.column_embeddings = None
######
self.norm = nn.LayerNorm(config.embed_dim)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher
resolution images.
Source:
https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174
"""
num_patches = embeddings.shape[1] - 1
num_positions = self.position_embeddings.shape[1] - 1
if num_patches == num_positions and height == width:
return self.position_embeddings
class_pos_embed = self.position_embeddings[:, 0]
patch_pos_embed = self.position_embeddings[:, 1:]
dim = embeddings.shape[-1]
h0 = height // self.config.patch_size
w0 = width // self.config.patch_size
# we add a small number to avoid floating point error in the interpolation
# see discussion at https://github.com/facebookresearch/dino/issues/8
h0, w0 = h0 + 0.1, w0 + 0.1
patch_pos_embed = patch_pos_embed.reshape(1, int(math.sqrt(num_positions)), int(math.sqrt(num_positions)), dim)
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed,
scale_factor=(h0 / math.sqrt(num_positions), w0 / math.sqrt(num_positions)),
mode="bicubic",
align_corners=False,
)
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
def forward(
self,
pixel_values: Optional[torch.FloatTensor],
bool_masked_pos: Optional[torch.BoolTensor] = None,
interpolate_pos_encoding: bool = False,
) -> Tuple[torch.Tensor]:
_, num_channels, height, width = pixel_values.shape
embeddings, output_dimensions = self.patch_embeddings(pixel_values)
embeddings = self.norm(embeddings)
batch_size, seq_len, _ = embeddings.size()
if bool_masked_pos is not None:
mask_tokens = self.mask_token.expand(batch_size, seq_len, -1)
# replace the masked visual tokens by mask_tokens
mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
embeddings = embeddings * (1.0 - mask) + mask_tokens * mask
if self.position_embeddings is not None:
# if interpolate_pos_encoding:
# embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
# else:
# embeddings = embeddings + self.position_embeddings
embeddings = embeddings + self.position_embeddings[:, :seq_len, :] # code edited.
### code added. ###
if self.row_embeddings is not None and self.column_embeddings is not None:
# Repeat the x position embeddings across the y axis like 0, 1, 2, 3, 0, 1, 2, 3, ...
row_embeddings = self.row_embeddings[:, :output_dimensions[0], :].repeat_interleave(output_dimensions[1], dim=1)
column_embeddings = self.column_embeddings[:, :output_dimensions[1], :].repeat(1, output_dimensions[0], 1)
embeddings = embeddings + row_embeddings + column_embeddings
######
embeddings = self.dropout(embeddings)
return embeddings, output_dimensions
class StemLayer(nn.Module):
r""" Stem layer of InternImage
Args:
in_chans (int): number of input channels
out_chans (int): number of output channels
act_layer (str): activation layer
norm_layer (str): normalization layer
"""
def __init__(self, in_chans=3, out_chans=96, act_layer=nn.GELU, norm_layer='BN'):
super().__init__()
self.conv1 = nn.Conv2d(in_chans, out_chans // 2, kernel_size=3, stride=2, padding=1)
self.norm1 = self.build_norm_layer(out_chans // 2, norm_layer)
self.act = act_layer()
self.conv2 = nn.Conv2d(out_chans // 2, out_chans, kernel_size=3, stride=2, padding=1)
def build_norm_layer(self, dim, norm_layer):
layers = []
if norm_layer == 'BN':
layers.append(nn.BatchNorm2d(dim))
else:
raise NotImplementedError(f'build_norm_layer does not support {norm_layer}')
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.norm1(x)
x = self.act(x)
x = self.conv2(x)
return x
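

# Illustrative sketch (not part of the original file): the two stride-2 convs
# in the stem downsample by 4 overall, so it stands in for a patch_size=4
# projection while mixing in early convolutional features.
def _demo_stem_layer():
    stem = StemLayer(in_chans=3, out_chans=96)
    out = stem(torch.randn(1, 3, 192, 672))
    assert out.shape == (1, 96, 48, 168)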
# Copied from transformers.models.swin.modeling_swin.SwinPatchEmbeddings with Swin->UnimerSwin
class UnimerSwinPatchEmbeddings(nn.Module):
"""
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
`hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
Transformer.
"""
def __init__(self, config):
super().__init__()
image_size, patch_size = config.image_size, config.patch_size
num_channels, hidden_size = config.num_channels, config.embed_dim
image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.num_patches = num_patches
self.grid_size = (image_size[0] // patch_size[0], image_size[1] // patch_size[1])
### code edited. ###
# self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
self.projection = StemLayer(in_chans=num_channels, out_chans=hidden_size)
###
def maybe_pad(self, pixel_values, height, width):
if width % self.patch_size[1] != 0:
pad_values = (0, self.patch_size[1] - width % self.patch_size[1])
pixel_values = nn.functional.pad(pixel_values, pad_values)
if height % self.patch_size[0] != 0:
pad_values = (0, 0, 0, self.patch_size[0] - height % self.patch_size[0])
pixel_values = nn.functional.pad(pixel_values, pad_values)
return pixel_values
def forward(self, pixel_values: Optional[torch.FloatTensor]) -> Tuple[torch.Tensor, Tuple[int]]:
_, num_channels, height, width = pixel_values.shape
# pad the input to be divisible by self.patch_size, if needed
pixel_values = self.maybe_pad(pixel_values, height, width)
embeddings = self.projection(pixel_values)
_, _, height, width = embeddings.shape
output_dimensions = (height, width)
embeddings = embeddings.flatten(2).transpose(1, 2)
return embeddings, output_dimensions
# Copied from transformers.models.swin.modeling_swin.SwinPatchMerging
class UnimerSwinPatchMerging(nn.Module):
"""
Patch Merging Layer.
Args:
input_resolution (`Tuple[int]`):
Resolution of input feature.
dim (`int`):
Number of input channels.
norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):
Normalization layer class.
"""
def __init__(self, input_resolution: Tuple[int], dim: int, norm_layer: nn.Module = nn.LayerNorm) -> None:
super().__init__()
self.input_resolution = input_resolution
self.dim = dim
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
self.norm = norm_layer(4 * dim)
def maybe_pad(self, input_feature, height, width):
should_pad = (height % 2 == 1) or (width % 2 == 1)
if should_pad:
pad_values = (0, 0, 0, width % 2, 0, height % 2)
input_feature = nn.functional.pad(input_feature, pad_values)
return input_feature
def forward(self, input_feature: torch.Tensor, input_dimensions: Tuple[int, int]) -> torch.Tensor:
height, width = input_dimensions
# `dim` is height * width
batch_size, dim, num_channels = input_feature.shape
input_feature = input_feature.view(batch_size, height, width, num_channels)
        # pad input to be divisible by width and height, if needed
input_feature = self.maybe_pad(input_feature, height, width)
# [batch_size, height/2, width/2, num_channels]
input_feature_0 = input_feature[:, 0::2, 0::2, :]
# [batch_size, height/2, width/2, num_channels]
input_feature_1 = input_feature[:, 1::2, 0::2, :]
# [batch_size, height/2, width/2, num_channels]
input_feature_2 = input_feature[:, 0::2, 1::2, :]
# [batch_size, height/2, width/2, num_channels]
input_feature_3 = input_feature[:, 1::2, 1::2, :]
# batch_size height/2 width/2 4*num_channels
input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1)
input_feature = input_feature.view(batch_size, -1, 4 * num_channels) # batch_size height/2*width/2 4*C
input_feature = self.norm(input_feature)
input_feature = self.reduction(input_feature)
return input_feature
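

# Illustrative sketch (not part of the original file): patch merging halves
# each spatial dimension and maps the concatenated 4C channels down to 2C.
def _demo_patch_merging():
    merge = UnimerSwinPatchMerging(input_resolution=(8, 8), dim=96)
    out = merge(torch.randn(2, 8 * 8, 96), input_dimensions=(8, 8))
    assert out.shape == (2, 4 * 4, 2 * 96)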
# Copied from transformers.models.beit.modeling_beit.drop_path
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
"""
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
argument.
"""
if drop_prob == 0.0 or not training:
return input
keep_prob = 1 - drop_prob
shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
random_tensor.floor_() # binarize
output = input.div(keep_prob) * random_tensor
return output
# Copied from transformers.models.swin.modeling_swin.SwinDropPath
class UnimerSwinDropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob: Optional[float] = None) -> None:
super().__init__()
self.drop_prob = drop_prob
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
return drop_path(hidden_states, self.drop_prob, self.training)
def extra_repr(self) -> str:
return "p={}".format(self.drop_prob)
# Copied from transformers.models.swin.modeling_swin.SwinSelfAttention with Swin->UnimerSwin
class UnimerSwinSelfAttention(nn.Module):
def __init__(self, config, dim, num_heads, window_size):
super().__init__()
if dim % num_heads != 0:
raise ValueError(
f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})"
)
self.num_attention_heads = num_heads
self.attention_head_size = int(dim / num_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.window_size = (
window_size if isinstance(window_size, collections.abc.Iterable) else (window_size, window_size)
)
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1), num_heads)
)
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(meshgrid([coords_h, coords_w], indexing="ij"))
coords_flatten = torch.flatten(coords, 1)
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
relative_coords = relative_coords.permute(1, 2, 0).contiguous()
relative_coords[:, :, 0] += self.window_size[0] - 1
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1)
self.register_buffer("relative_position_index", relative_position_index)
self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
) -> Tuple[torch.Tensor]:
batch_size, dim, num_channels = hidden_states.shape
mixed_query_layer = self.query(hidden_states)
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
query_layer = self.transpose_for_scores(mixed_query_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)]
relative_position_bias = relative_position_bias.view(
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1
)
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
attention_scores = attention_scores + relative_position_bias.unsqueeze(0)
if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in UnimerSwinModel forward() function)
mask_shape = attention_mask.shape[0]
attention_scores = attention_scores.view(
batch_size // mask_shape, mask_shape, self.num_attention_heads, dim, dim
)
attention_scores = attention_scores + attention_mask.unsqueeze(1).unsqueeze(0)
attention_scores = attention_scores.view(-1, self.num_attention_heads, dim, dim)
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
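

# Illustrative sketch (not part of the original file): for a 7x7 window the
# relative position bias table has (2 * 7 - 1) ** 2 == 169 rows, and every
# entry of the 49x49 pairwise index falls inside that table.
def _demo_relative_position_bias():
    attn = UnimerSwinSelfAttention(UnimerSwinConfig(), dim=96, num_heads=3, window_size=7)
    assert attn.relative_position_bias_table.shape == (169, 3)
    assert attn.relative_position_index.shape == (49, 49)
    assert int(attn.relative_position_index.max()) == 168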
# Copied from transformers.models.swin.modeling_swin.SwinSelfOutput
class UnimerSwinSelfOutput(nn.Module):
def __init__(self, config, dim):
super().__init__()
self.dense = nn.Linear(dim, dim)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
# Copied from transformers.models.swin.modeling_swin.SwinAttention with Swin->UnimerSwin
class UnimerSwinAttention(nn.Module):
def __init__(self, config, dim, num_heads, window_size):
super().__init__()
self.self = UnimerSwinSelfAttention(config, dim, num_heads, window_size)
self.output = UnimerSwinSelfOutput(config, dim)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
) -> Tuple[torch.Tensor]:
self_outputs = self.self(hidden_states, attention_mask, head_mask, output_attentions)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
# Copied from transformers.models.swin.modeling_swin.SwinIntermediate
class UnimerSwinIntermediate(nn.Module):
def __init__(self, config, dim):
super().__init__()
self.dense = nn.Linear(dim, int(config.mlp_ratio * dim))
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Copied from transformers.models.swin.modeling_swin.SwinOutput
class UnimerSwinOutput(nn.Module):
def __init__(self, config, dim):
super().__init__()
self.dense = nn.Linear(int(config.mlp_ratio * dim), dim)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
class ConvEnhance(nn.Module):
"""Depth-wise convolution to get the positional information.
"""
def __init__(self, config, dim, k=3):
super(ConvEnhance, self).__init__()
self.proj = nn.Conv2d(dim,
dim,
(k,k),
(1,1),
(k // 2,k // 2),
groups=dim)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x, size: Tuple[int, int]):
B, N, C = x.shape
H, W = size
assert N == H * W
feat = x.transpose(1, 2).view(B, C, H, W)
feat = self.proj(feat)
feat = self.act_fn(feat)
feat = feat.flatten(2).transpose(1, 2)
x = x + feat
return x
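

# Illustrative sketch (not part of the original file): ConvEnhance preserves
# the (batch, seq_len, dim) shape; it folds the sequence back into a 2D map,
# applies a depth-wise 3x3 conv, and adds the result as a residual.
def _demo_conv_enhance():
    ce = ConvEnhance(UnimerSwinConfig(), dim=96, k=3)
    x = torch.randn(2, 12 * 21, 96)
    assert ce(x, size=(12, 21)).shape == x.shape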
# Copied from transformers.models.swin.modeling_swin.SwinLayer with Swin->UnimerSwin
class UnimerSwinLayer(nn.Module):
def __init__(self, config, dim, input_resolution, num_heads, shift_size=0):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.shift_size = shift_size
self.window_size = config.window_size
self.input_resolution = input_resolution
self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps)
self.ce = nn.ModuleList([ConvEnhance(config, dim=dim, k=3),
ConvEnhance(config, dim=dim, k=3)])
self.attention = UnimerSwinAttention(config, dim, num_heads, window_size=self.window_size)
self.drop_path = UnimerSwinDropPath(config.drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity()
self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps)
self.intermediate = UnimerSwinIntermediate(config, dim)
self.output = UnimerSwinOutput(config, dim)
def set_shift_and_window_size(self, input_resolution):
if min(input_resolution) <= self.window_size:
# if window size is larger than input resolution, we don't partition windows
self.shift_size = torch_int(0)
self.window_size = (
torch.min(torch.tensor(input_resolution)) if torch.jit.is_tracing() else min(input_resolution)
)
def get_attn_mask(self, height, width, dtype, device):
if self.shift_size > 0:
# calculate attention mask for SW-MSA
img_mask = torch.zeros((1, height, width, 1), dtype=dtype, device=device)
height_slices = (
slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None),
)
width_slices = (
slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None),
)
count = 0
for height_slice in height_slices:
for width_slice in width_slices:
img_mask[:, height_slice, width_slice, :] = count
count += 1
mask_windows = window_partition(img_mask, self.window_size)
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
else:
attn_mask = None
return attn_mask
def maybe_pad(self, hidden_states, height, width):
pad_right = (self.window_size - width % self.window_size) % self.window_size
pad_bottom = (self.window_size - height % self.window_size) % self.window_size
pad_values = (0, 0, 0, pad_right, 0, pad_bottom)
hidden_states = nn.functional.pad(hidden_states, pad_values)
return hidden_states, pad_values
def forward(
self,
hidden_states: torch.Tensor,
input_dimensions: Tuple[int, int],
head_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
always_partition: Optional[bool] = False,
) -> Tuple[torch.Tensor, torch.Tensor]:
if not always_partition:
self.set_shift_and_window_size(input_dimensions)
else:
pass
height, width = input_dimensions
batch_size, _, channels = hidden_states.size()
hidden_states = self.ce[0](hidden_states, input_dimensions)
shortcut = hidden_states
hidden_states = self.layernorm_before(hidden_states)
hidden_states = hidden_states.view(batch_size, height, width, channels)
# pad hidden_states to multiples of window size
hidden_states, pad_values = self.maybe_pad(hidden_states, height, width)
_, height_pad, width_pad, _ = hidden_states.shape
# cyclic shift
if self.shift_size > 0:
shifted_hidden_states = torch.roll(hidden_states, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
else:
shifted_hidden_states = hidden_states
# partition windows
hidden_states_windows = window_partition(shifted_hidden_states, self.window_size)
hidden_states_windows = hidden_states_windows.view(-1, self.window_size * self.window_size, channels)
attn_mask = self.get_attn_mask(
height_pad, width_pad, dtype=hidden_states.dtype, device=hidden_states_windows.device
)
attention_outputs = self.attention(
hidden_states_windows, attn_mask, head_mask, output_attentions=output_attentions
)
attention_output = attention_outputs[0]
attention_windows = attention_output.view(-1, self.window_size, self.window_size, channels)
shifted_windows = window_reverse(attention_windows, self.window_size, height_pad, width_pad)
# reverse cyclic shift
if self.shift_size > 0:
attention_windows = torch.roll(shifted_windows, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
attention_windows = shifted_windows
was_padded = pad_values[3] > 0 or pad_values[5] > 0
if was_padded:
attention_windows = attention_windows[:, :height, :width, :].contiguous()
attention_windows = attention_windows.view(batch_size, height * width, channels)
hidden_states = shortcut + self.drop_path(attention_windows)
hidden_states = self.ce[1](hidden_states, input_dimensions)
layer_output = self.layernorm_after(hidden_states)
layer_output = self.intermediate(layer_output)
layer_output = hidden_states + self.output(layer_output)
layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,)
return layer_outputs
# Copied from transformers.models.swin.modeling_swin.SwinStage with Swin->UnimerSwin
class UnimerSwinStage(nn.Module):
def __init__(self, config, dim, input_resolution, depth, num_heads, drop_path, downsample):
super().__init__()
self.config = config
self.dim = dim
self.blocks = nn.ModuleList(
[
UnimerSwinLayer(
config=config,
dim=dim,
input_resolution=input_resolution,
num_heads=num_heads,
shift_size=0,
)
for i in range(depth)
]
)
# patch merging layer
if downsample is not None:
self.downsample = downsample(input_resolution, dim=dim, norm_layer=nn.LayerNorm)
else:
self.downsample = None
self.pointing = False
def forward(
self,
hidden_states: torch.Tensor,
input_dimensions: Tuple[int, int],
head_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
always_partition: Optional[bool] = False,
) -> Tuple[torch.Tensor]:
height, width = input_dimensions
for i, layer_module in enumerate(self.blocks):
layer_head_mask = head_mask[i] if head_mask is not None else None
layer_outputs = layer_module(
hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition
)
hidden_states = layer_outputs[0]
hidden_states_before_downsampling = hidden_states
if self.downsample is not None:
height_downsampled, width_downsampled = (height + 1) // 2, (width + 1) // 2
output_dimensions = (height, width, height_downsampled, width_downsampled)
hidden_states = self.downsample(hidden_states_before_downsampling, input_dimensions)
else:
output_dimensions = (height, width, height, width)
stage_outputs = (hidden_states, hidden_states_before_downsampling, output_dimensions)
if output_attentions:
stage_outputs += layer_outputs[1:]
return stage_outputs
# Copied from transformers.models.swin.modeling_swin.SwinEncoder with Swin->UnimerSwin
class UnimerSwinEncoder(nn.Module):
def __init__(self, config, grid_size):
super().__init__()
self.num_layers = len(config.depths)
self.config = config
dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]
self.layers = nn.ModuleList(
[
UnimerSwinStage(
config=config,
dim=int(config.embed_dim * 2**i_layer),
input_resolution=(grid_size[0] // (2**i_layer), grid_size[1] // (2**i_layer)),
depth=config.depths[i_layer],
num_heads=config.num_heads[i_layer],
drop_path=dpr[sum(config.depths[:i_layer]) : sum(config.depths[: i_layer + 1])],
downsample=UnimerSwinPatchMerging if (i_layer < self.num_layers - 1) else None,
)
for i_layer in range(self.num_layers)
]
)
self.gradient_checkpointing = False
def forward(
self,
hidden_states: torch.Tensor,
input_dimensions: Tuple[int, int],
head_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
output_hidden_states: Optional[bool] = False,
output_hidden_states_before_downsampling: Optional[bool] = False,
always_partition: Optional[bool] = False,
return_dict: Optional[bool] = True,
) -> Union[Tuple, UnimerSwinEncoderOutput]:
all_hidden_states = () if output_hidden_states else None
all_reshaped_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
if output_hidden_states:
batch_size, _, hidden_size = hidden_states.shape
# rearrange b (h w) c -> b c h w
reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
all_hidden_states += (hidden_states,)
all_reshaped_hidden_states += (reshaped_hidden_state,)
for i, layer_module in enumerate(self.layers):
layer_head_mask = head_mask[i] if head_mask is not None else None
if self.gradient_checkpointing and self.training:
layer_outputs = self._gradient_checkpointing_func(
layer_module.__call__,
hidden_states,
input_dimensions,
layer_head_mask,
output_attentions,
always_partition,
)
else:
layer_outputs = layer_module(
hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition
)
hidden_states = layer_outputs[0]
hidden_states_before_downsampling = layer_outputs[1]
output_dimensions = layer_outputs[2]
input_dimensions = (output_dimensions[-2], output_dimensions[-1])
if output_hidden_states and output_hidden_states_before_downsampling:
batch_size, _, hidden_size = hidden_states_before_downsampling.shape
# rearrange b (h w) c -> b c h w
# here we use the original (not downsampled) height and width
reshaped_hidden_state = hidden_states_before_downsampling.view(
batch_size, *(output_dimensions[0], output_dimensions[1]), hidden_size
)
reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
all_hidden_states += (hidden_states_before_downsampling,)
all_reshaped_hidden_states += (reshaped_hidden_state,)
elif output_hidden_states and not output_hidden_states_before_downsampling:
batch_size, _, hidden_size = hidden_states.shape
# rearrange b (h w) c -> b c h w
reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
all_hidden_states += (hidden_states,)
all_reshaped_hidden_states += (reshaped_hidden_state,)
if output_attentions:
all_self_attentions += layer_outputs[3:]
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
return UnimerSwinEncoderOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
reshaped_hidden_states=all_reshaped_hidden_states,
)
# Copied from transformers.models.swin.modeling_swin.SwinPreTrainedModel with Swin->UnimerSwin
class UnimerSwinPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = UnimerSwinConfig
base_model_prefix = "unimer-swin"
main_input_name = "pixel_values"
supports_gradient_checkpointing = True
_no_split_modules = ["UnimerSwinStage"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
SWIN_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`UnimerSwinConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
SWIN_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`UnimerSwinImageProcessor.__call__`] for details.
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):
Whether to interpolate the pre-trained position encodings.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare UnimerSwin Model transformer outputting raw hidden-states without any specific head on top.",
SWIN_START_DOCSTRING,
)
class UnimerSwinModel(UnimerSwinPreTrainedModel):
def __init__(self, config, add_pooling_layer=True, use_mask_token=False):
super().__init__(config)
self.config = config
self.num_layers = len(config.depths)
self.num_features = int(config.embed_dim * 2 ** (self.num_layers - 1))
self.embeddings = UnimerSwinEmbeddings(config, use_mask_token=use_mask_token)
self.encoder = UnimerSwinEncoder(config, self.embeddings.patch_grid)
self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.patch_embeddings
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
        for layer, heads in heads_to_prune.items():
            self.encoder.layers[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(SWIN_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=UnimerSwinModelOutput,
config_class=_CONFIG_FOR_DOC,
modality="vision",
expected_output=_EXPECTED_OUTPUT_SHAPE,
)
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
bool_masked_pos: Optional[torch.BoolTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
interpolate_pos_encoding: bool = False,
return_dict: Optional[bool] = None,
) -> Union[Tuple, UnimerSwinModelOutput]:
r"""
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, len(self.config.depths))
embedding_output, input_dimensions = self.embeddings(
pixel_values, bool_masked_pos=bool_masked_pos, interpolate_pos_encoding=interpolate_pos_encoding
)
encoder_outputs = self.encoder(
embedding_output,
input_dimensions,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = None
if self.pooler is not None:
pooled_output = self.pooler(sequence_output.transpose(1, 2))
pooled_output = torch.flatten(pooled_output, 1)
if not return_dict:
output = (sequence_output, pooled_output) + encoder_outputs[1:]
return output
return UnimerSwinModelOutput(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
reshaped_hidden_states=encoder_outputs.reshaped_hidden_states,
)
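

# Illustrative end-to-end sketch (not part of the original file; uses randomly
# initialized weights): the stem downsamples by 4 and the three patch-merging
# stages halve twice more, so a 192x672 input yields (192 // 32) * (672 // 32)
# == 126 tokens with hidden size 768.
def _demo_model_forward():
    model = UnimerSwinModel(UnimerSwinConfig(image_size=[192, 672]))
    model.eval()
    with torch.no_grad():
        outputs = model(torch.randn(1, 3, 192, 672))
    assert outputs.last_hidden_state.shape == (1, 126, 768)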
@@ -5,12 +5,13 @@ from magic_pdf.config.constants import MODEL_NAME
 from magic_pdf.model.model_list import AtomicModel
 from magic_pdf.model.sub_modules.language_detection.yolov11.YOLOv11 import YOLOv11LangDetModel
 from magic_pdf.model.sub_modules.layout.doclayout_yolo.DocLayoutYOLO import DocLayoutYOLOModel
-from magic_pdf.model.sub_modules.layout.layoutlmv3.model_init import Layoutlmv3_Predictor
 from magic_pdf.model.sub_modules.mfd.yolov8.YOLOv8 import YOLOv8MFDModel
 from magic_pdf.model.sub_modules.mfr.unimernet.Unimernet import UnimernetModel
 
 try:
-    from magic_pdf_ascend_plugin.libs.license_verifier import load_license, LicenseFormatError, LicenseSignatureError, LicenseExpiredError
+    from magic_pdf_ascend_plugin.libs.license_verifier import (
+        LicenseExpiredError, LicenseFormatError, LicenseSignatureError,
+        load_license)
     from magic_pdf_ascend_plugin.model_plugin.ocr.paddleocr.ppocr_273_npu import ModifiedPaddleOCR
     from magic_pdf_ascend_plugin.model_plugin.table.rapidtable.rapid_table_npu import RapidTableModel
     license_key = load_license()
@@ -20,26 +21,26 @@ except Exception as e:
     if isinstance(e, ImportError):
         pass
     elif isinstance(e, LicenseFormatError):
-        logger.error("Ascend Plugin: Invalid license format. Please check the license file.")
+        logger.error('Ascend Plugin: Invalid license format. Please check the license file.')
     elif isinstance(e, LicenseSignatureError):
-        logger.error("Ascend Plugin: Invalid signature. The license may be tampered with.")
+        logger.error('Ascend Plugin: Invalid signature. The license may be tampered with.')
     elif isinstance(e, LicenseExpiredError):
-        logger.error("Ascend Plugin: License has expired. Please renew your license.")
+        logger.error('Ascend Plugin: License has expired. Please renew your license.')
     elif isinstance(e, FileNotFoundError):
-        logger.error("Ascend Plugin: Not found License file.")
+        logger.error('Ascend Plugin: Not found License file.')
     else:
-        logger.error(f"Ascend Plugin: {e}")
+        logger.error(f'Ascend Plugin: {e}')
     from magic_pdf.model.sub_modules.ocr.paddleocr.ppocr_273_mod import ModifiedPaddleOCR
     # from magic_pdf.model.sub_modules.ocr.paddleocr.ppocr_291_mod import ModifiedPaddleOCR
     from magic_pdf.model.sub_modules.table.rapidtable.rapid_table import RapidTableModel
-from magic_pdf.model.sub_modules.table.structeqtable.struct_eqtable import StructTableModel
-from magic_pdf.model.sub_modules.table.tablemaster.tablemaster_paddle import TableMasterPaddleModel
 
 def table_model_init(table_model_type, model_path, max_time, _device_='cpu', ocr_engine=None, table_sub_model_name=None):
     if table_model_type == MODEL_NAME.STRUCT_EQTABLE:
+        from magic_pdf.model.sub_modules.table.structeqtable.struct_eqtable import StructTableModel
         table_model = StructTableModel(model_path, max_new_tokens=2048, max_time=max_time)
     elif table_model_type == MODEL_NAME.TABLE_MASTER:
+        from magic_pdf.model.sub_modules.table.tablemaster.tablemaster_paddle import TableMasterPaddleModel
         config = {
             'model_dir': model_path,
             'device': _device_
@@ -55,7 +56,7 @@ def table_model_init(table_model_type, model_path, max_time, _device_='cpu', ocr_engine=None, table_sub_model_name=None):
 def mfd_model_init(weight, device='cpu'):
-    if str(device).startswith("npu"):
+    if str(device).startswith('npu'):
         device = torch.device(device)
     mfd_model = YOLOv8MFDModel(weight, device)
     return mfd_model
@@ -67,19 +68,20 @@ def mfr_model_init(weight_dir, cfg_path, device='cpu'):
 def layout_model_init(weight, config_file, device):
+    from magic_pdf.model.sub_modules.layout.layoutlmv3.model_init import Layoutlmv3_Predictor
     model = Layoutlmv3_Predictor(weight, config_file, device)
     return model
 
 def doclayout_yolo_model_init(weight, device='cpu'):
-    if str(device).startswith("npu"):
+    if str(device).startswith('npu'):
         device = torch.device(device)
     model = DocLayoutYOLOModel(weight, device)
     return model
 
 def langdetect_model_init(langdetect_model_weight, device='cpu'):
-    if str(device).startswith("npu"):
+    if str(device).startswith('npu'):
         device = torch.device(device)
     model = YOLOv11LangDetModel(langdetect_model_weight, device)
     return model
...
import time

import torch
-from PIL import Image
from loguru import logger
+import numpy as np

from magic_pdf.libs.clean_memory import clean_memory


-def crop_img(input_res, input_pil_img, crop_paste_x=0, crop_paste_y=0):
+def crop_img(input_res, input_np_img, crop_paste_x=0, crop_paste_y=0):
    crop_xmin, crop_ymin = int(input_res['poly'][0]), int(input_res['poly'][1])
    crop_xmax, crop_ymax = int(input_res['poly'][4]), int(input_res['poly'][5])
-    # Create a white background with an additional width and height of 50
+    # Calculate new dimensions
    crop_new_width = crop_xmax - crop_xmin + crop_paste_x * 2
    crop_new_height = crop_ymax - crop_ymin + crop_paste_y * 2
-    return_image = Image.new('RGB', (crop_new_width, crop_new_height), 'white')
-    # Crop image
-    crop_box = (crop_xmin, crop_ymin, crop_xmax, crop_ymax)
-    cropped_img = input_pil_img.crop(crop_box)
-    return_image.paste(cropped_img, (crop_paste_x, crop_paste_y))
-    return_list = [crop_paste_x, crop_paste_y, crop_xmin, crop_ymin, crop_xmax, crop_ymax, crop_new_width, crop_new_height]
+    # Create a white background array
+    return_image = np.ones((crop_new_height, crop_new_width, 3), dtype=np.uint8) * 255
+    # Crop the original image using numpy slicing
+    cropped_img = input_np_img[crop_ymin:crop_ymax, crop_xmin:crop_xmax]
+    # Paste the cropped image onto the white background
+    return_image[crop_paste_y:crop_paste_y + (crop_ymax - crop_ymin),
+                 crop_paste_x:crop_paste_x + (crop_xmax - crop_xmin)] = cropped_img
+    return_list = [crop_paste_x, crop_paste_y, crop_xmin, crop_ymin, crop_xmax, crop_ymax, crop_new_width,
+                   crop_new_height]
    return return_image, return_list
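crop_img now pads and crops with plain numpy arrays instead of PIL images, avoiding image-format conversions in the pipeline. A small usage sketch (the page array and poly values are made up for illustration):

import numpy as np

page = np.full((1000, 800, 3), 255, dtype=np.uint8)  # hypothetical page image
res = {'poly': [100, 200, 0, 0, 300, 400, 0, 0]}  # xmin/ymin at [0]/[1], xmax/ymax at [4]/[5]
padded, meta = crop_img(res, page, crop_paste_x=25, crop_paste_y=25)
# padded is the 200x200 crop centered on a 250x250 white canvas;
# meta records the paste offsets, the original bbox, and the new size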
...
@@ -5,6 +5,7 @@ import cv2
import numpy as np
import torch
from paddleocr import PaddleOCR
from ppocr.utils.logging import get_logger
from ppocr.utils.utility import alpha_to_color, binarize_img
...
@@ -2,6 +2,7 @@ import os
import cv2
import numpy as np
+from paddleocr import PaddleOCR
from ppstructure.table.predict_table import TableSystem
from ppstructure.utility import init_args
from PIL import Image
...
@@ -492,7 +492,7 @@ def insert_lines_into_block(block_bbox, line_height, page_w, page_h):
    else:
        return [[x0, y0, x1, y1]]


+# @measure_time
def sort_lines_by_model(fix_blocks, page_w, page_h, line_height):
    page_line_list = []
...
@@ -2,7 +2,7 @@ weights:
  layoutlmv3: Layout/LayoutLMv3/model_final.pth
  doclayout_yolo: Layout/YOLO/doclayout_yolo_docstructbench_imgsz1280_2501.pt
  yolo_v8_mfd: MFD/YOLO/yolo_v8_ft.pt
-  unimernet_small: MFR/unimernet_small_2501
+  unimernet_small: MFR/unimernet_hf_small_2503
  struct_eqtable: TabRec/StructEqTable
  tablemaster: TabRec/TableMaster
  rapid_table: TabRec/RapidTable
\ No newline at end of file
import os
import shutil
import tempfile
+from pathlib import Path

import click
import fitz
from loguru import logger
-from pathlib import Path

import magic_pdf.model as model_config
+from magic_pdf.data.batch_build_dataset import batch_build_dataset
from magic_pdf.data.data_reader_writer import FileBasedDataReader
+from magic_pdf.data.dataset import Dataset
from magic_pdf.libs.version import __version__
-from magic_pdf.tools.common import do_parse, parse_pdf_methods
+from magic_pdf.tools.common import batch_do_parse, do_parse, parse_pdf_methods
from magic_pdf.utils.office_to_pdf import convert_file_to_pdf

pdf_suffixes = ['.pdf']
@@ -94,30 +97,33 @@ def cli(path, output_dir, method, lang, debug_able, start_page_id, end_page_id):
    def read_fn(path: Path):
        if path.suffix in ms_office_suffixes:
            convert_file_to_pdf(str(path), temp_dir)
-            fn = os.path.join(temp_dir, f"{path.stem}.pdf")
+            fn = os.path.join(temp_dir, f'{path.stem}.pdf')
        elif path.suffix in image_suffixes:
            with open(str(path), 'rb') as f:
                bits = f.read()
            pdf_bytes = fitz.open(stream=bits).convert_to_pdf()
-            fn = os.path.join(temp_dir, f"{path.stem}.pdf")
+            fn = os.path.join(temp_dir, f'{path.stem}.pdf')
            with open(fn, 'wb') as f:
                f.write(pdf_bytes)
        elif path.suffix in pdf_suffixes:
            fn = str(path)
        else:
-            raise Exception(f"Unknown file suffix: {path.suffix}")
+            raise Exception(f'Unknown file suffix: {path.suffix}')

        disk_rw = FileBasedDataReader(os.path.dirname(fn))
        return disk_rw.read(os.path.basename(fn))

-    def parse_doc(doc_path: Path):
+    def parse_doc(doc_path: Path, dataset: Dataset | None = None):
        try:
            file_name = str(Path(doc_path).stem)
-            pdf_data = read_fn(doc_path)
+            if dataset is None:
+                pdf_data_or_dataset = read_fn(doc_path)
+            else:
+                pdf_data_or_dataset = dataset
            do_parse(
                output_dir,
                file_name,
-                pdf_data,
+                pdf_data_or_dataset,
                [],
                method,
                debug_able,
@@ -130,9 +136,12 @@ def cli(path, output_dir, method, lang, debug_able, start_page_id, end_page_id):
            logger.exception(e)

    if os.path.isdir(path):
+        doc_paths = []
        for doc_path in Path(path).glob('*'):
            if doc_path.suffix in pdf_suffixes + image_suffixes + ms_office_suffixes:
-                parse_doc(doc_path)
+                doc_paths.append(doc_path)
+        datasets = batch_build_dataset(doc_paths, 4, lang)
+        batch_do_parse(output_dir, [str(doc_path.stem) for doc_path in doc_paths], datasets, method,
+                       debug_able, lang=lang)
    else:
        parse_doc(Path(path))
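For directories the CLI now collects all supported files, builds their datasets with 4 workers, and parses the whole batch at once; parse_doc itself can also receive a prebuilt Dataset, so callers skip the read_fn round-trip. A hedged sketch (the file name is illustrative; parse_doc is the closure defined above):

ds = batch_build_dataset([Path('paper.pdf')], 1, lang)[0]  # single worker, one document
parse_doc(Path('paper.pdf'), dataset=ds)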
...
@@ -8,10 +8,10 @@ import magic_pdf.model as model_config
from magic_pdf.config.enums import SupportedPdfParseMethod
from magic_pdf.config.make_content_config import DropMode, MakeMode
from magic_pdf.data.data_reader_writer import FileBasedDataWriter
-from magic_pdf.data.dataset import PymuDocDataset
+from magic_pdf.data.dataset import Dataset, PymuDocDataset
from magic_pdf.libs.draw_bbox import draw_char_bbox
-from magic_pdf.model.doc_analyze_by_custom_model import doc_analyze
-from magic_pdf.operators.models import InferenceResult
+from magic_pdf.model.doc_analyze_by_custom_model import (batch_doc_analyze,
+                                                         doc_analyze)

# from io import BytesIO
# from pypdf import PdfReader, PdfWriter
@@ -67,10 +67,10 @@ def convert_pdf_bytes_to_bytes_by_pymupdf(pdf_bytes, start_page_id=0, end_page_i
    return output_bytes


-def do_parse(
+def _do_parse(
    output_dir,
    pdf_file_name,
-    pdf_bytes,
+    pdf_bytes_or_dataset,
    model_list,
    parse_method,
    debug_able,
@@ -92,16 +92,21 @@ def do_parse(
    formula_enable=None,
    table_enable=None,
):
+    from magic_pdf.operators.models import InferenceResult
    if debug_able:
        logger.warning('debug mode is on')
        f_draw_model_bbox = True
        f_draw_line_sort_bbox = True
        # f_draw_char_bbox = True

-    pdf_bytes = convert_pdf_bytes_to_bytes_by_pymupdf(
-        pdf_bytes, start_page_id, end_page_id
-    )
+    if isinstance(pdf_bytes_or_dataset, bytes):
+        pdf_bytes = convert_pdf_bytes_to_bytes_by_pymupdf(
+            pdf_bytes_or_dataset, start_page_id, end_page_id
+        )
+        ds = PymuDocDataset(pdf_bytes, lang=lang)
+    else:
+        ds = pdf_bytes_or_dataset
+        pdf_bytes = ds._raw_data

    local_image_dir, local_md_dir = prepare_env(output_dir, pdf_file_name, parse_method)
    image_writer, md_writer = FileBasedDataWriter(local_image_dir), FileBasedDataWriter(
@@ -109,8 +114,6 @@ def do_parse(
    )
    image_dir = str(os.path.basename(local_image_dir))

-    ds = PymuDocDataset(pdf_bytes, lang=lang)
-
    if len(model_list) == 0:
        if model_config.__use_inside_model__:
            if parse_method == 'auto':
@@ -241,5 +244,79 @@ def do_parse(
    logger.info(f'local output dir is {local_md_dir}')
+def do_parse(
+    output_dir,
+    pdf_file_name,
+    pdf_bytes_or_dataset,
+    model_list,
+    parse_method,
+    debug_able,
+    f_draw_span_bbox=True,
+    f_draw_layout_bbox=True,
+    f_dump_md=True,
+    f_dump_middle_json=True,
+    f_dump_model_json=True,
+    f_dump_orig_pdf=True,
+    f_dump_content_list=True,
+    f_make_md_mode=MakeMode.MM_MD,
+    f_draw_model_bbox=False,
+    f_draw_line_sort_bbox=False,
+    f_draw_char_bbox=False,
+    start_page_id=0,
+    end_page_id=None,
+    lang=None,
+    layout_model=None,
+    formula_enable=None,
+    table_enable=None,
+):
+    parallel_count = 1
+    if os.environ.get('MINERU_PARALLEL_INFERENCE_COUNT'):
+        parallel_count = int(os.environ['MINERU_PARALLEL_INFERENCE_COUNT'])
+    if parallel_count > 1:
+        if isinstance(pdf_bytes_or_dataset, bytes):
+            pdf_bytes = convert_pdf_bytes_to_bytes_by_pymupdf(
+                pdf_bytes_or_dataset, start_page_id, end_page_id
+            )
+            ds = PymuDocDataset(pdf_bytes, lang=lang)
+        else:
+            ds = pdf_bytes_or_dataset
+        batch_do_parse(output_dir, [pdf_file_name], [ds], parse_method, debug_able,
+                       f_draw_span_bbox=f_draw_span_bbox, f_draw_layout_bbox=f_draw_layout_bbox,
+                       f_dump_md=f_dump_md, f_dump_middle_json=f_dump_middle_json,
+                       f_dump_model_json=f_dump_model_json, f_dump_orig_pdf=f_dump_orig_pdf,
+                       f_dump_content_list=f_dump_content_list, f_make_md_mode=f_make_md_mode,
+                       f_draw_model_bbox=f_draw_model_bbox, f_draw_line_sort_bbox=f_draw_line_sort_bbox,
+                       f_draw_char_bbox=f_draw_char_bbox)
+    else:
+        _do_parse(output_dir, pdf_file_name, pdf_bytes_or_dataset, model_list, parse_method, debug_able,
+                  start_page_id=start_page_id, end_page_id=end_page_id, lang=lang,
+                  layout_model=layout_model, formula_enable=formula_enable, table_enable=table_enable,
+                  f_draw_span_bbox=f_draw_span_bbox, f_draw_layout_bbox=f_draw_layout_bbox,
+                  f_dump_md=f_dump_md, f_dump_middle_json=f_dump_middle_json,
+                  f_dump_model_json=f_dump_model_json, f_dump_orig_pdf=f_dump_orig_pdf,
+                  f_dump_content_list=f_dump_content_list, f_make_md_mode=f_make_md_mode,
+                  f_draw_model_bbox=f_draw_model_bbox, f_draw_line_sort_bbox=f_draw_line_sort_bbox,
+                  f_draw_char_bbox=f_draw_char_bbox)
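do_parse keeps its public signature but now consults MINERU_PARALLEL_INFERENCE_COUNT and routes through batch_do_parse when it is set above 1. A hedged usage sketch (the file name and output directory are illustrative):

import os

os.environ['MINERU_PARALLEL_INFERENCE_COUNT'] = '2'  # opt in to the batch path
with open('demo.pdf', 'rb') as f:
    do_parse('output', 'demo', f.read(), [], 'auto', False)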
+def batch_do_parse(
+    output_dir,
+    pdf_file_names: list[str],
+    pdf_bytes_or_datasets: list[bytes | Dataset],
+    parse_method,
+    debug_able,
+    f_draw_span_bbox=True,
+    f_draw_layout_bbox=True,
+    f_dump_md=True,
+    f_dump_middle_json=True,
+    f_dump_model_json=True,
+    f_dump_orig_pdf=True,
+    f_dump_content_list=True,
+    f_make_md_mode=MakeMode.MM_MD,
+    f_draw_model_bbox=False,
+    f_draw_line_sort_bbox=False,
+    f_draw_char_bbox=False,
+    lang=None,
+    layout_model=None,
+    formula_enable=None,
+    table_enable=None,
+):
+    dss = []
+    for v in pdf_bytes_or_datasets:
+        if isinstance(v, bytes):
+            dss.append(PymuDocDataset(v, lang=lang))
+        else:
+            dss.append(v)
+    infer_results = batch_doc_analyze(dss, lang=lang, layout_model=layout_model,
+                                      formula_enable=formula_enable, table_enable=table_enable)
+    for idx, infer_result in enumerate(infer_results):
+        _do_parse(output_dir, pdf_file_names[idx], dss[idx], infer_result.get_infer_res(), parse_method,
+                  debug_able, f_draw_span_bbox=f_draw_span_bbox, f_draw_layout_bbox=f_draw_layout_bbox,
+                  f_dump_md=f_dump_md, f_dump_middle_json=f_dump_middle_json,
+                  f_dump_model_json=f_dump_model_json, f_dump_orig_pdf=f_dump_orig_pdf,
+                  f_dump_content_list=f_dump_content_list, f_make_md_mode=f_make_md_mode,
+                  f_draw_model_bbox=f_draw_model_bbox, f_draw_line_sort_bbox=f_draw_line_sort_bbox,
+                  f_draw_char_bbox=f_draw_char_bbox)
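batch_do_parse accepts a mixed list of raw PDF bytes and prebuilt Dataset objects, normalizes everything to datasets, runs inference once over the whole batch, and then dumps each document through _do_parse. A sketch under the same illustrative assumptions:

pdf_bytes = open('a.pdf', 'rb').read()  # raw bytes input
ds = PymuDocDataset(open('b.pdf', 'rb').read(), lang='en')  # prebuilt dataset input
batch_do_parse('output', ['a', 'b'], [pdf_bytes, ds], 'auto', False, lang='en')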
parse_pdf_methods = click.Choice(['ocr', 'txt', 'auto'])
@@ -3,6 +3,7 @@ import os
from base64 import b64encode
from glob import glob
from io import StringIO
+import tempfile
from typing import Tuple, Union

import uvicorn
@@ -10,11 +11,12 @@ from fastapi import FastAPI, HTTPException, UploadFile
from fastapi.responses import JSONResponse
from loguru import logger

+from magic_pdf.data.read_api import read_local_images, read_local_office
import magic_pdf.model as model_config
from magic_pdf.config.enums import SupportedPdfParseMethod
from magic_pdf.data.data_reader_writer import DataWriter, FileBasedDataWriter
from magic_pdf.data.data_reader_writer.s3 import S3DataReader, S3DataWriter
-from magic_pdf.data.dataset import PymuDocDataset
+from magic_pdf.data.dataset import ImageDataset, PymuDocDataset
from magic_pdf.libs.config_reader import get_bucket_name, get_s3_config
from magic_pdf.model.doc_analyze_by_custom_model import doc_analyze
from magic_pdf.operators.models import InferenceResult
@@ -24,6 +26,9 @@ model_config.__use_inside_model__ = True
app = FastAPI()

+pdf_extensions = [".pdf"]
+office_extensions = [".ppt", ".pptx", ".doc", ".docx"]
+image_extensions = [".png", ".jpg"]
class MemoryDataWriter(DataWriter):
    def __init__(self):
@@ -46,8 +51,8 @@ class MemoryDataWriter(DataWriter):
def init_writers(
-    pdf_path: str = None,
-    pdf_file: UploadFile = None,
+    file_path: str = None,
+    file: UploadFile = None,
    output_path: str = None,
    output_image_path: str = None,
) -> Tuple[
@@ -59,19 +64,19 @@ def init_writers(
    Initialize writers based on path type

    Args:
-        pdf_path: PDF file path (local path or S3 path)
-        pdf_file: Uploaded PDF file object
+        file_path: file path (local path or S3 path)
+        file: Uploaded file object
        output_path: Output directory path
        output_image_path: Image output directory path

    Returns:
-        Tuple[writer, image_writer, pdf_bytes]: Returns initialized writer tuple and PDF
-        file content
+        Tuple[writer, image_writer, file_bytes]: Returns initialized writer tuple and file content
    """
-    if pdf_path:
-        is_s3_path = pdf_path.startswith("s3://")
+    file_extension: str = None
+    if file_path:
+        is_s3_path = file_path.startswith("s3://")
        if is_s3_path:
-            bucket = get_bucket_name(pdf_path)
+            bucket = get_bucket_name(file_path)
            ak, sk, endpoint = get_s3_config(bucket)
            writer = S3DataWriter(
@@ -84,25 +89,29 @@ def init_writers(
            temp_reader = S3DataReader(
                "", bucket=bucket, ak=ak, sk=sk, endpoint_url=endpoint
            )
-            pdf_bytes = temp_reader.read(pdf_path)
+            file_bytes = temp_reader.read(file_path)
+            file_extension = os.path.splitext(file_path)[1]
        else:
            writer = FileBasedDataWriter(output_path)
            image_writer = FileBasedDataWriter(output_image_path)
            os.makedirs(output_image_path, exist_ok=True)
-            with open(pdf_path, "rb") as f:
-                pdf_bytes = f.read()
+            with open(file_path, "rb") as f:
+                file_bytes = f.read()
+            file_extension = os.path.splitext(file_path)[1]
    else:
        # Handle the uploaded file
-        pdf_bytes = pdf_file.file.read()
+        file_bytes = file.file.read()
+        file_extension = os.path.splitext(file.filename)[1]
        writer = FileBasedDataWriter(output_path)
        image_writer = FileBasedDataWriter(output_image_path)
        os.makedirs(output_image_path, exist_ok=True)

-    return writer, image_writer, pdf_bytes
+    return writer, image_writer, file_bytes, file_extension


-def process_pdf(
-    pdf_bytes: bytes,
+def process_file(
+    file_bytes: bytes,
+    file_extension: str,
    parse_method: str,
    image_writer: Union[S3DataWriter, FileBasedDataWriter],
) -> Tuple[InferenceResult, PipeResult]:
@@ -110,14 +119,30 @@ def process_pdf(
    Process PDF file content

    Args:
-        pdf_bytes: Binary content of PDF file
+        file_bytes: Binary content of file
+        file_extension: The file extension, including the leading dot
        parse_method: Parse method ('ocr', 'txt', 'auto')
        image_writer: Image writer

    Returns:
        Tuple[InferenceResult, PipeResult]: Returns inference result and pipeline result
    """
-    ds = PymuDocDataset(pdf_bytes)
+    ds: Union[PymuDocDataset, ImageDataset]
+    if file_extension in pdf_extensions:
+        ds = PymuDocDataset(file_bytes)
+    elif file_extension in office_extensions:
+        # Office documents are converted through the office parser
+        temp_dir = tempfile.mkdtemp()
+        with open(os.path.join(temp_dir, f"temp_file{file_extension}"), "wb") as f:
+            f.write(file_bytes)
+        ds = read_local_office(temp_dir)[0]
+    elif file_extension in image_extensions:
+        # Images are parsed through OCR
+        temp_dir = tempfile.mkdtemp()
+        with open(os.path.join(temp_dir, f"temp_file{file_extension}"), "wb") as f:
+            f.write(file_bytes)
+        ds = read_local_images(temp_dir)[0]

    infer_result: InferenceResult = None
    pipe_result: PipeResult = None
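The extension lists defined near the top of the module drive this dispatch: PDFs go straight to PymuDocDataset, while office files and images are written to a temp directory and loaded through read_local_office / read_local_images. A hedged usage sketch (the upload name and writer path are illustrative):

image_writer = FileBasedDataWriter('output/images')  # any DataWriter works here
with open('slides.pptx', 'rb') as f:
    infer_result, pipe_result = process_file(f.read(), '.pptx', 'auto', image_writer)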
@@ -145,13 +170,13 @@ def encode_image(image_path: str) -> str:
@app.post(
-    "/pdf_parse",
+    "/file_parse",
    tags=["projects"],
-    summary="Parse PDF files (supports local files and S3)",
+    summary="Parse files (supports local files and S3)",
)
-async def pdf_parse(
-    pdf_file: UploadFile = None,
-    pdf_path: str = None,
+async def file_parse(
+    file: UploadFile = None,
+    file_path: str = None,
    parse_method: str = "auto",
    is_json_md_dump: bool = False,
    output_dir: str = "output",
@@ -165,10 +190,10 @@ async def pdf_parse(
    to the specified directory.

    Args:
-        pdf_file: The PDF file to be parsed. Must not be specified together with
-            `pdf_path`
-        pdf_path: The path to the PDF file to be parsed. Must not be specified together
-            with `pdf_file`
+        file: The PDF file to be parsed. Must not be specified together with
+            `file_path`
+        file_path: The path to the PDF file to be parsed. Must not be specified together
+            with `file`
        parse_method: Parsing method, can be auto, ocr, or txt. Default is auto. If
            results are not satisfactory, try ocr
        is_json_md_dump: Whether to write parsed data to .json and .md files. Default
@@ -181,31 +206,31 @@ async def pdf_parse(
        return_content_list: Whether to return parsed PDF content list. Default to False
    """
    try:
-        if (pdf_file is None and pdf_path is None) or (
-            pdf_file is not None and pdf_path is not None
+        if (file is None and file_path is None) or (
+            file is not None and file_path is not None
        ):
            return JSONResponse(
-                content={"error": "Must provide either pdf_file or pdf_path"},
+                content={"error": "Must provide either file or file_path"},
                status_code=400,
            )

        # Get PDF filename
-        pdf_name = os.path.basename(pdf_path if pdf_path else pdf_file.filename).split(
+        file_name = os.path.basename(file_path if file_path else file.filename).split(
            "."
        )[0]
-        output_path = f"{output_dir}/{pdf_name}"
+        output_path = f"{output_dir}/{file_name}"
        output_image_path = f"{output_path}/images"

        # Initialize readers/writers and get PDF content
-        writer, image_writer, pdf_bytes = init_writers(
-            pdf_path=pdf_path,
-            pdf_file=pdf_file,
+        writer, image_writer, file_bytes, file_extension = init_writers(
+            file_path=file_path,
+            file=file,
            output_path=output_path,
            output_image_path=output_image_path,
        )

        # Process PDF
-        infer_result, pipe_result = process_pdf(pdf_bytes, parse_method, image_writer)
+        infer_result, pipe_result = process_file(file_bytes, file_extension, parse_method, image_writer)

        # Use MemoryDataWriter to get results
        content_list_writer = MemoryDataWriter()
@@ -226,23 +251,23 @@ async def pdf_parse(
        # If results need to be saved
        if is_json_md_dump:
            writer.write_string(
-                f"{pdf_name}_content_list.json", content_list_writer.get_value()
+                f"{file_name}_content_list.json", content_list_writer.get_value()
            )
-            writer.write_string(f"{pdf_name}.md", md_content)
+            writer.write_string(f"{file_name}.md", md_content)
            writer.write_string(
-                f"{pdf_name}_middle.json", middle_json_writer.get_value()
+                f"{file_name}_middle.json", middle_json_writer.get_value()
            )
            writer.write_string(
-                f"{pdf_name}_model.json",
+                f"{file_name}_model.json",
                json.dumps(model_json, indent=4, ensure_ascii=False),
            )
            # Save visualization results
-            pipe_result.draw_layout(os.path.join(output_path, f"{pdf_name}_layout.pdf"))
-            pipe_result.draw_span(os.path.join(output_path, f"{pdf_name}_spans.pdf"))
+            pipe_result.draw_layout(os.path.join(output_path, f"{file_name}_layout.pdf"))
+            pipe_result.draw_span(os.path.join(output_path, f"{file_name}_spans.pdf"))
            pipe_result.draw_line_sort(
-                os.path.join(output_path, f"{pdf_name}_line_sort.pdf")
+                os.path.join(output_path, f"{file_name}_line_sort.pdf")
            )
-            infer_result.draw_model(os.path.join(output_path, f"{pdf_name}_model.pdf"))
+            infer_result.draw_model(os.path.join(output_path, f"{file_name}_model.pdf"))

        # Build return data
        data = {}
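With the endpoint renamed to /file_parse, office documents and images can be submitted the same way as PDFs. A hedged client sketch (host, port, and file name are assumptions, not part of the commit):

import requests

resp = requests.post(
    'http://127.0.0.1:8000/file_parse',  # assumed local dev address
    files={'file': open('report.docx', 'rb')},  # 'report.docx' is illustrative
    params={'parse_method': 'auto', 'is_json_md_dump': True},
)
print(resp.json())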
...
@@ -7,7 +7,8 @@ numpy>=1.21.6,<2.0.0
pydantic>=2.7.2
PyMuPDF>=1.24.9,<=1.24.14
scikit-learn>=1.0.2
-torch>=2.2.2
-transformers
+torch>=2.2.2,!=2.5.0,!=2.5.1,<=2.6.0
+torchvision
+transformers>=4.49.0
pdfminer.six==20231228
# The requirements.txt must ensure that only necessary external dependencies are introduced. If there are new dependencies to add, please contact the project administrator.
import json
+import shutil
import os

import requests
@@ -16,7 +17,7 @@ def download_and_modify_json(url, local_filename, modifications):
    if os.path.exists(local_filename):
        data = json.load(open(local_filename))
        config_version = data.get('config_version', '0.0.0')
-        if config_version < '1.1.1':
+        if config_version < '1.2.0':
            data = download_json(url)
    else:
        data = download_json(url)
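Note that config_version is compared as a plain string, which orders '1.1.1' < '1.2.0' correctly but would misorder e.g. '1.10.0' before '1.2.0'. A more robust check, as a sketch using the packaging library (not part of this commit):

from packaging import version

if version.parse(config_version) < version.parse('1.2.0'):
    data = download_json(url)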
@@ -32,12 +33,13 @@ def download_and_modify_json(url, local_filename, modifications):
if __name__ == '__main__':
    mineru_patterns = [
-        "models/Layout/LayoutLMv3/*",
+        # "models/Layout/LayoutLMv3/*",
        "models/Layout/YOLO/*",
        "models/MFD/YOLO/*",
-        "models/MFR/unimernet_small_2501/*",
-        "models/TabRec/TableMaster/*",
-        "models/TabRec/StructEqTable/*",
+        "models/MFR/unimernet_hf_small_2503/*",
+        "models/OCR/paddleocr/*",
+        # "models/TabRec/TableMaster/*",
+        # "models/TabRec/StructEqTable/*",
    ]
    model_dir = snapshot_download('opendatalab/PDF-Extract-Kit-1.0', allow_patterns=mineru_patterns)
    layoutreader_model_dir = snapshot_download('ppaanngggg/layoutreader')
@@ -45,6 +47,12 @@ if __name__ == '__main__':
    print(f'model_dir is: {model_dir}')
    print(f'layoutreader_model_dir is: {layoutreader_model_dir}')

+    paddleocr_model_dir = model_dir + '/OCR/paddleocr'
+    user_paddleocr_dir = os.path.expanduser('~/.paddleocr')
+    if os.path.exists(user_paddleocr_dir):
+        shutil.rmtree(user_paddleocr_dir)
+    shutil.copytree(paddleocr_model_dir, user_paddleocr_dir)
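Copying the downloaded weights into ~/.paddleocr mirrors PaddleOCR's default cache directory, so the runtime can find the models offline instead of fetching them on first use; any previously cached models are replaced wholesale.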
    json_url = 'https://gcore.jsdelivr.net/gh/opendatalab/MinerU@master/magic-pdf.template.json'
    config_file_name = 'magic-pdf.json'
    home_dir = os.path.expanduser('~')
...
import json
import os
+import shutil

import requests
from huggingface_hub import snapshot_download
@@ -16,7 +17,7 @@ def download_and_modify_json(url, local_filename, modifications):
    if os.path.exists(local_filename):
        data = json.load(open(local_filename))
        config_version = data.get('config_version', '0.0.0')
-        if config_version < '1.1.1':
+        if config_version < '1.2.0':
            data = download_json(url)
    else:
        data = download_json(url)
@@ -33,12 +34,13 @@ def download_and_modify_json(url, local_filename, modifications):
if __name__ == '__main__':
    mineru_patterns = [
-        "models/Layout/LayoutLMv3/*",
+        # "models/Layout/LayoutLMv3/*",
        "models/Layout/YOLO/*",
        "models/MFD/YOLO/*",
-        "models/MFR/unimernet_small_2501/*",
-        "models/TabRec/TableMaster/*",
-        "models/TabRec/StructEqTable/*",
+        "models/MFR/unimernet_hf_small_2503/*",
+        "models/OCR/paddleocr/*",
+        # "models/TabRec/TableMaster/*",
+        # "models/TabRec/StructEqTable/*",
    ]
    model_dir = snapshot_download('opendatalab/PDF-Extract-Kit-1.0', allow_patterns=mineru_patterns)
@@ -52,6 +54,12 @@ if __name__ == '__main__':
    print(f'model_dir is: {model_dir}')
    print(f'layoutreader_model_dir is: {layoutreader_model_dir}')

+    paddleocr_model_dir = model_dir + '/OCR/paddleocr'
+    user_paddleocr_dir = os.path.expanduser('~/.paddleocr')
+    if os.path.exists(user_paddleocr_dir):
+        shutil.rmtree(user_paddleocr_dir)
+    shutil.copytree(paddleocr_model_dir, user_paddleocr_dir)
    json_url = 'https://github.com/opendatalab/MinerU/raw/master/magic-pdf.template.json'
    config_file_name = 'magic-pdf.json'
    home_dir = os.path.expanduser('~')
...
@@ -36,29 +36,32 @@ if __name__ == '__main__':
"paddlepaddle==3.0.0b1;platform_system=='Linux'", "paddlepaddle==3.0.0b1;platform_system=='Linux'",
"paddlepaddle==2.6.1;platform_system=='Windows' or platform_system=='Darwin'", "paddlepaddle==2.6.1;platform_system=='Windows' or platform_system=='Darwin'",
], ],
"full": ["unimernet==0.2.3", # unimernet升级0.2.3,移除torchtext/eva-decord的依赖 "full": [
"torch>=2.2.2,<=2.3.1", # torch2.4.0及之后版本未测试,先卡住版本上限
"torchvision>=0.17.2,<=0.18.1", # torchvision 受torch版本约束
"matplotlib<=3.9.0;platform_system=='Windows'", # 3.9.1及之后不提供windows的预编译包,避免一些没有编译环境的windows设备安装失败 "matplotlib<=3.9.0;platform_system=='Windows'", # 3.9.1及之后不提供windows的预编译包,避免一些没有编译环境的windows设备安装失败
"matplotlib;platform_system=='Linux' or platform_system=='Darwin'", # linux 和 macos 不应限制matplotlib的最高版本,以避免无法更新导致的一些bug "matplotlib;platform_system=='Linux' or platform_system=='Darwin'", # linux 和 macos 不应限制matplotlib的最高版本,以避免无法更新导致的一些bug
"ultralytics>=8.3.48", # yolov8,公式检测 "ultralytics>=8.3.48", # yolov8,公式检测
"paddleocr==2.7.3", # 2.8.0及2.8.1版本与detectron2有冲突,需锁定2.7.3 "paddleocr==2.7.3", # 2.8.0及2.8.1版本与detectron2有冲突,需锁定2.7.3
"paddlepaddle==3.0.0rc1;platform_system=='Linux' or platform_system=='Darwin'", # 解决linux的段异常问题 "paddlepaddle==3.0.0rc1;platform_system=='Linux' or platform_system=='Darwin'", # 解决linux的段异常问题
"paddlepaddle==2.6.1;platform_system=='Windows'", # windows版本3.0.0效率下降,需锁定2.6.1 "paddlepaddle==2.6.1;platform_system=='Windows'", # windows版本3.0.0效率下降,需锁定2.6.1
"struct-eqtable==0.3.2", # 表格解析
"einops", # struct-eqtable依赖
"accelerate", # struct-eqtable依赖
"doclayout_yolo==0.0.2b1", # doclayout_yolo "doclayout_yolo==0.0.2b1", # doclayout_yolo
"rapidocr-paddle>=1.4.5,<2.0.0", # rapidocr-paddle "rapidocr-paddle>=1.4.5,<2.0.0", # rapidocr-paddle
"rapidocr_onnxruntime>=1.4.4,<2.0.0", "rapidocr_onnxruntime>=1.4.4,<2.0.0",
"rapid_table>=1.0.3,<2.0.0", # rapid_table "rapid_table>=1.0.3,<2.0.0", # rapid_table
"PyYAML", # yaml "PyYAML", # yaml
"ftfy"
"openai", # openai SDK "openai", # openai SDK
"detectron2"
], ],
"old_linux":[ "old_linux":[
"albumentations<=1.4.20", # 1.4.21引入的simsimd不支持2019年及更早的linux系统 "albumentations<=1.4.20", # 1.4.21引入的simsimd不支持2019年及更早的linux系统
] ],
"layoutlmv3":[
"detectron2"
],
"struct_eqtable":[
"struct-eqtable==0.3.2", # 表格解析
"einops", # struct-eqtable依赖
"accelerate", # struct-eqtable依赖
],
}, },
description="A practical tool for converting PDF to Markdown", # 简短描述 description="A practical tool for converting PDF to Markdown", # 简短描述
long_description=long_description, # 详细描述 long_description=long_description, # 详细描述
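With this split, detectron2 and the struct-eqtable stack are no longer pulled in by the full extra; users who need the LayoutLMv3 layout model or the StructEqTable table parser would opt in explicitly, e.g. pip install "magic-pdf[full,layoutlmv3]" (invocation illustrative, assuming the published package name).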
...
@@ -183,6 +183,30 @@
"created_at": "2025-02-26T09:23:25Z", "created_at": "2025-02-26T09:23:25Z",
"repoId": 765083837, "repoId": 765083837,
"pullRequestNo": 1785 "pullRequestNo": 1785
+        },
+        {
+            "name": "rschutski",
+            "id": 179498169,
+            "comment_id": 2705150371,
+            "created_at": "2025-03-06T23:16:30Z",
+            "repoId": 765083837,
+            "pullRequestNo": 1863
+        },
+        {
+            "name": "qbit-",
+            "id": 4794088,
+            "comment_id": 2705914730,
+            "created_at": "2025-03-07T09:09:13Z",
+            "repoId": 765083837,
+            "pullRequestNo": 1863
+        },
+        {
+            "name": "mauryaland",
+            "id": 22381129,
+            "comment_id": 2717322316,
+            "created_at": "2025-03-12T10:03:11Z",
+            "repoId": 765083837,
+            "pullRequestNo": 1906
        }
    ]
}
\ No newline at end of file