Unverified Commit 07f6ba72 authored by Xiaomeng Zhao's avatar Xiaomeng Zhao Committed by GitHub
Browse files

Merge pull request #3139 from opendatalab/release-2.1.2

Release 2.1.2
parents 2bf2337e 973443d6
...@@ -18,9 +18,9 @@ jobs: ...@@ -18,9 +18,9 @@ jobs:
steps: steps:
- name: "CLA Assistant" - name: "CLA Assistant"
if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have read the CLA Document and I hereby sign the CLA') || github.event_name == 'pull_request_target' if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have read the CLA Document and I hereby sign the CLA') || github.event_name == 'pull_request_target'
uses: contributor-assistant/github-action@v2.5.0 uses: contributor-assistant/github-action@v2.6.1
env: env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
# the below token should have repo scope and must be manually added by you in the repository's secret # the below token should have repo scope and must be manually added by you in the repository's secret
# This token is required only if you have configured to store the signatures in a remote repository/organization # This token is required only if you have configured to store the signatures in a remote repository/organization
PERSONAL_ACCESS_TOKEN: ${{ secrets.RELEASE_TOKEN }} PERSONAL_ACCESS_TOKEN: ${{ secrets.RELEASE_TOKEN }}
...@@ -28,7 +28,7 @@ jobs: ...@@ -28,7 +28,7 @@ jobs:
path-to-signatures: 'signatures/version1/cla.json' path-to-signatures: 'signatures/version1/cla.json'
path-to-document: 'https://github.com/opendatalab/MinerU/blob/master/MinerU_CLA.md' # e.g. a CLA or a DCO document path-to-document: 'https://github.com/opendatalab/MinerU/blob/master/MinerU_CLA.md' # e.g. a CLA or a DCO document
# branch should not be protected # branch should not be protected
branch: 'master' branch: 'cla'
allowlist: myhloli,dt-yy,Focusshang,renpengli01,icecraft,drunkpig,wangbinDL,qiangqiang199,GDDGCZ518,papayalove,conghui,quyuan,LollipopsAndWine,Sidney233 allowlist: myhloli,dt-yy,Focusshang,renpengli01,icecraft,drunkpig,wangbinDL,qiangqiang199,GDDGCZ518,papayalove,conghui,quyuan,LollipopsAndWine,Sidney233
# the followings are the optional inputs - If the optional inputs are not given, then default values will be taken # the followings are the optional inputs - If the optional inputs are not given, then default values will be taken
......
...@@ -43,7 +43,10 @@ ...@@ -43,7 +43,10 @@
</div> </div>
# Changelog # Changelog
- 2025/07/22 2.1.2 Released
- Bug Fixes
    - Fixed the issue of excessive GPU memory consumption during the `MFR` step in the `pipeline` backend under certain scenarios #2771
- Fixed the inaccurate matching between `image`/`table` and `caption`/`footnote` under certain conditions #3129
- 2025/07/16 2.1.1 Released - 2025/07/16 2.1.1 Released
- Bug fixes - Bug fixes
- Fixed text block content loss issue that could occur in certain `pipeline` scenarios #3005 - Fixed text block content loss issue that could occur in certain `pipeline` scenarios #3005
......
...@@ -43,6 +43,10 @@ ...@@ -43,6 +43,10 @@
</div> </div>
# 更新记录 # 更新记录
- 2025/07/22 2.1.2发布
- bug修复
- 修复`pipeline`后端中`MFR`步骤在某些情况下显存消耗过大的问题 #2771
    - 修复某些情况下`image`/`table`与`caption`/`footnote`匹配不准确的问题 #3129
- 2025/07/16 2.1.1发布 - 2025/07/16 2.1.1发布
- bug修复 - bug修复
- 修复`pipeline`在某些情况可能发生的文本块内容丢失问题 #3005 - 修复`pipeline`在某些情况可能发生的文本块内容丢失问题 #3005
......
...@@ -12,6 +12,7 @@ from ...utils.ocr_utils import get_adjusted_mfdetrec_res, get_ocr_result_list, O ...@@ -12,6 +12,7 @@ from ...utils.ocr_utils import get_adjusted_mfdetrec_res, get_ocr_result_list, O
YOLO_LAYOUT_BASE_BATCH_SIZE = 8 YOLO_LAYOUT_BASE_BATCH_SIZE = 8
MFD_BASE_BATCH_SIZE = 1 MFD_BASE_BATCH_SIZE = 1
MFR_BASE_BATCH_SIZE = 16 MFR_BASE_BATCH_SIZE = 16
OCR_DET_BASE_BATCH_SIZE = 16
class BatchAnalyze: class BatchAnalyze:
...@@ -170,9 +171,9 @@ class BatchAnalyze: ...@@ -170,9 +171,9 @@ class BatchAnalyze:
batch_images.append(padded_img) batch_images.append(padded_img)
# 批处理检测 # 批处理检测
batch_size = min(len(batch_images), self.batch_ratio * 16) # 增加批处理大小 det_batch_size = min(len(batch_images), self.batch_ratio * OCR_DET_BASE_BATCH_SIZE) # 增加批处理大小
# logger.debug(f"OCR-det batch: {batch_size} images, target size: {target_h}x{target_w}") # logger.debug(f"OCR-det batch: {det_batch_size} images, target size: {target_h}x{target_w}")
batch_results = ocr_model.text_detector.batch_predict(batch_images, batch_size) batch_results = ocr_model.text_detector.batch_predict(batch_images, det_batch_size)
# 处理批处理结果 # 处理批处理结果
for i, (crop_info, (dt_boxes, elapse)) in enumerate(zip(group_crops, batch_results)): for i, (crop_info, (dt_boxes, elapse)) in enumerate(zip(group_crops, batch_results)):
......
...@@ -74,10 +74,10 @@ def doc_analyze( ...@@ -74,10 +74,10 @@ def doc_analyze(
table_enable=True, table_enable=True,
): ):
""" """
适当调大MIN_BATCH_INFERENCE_SIZE可以提高性能,可能会增加显存使用量 适当调大MIN_BATCH_INFERENCE_SIZE可以提高性能,更大的 MIN_BATCH_INFERENCE_SIZE会消耗更多内存
可通过环境变量MINERU_MIN_BATCH_INFERENCE_SIZE设置,默认值为128 可通过环境变量MINERU_MIN_BATCH_INFERENCE_SIZE设置,默认值为384
""" """
min_batch_inference_size = int(os.environ.get('MINERU_MIN_BATCH_INFERENCE_SIZE', 128)) min_batch_inference_size = int(os.environ.get('MINERU_MIN_BATCH_INFERENCE_SIZE', 384))
# 收集所有页面信息 # 收集所有页面信息
all_pages_info = [] # 存储(dataset_index, page_index, img, ocr, lang, width, height) all_pages_info = [] # 存储(dataset_index, page_index, img, ocr, lang, width, height)
......
...@@ -275,7 +275,8 @@ class MagicModel: ...@@ -275,7 +275,8 @@ class MagicModel:
fst_idx, fst_kind, left_x, top_y = candidates[0] fst_idx, fst_kind, left_x, top_y = candidates[0]
candidates.sort(key=lambda x: (x[2] - left_x) ** 2 + (x[3] - top_y)**2) fst_bbox = subjects[fst_idx]['bbox'] if fst_kind == SUB_BIT_KIND else objects[fst_idx - OBJ_IDX_OFFSET]['bbox']
candidates.sort(key=lambda x: bbox_distance(fst_bbox, subjects[x[0]]['bbox']) if x[1] == SUB_BIT_KIND else bbox_distance(fst_bbox, objects[x[0] - OBJ_IDX_OFFSET]['bbox']))
nxt = None nxt = None
for i in range(1, len(candidates)): for i in range(1, len(candidates)):
...@@ -294,7 +295,8 @@ class MagicModel: ...@@ -294,7 +295,8 @@ class MagicModel:
pair_dis = bbox_distance(subjects[sub_idx]['bbox'], objects[obj_idx]['bbox']) pair_dis = bbox_distance(subjects[sub_idx]['bbox'], objects[obj_idx]['bbox'])
nearest_dis = float('inf') nearest_dis = float('inf')
for i in range(N): for i in range(N):
if i in seen_idx or i == sub_idx:continue # 取消原先算法中 1对1 匹配的偏置
# if i in seen_idx or i == sub_idx:continue
nearest_dis = min(nearest_dis, bbox_distance(subjects[i]['bbox'], objects[obj_idx]['bbox'])) nearest_dis = min(nearest_dis, bbox_distance(subjects[i]['bbox'], objects[obj_idx]['bbox']))
if pair_dis >= 3*nearest_dis: if pair_dis >= 3*nearest_dis:
......
...@@ -115,7 +115,7 @@ class UnimernetModel(object): ...@@ -115,7 +115,7 @@ class UnimernetModel(object):
mf_img = mf_img.to(dtype=self.model.dtype) mf_img = mf_img.to(dtype=self.model.dtype)
mf_img = mf_img.to(self.device) mf_img = mf_img.to(self.device)
with torch.no_grad(): with torch.no_grad():
output = self.model.generate({"image": mf_img}) output = self.model.generate({"image": mf_img}, batch_size=batch_size)
mfr_res.extend(output["fixed_str"]) mfr_res.extend(output["fixed_str"])
# 更新进度条,每次增加batch_size,但要注意最后一个batch可能不足batch_size # 更新进度条,每次增加batch_size,但要注意最后一个batch可能不足batch_size
......
...@@ -468,7 +468,7 @@ class UnimernetModel(VisionEncoderDecoderModel): ...@@ -468,7 +468,7 @@ class UnimernetModel(VisionEncoderDecoderModel):
).loss ).loss
return {"loss": loss} return {"loss": loss}
def generate(self, samples, do_sample: bool = False, temperature: float = 0.2, top_p: float = 0.95): def generate(self, samples, do_sample: bool = False, temperature: float = 0.2, top_p: float = 0.95, batch_size=64):
pixel_values = samples["image"] pixel_values = samples["image"]
num_channels = pixel_values.shape[1] num_channels = pixel_values.shape[1]
if num_channels == 1: if num_channels == 1:
...@@ -478,7 +478,13 @@ class UnimernetModel(VisionEncoderDecoderModel): ...@@ -478,7 +478,13 @@ class UnimernetModel(VisionEncoderDecoderModel):
if do_sample: if do_sample:
kwargs["temperature"] = temperature kwargs["temperature"] = temperature
kwargs["top_p"] = top_p kwargs["top_p"] = top_p
if self.tokenizer.tokenizer.model_max_length > 1152:
if batch_size <= 32:
self.tokenizer.tokenizer.model_max_length = 1152 # 6g
else:
self.tokenizer.tokenizer.model_max_length = 1344 # 8g
outputs = super().generate( outputs = super().generate(
pixel_values=pixel_values, pixel_values=pixel_values,
max_new_tokens=self.tokenizer.tokenizer.model_max_length, # required max_new_tokens=self.tokenizer.tokenizer.model_max_length, # required
......
...@@ -88,7 +88,7 @@ class PytorchPaddleOCR(TextSystem): ...@@ -88,7 +88,7 @@ class PytorchPaddleOCR(TextSystem):
kwargs['det_model_path'] = det_model_path kwargs['det_model_path'] = det_model_path
kwargs['rec_model_path'] = rec_model_path kwargs['rec_model_path'] = rec_model_path
kwargs['rec_char_dict_path'] = os.path.join(root_dir, 'pytorchocr', 'utils', 'resources', 'dict', dict_file) kwargs['rec_char_dict_path'] = os.path.join(root_dir, 'pytorchocr', 'utils', 'resources', 'dict', dict_file)
# kwargs['rec_batch_num'] = 8 kwargs['rec_batch_num'] = 16
kwargs['device'] = device kwargs['device'] = device
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment