Commit a5e41791 authored by myhloli's avatar myhloli
Browse files

fix: adjust batch sizes and improve performance settings in various modules

parent f7f35189
......@@ -12,6 +12,7 @@ from ...utils.ocr_utils import get_adjusted_mfdetrec_res, get_ocr_result_list, O
YOLO_LAYOUT_BASE_BATCH_SIZE = 8
MFD_BASE_BATCH_SIZE = 1
MFR_BASE_BATCH_SIZE = 16
OCR_DET_BASE_BATCH_SIZE = 16
class BatchAnalyze:
......@@ -170,9 +171,9 @@ class BatchAnalyze:
batch_images.append(padded_img)
# 批处理检测
batch_size = min(len(batch_images), self.batch_ratio * 16) # 增加批处理大小
# logger.debug(f"OCR-det batch: {batch_size} images, target size: {target_h}x{target_w}")
batch_results = ocr_model.text_detector.batch_predict(batch_images, batch_size)
det_batch_size = min(len(batch_images), self.batch_ratio * OCR_DET_BASE_BATCH_SIZE) # 增加批处理大小
# logger.debug(f"OCR-det batch: {det_batch_size} images, target size: {target_h}x{target_w}")
batch_results = ocr_model.text_detector.batch_predict(batch_images, det_batch_size)
# 处理批处理结果
for i, (crop_info, (dt_boxes, elapse)) in enumerate(zip(group_crops, batch_results)):
......
......@@ -74,10 +74,10 @@ def doc_analyze(
table_enable=True,
):
"""
适当调大MIN_BATCH_INFERENCE_SIZE可以提高性能,可能会增加显存使用量
可通过环境变量MINERU_MIN_BATCH_INFERENCE_SIZE设置,默认值为128
适当调大MIN_BATCH_INFERENCE_SIZE可以提高性能,更大的 MIN_BATCH_INFERENCE_SIZE会消耗更多内存
可通过环境变量MINERU_MIN_BATCH_INFERENCE_SIZE设置,默认值为384
"""
min_batch_inference_size = int(os.environ.get('MINERU_MIN_BATCH_INFERENCE_SIZE', 128))
min_batch_inference_size = int(os.environ.get('MINERU_MIN_BATCH_INFERENCE_SIZE', 384))
# 收集所有页面信息
all_pages_info = [] # 存储(dataset_index, page_index, img, ocr, lang, width, height)
......
......@@ -115,7 +115,7 @@ class UnimernetModel(object):
mf_img = mf_img.to(dtype=self.model.dtype)
mf_img = mf_img.to(self.device)
with torch.no_grad():
output = self.model.generate({"image": mf_img})
output = self.model.generate({"image": mf_img}, batch_size=batch_size)
mfr_res.extend(output["fixed_str"])
# 更新进度条,每次增加batch_size,但要注意最后一个batch可能不足batch_size
......
......@@ -468,7 +468,7 @@ class UnimernetModel(VisionEncoderDecoderModel):
).loss
return {"loss": loss}
def generate(self, samples, do_sample: bool = False, temperature: float = 0.2, top_p: float = 0.95):
def generate(self, samples, do_sample: bool = False, temperature: float = 0.2, top_p: float = 0.95, batch_size=64):
pixel_values = samples["image"]
num_channels = pixel_values.shape[1]
if num_channels == 1:
......@@ -478,7 +478,13 @@ class UnimernetModel(VisionEncoderDecoderModel):
if do_sample:
kwargs["temperature"] = temperature
kwargs["top_p"] = top_p
if self.tokenizer.tokenizer.model_max_length > 1152:
if batch_size <= 32:
self.tokenizer.tokenizer.model_max_length = 1152 # 6g
else:
self.tokenizer.tokenizer.model_max_length = 1344 # 8g
outputs = super().generate(
pixel_values=pixel_values,
max_new_tokens=self.tokenizer.tokenizer.model_max_length, # required
......
......@@ -88,7 +88,7 @@ class PytorchPaddleOCR(TextSystem):
kwargs['det_model_path'] = det_model_path
kwargs['rec_model_path'] = rec_model_path
kwargs['rec_char_dict_path'] = os.path.join(root_dir, 'pytorchocr', 'utils', 'resources', 'dict', dict_file)
# kwargs['rec_batch_num'] = 8
kwargs['rec_batch_num'] = 16
kwargs['device'] = device
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment