Unverified Commit 636d78a3 authored by Xiaomeng Zhao's avatar Xiaomeng Zhao Committed by GitHub
Browse files

Merge pull request #1593 from myhloli/dev

perf(magic_pdf): optimize batch ratio calculation for GPU
parents 6be32776 e74a2960
...@@ -19,7 +19,7 @@ from magic_pdf.model.sub_modules.ocr.paddleocr.ocr_utils import ( ...@@ -19,7 +19,7 @@ from magic_pdf.model.sub_modules.ocr.paddleocr.ocr_utils import (
get_adjusted_mfdetrec_res, get_ocr_result_list) get_adjusted_mfdetrec_res, get_ocr_result_list)
# from magic_pdf.operators.models import InferenceResult # from magic_pdf.operators.models import InferenceResult
YOLO_LAYOUT_BASE_BATCH_SIZE = 4 YOLO_LAYOUT_BASE_BATCH_SIZE = 1
MFD_BASE_BATCH_SIZE = 1 MFD_BASE_BATCH_SIZE = 1
MFR_BASE_BATCH_SIZE = 16 MFR_BASE_BATCH_SIZE = 16
...@@ -56,7 +56,8 @@ class BatchAnalyze: ...@@ -56,7 +56,8 @@ class BatchAnalyze:
layout_images.append(pil_img) layout_images.append(pil_img)
images_layout_res += self.model.layout_model.batch_predict( images_layout_res += self.model.layout_model.batch_predict(
layout_images, self.batch_ratio * YOLO_LAYOUT_BASE_BATCH_SIZE # layout_images, self.batch_ratio * YOLO_LAYOUT_BASE_BATCH_SIZE
layout_images, YOLO_LAYOUT_BASE_BATCH_SIZE
) )
for image_index, useful_list in modified_images: for image_index, useful_list in modified_images:
...@@ -78,7 +79,8 @@ class BatchAnalyze: ...@@ -78,7 +79,8 @@ class BatchAnalyze:
# 公式检测 # 公式检测
mfd_start_time = time.time() mfd_start_time = time.time()
images_mfd_res = self.model.mfd_model.batch_predict( images_mfd_res = self.model.mfd_model.batch_predict(
images, self.batch_ratio * MFD_BASE_BATCH_SIZE # images, self.batch_ratio * MFD_BASE_BATCH_SIZE
images, MFD_BASE_BATCH_SIZE
) )
logger.info( logger.info(
f'mfd time: {round(time.time() - mfd_start_time, 2)}, image num: {len(images)}' f'mfd time: {round(time.time() - mfd_start_time, 2)}, image num: {len(images)}'
......
...@@ -175,13 +175,9 @@ def doc_analyze( ...@@ -175,13 +175,9 @@ def doc_analyze(
npu_support = True npu_support = True
if torch.cuda.is_available() and device != 'cpu' or npu_support: if torch.cuda.is_available() and device != 'cpu' or npu_support:
gpu_memory = get_vram(device) gpu_memory = int(os.getenv("VIRTUAL_VRAM_SIZE", round(get_vram(device))))
if gpu_memory is not None and gpu_memory >= 7: if gpu_memory is not None and gpu_memory >= 8:
# batch_ratio = int((gpu_memory-3) // 1.5) batch_ratio = int(gpu_memory-5)
batch_ratio = 2
if 8 < gpu_memory:
batch_ratio = 4
if batch_ratio >= 1: if batch_ratio >= 1:
logger.info(f'gpu_memory: {gpu_memory} GB, batch_ratio: {batch_ratio}') logger.info(f'gpu_memory: {gpu_memory} GB, batch_ratio: {batch_ratio}')
batch_model = BatchAnalyze(model=custom_model, batch_ratio=batch_ratio) batch_model = BatchAnalyze(model=custom_model, batch_ratio=batch_ratio)
......
...@@ -89,7 +89,7 @@ class UnimernetModel(object): ...@@ -89,7 +89,7 @@ class UnimernetModel(object):
mf_image_list.append(bbox_img) mf_image_list.append(bbox_img)
dataset = MathDataset(mf_image_list, transform=self.mfr_transform) dataset = MathDataset(mf_image_list, transform=self.mfr_transform)
dataloader = DataLoader(dataset, batch_size=64, num_workers=0) dataloader = DataLoader(dataset, batch_size=32, num_workers=0)
mfr_res = [] mfr_res = []
for mf_img in dataloader: for mf_img in dataloader:
mf_img = mf_img.to(self.device) mf_img = mf_img.to(self.device)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment