Commit 052a4d72 authored by myhloli

perf(magic_pdf): optimize batch ratio calculation for GPU

- Update GPU memory check and batch ratio calculation logic
- Add support for virtual VRAM size environment variable
- Improve logging for GPU memory and batch ratio
parent 49d140c5
@@ -175,9 +175,9 @@ def doc_analyze(
             npu_support = True
 
     if torch.cuda.is_available() and device != 'cpu' or npu_support:
-        gpu_memory = get_vram(device)
-        if gpu_memory is not None and gpu_memory >= 7.5:
-            batch_ratio = int((gpu_memory-5) // 1)
+        gpu_memory = int(os.getenv("virtual_vram_size", round(get_vram(device))))
+        if gpu_memory is not None and gpu_memory >= 8:
+            batch_ratio = int(gpu_memory-5)
             if batch_ratio >= 1:
                 logger.info(f'gpu_memory: {gpu_memory} GB, batch_ratio: {batch_ratio}')
                 batch_model = BatchAnalyze(model=custom_model, batch_ratio=batch_ratio)
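For reference, a minimal sketch of the new behaviour: the VRAM size (in GB) is taken from the virtual_vram_size environment variable when set, otherwise from the detected device memory, and batching is only enabled when at least 8 GB is reported, with roughly 5 GB reserved for the base models. The get_vram stub and the compute_batch_ratio wrapper below are illustrative placeholders, not the magic_pdf implementation; the fallback value of 0 is an assumption standing in for "batch path skipped".

import os

def get_vram(device: str) -> float:
    # Placeholder for illustration only: in magic_pdf this queries the actual
    # device memory; here we just return a fixed value.
    return 11.2

def compute_batch_ratio(device: str = "cuda") -> int:
    # "virtual_vram_size" overrides the detected VRAM, e.g. on shared GPUs
    # or when part of the memory is reserved for other processes.
    gpu_memory = int(os.getenv("virtual_vram_size", round(get_vram(device))))
    if gpu_memory >= 8:
        # Roughly 5 GB is reserved; each remaining whole GB adds one to the ratio.
        return int(gpu_memory - 5)
    # Assumption: 0 here means batching is not enabled and the caller falls
    # back to per-page analysis, mirroring the skipped branch in the diff.
    return 0

if __name__ == "__main__":
    # With 11 GB detected (or virtual_vram_size=11), the batch ratio is 6.
    print(compute_batch_ratio())

As a usage example, running with virtual_vram_size=16 would force a batch ratio of 11 regardless of the memory actually detected on the device.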