Unverified Commit 9bb2d581 authored by Xiaomeng Zhao, committed by GitHub

Merge pull request #1661 from myhloli/dev

perf(model): optimize batch ratio for different GPU memory sizes
parents bd35e9d0 b1ac7afd
@@ -178,18 +178,17 @@ def doc_analyze(
         gpu_memory = int(os.getenv("VIRTUAL_VRAM_SIZE", round(get_vram(device))))
         if gpu_memory is not None and gpu_memory >= 8:
-            if 8 <= gpu_memory < 10:
-                batch_ratio = 2
-            elif 10 <= gpu_memory <= 12:
-                batch_ratio = 4
-            elif 12 < gpu_memory <= 20:
-                batch_ratio = 8
-            elif 20 < gpu_memory <= 32:
+            if gpu_memory >= 40:
+                batch_ratio = 32
+            elif gpu_memory >=20:
                 batch_ratio = 16
+            elif gpu_memory >= 16:
+                batch_ratio = 8
+            elif gpu_memory >= 10:
+                batch_ratio = 4
             else:
-                batch_ratio = 32
-            if batch_ratio >= 1:
-                logger.info(f'gpu_memory: {gpu_memory} GB, batch_ratio: {batch_ratio}')
-                batch_model = BatchAnalyze(model=custom_model, batch_ratio=batch_ratio)
-                batch_analyze = True
+                batch_ratio = 2
+            logger.info(f'gpu_memory: {gpu_memory} GB, batch_ratio: {batch_ratio}')
+            batch_model = BatchAnalyze(model=custom_model, batch_ratio=batch_ratio)
+            batch_analyze = True
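For readers skimming the change: the new VRAM-to-batch-ratio mapping can be sketched on its own as below. This is an illustrative snippet, not MinerU's actual API; the helper name `pick_batch_ratio` and the 24 GB fallback value are assumptions, while the thresholds and the `VIRTUAL_VRAM_SIZE` override mirror the diff above.

```python
import os

# Illustrative sketch only: `pick_batch_ratio` is a hypothetical helper, not part of MinerU.
# The thresholds reproduce the mapping introduced by this commit.
def pick_batch_ratio(gpu_memory_gb: int) -> int:
    if gpu_memory_gb >= 40:
        return 32
    elif gpu_memory_gb >= 20:
        return 16
    elif gpu_memory_gb >= 16:
        return 8
    elif gpu_memory_gb >= 10:
        return 4
    else:
        return 2  # doc_analyze only takes the batch path when gpu_memory >= 8

# Usage: respect the VIRTUAL_VRAM_SIZE override, falling back to an assumed 24 GB card.
gpu_memory = int(os.getenv("VIRTUAL_VRAM_SIZE", "24"))
print(pick_batch_ratio(gpu_memory))  # 24 GB -> batch_ratio 16
```

Larger cards now get proportionally larger batches (up to 32x at 40 GB and above), while the old open-coded range checks and the redundant `if batch_ratio >= 1:` guard are dropped, since every branch already assigns at least 2.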