Commit b1ac7afd authored by myhloli

perf(model): optimize batch ratio for different GPU memory sizes

- Update batch ratio calculation logic to better utilize available GPU memory
- Improve logging for all GPU memory sizes
parent 29e7a948
@@ -178,18 +178,17 @@ def doc_analyze(
         gpu_memory = int(os.getenv("VIRTUAL_VRAM_SIZE", round(get_vram(device))))
         if gpu_memory is not None and gpu_memory >= 8:
-            if 8 <= gpu_memory < 10:
-                batch_ratio = 2
-            elif 10 <= gpu_memory <= 12:
-                batch_ratio = 4
-            elif 12 < gpu_memory <= 20:
-                batch_ratio = 8
-            elif 20 < gpu_memory <= 32:
+            if gpu_memory >= 40:
+                batch_ratio = 32
+            elif gpu_memory >= 20:
                 batch_ratio = 16
+            elif gpu_memory >= 16:
+                batch_ratio = 8
+            elif gpu_memory >= 10:
+                batch_ratio = 4
             else:
-                batch_ratio = 32
-            if batch_ratio >= 1:
-                logger.info(f'gpu_memory: {gpu_memory} GB, batch_ratio: {batch_ratio}')
-                batch_model = BatchAnalyze(model=custom_model, batch_ratio=batch_ratio)
-                batch_analyze = True
+                batch_ratio = 2
+            logger.info(f'gpu_memory: {gpu_memory} GB, batch_ratio: {batch_ratio}')
+            batch_model = BatchAnalyze(model=custom_model, batch_ratio=batch_ratio)
+            batch_analyze = True
...
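
For reference, the new branch structure reads as a simple threshold table from whole-GB VRAM to batch ratio. The sketch below restates it as a standalone helper; the name select_batch_ratio is illustrative only and does not exist in the repository. The caller above only reaches this logic when gpu_memory >= 8, so the final else branch effectively covers the 8-9 GB range.

# Sketch only: restates the thresholds from the diff above as a helper.
# select_batch_ratio is a hypothetical name, not part of MinerU.
def select_batch_ratio(gpu_memory: int) -> int:
    if gpu_memory >= 40:
        return 32
    elif gpu_memory >= 20:
        return 16
    elif gpu_memory >= 16:
        return 8
    elif gpu_memory >= 10:
        return 4
    else:
        return 2  # covers 8-9 GB; callers skip batching below 8 GB

if __name__ == "__main__":
    for vram in (8, 10, 16, 24, 48):
        print(f"{vram} GB -> batch_ratio {select_batch_ratio(vram)}")

As in the diff, the memory value fed into this mapping can be overridden via the VIRTUAL_VRAM_SIZE environment variable, which takes precedence over the detected VRAM from get_vram(device).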