Commit 104273cc authored by myhloli's avatar myhloli
Browse files

fix(vram): improve VRAM checking logic

- Update VRAM checking logic in app.py and model_utils.py
- Add None and type checks for VRAM values
- Adjust concurrency limit calculation in app.py
- Modify clean_vram function to handle cases with no VRAM information
parent b1fe9d4f
......@@ -43,7 +43,7 @@ def get_res_list_from_layout_res(layout_res):
def clean_vram(device, vram_threshold=8):
total_memory = get_vram(device)
if total_memory <= vram_threshold:
if total_memory and total_memory <= vram_threshold:
gc_start = time.time()
clean_memory()
gc_time = round(time.time() - gc_start, 2)
......@@ -54,4 +54,4 @@ def get_vram(device):
if torch.cuda.is_available() and device != 'cpu':
total_memory = torch.cuda.get_device_properties(device).total_memory / (1024 ** 3) # 将字节转换为 GB
return total_memory
return 0
\ No newline at end of file
return None
\ No newline at end of file
......@@ -187,8 +187,9 @@ def to_pdf(file_path):
def get_concurrency_limit(vram_threshold=7.5):
    """Derive a request-concurrency limit from available GPU VRAM.

    Args:
        vram_threshold: GB of VRAM budgeted per concurrent request
            (default 7.5).

    Returns:
        int: ``max(1, vram // vram_threshold)`` when VRAM is known;
        falls back to 1 when ``get_vram`` reports no usable value
        (e.g. returns None on CPU-only machines).
    """
    vram = get_vram(device=get_device())
    # isinstance(None, (int, float)) is False, so no separate None check
    # is needed; any non-numeric / missing VRAM value falls through to 1.
    if isinstance(vram, (int, float)):
        concurrency_limit = max(1, int(vram // vram_threshold))
    else:
        concurrency_limit = 1
    # logger.info(f'concurrency_limit: {concurrency_limit}')
    return concurrency_limit
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment