"sgl-kernel/python/vscode:/vscode.git/clone" did not exist on "ca63f075b7d8cef11576bc2f8d3f2ebf32228604"
Commit 2684e775 authored by myhloli's avatar myhloli
Browse files

fix(npu): correct module name for NPU operations

- Update `clean_memory.py` to use `torch_npu.npu` instead of `torch.npu`
- Update `model_utils.py` to use `torch_npu.npu` instead of `torch.npu`
- Simplify NPU availability check and bfloat16 support in `pdf_parse_union_core_v2.py`
parent 2e87e649
......@@ -10,7 +10,6 @@ def clean_memory(device='cuda'):
torch.cuda.ipc_collect()
elif str(device).startswith("npu"):
import torch_npu
if torch.npu.is_available():
torch_npu.empty_cache()
torch_npu.ipc_collect()
if torch_npu.npu.is_available():
torch_npu.npu.empty_cache()
gc.collect()
\ No newline at end of file
......@@ -56,8 +56,8 @@ def get_vram(device):
return total_memory
elif str(device).startswith("npu"):
import torch_npu
if torch.npu.is_available():
total_memory = torch.npu.get_device_properties(device).total_memory / (1024 ** 3) # 转为 GB
if torch_npu.npu.is_available():
total_memory = torch_npu.npu.get_device_properties(device).total_memory / (1024 ** 3) # 转为 GB
return total_memory
else:
return None
\ No newline at end of file
......@@ -286,12 +286,9 @@ def model_init(model_name: str):
supports_bfloat16 = False
elif str(device).startswith("npu"):
import torch_npu
if torch.npu.is_available():
if torch_npu.npu.is_available():
device = torch.device('npu')
if torch.npu.is_bf16_supported():
supports_bfloat16 = True
else:
supports_bfloat16 = False
supports_bfloat16 = False
else:
device = torch.device('cpu')
supports_bfloat16 = False
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment