Commit 6ab9e0ab authored by wenjh's avatar wenjh
Browse files

Fix missing IS_HIP_EXTENSION


Signed-off-by: wenjh <wenjh@sugon.com>
parent a7830f2f
...@@ -40,6 +40,7 @@ from transformer_engine.pytorch.triton.per_token_group_quant import (per_token_q
tensorwise_dequantize_float,
tensorwise_dequantize_float_add)
from transformer_engine.pytorch.utils import get_device_compute_capability
from torch.utils.cpp_extension import IS_HIP_EXTENSION
from transformer_engine.pytorch.quantization import int8_simulation_fp8, int8_simulation_fp8_tensorwise
int8_simulation_fp8_tensorwise_batched = bool(int(os.getenv("NVTE_INT8_SIM_FP8_TENSORWISE_BATCHED", "0")))
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment