Unverified Commit 364cb718 authored by Andrei Ivanov, committed by GitHub

Skip test when atomic operations are not supported on GPU. (#7117)

parent 938deec8
@@ -407,22 +407,22 @@ def test_segment_mm(idtype, feat_size, dtype, tol):
 def test_gather_mm_idx_b(feat_size, dtype, tol):
     if F._default_context_str == "cpu" and dtype == torch.float16:
         pytest.skip("float16 is not supported on CPU.")
-    if (
-        F._default_context_str == "gpu"
-        and dtype == torch.bfloat16
-        and not torch.cuda.is_bf16_supported()
-    ):
-        pytest.skip("BF16 is not supported.")
-    if (
-        F._default_context_str == "gpu"
-        and dtype == torch.float16
-        and torch.cuda.get_device_capability() < (7, 0)
-    ):
-        pytest.skip(
-            f"FP16 is not supported for atomic operations on GPU with "
-            f"cuda capability ({torch.cuda.get_device_capability()})."
-        )
+    if F._default_context_str == "gpu":
+        if dtype == torch.bfloat16 and not torch.cuda.is_bf16_supported():
+            pytest.skip("BF16 is not supported.")
+        if (
+            dtype == torch.float16
+            and torch.cuda.get_device_capability() < (7, 0)
+        ) or (
+            dtype == torch.bfloat16
+            and torch.cuda.get_device_capability() < (8, 0)
+        ):
+            pytest.skip(
+                f"{dtype} is not supported for atomic operations on GPU with "
+                f"cuda capability ({torch.cuda.get_device_capability()})."
+            )
     dev = F.ctx()
     # input
......
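For context outside the diff: below is a minimal standalone sketch of the same capability-gated skip pattern, assuming only torch and pytest are available; the helper name skip_if_atomics_unsupported is hypothetical and not part of DGL or this commit, and the (7, 0) / (8, 0) thresholds simply restate the ones used in the change above.

import pytest
import torch


def skip_if_atomics_unsupported(dtype):
    # Hypothetical helper (not in this commit): skip the calling test when
    # the current GPU cannot run atomic operations for `dtype`, using the
    # same thresholds as the diff (FP16 needs >= (7, 0), BF16 needs >= (8, 0)).
    if not torch.cuda.is_available():
        pytest.skip("CUDA is not available.")
    if dtype == torch.bfloat16 and not torch.cuda.is_bf16_supported():
        pytest.skip("BF16 is not supported.")
    capability = torch.cuda.get_device_capability()
    if (dtype == torch.float16 and capability < (7, 0)) or (
        dtype == torch.bfloat16 and capability < (8, 0)
    ):
        pytest.skip(
            f"{dtype} is not supported for atomic operations on GPU with "
            f"cuda capability ({capability})."
        )

A test could call skip_if_atomics_unsupported(torch.float16) at its start, before allocating CUDA tensors, to get the same skip behavior the commit adds inline.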