Unverified commit 364cb718 authored by Andrei Ivanov, committed by GitHub

Skip test when atomic operations are not supported on GPU. (#7117)

parent 938deec8
@@ -407,20 +407,20 @@ def test_segment_mm(idtype, feat_size, dtype, tol):
 def test_gather_mm_idx_b(feat_size, dtype, tol):
     if F._default_context_str == "cpu" and dtype == torch.float16:
         pytest.skip("float16 is not supported on CPU.")
-    if (
-        F._default_context_str == "gpu"
-        and dtype == torch.bfloat16
-        and not torch.cuda.is_bf16_supported()
-    ):
-        pytest.skip("BF16 is not supported.")
-    if (
-        F._default_context_str == "gpu"
-        and dtype == torch.float16
-        and torch.cuda.get_device_capability() < (7, 0)
-    ):
-        pytest.skip(
-            f"FP16 is not supported for atomic operations on GPU with "
-            f"cuda capability ({torch.cuda.get_device_capability()})."
-        )
+    if F._default_context_str == "gpu":
+        if dtype == torch.bfloat16 and not torch.cuda.is_bf16_supported():
+            pytest.skip("BF16 is not supported.")
+        if (
+            dtype == torch.float16
+            and torch.cuda.get_device_capability() < (7, 0)
+        ) or (
+            dtype == torch.bfloat16
+            and torch.cuda.get_device_capability() < (8, 0)
+        ):
+            pytest.skip(
+                f"{dtype} is not supported for atomic operations on GPU with "
+                f"cuda capability ({torch.cuda.get_device_capability()})."
+            )
...
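
The capability thresholds in the new check match the CUDA requirements for atomic adds: FP16 atomics need compute capability 7.0 or newer, and bfloat16 atomics need 8.0 or newer, which is why bfloat16 is now covered by the same skip message. Below is a minimal sketch, not part of this commit, of how the same guard could be factored into a reusable pytest helper; the helper name skip_if_unsupported_on_gpu and its arguments are hypothetical.

# Hypothetical helper (not part of this commit): the per-test guard above
# could be shared by similar tests instead of being repeated inline.
import pytest
import torch


def skip_if_unsupported_on_gpu(dtype, on_gpu):
    """Skip the calling test when `dtype` cannot be used in GPU atomic ops."""
    if not on_gpu:
        return
    if dtype == torch.bfloat16 and not torch.cuda.is_bf16_supported():
        pytest.skip("BF16 is not supported.")
    if (
        dtype == torch.float16
        and torch.cuda.get_device_capability() < (7, 0)
    ) or (
        dtype == torch.bfloat16
        and torch.cuda.get_device_capability() < (8, 0)
    ):
        pytest.skip(
            f"{dtype} is not supported for atomic operations on GPU with "
            f"cuda capability ({torch.cuda.get_device_capability()})."
        )


# Hypothetical usage at the top of a test:
#     skip_if_unsupported_on_gpu(dtype, F._default_context_str == "gpu")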