"git@developer.sourcefind.cn:OpenDAS/bitsandbytes.git" did not exist on "97073cdb8a78618b8a56f51a9495254b645fd085"
Unverified Commit 364cb718 authored by Andrei Ivanov, committed by GitHub

Skip test when atomic operations are not supported on GPU. (#7117)

parent 938deec8
@@ -407,22 +407,22 @@ def test_segment_mm(idtype, feat_size, dtype, tol):
 def test_gather_mm_idx_b(feat_size, dtype, tol):
     if F._default_context_str == "cpu" and dtype == torch.float16:
         pytest.skip("float16 is not supported on CPU.")
-    if (
-        F._default_context_str == "gpu"
-        and dtype == torch.bfloat16
-        and not torch.cuda.is_bf16_supported()
-    ):
-        pytest.skip("BF16 is not supported.")
-    if (
-        F._default_context_str == "gpu"
-        and dtype == torch.float16
-        and torch.cuda.get_device_capability() < (7, 0)
-    ):
-        pytest.skip(
-            f"FP16 is not supported for atomic operations on GPU with "
-            f"cuda capability ({torch.cuda.get_device_capability()})."
-        )
+    if F._default_context_str == "gpu":
+        if dtype == torch.bfloat16 and not torch.cuda.is_bf16_supported():
+            pytest.skip("BF16 is not supported.")
+
+        if (
+            dtype == torch.float16
+            and torch.cuda.get_device_capability() < (7, 0)
+        ) or (
+            dtype == torch.bfloat16
+            and torch.cuda.get_device_capability() < (8, 0)
+        ):
+            pytest.skip(
+                f"{dtype} is not supported for atomic operations on GPU with "
+                f"cuda capability ({torch.cuda.get_device_capability()})."
+            )
     dev = F.ctx()
     # input
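For context, the guard this commit adds can be read as a single capability check: FP16 atomic operations require CUDA compute capability 7.0 or newer, and BF16 atomics require 8.0 or newer. The sketch below is illustrative only and is not part of the commit; the helper name skip_if_atomics_unsupported is hypothetical, and it assumes the same capability thresholds the diff above encodes. A test would call it once at the top, before exercising the kernel.

import pytest
import torch

def skip_if_atomics_unsupported(dtype):
    # Hypothetical helper (not in the commit) mirroring the guard above.
    # FP16 atomics need compute capability >= 7.0; BF16 atomics >= 8.0.
    if not torch.cuda.is_available():
        pytest.skip("CUDA is not available.")
    if dtype == torch.bfloat16 and not torch.cuda.is_bf16_supported():
        pytest.skip("BF16 is not supported.")
    cc = torch.cuda.get_device_capability()  # (major, minor) tuple
    if (dtype == torch.float16 and cc < (7, 0)) or (
        dtype == torch.bfloat16 and cc < (8, 0)
    ):
        pytest.skip(
            f"{dtype} is not supported for atomic operations on GPU "
            f"with cuda capability ({cc})."
        )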