Commit 13c8d152 authored by Jeff Daily's avatar Jeff Daily
Browse files

skip failing tests on ROCm

parent 4ebf2b90
......@@ -87,6 +87,7 @@ class TestFusedAdam(TestFusedOptimizer):
self.ref_optim = torch.optim.Adam
self.fused_optim = apex.optimizers.FusedAdam
@skipIfRocm
def test_float(self):
self.gen_single_type_test(param_type=torch.float)
......@@ -102,6 +103,7 @@ class TestFusedAdam(TestFusedOptimizer):
self.max_abs_diff = 1e-2
self.gen_single_type_test(param_type=torch.bfloat16, apex_only=True)
@skipIfRocm
@unittest.skipIf(torch.cuda.device_count()<2, "more than 1 GPU required")
def test_multi_device(self):
devices = ("cuda:0", "cuda:1")
......@@ -194,6 +196,7 @@ class TestFusedAdagrad(TestFusedOptimizer):
self.ref_optim = torch.optim.Adagrad
self.fused_optim = apex.optimizers.FusedAdagrad
@skipIfRocm
def test_float(self):
self.gen_single_type_test(param_type=torch.float)
......@@ -201,6 +204,7 @@ class TestFusedAdagrad(TestFusedOptimizer):
def test_half(self):
self.gen_single_type_test(param_type=torch.float16)
@skipIfRocm
@unittest.skipIf(torch.cuda.device_count()<2, "more than 1 GPU required")
def test_multi_device(self):
devices = ("cuda:0", "cuda:1")
......@@ -209,6 +213,7 @@ class TestFusedAdagrad(TestFusedOptimizer):
self.gen_single_type_test(param_type=torch.float, device=tensor_dev)
@skipIfRocm
def test_multi_params(self):
sizes = [[4096, 1024], [4096], [4096, 2048], [32320, 1024], [1]]
adagrad_option = {"lr": 5e-4, "eps": 1e-08, "weight_decay": 0}
......
......@@ -228,6 +228,7 @@ class TestFusedLAMB(unittest.TestCase):
with torch.cuda.device(current_dev):
self.gen_single_type_test(param_type=torch.float, device=tensor_dev)
@skipIfRocm
def test_multi_params(self):
sizes = [[4096, 1024], [4096], [4096, 2048], [32320, 1024], [1]]
weight_decay = [0, 0.01]
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment