Commit aee9f00d authored by hubertlu

Revert "Enable MLP unit tests on ROCm"

This reverts commit 964e61f1.
parent 964e61f1
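The revert re-applies @skipIfRocm to test_numeric, test_no_bias, test_with_bias, test_no_grad and test_performance_half, and toggles the torch.cuda.profiler calls in the timing test. For reference, a skipIfRocm-style decorator is usually a thin wrapper over unittest.skipIf; the sketch below is an assumption (the torch.version.hip check and the reason string are illustrative), not apex's actual helper.

# Hypothetical sketch of a skipIfRocm-style decorator; the helper these tests
# actually import may be implemented differently.
import unittest
import torch

def skipIfRocm(fn):
    """Skip the decorated test when PyTorch is built against ROCm/HIP."""
    # torch.version.hip is a version string on ROCm builds and None on CUDA builds.
    return unittest.skipIf(torch.version.hip is not None, "test skipped on ROCm")(fn)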
@@ -18,6 +18,7 @@ class TestMLP(unittest.TestCase):
     def test_creation(self):
         MLP(mlp_sizes)
+    @skipIfRocm
     def test_numeric(self):
         mlp = MLP(mlp_sizes).cuda()
@@ -52,6 +53,7 @@ class TestMLP(unittest.TestCase):
             ref_mlp[0].bias.grad.detach().cpu().numpy(),
             atol=1e-7, rtol=1e-5)
+    @skipIfRocm
     def test_no_bias(self):
         for use_activation in ['none', 'relu', 'sigmoid']:
             mlp = MLP(mlp_sizes, bias=False, activation=use_activation).cuda()
@@ -89,6 +91,7 @@ class TestMLP(unittest.TestCase):
                 ref_mlp[0].weight.grad.detach().cpu().numpy(),
                 atol=1e-7, rtol=100)
+    @skipIfRocm
     def test_with_bias(self):
         for use_activation in ['none', 'relu', 'sigmoid']:
             mlp = MLP(mlp_sizes, bias=True, activation=use_activation).cuda()
@@ -131,6 +134,7 @@ class TestMLP(unittest.TestCase):
                 ref_mlp[0].bias.grad.detach().cpu().numpy(),
                 atol=1e-7, rtol=1e-5)
+    @skipIfRocm
     def test_no_grad(self):
         mlp = MLP(mlp_sizes).cuda()
@@ -161,6 +165,7 @@ class TestMLP(unittest.TestCase):
             ref_mlp[0].weight.grad.detach().cpu().numpy(),
             atol=1e-7, rtol=1e-5)
+    @skipIfRocm
     def test_performance_half(self):
         mlp = MLP(mlp_sizes).cuda().half()
@@ -190,7 +195,7 @@ class TestMLP(unittest.TestCase):
         mlp.zero_grad()
         test_loss.backward()
-        #torch.cuda.profiler.start()
+        torch.cuda.profiler.start()
         torch.cuda.synchronize()
         start_time = time()
         for _ in range(num_iters):
@@ -212,7 +217,7 @@ class TestMLP(unittest.TestCase):
         torch.cuda.synchronize()
         stop_time = time()
         print(F"C++ MLP time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
-        #torch.cuda.profiler.stop()
+        torch.cuda.profiler.stop()
 if __name__ == '__main__':
     unittest.main()
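The truncated hunks only show the reference side of the numeric assertions (ref_mlp[0].weight.grad / ref_mlp[0].bias.grad compared with numpy tolerances). Below is a hedged sketch of that comparison pattern, not the test's actual body: it assumes the fused module exposes its parameters as mlp.weights / mlp.biases, uses illustrative mlp_sizes, and picks the bias=True, activation='none' combination that the diff shows the tests exercising.

# Hedged sketch of the gradient comparison these hunks assert; details of the
# real test (reference construction, inputs, losses, tolerances per layer) differ.
import numpy as np
import torch
from torch import nn
from apex.mlp import MLP

mlp_sizes = [480, 1024, 512, 1]   # illustrative sizes, not necessarily the file's constant

# bias=True / activation='none' is one of the combinations exercised in the diff above.
mlp = MLP(mlp_sizes, bias=True, activation='none').cuda()
ref_mlp = nn.Sequential(*[nn.Linear(mlp_sizes[i], mlp_sizes[i + 1])
                          for i in range(len(mlp_sizes) - 1)]).cuda()

# Start both stacks from identical parameters so gradients can be compared.
# Assumes mlp.weights / mlp.biases hold one parameter per layer.
for i, linear in enumerate(ref_mlp):
    mlp.weights[i].data.copy_(linear.weight)
    mlp.biases[i].data.copy_(linear.bias)

x = torch.randn(32, mlp_sizes[0], device='cuda')
mlp(x).sum().backward()
ref_mlp(x).sum().backward()

# Same tolerance style as the assertions visible in the hunks.
np.testing.assert_allclose(mlp.weights[0].grad.detach().cpu().numpy(),
                           ref_mlp[0].weight.grad.detach().cpu().numpy(),
                           atol=1e-7, rtol=1e-5)
np.testing.assert_allclose(mlp.biases[0].grad.detach().cpu().numpy(),
                           ref_mlp[0].bias.grad.detach().cpu().numpy(),
                           atol=1e-7, rtol=1e-5)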