Unverified commit 76e4e054 authored by Chaitanya Sri Krishna Lolla, committed by GitHub

Merge pull request #41 from lcskrishna/cl/skip-tests

Skip the unit tests
parents 663d5a4d 41bbf93c
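This merge imports skipIfRocm from apex.testing.common_utils and applies it to tests that do not yet pass on the ROCm stack. The helper itself is not part of this diff; the following is only a sketch of what such a decorator typically looks like, assuming ROCm builds of PyTorch are detected via torch.version.hip:

import unittest

import torch

# ROCm builds of PyTorch report a HIP version string here; CUDA builds set it
# to None (getattr hedges against very old builds that lack the attribute).
TEST_WITH_ROCM = getattr(torch.version, "hip", None) is not None


def skipIfRocm(fn):
    # Wrap the test so unittest reports it as skipped on ROCm instead of failing.
    return unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")(fn)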
@@ -103,6 +103,7 @@ class TestMultiTensorAxpby(unittest.TestCase):
         # self.assertTrue(self.overflow_buf.item())
     @unittest.skipIf(disabled, "amp_C is unavailable")
+    @skipIfRocm
     def test_fuzz(self):
         input_size_pairs = (
             (7777*77, 555*555),
...
@@ -11,6 +11,8 @@ import torch.nn.functional as F
 from utils import common_init, HALF, FLOAT,\
     ALWAYS_HALF, ALWAYS_FLOAT, MATCH_INPUT
+from apex.testing.common_utils import skipIfRocm
 try:
     import amp_C
     from amp_C import multi_tensor_scale
@@ -88,6 +90,7 @@ class TestMultiTensorScale(unittest.TestCase):
         # self.downscale(self.fp32, self.fp16, self.fp16_ref)
     @unittest.skipIf(disabled, "amp_C is unavailable")
+    @skipIfRocm
     def test_fuzz(self):
         input_size_pairs = (
             (7777*77, 555*555),
...
@@ -2,7 +2,7 @@ import unittest
 import apex
 import torch
+from apex.testing.common_utils import skipIfRocm
 class TestFusedAdagrad(unittest.TestCase):
     def setUp(self, max_abs_diff=1e-6, max_rel_diff=1, iters=7):
@@ -78,6 +78,7 @@ class TestFusedAdagrad(unittest.TestCase):
         if not apex_only:
             self.assertLessEqual(max_rel_diff, self.max_rel_diff)
+    @skipIfRocm
     def test_float(self):
         self.gen_single_type_test(param_type=torch.float)
@@ -89,10 +90,12 @@ class TestFusedAdagrad(unittest.TestCase):
     # Uses apex optimizers(controlled by apex_only flag) for both types.
     # Doesn't use upstream optimizer like other tests as they seem to be
     # numerically unstable for half types(see skip note for test above).
+    @skipIfRocm
     def test_bfloat16(self):
         self.max_abs_diff = 1e-2
         self.gen_single_type_test(param_type=torch.bfloat16, apex_only=True)
+    @skipIfRocm
     def test_multi_params(self):
         sizes = [[4096, 1024], [4096], [4096, 2048], [32320, 1024], [1]]
         adagrad_option = {"lr": 5e-4, "eps": 1e-08, "weight_decay": 0}
...
@@ -5,6 +5,8 @@ import random
 import torch
 import apex
+from apex.testing.common_utils import skipIfRocm
 class TestFusedAdam(unittest.TestCase):
     def setUp(self, max_abs_diff=1e-3, max_rel_diff=1, iters=7):
         self.max_abs_diff = max_abs_diff
@@ -77,6 +79,7 @@ class TestFusedAdam(unittest.TestCase):
         if not apex_only:
             self.assertLessEqual(max_rel_diff, self.max_rel_diff)
+    @skipIfRocm
     def test_float(self):
         self.gen_single_type_test(param_type=torch.float)
@@ -87,6 +90,7 @@ class TestFusedAdam(unittest.TestCase):
     # Uses apex optimizers(controlled by apex_only flag) for both types.
     # Doesn't use upstream optimizer like other tests as they seem to be
     # numerically unstable for half types
+    @skipIfRocm
     def test_bfloat16(self):
         self.max_abs_diff = 1e-2
         self.gen_single_type_test(param_type=torch.bfloat16, apex_only=True)
...
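For reference, stacked decorators compose, so a test such as test_fuzz above keeps both guards: it is skipped when amp_C is unavailable and, independently, when running on ROCm. A self-contained illustration follows; the flag values here are placeholders, not the real detection logic:

import unittest

disabled = False        # stands in for "the amp_C extension failed to import"
TEST_WITH_ROCM = False  # stands in for "running on a ROCm build of PyTorch"


def skipIfRocm(fn):
    return unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")(fn)


class ExampleTest(unittest.TestCase):
    @unittest.skipIf(disabled, "amp_C is unavailable")
    @skipIfRocm
    def test_fuzz(self):
        # Either decorator can skip this test; both conditions must be false for it to run.
        self.assertTrue(True)


if __name__ == "__main__":
    unittest.main()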