Unverified Commit 08217121 authored by moto's avatar moto Committed by GitHub
Browse files

Get rid of dynamic test suite generation (#716)

`type` used in `common_utils` generates test class definitions inside `common_utils`,
which modifies the module's state after it has been imported. This is an anti-pattern.
This PR gets rid of the related utility functions and defines the test suites manually.
parent 50939b75
import os import os
import tempfile import tempfile
import unittest import unittest
from typing import Type, Iterable, Union from typing import Iterable, Union
from contextlib import contextmanager from contextlib import contextmanager
from shutil import copytree from shutil import copytree
...@@ -88,35 +88,7 @@ class TestBaseMixin: ...@@ -88,35 +88,7 @@ class TestBaseMixin:
device = None device = None
_SKIP_IF_NO_CUDA = unittest.skipIf(not torch.cuda.is_available(), reason='CUDA not available') skipIfNoCuda = unittest.skipIf(not torch.cuda.is_available(), reason='CUDA not available')
def define_test_suite(testbase: Type[TestBaseMixin], dtype: str, device: str):
    """Build a concrete test class from a device/dtype-agnostic test base.

    Dynamically creates a subclass of ``testbase`` and ``TestCase`` named
    ``Test<Base>_<DEVICE>_<Dtype>`` whose ``dtype``/``device`` class
    attributes are the corresponding ``torch`` objects.

    Args:
        testbase: a TestBaseMixin subclass holding the actual test methods.
        dtype: 'float32' or 'float64'.
        device: 'cpu' or 'cuda'.

    Returns:
        The generated test class; CUDA variants are wrapped so they are
        skipped when CUDA is unavailable.

    Raises:
        NotImplementedError: if ``dtype`` or ``device`` is not one of the
            supported values above.
    """
    if dtype not in ['float32', 'float64']:
        raise NotImplementedError(f'Unexpected dtype: {dtype}')
    if device not in ['cpu', 'cuda']:
        raise NotImplementedError(f'Unexpected device: {device}')

    # e.g. define_test_suite(Lfilter, 'float32', 'cpu') -> TestLfilter_CPU_Float32
    name = f'Test{testbase.__name__}_{device.upper()}_{dtype.capitalize()}'
    # 'float32' -> torch.float32, 'cpu' -> torch.device('cpu')
    attrs = {'dtype': getattr(torch, dtype), 'device': torch.device(device)}
    testsuite = type(name, (testbase, TestCase), attrs)

    if device == 'cuda':
        # Skip (rather than fail) the whole suite on machines without CUDA.
        testsuite = _SKIP_IF_NO_CUDA(testsuite)
    return testsuite
def define_test_suites(
    scope: dict,
    testbases: Iterable[Type[TestBaseMixin]],
    dtypes: Iterable[str] = ('float32', 'float64'),
    devices: Iterable[str] = ('cpu', 'cuda'),
):
    """Generate a test class for every (testbase, device, dtype) combination
    and inject each one into ``scope``.

    Args:
        scope: the namespace that will receive the generated classes,
            typically a test module's ``globals()`` so the unittest loader
            can discover them.
        testbases: TestBaseMixin subclasses to expand.
        dtypes: dtype names accepted by ``define_test_suite``.
        devices: device names accepted by ``define_test_suite``.
    """
    for suite in testbases:
        for device in devices:
            for dtype in dtypes:
                t = define_test_suite(suite, dtype, device)
                # Register under the generated name so test discovery finds it.
                scope[t.__name__] = t
def common_test_class_parameters( def common_test_class_parameters(
......
...@@ -7,35 +7,17 @@ import torchaudio.functional as F ...@@ -7,35 +7,17 @@ import torchaudio.functional as F
import pytest import pytest
from . import common_utils from . import common_utils
from .functional_impl import Lfilter
class Lfilter(common_utils.TestBaseMixin): class TestLFilterFloat32(Lfilter, common_utils.TestCase):
def test_simple(self): dtype = torch.float32
""" device = torch.device('cpu')
Create a very basic signal,
Then make a simple 4th order delay
The output should be same as the input but shifted
"""
torch.random.manual_seed(42)
waveform = torch.rand(2, 44100 * 1, dtype=self.dtype, device=self.device)
b_coeffs = torch.tensor([0, 0, 0, 1], dtype=self.dtype, device=self.device)
a_coeffs = torch.tensor([1, 0, 0, 0], dtype=self.dtype, device=self.device)
output_waveform = F.lfilter(waveform, a_coeffs, b_coeffs)
self.assertEqual(output_waveform[:, 3:], waveform[:, 0:-3], atol=1e-5, rtol=1e-5)
def test_clamp(self):
input_signal = torch.ones(1, 44100 * 1, dtype=self.dtype, device=self.device)
b_coeffs = torch.tensor([1, 0], dtype=self.dtype, device=self.device)
a_coeffs = torch.tensor([1, -0.95], dtype=self.dtype, device=self.device)
output_signal = F.lfilter(input_signal, a_coeffs, b_coeffs, clamp=True)
assert output_signal.max() <= 1
output_signal = F.lfilter(input_signal, a_coeffs, b_coeffs, clamp=False)
assert output_signal.max() > 1
common_utils.define_test_suites(globals(), [Lfilter]) class TestLFilterFloat64(Lfilter, common_utils.TestCase):
dtype = torch.float64
device = torch.device('cpu')
class TestComputeDeltas(unittest.TestCase): class TestComputeDeltas(unittest.TestCase):
......
import torch
from . import common_utils
from .functional_impl import Lfilter
@common_utils.skipIfNoCuda
class TestLFilterFloat32(Lfilter, common_utils.TestCase):
    """Run the shared lfilter tests on CUDA with float32 tensors."""
    device = torch.device('cuda')
    dtype = torch.float32
@common_utils.skipIfNoCuda
class TestLFilterFloat64(Lfilter, common_utils.TestCase):
    """Run the shared lfilter tests on CUDA with float64 tensors."""
    device = torch.device('cuda')
    dtype = torch.float64
"""Test definitions common to CPU and CUDA"""
import torch
import torchaudio.functional as F
from . import common_utils
class Lfilter(common_utils.TestBaseMixin):
    """lfilter tests shared across devices and dtypes.

    Concrete subclasses supply the ``dtype`` and ``device`` class attributes
    (via TestBaseMixin) and mix in a TestCase base.
    """

    def test_simple(self):
        """
        Create a very basic signal,
        then apply a simple 4th-order delay filter.
        The output should be the same as the input but shifted.
        """
        torch.random.manual_seed(42)
        waveform = torch.rand(2, 44100 * 1, dtype=self.dtype, device=self.device)
        # b = [0, 0, 0, 1], a = [1, 0, 0, 0] is a pure 3-sample delay:
        # output[t] == input[t - 3].
        b_coeffs = torch.tensor([0, 0, 0, 1], dtype=self.dtype, device=self.device)
        a_coeffs = torch.tensor([1, 0, 0, 0], dtype=self.dtype, device=self.device)
        output_waveform = F.lfilter(waveform, a_coeffs, b_coeffs)
        self.assertEqual(output_waveform[:, 3:], waveform[:, 0:-3], atol=1e-5, rtol=1e-5)

    def test_clamp(self):
        # An amplifying IIR filter on an all-ones signal: with clamp=True the
        # output must stay bounded (max <= 1); with clamp=False the raw filter
        # response is returned and exceeds 1.
        input_signal = torch.ones(1, 44100 * 1, dtype=self.dtype, device=self.device)
        b_coeffs = torch.tensor([1, 0], dtype=self.dtype, device=self.device)
        a_coeffs = torch.tensor([1, -0.95], dtype=self.dtype, device=self.device)
        output_signal = F.lfilter(input_signal, a_coeffs, b_coeffs, clamp=True)
        assert output_signal.max() <= 1
        output_signal = F.lfilter(input_signal, a_coeffs, b_coeffs, clamp=False)
        assert output_signal.max() > 1
import torch
from . import common_utils from . import common_utils
from .kaldi_compatibility_impl import Kaldi from .kaldi_compatibility_impl import Kaldi
common_utils.define_test_suites(globals(), [Kaldi], devices=['cpu']) class TestKaldiFloat32(Kaldi, common_utils.TestCase):
dtype = torch.float32
device = torch.device('cpu')
class TestKaldiFloat64(Kaldi, common_utils.TestCase):
    """Run the shared Kaldi-compatibility tests on CPU with float64 tensors."""
    device = torch.device('cpu')
    dtype = torch.float64
import torch
from . import common_utils from . import common_utils
from .kaldi_compatibility_impl import Kaldi from .kaldi_compatibility_impl import Kaldi
common_utils.define_test_suites(globals(), [Kaldi], devices=['cuda']) @common_utils.skipIfNoCuda
class TestKaldiFloat32(Kaldi, common_utils.TestCase):
    """Run the shared Kaldi-compatibility tests on CUDA with float32 tensors."""
    device = torch.device('cuda')
    dtype = torch.float32
@common_utils.skipIfNoCuda
class TestKaldiFloat64(Kaldi, common_utils.TestCase):
    """Run the shared Kaldi-compatibility tests on CUDA with float64 tensors."""
    device = torch.device('cuda')
    dtype = torch.float64
from parameterized import parameterized_class import torch
from .common_utils import TestCase, common_test_class_parameters from . import common_utils
from .torchscript_consistency_impl import Functional, Transforms from .torchscript_consistency_impl import Functional, Transforms
parameters = list(common_test_class_parameters(devices=['cpu'])) class TestFunctionalFloat32(Functional, common_utils.TestCase):
dtype = torch.float32
device = torch.device('cpu')
@parameterized_class(parameters) class TestFunctionalFloat64(Functional, common_utils.TestCase):
class TestFunctional(Functional, TestCase): dtype = torch.float64
pass device = torch.device('cpu')
@parameterized_class(parameters) class TestTransformsFloat32(Transforms, common_utils.TestCase):
class TestTransforms(Transforms, TestCase): dtype = torch.float32
pass device = torch.device('cpu')
class TestTransformsFloat64(Transforms, common_utils.TestCase):
    """TorchScript-consistency tests for transforms on CPU, float64."""
    device = torch.device('cpu')
    dtype = torch.float64
from .common_utils import define_test_suites import torch
from . import common_utils
from .torchscript_consistency_impl import Functional, Transforms from .torchscript_consistency_impl import Functional, Transforms
define_test_suites(globals(), [Functional, Transforms], devices=['cuda']) @common_utils.skipIfNoCuda
class TestFunctionalFloat32(Functional, common_utils.TestCase):
    """TorchScript-consistency tests for functionals on CUDA, float32."""
    device = torch.device('cuda')
    dtype = torch.float32
@common_utils.skipIfNoCuda
class TestFunctionalFloat64(Functional, common_utils.TestCase):
    """TorchScript-consistency tests for functionals on CUDA, float64."""
    device = torch.device('cuda')
    dtype = torch.float64
@common_utils.skipIfNoCuda
class TestTransformsFloat32(Transforms, common_utils.TestCase):
    """TorchScript-consistency tests for transforms on CUDA, float32."""
    device = torch.device('cuda')
    dtype = torch.float32
@common_utils.skipIfNoCuda
class TestTransformsFloat64(Transforms, common_utils.TestCase):
    """TorchScript-consistency tests for transforms on CUDA, float64."""
    device = torch.device('cuda')
    dtype = torch.float64
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment