"git@developer.sourcefind.cn:renzhc/diffusers_dcu.git" did not exist on "165af7edd3d5d35a77463b399a9d5257af3761db"
Unverified Commit 343d0220 authored by Vincent QB's avatar Vincent QB Committed by GitHub
Browse files

Move jitability test (#395)

* move test for scriptmodule.

* avoiding code duplication.
parent 7e07693f
...@@ -17,12 +17,18 @@ if IMPORT_LIBROSA: ...@@ -17,12 +17,18 @@ if IMPORT_LIBROSA:
import librosa import librosa
def _test_torchscript_functional(py_method, *args, **kwargs): def _test_torchscript_functional_shape(py_method, *args, **kwargs):
jit_method = torch.jit.script(py_method) jit_method = torch.jit.script(py_method)
jit_out = jit_method(*args, **kwargs) jit_out = jit_method(*args, **kwargs)
py_out = py_method(*args, **kwargs) py_out = py_method(*args, **kwargs)
assert jit_out.shape == py_out.shape
return jit_out, py_out
def _test_torchscript_functional(py_method, *args, **kwargs):
    """Assert that TorchScript-compiling ``py_method`` preserves both the
    output shape and the output values on the given inputs."""
    pair = _test_torchscript_functional_shape(py_method, *args, **kwargs)
    assert torch.allclose(*pair)
...@@ -500,52 +506,6 @@ class TestFunctional(unittest.TestCase): ...@@ -500,52 +506,6 @@ class TestFunctional(unittest.TestCase):
waveform = waveform.unsqueeze(0).repeat(3, 1, 1) waveform = waveform.unsqueeze(0).repeat(3, 1, 1)
computed = functional(waveform) computed = functional(waveform)
def _num_stft_bins(signal_len, fft_len, hop_length, pad):
return (signal_len + 2 * pad - fft_len + hop_length) // hop_length
@pytest.mark.parametrize('complex_specgrams', [
    torch.randn(2, 1025, 400, 2)
])
@pytest.mark.parametrize('rate', [0.5, 1.01, 1.3])
@pytest.mark.parametrize('hop_length', [256])
def test_phase_vocoder(complex_specgrams, rate, hop_length):
    """Check F.phase_vocoder against librosa.phase_vocoder.

    Verifies both the stretched output shape and the output values.
    Complex spectrograms are represented as real tensors with a trailing
    size-2 axis holding (real, imag) components, as shown by the
    ``[..., 0] + [..., 1] * 1j`` conversions below.
    """
    # Using a decorator here causes parametrize to fail on Python 2
    if not IMPORT_LIBROSA:
        raise unittest.SkipTest('Librosa is not available')

    # Due to cumulative sum, numerical error in using torch.float32 will
    # result in bottom right values of the stretched spectrogram to not
    # match with librosa.
    complex_specgrams = complex_specgrams.type(torch.float64)
    phase_advance = torch.linspace(0, np.pi * hop_length, complex_specgrams.shape[-3], dtype=torch.float64)[..., None]

    complex_specgrams_stretch = F.phase_vocoder(complex_specgrams, rate=rate, phase_advance=phase_advance)

    # == Test shape
    # Time axis (second-to-last real dim) is scaled by 1/rate, rounded up.
    expected_size = list(complex_specgrams.size())
    expected_size[-2] = int(np.ceil(expected_size[-2] / rate))

    assert complex_specgrams.dim() == complex_specgrams_stretch.dim()
    assert complex_specgrams_stretch.size() == torch.Size(expected_size)

    # == Test values
    # Select the first channel of any leading batch dims, keeping the last
    # three axes (freq, time, real/imag) intact.
    index = [0] * (complex_specgrams.dim() - 3) + [slice(None)] * 3
    mono_complex_specgram = complex_specgrams[index].numpy()
    mono_complex_specgram = mono_complex_specgram[..., 0] + \
        mono_complex_specgram[..., 1] * 1j
    expected_complex_stretch = librosa.phase_vocoder(mono_complex_specgram,
                                                     rate=rate,
                                                     hop_length=hop_length)

    complex_stretch = complex_specgrams_stretch[index].numpy()
    complex_stretch = complex_stretch[..., 0] + 1j * complex_stretch[..., 1]

    assert np.allclose(complex_stretch, expected_complex_stretch, atol=1e-5)
def test_torchscript_create_fb_matrix(self): def test_torchscript_create_fb_matrix(self):
n_stft = 100 n_stft = 100
...@@ -590,14 +550,14 @@ def test_phase_vocoder(complex_specgrams, rate, hop_length): ...@@ -590,14 +550,14 @@ def test_phase_vocoder(complex_specgrams, rate, hop_length):
def test_torchscript_complex_norm(self):
    """TorchScript consistency check for F.complex_norm.

    The old version of this line had a trailing comma
    (``torch.randn(...),``), which binds a 1-tuple rather than a tensor
    and passes the wrong type into the functional — the comma is removed
    here.
    """
    complex_tensor = torch.randn(1, 2, 1025, 400, 2)
    power = 2
    _test_torchscript_functional(F.complex_norm, complex_tensor, power)
def test_mask_along_axis(self): def test_mask_along_axis(self):
specgram = torch.randn(2, 1025, 400), specgram = torch.randn(2, 1025, 400)
mask_param = 100 mask_param = 100
mask_value = 30. mask_value = 30.
axis = 2 axis = 2
...@@ -606,8 +566,7 @@ def test_phase_vocoder(complex_specgrams, rate, hop_length): ...@@ -606,8 +566,7 @@ def test_phase_vocoder(complex_specgrams, rate, hop_length):
def test_mask_along_axis_iid(self): def test_mask_along_axis_iid(self):
specgram = torch.randn(2, 1025, 400), specgrams = torch.randn(4, 2, 1025, 400)
specgrams = torch.randn(4, 2, 1025, 400),
mask_param = 100 mask_param = 100
mask_value = 30. mask_value = 30.
axis = 2 axis = 2
...@@ -621,11 +580,57 @@ def test_phase_vocoder(complex_specgrams, rate, hop_length): ...@@ -621,11 +580,57 @@ def test_phase_vocoder(complex_specgrams, rate, hop_length):
_test_torchscript_functional(F.gain, tensor, gainDB) _test_torchscript_functional(F.gain, tensor, gainDB)
def test_torchscript_dither(self):
    """TorchScript consistency check for F.dither in each noise-shaping mode.

    Only output *shapes* are compared (via
    ``_test_torchscript_functional_shape``), not values — presumably
    because dither draws random noise, so the eager and scripted runs
    would produce different samples; TODO confirm against F.dither.
    """
    tensor = torch.rand((2, 1000))

    _test_torchscript_functional_shape(F.dither, tensor)
    _test_torchscript_functional_shape(F.dither, tensor, "RPDF")
    _test_torchscript_functional_shape(F.dither, tensor, "GPDF")
def _num_stft_bins(signal_len, fft_len, hop_length, pad):
return (signal_len + 2 * pad - fft_len + hop_length) // hop_length
@pytest.mark.parametrize('complex_specgrams', [
    torch.randn(2, 1025, 400, 2)
])
@pytest.mark.parametrize('rate', [0.5, 1.01, 1.3])
@pytest.mark.parametrize('hop_length', [256])
def test_phase_vocoder(complex_specgrams, rate, hop_length):
    """Check F.phase_vocoder against librosa.phase_vocoder.

    Verifies both the stretched output shape and the output values.
    Complex spectrograms are real tensors whose trailing size-2 axis holds
    (real, imag) components — see the ``[..., 0] + [..., 1] * 1j``
    conversions below.

    This span was garbled by a side-by-side diff scrape: the final assert
    was fused onto one line with stale deleted dither-test lines. The
    reconstruction below drops the stale lines and restores the assert.
    """
    # Using a decorator here causes parametrize to fail on Python 2
    if not IMPORT_LIBROSA:
        raise unittest.SkipTest('Librosa is not available')

    # Due to cumulative sum, numerical error in using torch.float32 will
    # result in bottom right values of the stretched spectrogram to not
    # match with librosa.
    complex_specgrams = complex_specgrams.type(torch.float64)
    phase_advance = torch.linspace(0, np.pi * hop_length, complex_specgrams.shape[-3], dtype=torch.float64)[..., None]

    complex_specgrams_stretch = F.phase_vocoder(complex_specgrams, rate=rate, phase_advance=phase_advance)

    # == Test shape
    # Time axis (second-to-last real dim) scales by 1/rate, rounded up.
    expected_size = list(complex_specgrams.size())
    expected_size[-2] = int(np.ceil(expected_size[-2] / rate))

    assert complex_specgrams.dim() == complex_specgrams_stretch.dim()
    assert complex_specgrams_stretch.size() == torch.Size(expected_size)

    # == Test values
    # First channel of any leading batch dims; keep (freq, time, re/im).
    index = [0] * (complex_specgrams.dim() - 3) + [slice(None)] * 3
    mono_complex_specgram = complex_specgrams[index].numpy()
    mono_complex_specgram = mono_complex_specgram[..., 0] + \
        mono_complex_specgram[..., 1] * 1j
    expected_complex_stretch = librosa.phase_vocoder(mono_complex_specgram,
                                                     rate=rate,
                                                     hop_length=hop_length)

    complex_stretch = complex_specgrams_stretch[index].numpy()
    complex_stretch = complex_stretch[..., 0] + 1j * complex_stretch[..., 1]

    assert np.allclose(complex_stretch, expected_complex_stretch, atol=1e-5)
@pytest.mark.parametrize('complex_tensor', [ @pytest.mark.parametrize('complex_tensor', [
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment