Commit ac82bdc4, authored by Sean Kim and committed by Facebook GitHub Bot
Browse files

Move Seed to Setup (#2425)

Summary:
Brings in the seed-setup change from the previously open pull request (https://github.com/pytorch/audio/issues/2267): moves the random-seed initialization out of individual tests and into the shared test setup utilities.

Pull Request resolved: https://github.com/pytorch/audio/pull/2425

Reviewed By: carolineechen, nateanl

Differential Revision: D36787599

Pulled By: skim0514

fbshipit-source-id: 37a0d632d13d4336a830c4b98bdb04828ed88c20
parent 94653bf4
......@@ -97,6 +97,7 @@ class TestBaseMixin:
def setUp(self):
super().setUp()
set_audio_backend(self.backend)
torch.random.manual_seed(2434)
@property
def complex_dtype(self):
......
......@@ -33,7 +33,6 @@ class Autograd(TestBaseMixin):
assert gradgradcheck(transform, inputs_)
def test_lfilter_x(self):
torch.random.manual_seed(2434)
x = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=2)
a = torch.tensor([0.7, 0.2, 0.6])
b = torch.tensor([0.4, 0.2, 0.9])
......@@ -41,7 +40,6 @@ class Autograd(TestBaseMixin):
self.assert_grad(F.lfilter, (x, a, b), enable_all_grad=False)
def test_lfilter_a(self):
torch.random.manual_seed(2434)
x = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=2)
a = torch.tensor([0.7, 0.2, 0.6])
b = torch.tensor([0.4, 0.2, 0.9])
......@@ -49,7 +47,6 @@ class Autograd(TestBaseMixin):
self.assert_grad(F.lfilter, (x, a, b), enable_all_grad=False)
def test_lfilter_b(self):
torch.random.manual_seed(2434)
x = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=2)
a = torch.tensor([0.7, 0.2, 0.6])
b = torch.tensor([0.4, 0.2, 0.9])
......@@ -57,28 +54,24 @@ class Autograd(TestBaseMixin):
self.assert_grad(F.lfilter, (x, a, b), enable_all_grad=False)
def test_lfilter_all_inputs(self):
    """Gradcheck lfilter with gradients enabled on the waveform and both coefficient vectors."""
    # Seed the RNG so the generated white-noise input is reproducible.
    torch.random.manual_seed(2434)
    a_coeffs = torch.tensor([0.7, 0.2, 0.6])
    b_coeffs = torch.tensor([0.4, 0.2, 0.9])
    waveform = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=2)
    self.assert_grad(F.lfilter, (waveform, a_coeffs, b_coeffs))
def test_lfilter_filterbanks(self):
    """Gradcheck lfilter in filterbank mode (batching=False) with 2-D coefficient matrices."""
    # Seed the RNG so the generated white-noise input is reproducible.
    torch.random.manual_seed(2434)
    a_coeffs = torch.tensor([[0.7, 0.2, 0.6], [0.8, 0.2, 0.9]])
    b_coeffs = torch.tensor([[0.4, 0.2, 0.9], [0.7, 0.2, 0.6]])
    waveform = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=3)
    self.assert_grad(partial(F.lfilter, batching=False), (waveform, a_coeffs, b_coeffs))
def test_lfilter_batching(self):
    """Gradcheck lfilter in batching mode with per-channel coefficient rows."""
    # Seed the RNG so the generated white-noise input is reproducible.
    torch.random.manual_seed(2434)
    a_coeffs = torch.tensor([[0.7, 0.2, 0.6], [0.8, 0.2, 0.9]])
    b_coeffs = torch.tensor([[0.4, 0.2, 0.9], [0.7, 0.2, 0.6]])
    waveform = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=2)
    self.assert_grad(F.lfilter, (waveform, a_coeffs, b_coeffs))
def test_filtfilt_a(self):
torch.random.manual_seed(2434)
x = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=2)
a = torch.tensor([0.7, 0.2, 0.6])
b = torch.tensor([0.4, 0.2, 0.9])
......@@ -86,7 +79,6 @@ class Autograd(TestBaseMixin):
self.assert_grad(F.filtfilt, (x, a, b), enable_all_grad=False)
def test_filtfilt_b(self):
torch.random.manual_seed(2434)
x = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=2)
a = torch.tensor([0.7, 0.2, 0.6])
b = torch.tensor([0.4, 0.2, 0.9])
......@@ -94,21 +86,18 @@ class Autograd(TestBaseMixin):
self.assert_grad(F.filtfilt, (x, a, b), enable_all_grad=False)
def test_filtfilt_all_inputs(self):
    """Gradcheck filtfilt with gradients enabled on the waveform and both coefficient vectors."""
    # Seed the RNG so the generated white-noise input is reproducible.
    torch.random.manual_seed(2434)
    a_coeffs = torch.tensor([0.7, 0.2, 0.6])
    b_coeffs = torch.tensor([0.4, 0.2, 0.9])
    waveform = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=2)
    self.assert_grad(F.filtfilt, (waveform, a_coeffs, b_coeffs))
def test_filtfilt_batching(self):
    """Gradcheck filtfilt in batching mode with per-channel coefficient rows."""
    # Seed the RNG so the generated white-noise input is reproducible.
    torch.random.manual_seed(2434)
    a_coeffs = torch.tensor([[0.7, 0.2, 0.6], [0.8, 0.2, 0.9]])
    b_coeffs = torch.tensor([[0.4, 0.2, 0.9], [0.7, 0.2, 0.6]])
    waveform = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=2)
    self.assert_grad(F.filtfilt, (waveform, a_coeffs, b_coeffs))
def test_biquad(self):
torch.random.manual_seed(2434)
x = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=1)
a = torch.tensor([0.7, 0.2, 0.6])
b = torch.tensor([0.4, 0.2, 0.9])
......@@ -121,7 +110,6 @@ class Autograd(TestBaseMixin):
]
)
def test_band_biquad(self, central_freq, Q, noise):
torch.random.manual_seed(2434)
sr = 22050
x = get_whitenoise(sample_rate=sr, duration=0.01, n_channels=1)
central_freq = torch.tensor(central_freq)
......@@ -135,7 +123,6 @@ class Autograd(TestBaseMixin):
]
)
def test_bass_biquad(self, central_freq, Q, gain):
torch.random.manual_seed(2434)
sr = 22050
x = get_whitenoise(sample_rate=sr, duration=0.01, n_channels=1)
central_freq = torch.tensor(central_freq)
......@@ -150,7 +137,6 @@ class Autograd(TestBaseMixin):
]
)
def test_treble_biquad(self, central_freq, Q, gain):
torch.random.manual_seed(2434)
sr = 22050
x = get_whitenoise(sample_rate=sr, duration=0.01, n_channels=1)
central_freq = torch.tensor(central_freq)
......@@ -167,7 +153,6 @@ class Autograd(TestBaseMixin):
]
)
def test_allpass_biquad(self, central_freq, Q):
torch.random.manual_seed(2434)
sr = 22050
x = get_whitenoise(sample_rate=sr, duration=0.01, n_channels=1)
central_freq = torch.tensor(central_freq)
......@@ -183,7 +168,6 @@ class Autograd(TestBaseMixin):
]
)
def test_lowpass_biquad(self, cutoff_freq, Q):
torch.random.manual_seed(2434)
sr = 22050
x = get_whitenoise(sample_rate=sr, duration=0.01, n_channels=1)
cutoff_freq = torch.tensor(cutoff_freq)
......@@ -199,7 +183,6 @@ class Autograd(TestBaseMixin):
]
)
def test_highpass_biquad(self, cutoff_freq, Q):
torch.random.manual_seed(2434)
sr = 22050
x = get_whitenoise(sample_rate=sr, duration=0.01, n_channels=1)
cutoff_freq = torch.tensor(cutoff_freq)
......@@ -213,7 +196,6 @@ class Autograd(TestBaseMixin):
]
)
def test_bandpass_biquad(self, central_freq, Q, const_skirt_gain):
torch.random.manual_seed(2434)
sr = 22050
x = get_whitenoise(sample_rate=sr, duration=0.01, n_channels=1)
central_freq = torch.tensor(central_freq)
......@@ -227,7 +209,6 @@ class Autograd(TestBaseMixin):
]
)
def test_equalizer_biquad(self, central_freq, Q, gain):
torch.random.manual_seed(2434)
sr = 22050
x = get_whitenoise(sample_rate=sr, duration=0.01, n_channels=1)
central_freq = torch.tensor(central_freq)
......@@ -244,7 +225,6 @@ class Autograd(TestBaseMixin):
]
)
def test_bandreject_biquad(self, central_freq, Q):
torch.random.manual_seed(2434)
sr = 22050
x = get_whitenoise(sample_rate=sr, duration=0.01, n_channels=1)
central_freq = torch.tensor(central_freq)
......@@ -285,7 +265,6 @@ class Autograd(TestBaseMixin):
]
)
def test_psd(self, use_mask):
torch.random.manual_seed(2434)
specgram = torch.rand(4, 10, 5, dtype=torch.cfloat)
if use_mask:
mask = torch.rand(10, 5)
......@@ -294,7 +273,6 @@ class Autograd(TestBaseMixin):
self.assert_grad(F.psd, (specgram, mask))
def test_mvdr_weights_souden(self):
torch.random.manual_seed(2434)
channel = 4
n_fft_bin = 5
psd_speech = torch.rand(n_fft_bin, channel, channel, dtype=torch.cfloat)
......@@ -302,7 +280,6 @@ class Autograd(TestBaseMixin):
self.assert_grad(F.mvdr_weights_souden, (psd_speech, psd_noise, 0))
def test_mvdr_weights_souden_with_tensor(self):
torch.random.manual_seed(2434)
channel = 4
n_fft_bin = 5
psd_speech = torch.rand(n_fft_bin, channel, channel, dtype=torch.cfloat)
......@@ -312,7 +289,6 @@ class Autograd(TestBaseMixin):
self.assert_grad(F.mvdr_weights_souden, (psd_speech, psd_noise, reference_channel))
def test_mvdr_weights_rtf(self):
torch.random.manual_seed(2434)
batch_size = 2
channel = 4
n_fft_bin = 10
......@@ -321,7 +297,6 @@ class Autograd(TestBaseMixin):
self.assert_grad(F.mvdr_weights_rtf, (rtf, psd_noise, 0))
def test_mvdr_weights_rtf_with_tensor(self):
torch.random.manual_seed(2434)
batch_size = 2
channel = 4
n_fft_bin = 10
......@@ -338,7 +313,6 @@ class Autograd(TestBaseMixin):
]
)
def test_rtf_power(self, n_iter, diagonal_loading):
torch.random.manual_seed(2434)
channel = 4
n_fft_bin = 5
psd_speech = torch.rand(n_fft_bin, channel, channel, dtype=torch.cfloat)
......@@ -352,7 +326,6 @@ class Autograd(TestBaseMixin):
]
)
def test_rtf_power_with_tensor(self, n_iter, diagonal_loading):
torch.random.manual_seed(2434)
channel = 4
n_fft_bin = 5
psd_speech = torch.rand(n_fft_bin, channel, channel, dtype=torch.cfloat)
......@@ -362,7 +335,6 @@ class Autograd(TestBaseMixin):
self.assert_grad(F.rtf_power, (psd_speech, psd_noise, reference_channel, n_iter, diagonal_loading))
def test_apply_beamforming(self):
torch.random.manual_seed(2434)
sr = 8000
n_fft = 400
batch_size, num_channels = 2, 3
......
......@@ -62,7 +62,6 @@ class Functional(TestBaseMixin):
The output should be same as the input but shifted
"""
torch.random.manual_seed(42)
waveform = torch.rand(2, 44100 * 1, dtype=self.dtype, device=self.device)
b_coeffs = torch.tensor([0, 0, 0, 1], dtype=self.dtype, device=self.device)
a_coeffs = torch.tensor([1, 0, 0, 0], dtype=self.dtype, device=self.device)
......@@ -115,7 +114,6 @@ class Functional(TestBaseMixin):
]
)
def test_lfilter_shape(self, input_shape, coeff_shape, target_shape):
torch.random.manual_seed(42)
waveform = torch.rand(*input_shape, dtype=self.dtype, device=self.device)
b_coeffs = torch.rand(*coeff_shape, dtype=self.dtype, device=self.device)
a_coeffs = torch.rand(*coeff_shape, dtype=self.dtype, device=self.device)
......@@ -283,7 +281,6 @@ class Functional(TestBaseMixin):
ref = 1.0
db_mult = math.log10(max(amin, ref))
torch.manual_seed(0)
spec = torch.rand(*shape, dtype=self.dtype, device=self.device) * 200
# Spectrogram amplitude -> DB -> amplitude
......@@ -307,7 +304,6 @@ class Functional(TestBaseMixin):
db_mult = math.log10(max(amin, ref))
top_db = 40.0
torch.manual_seed(0)
# A random tensor is used for increased entropy, but the max and min for
# each spectrogram still need to be predictable. The max determines the
# decibel cutoff, and the distance from the min must be large enough
......@@ -333,7 +329,6 @@ class Functional(TestBaseMixin):
list(itertools.product([(2, 1025, 400), (1, 201, 100)], [100], [0.0, 30.0], [1, 2], [0.33, 1.0]))
)
def test_mask_along_axis(self, shape, mask_param, mask_value, axis, p):
torch.random.manual_seed(42)
specgram = torch.randn(*shape, dtype=self.dtype, device=self.device)
if p != 1.0:
......@@ -355,7 +350,6 @@ class Functional(TestBaseMixin):
@parameterized.expand(list(itertools.product([100], [0.0, 30.0], [2, 3], [0.2, 1.0])))
def test_mask_along_axis_iid(self, mask_param, mask_value, axis, p):
torch.random.manual_seed(42)
specgrams = torch.randn(4, 2, 1025, 400, dtype=self.dtype, device=self.device)
if p != 1.0:
......@@ -381,7 +375,6 @@ class Functional(TestBaseMixin):
Test is run 5 times to bound the probability of no masking occurring to 1e-10
See https://github.com/pytorch/audio/issues/1478
"""
torch.random.manual_seed(42)
for _ in range(5):
specgram = torch.randn(*shape, dtype=self.dtype, device=self.device)
specgram_copy = specgram.clone()
......@@ -396,7 +389,6 @@ class Functional(TestBaseMixin):
Test is run 5 times to bound the probability of no masking occurring to 1e-10
See https://github.com/pytorch/audio/issues/1478
"""
torch.random.manual_seed(42)
for _ in range(5):
specgrams = torch.randn(4, 2, 1025, 400, dtype=self.dtype, device=self.device)
specgrams_copy = specgrams.clone()
......@@ -478,7 +470,6 @@ class Functional(TestBaseMixin):
num_frames = 400
batch_size = 2
torch.random.manual_seed(42)
spec = torch.randn(batch_size, num_freq, num_frames, dtype=self.complex_dtype, device=self.device)
phase_advance = torch.linspace(0, np.pi * hop_length, num_freq, dtype=self.dtype, device=self.device)[..., None]
......@@ -534,7 +525,6 @@ class Functional(TestBaseMixin):
)
def test_pitch_shift_shape(self, n_steps):
sample_rate = 16000
torch.random.manual_seed(42)
waveform = torch.rand(2, 44100 * 1, dtype=self.dtype, device=self.device)
waveform_shift = F.pitch_shift(waveform, sample_rate, n_steps)
assert waveform.size() == waveform_shift.size()
......
......@@ -207,6 +207,7 @@ class TestFairseqIntegration(TorchaudioTestCase):
imported = import_fairseq_model(original).eval()
# Without mask
torch.manual_seed(0)
x = torch.randn(batch_size, num_frames)
ref = original(x, torch.zeros_like(x))["encoder_out"].transpose(0, 1)
hyp, _ = imported(x)
......
......@@ -259,7 +259,7 @@ class AutogradTestMixin(TestBaseMixin):
spectrogram = get_spectrogram(waveform, n_fft=n_fft, power=None)
# 1e-3 is too small (on CPU)
epsilon = 1e-2
epsilon = 2e-2
too_close = spectrogram.abs() < epsilon
spectrogram[too_close] = epsilon * spectrogram[too_close] / spectrogram[too_close].abs()
self.assert_grad(transform, [spectrogram])
......
Markdown is supported
Attach a file by drag & drop or click to upload.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.