batch_consistency_test.py
"""Test numerical consistency among single input and batched input."""
import torch
import torchaudio

from torchaudio_unittest import common_utils


class TestTransforms(common_utils.TorchaudioTestCase):
    """Test suite for classes defined in the `transforms` module."""

    backend = 'default'

    def test_batch_AmplitudeToDB(self):
        spec = torch.rand((2, 6, 201))

        # Single then transform then batch
        expected = torchaudio.transforms.AmplitudeToDB()(spec).repeat(3, 1, 1)

        # Batch then transform
        computed = torchaudio.transforms.AmplitudeToDB()(spec.repeat(3, 1, 1))

        self.assertEqual(computed, expected)

    def test_batch_Resample(self):
        waveform = torch.randn(2, 2786)

        # Single then transform then batch
        expected = torchaudio.transforms.Resample()(waveform).repeat(3, 1, 1)

        # Batch then transform
        computed = torchaudio.transforms.Resample()(waveform.repeat(3, 1, 1))

        self.assertEqual(computed, expected)

    def test_batch_MelScale(self):
        specgram = torch.randn(2, 31, 2786)

        # Single then transform then batch
        expected = torchaudio.transforms.MelScale()(specgram).repeat(3, 1, 1, 1)

        # Batch then transform
        computed = torchaudio.transforms.MelScale()(specgram.repeat(3, 1, 1, 1))

        # shape = (3, 2, n_mels, 2786)
        self.assertEqual(computed, expected)

    def test_batch_InverseMelScale(self):
        n_mels = 32
        n_stft = 5
        mel_spec = torch.randn(2, n_mels, 32) ** 2

        # Single then transform then batch
        expected = torchaudio.transforms.InverseMelScale(n_stft, n_mels)(mel_spec).repeat(3, 1, 1, 1)

        # Batch then transform
        computed = torchaudio.transforms.InverseMelScale(n_stft, n_mels)(mel_spec.repeat(3, 1, 1, 1))

        # shape = (3, 2, n_stft, 32)

        # Because InverseMelScale runs SGD on randomly initialized values, the single and
        # batched runs do not yield exactly the same result. For this reason, the tolerance
        # is very relaxed here.
        self.assertEqual(computed, expected, atol=1.0, rtol=1e-5)

    def test_batch_compute_deltas(self):
        specgram = torch.randn(2, 31, 2786)

        # Single then transform then batch
        expected = torchaudio.transforms.ComputeDeltas()(specgram).repeat(3, 1, 1, 1)

        # Batch then transform
        computed = torchaudio.transforms.ComputeDeltas()(specgram.repeat(3, 1, 1, 1))

        # shape = (3, 2, 31, 2786)
        self.assertEqual(computed, expected)

    def test_batch_mulaw(self):
        test_filepath = common_utils.get_asset_path('steam-train-whistle-daniel_simon.wav')
        waveform, _ = torchaudio.load(test_filepath)  # (2, 278756), 44100

        # Single then transform then batch
        waveform_encoded = torchaudio.transforms.MuLawEncoding()(waveform)
        expected = waveform_encoded.unsqueeze(0).repeat(3, 1, 1)

        # Batch then transform
        waveform_batched = waveform.unsqueeze(0).repeat(3, 1, 1)
        computed = torchaudio.transforms.MuLawEncoding()(waveform_batched)

        # shape = (3, 2, 278756)
        self.assertEqual(computed, expected)

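        # Round trip: decode the batched encoding computed above and compare it with the
        # decoded single waveform repeated into a batch.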
        # Single then transform then batch
        waveform_decoded = torchaudio.transforms.MuLawDecoding()(waveform_encoded)
        expected = waveform_decoded.unsqueeze(0).repeat(3, 1, 1)

        # Batch then transform
        computed = torchaudio.transforms.MuLawDecoding()(computed)

        # shape = (3, 2, 278756)
        self.assertEqual(computed, expected)

    def test_batch_spectrogram(self):
        test_filepath = common_utils.get_asset_path('steam-train-whistle-daniel_simon.wav')
        waveform, _ = torchaudio.load(test_filepath)  # (2, 278756), 44100

        # Single then transform then batch
        expected = torchaudio.transforms.Spectrogram()(waveform).repeat(3, 1, 1, 1)

        # Batch then transform
        computed = torchaudio.transforms.Spectrogram()(waveform.repeat(3, 1, 1))
        self.assertEqual(computed, expected)

    def test_batch_melspectrogram(self):
        test_filepath = common_utils.get_asset_path('steam-train-whistle-daniel_simon.wav')
        waveform, _ = torchaudio.load(test_filepath)  # (2, 278756), 44100

        # Single then transform then batch
        expected = torchaudio.transforms.MelSpectrogram()(waveform).repeat(3, 1, 1, 1)

        # Batch then transform
        computed = torchaudio.transforms.MelSpectrogram()(waveform.repeat(3, 1, 1))
        self.assertEqual(computed, expected)

    def test_batch_mfcc(self):
        test_filepath = common_utils.get_asset_path('steam-train-whistle-daniel_simon.wav')
        waveform, _ = torchaudio.load(test_filepath)

        # Single then transform then batch
        expected = torchaudio.transforms.MFCC()(waveform).repeat(3, 1, 1, 1)

        # Batch then transform
        computed = torchaudio.transforms.MFCC()(waveform.repeat(3, 1, 1))
        self.assertEqual(computed, expected, atol=1e-4, rtol=1e-5)

    def test_batch_TimeStretch(self):
        test_filepath = common_utils.get_asset_path('steam-train-whistle-daniel_simon.wav')
        waveform, _ = torchaudio.load(test_filepath)  # (2, 278756), 44100

        rate = 2

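        # torch.stft with return_complex=True returns a complex tensor; torch.view_as_real
        # converts it to a real tensor with a trailing dimension of 2 (real and imaginary
        # parts), which is the layout repeated and fed to TimeStretch below.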
        complex_specgrams = torch.view_as_real(
            torch.stft(
                input=waveform,
                n_fft=2048,
                hop_length=512,
                win_length=2048,
                window=torch.hann_window(2048),
                center=True,
                pad_mode='reflect',
                normalized=True,
                onesided=True,
                return_complex=True,
            )
        )

        # Single then transform then batch
        expected = torchaudio.transforms.TimeStretch(
            fixed_rate=rate,
            n_freq=1025,
            hop_length=512,
        )(complex_specgrams).repeat(3, 1, 1, 1, 1)

        # Batch then transform
        computed = torchaudio.transforms.TimeStretch(
            fixed_rate=rate,
            n_freq=1025,
            hop_length=512,
        )(complex_specgrams.repeat(3, 1, 1, 1, 1))

        self.assertEqual(computed, expected, atol=1e-5, rtol=1e-5)

    def test_batch_Fade(self):
        test_filepath = common_utils.get_asset_path('steam-train-whistle-daniel_simon.wav')
        waveform, _ = torchaudio.load(test_filepath)  # (2, 278756), 44100
        fade_in_len = 3000
        fade_out_len = 3000

        # Single then transform then batch
        expected = torchaudio.transforms.Fade(fade_in_len, fade_out_len)(waveform).repeat(3, 1, 1)

        # Batch then transform
        computed = torchaudio.transforms.Fade(fade_in_len, fade_out_len)(waveform.repeat(3, 1, 1))
        self.assertEqual(computed, expected)

    def test_batch_Vol(self):
        test_filepath = common_utils.get_asset_path('steam-train-whistle-daniel_simon.wav')
        waveform, _ = torchaudio.load(test_filepath)  # (2, 278756), 44100

        # Single then transform then batch
        expected = torchaudio.transforms.Vol(gain=1.1)(waveform).repeat(3, 1, 1)

        # Batch then transform
        computed = torchaudio.transforms.Vol(gain=1.1)(waveform.repeat(3, 1, 1))
        self.assertEqual(computed, expected)

    def test_batch_spectral_centroid(self):
        sample_rate = 44100
        waveform = common_utils.get_whitenoise(sample_rate=sample_rate)

        # Single then transform then batch
        expected = torchaudio.transforms.SpectralCentroid(sample_rate)(waveform).repeat(3, 1, 1)

        # Batch then transform
        computed = torchaudio.transforms.SpectralCentroid(sample_rate)(waveform.repeat(3, 1, 1))
        self.assertEqual(computed, expected)