from __future__ import print_function
import os

import torch
import torchaudio
import torchaudio.transforms as transforms
import numpy as np
import unittest


class Tester(unittest.TestCase):

    # create a sinewave signal for testing
    sr = 16000
    freq = 440
    volume = .3
    sig = (torch.cos(2 * np.pi * torch.arange(0, 4 * sr).float() * freq / sr))
    sig.unsqueeze_(1)  # (64000, 1)
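    # scale to the int32 sample range and convert to integer samples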
    sig = (sig * volume * 2**31).long()

    # file for stereo stft test
    test_dirpath = os.path.dirname(os.path.realpath(__file__))
    test_filepath = os.path.join(test_dirpath, "assets",
                                 "steam-train-whistle-daniel_simon.mp3")

    def test_scale(self):

        audio_orig = self.sig.clone()
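        # the default Scale factor is 2**31, mapping int32-range samples into [-1., 1.]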
        result = transforms.Scale()(audio_orig)
        self.assertTrue(result.min() >= -1. and result.max() <= 1.)

        maxminmax = np.abs(
            [audio_orig.min(), audio_orig.max()]).max().astype(float)
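        # scaling by the exact absolute peak should land an endpoint on -1. or 1.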
        result = transforms.Scale(factor=maxminmax)(audio_orig)
        self.assertTrue((result.min() == -1. or result.max() == 1.) and
                        result.min() >= -1. and result.max() <= 1.)

        repr_test = transforms.Scale()
        self.assertTrue(repr_test.__repr__())

    def test_pad_trim(self):

        audio_orig = self.sig.clone()
        length_orig = audio_orig.size(0)
        length_new = int(length_orig * 1.2)

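        # a max_len longer than the signal should pad it up to max_len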
        result = transforms.PadTrim(max_len=length_new, channels_first=False)(audio_orig)
        self.assertEqual(result.size(0), length_new)

        result = transforms.PadTrim(max_len=length_new, channels_first=True)(audio_orig.transpose(0, 1))
        self.assertEqual(result.size(1), length_new)

        audio_orig = self.sig.clone()
        length_orig = audio_orig.size(0)
        length_new = int(length_orig * 0.8)

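        # a max_len shorter than the signal should trim it down to max_len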
        result = transforms.PadTrim(max_len=length_new, channels_first=False)(audio_orig)

        self.assertEqual(result.size(0), length_new)

        repr_test = transforms.PadTrim(max_len=length_new, channels_first=False)
        self.assertTrue(repr_test.__repr__())

    def test_downmix_mono(self):

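        # build a fake stereo signal: the right channel is a circularly shifted copy of the left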
        audio_L = self.sig.clone()
        audio_R = self.sig.clone()
        R_idx = int(audio_R.size(0) * 0.1)
        audio_R = torch.cat((audio_R[R_idx:], audio_R[:R_idx]))

        audio_Stereo = torch.cat((audio_L, audio_R), dim=1)

        self.assertTrue(audio_Stereo.size(1) == 2)

        result = transforms.DownmixMono(channels_first=False)(audio_Stereo)

        self.assertTrue(result.size(1) == 1)

        repr_test = transforms.DownmixMono(channels_first=False)
        self.assertTrue(repr_test.__repr__())

    def test_lc2cl(self):

        audio = self.sig.clone()
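        # LC2CL transposes a (length, channel) tensor to (channel, length)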
        result = transforms.LC2CL()(audio)
        self.assertTrue(result.size()[::-1] == audio.size())

        repr_test = transforms.LC2CL()
        self.assertTrue(repr_test.__repr__())

    def test_compose(self):

        audio_orig = self.sig.clone()
        length_orig = audio_orig.size(0)
        length_new = int(length_orig * 1.2)
        maxminmax = np.abs(
            [audio_orig.min(), audio_orig.max()]).max().astype(float)

        tset = (transforms.Scale(factor=maxminmax),
                transforms.PadTrim(max_len=length_new, channels_first=False))
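        # the composed transforms should normalize and pad in a single call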
        result = transforms.Compose(tset)(audio_orig)

        self.assertTrue(np.abs([result.min(), result.max()]).max() == 1.)

        self.assertTrue(result.size(0) == length_new)

        repr_test = transforms.Compose(tset)
        self.assertTrue(repr_test.__repr__())

    def test_mu_law_companding(self):

        quantization_channels = 256

        sig = self.sig.clone()
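        # normalize to [-1., 1.], the input range expected by mu-law encoding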
        sig = sig / torch.abs(sig).max()
        self.assertTrue(sig.min() >= -1. and sig.max() <= 1.)

        sig_mu = transforms.MuLawEncoding(quantization_channels)(sig)
        self.assertTrue(sig_mu.min() >= 0. and sig_mu.max() <= quantization_channels)

        sig_exp = transforms.MuLawExpanding(quantization_channels)(sig_mu)
        self.assertTrue(sig_exp.min() >= -1. and sig_exp.max() <= 1.)

        repr_test = transforms.MuLawEncoding(quantization_channels)
        self.assertTrue(repr_test.__repr__())
        repr_test = transforms.MuLawExpanding(quantization_channels)
        self.assertTrue(repr_test.__repr__())

    def test_mel2(self):
        top_db = 80.
        s2db = transforms.SpectrogramToDB("power", top_db)
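        # SpectrogramToDB with top_db clamps output to within top_db of the peak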

        audio_orig = self.sig.clone()  # (64000, 1)
        audio_scaled = transforms.Scale()(audio_orig)  # (64000, 1)
        audio_scaled = transforms.LC2CL()(audio_scaled)  # (1, 64000)
        mel_transform = transforms.MelSpectrogram()
        # check defaults
        spectrogram_torch = s2db(mel_transform(audio_scaled))  # (1, 319, 40)
        self.assertTrue(spectrogram_torch.dim() == 3)
        self.assertTrue(spectrogram_torch.ge(spectrogram_torch.max() - top_db).all())
        self.assertEqual(spectrogram_torch.size(-1), mel_transform.n_mels)
        # check correctness of filterbank conversion matrix
        self.assertTrue(mel_transform.fm.fb.sum(1).le(1.).all())
        self.assertTrue(mel_transform.fm.fb.sum(1).ge(0.).all())
        # check options
        kwargs = {"window": torch.hamming_window, "pad": 10, "ws": 500, "hop": 125, "n_fft": 800, "n_mels": 50}
        mel_transform2 = transforms.MelSpectrogram(**kwargs)
        spectrogram2_torch = s2db(mel_transform2(audio_scaled))  # (1, 506, 50)
        self.assertTrue(spectrogram2_torch.dim() == 3)
        self.assertTrue(spectrogram2_torch.ge(spectrogram2_torch.max() - top_db).all())
        self.assertEqual(spectrogram2_torch.size(-1), mel_transform2.n_mels)
        self.assertTrue(mel_transform2.fm.fb.sum(1).le(1.).all())
        self.assertTrue(mel_transform2.fm.fb.sum(1).ge(0.).all())
        # check on multi-channel audio
        x_stereo, sr_stereo = torchaudio.load(self.test_filepath)
        spectrogram_stereo = s2db(mel_transform(x_stereo))
        self.assertTrue(spectrogram_stereo.dim() == 3)
        self.assertTrue(spectrogram_stereo.size(0) == 2)
        self.assertTrue(spectrogram_stereo.ge(spectrogram_stereo.max() - top_db).all())
        self.assertEqual(spectrogram_stereo.size(-1), mel_transform.n_mels)
        # check filterbank matrix creation
        fb_matrix_transform = transforms.MelScale(n_mels=100, sr=16000, f_max=None, f_min=0., n_stft=400)
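        # each STFT bin's total weight across the mel filters should lie in [0., 1.]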
        self.assertTrue(fb_matrix_transform.fb.sum(1).le(1.).all())
        self.assertTrue(fb_matrix_transform.fb.sum(1).ge(0.).all())
        self.assertEqual(fb_matrix_transform.fb.size(), (400, 100))

    def test_mfcc(self):
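        # MFCC output should be (channel, time, n_mfcc)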
        audio_orig = self.sig.clone()
        audio_scaled = transforms.Scale()(audio_orig)  # (64000, 1)
        audio_scaled = transforms.LC2CL()(audio_scaled)  # (1, 64000)

        sample_rate = 16000
        n_mfcc = 40
        n_mels = 128
        mfcc_transform = torchaudio.transforms.MFCC(sr=sample_rate,
                                                    n_mfcc=n_mfcc,
                                                    norm='ortho')
        # check defaults
        torch_mfcc = mfcc_transform(audio_scaled)
        self.assertTrue(torch_mfcc.dim() == 3)
        self.assertTrue(torch_mfcc.shape[2] == n_mfcc)
        self.assertTrue(torch_mfcc.shape[1] == 321)
        # check melkwargs are passed through
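        # ws=200 implies a default hop of ws // 2 = 100, doubling the frame count to 641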
        melkwargs = {'ws': 200}
        mfcc_transform2 = torchaudio.transforms.MFCC(sr=sample_rate,
                                                     n_mfcc=n_mfcc,
                                                     norm='ortho',
                                                     melkwargs=melkwargs)
        torch_mfcc2 = mfcc_transform2(audio_scaled)
        self.assertTrue(torch_mfcc2.shape[1] == 641)

        # check norms work correctly
        mfcc_transform_norm_none = torchaudio.transforms.MFCC(sr=sample_rate,
                                                              n_mfcc=n_mfcc,
                                                              norm=None)
        torch_mfcc_norm_none = mfcc_transform_norm_none(audio_scaled)

        norm_check = torch_mfcc.clone()
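        # DCT-II with norm=None equals the 'ortho' result scaled by sqrt(4 * n_mels)
        # for coefficient 0 and by sqrt(2 * n_mels) for the rest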
        norm_check[:, :, 0] *= np.sqrt(n_mels) * 2
        norm_check[:, :, 1:] *= np.sqrt(n_mels / 2) * 2

        self.assertTrue(torch_mfcc_norm_none.allclose(norm_check))

    def test_librosa_consistency(self):
        try:
            import librosa
            import scipy
        except ImportError:
            self.skipTest('librosa or scipy is not available')

        input_path = os.path.join(self.test_dirpath, 'assets', 'sinewave.wav')
        sound, sample_rate = torchaudio.load(input_path)
        sound_librosa = sound.cpu().numpy().squeeze().T  # squeeze away the channel dim; librosa expects a 1-D waveform

        n_fft = 400
        hop_length = 200
        power = 2.0
        n_mels = 128
        n_mfcc = 40
        sample_rate = 16000

        # test core spectrogram
        spect_transform = torchaudio.transforms.Spectrogram(n_fft=n_fft, hop=hop_length, power=2)
        out_librosa, _ = librosa.core.spectrum._spectrogram(y=sound_librosa,
                                                            n_fft=n_fft,
                                                            hop_length=hop_length,
                                                            power=2)

        out_torch = spect_transform(sound).squeeze().cpu().numpy().T
        self.assertTrue(np.allclose(out_torch, out_librosa, atol=1e-5))

        # test mel spectrogram
        melspect_transform = torchaudio.transforms.MelSpectrogram(sr=sample_rate, window=torch.hann_window,
                                                                  hop=hop_length, n_mels=n_mels, n_fft=n_fft)
        librosa_mel = librosa.feature.melspectrogram(y=sound_librosa, sr=sample_rate,
                                                     n_fft=n_fft, hop_length=hop_length, n_mels=n_mels,
                                                     htk=True, norm=None)
        torch_mel = melspect_transform(sound).squeeze().cpu().numpy().T

        # looser tolerance, likely due to double- vs. single-precision differences
        self.assertTrue(np.allclose(torch_mel, librosa_mel, atol=5e-3))

        # test s2db

        db_transform = torchaudio.transforms.SpectrogramToDB("power", 80.)
        db_torch = db_transform(spect_transform(sound)).squeeze().cpu().numpy().T
        db_librosa = librosa.core.spectrum.power_to_db(out_librosa)
        self.assertTrue(np.allclose(db_torch, db_librosa, atol=5e-3))

        db_torch = db_transform(melspect_transform(sound)).squeeze().cpu().numpy().T
        db_librosa = librosa.core.spectrum.power_to_db(librosa_mel)

        self.assertTrue(np.allclose(db_torch, db_librosa, atol=5e-3))

        # test MFCC
        melkwargs = {'hop': hop_length, 'n_fft': n_fft}
        mfcc_transform = torchaudio.transforms.MFCC(sr=sample_rate,
                                                    n_mfcc=n_mfcc,
                                                    norm='ortho',
                                                    melkwargs=melkwargs)

        # librosa.feature.mfcc doesn't pass kwargs properly since some of the
        # kwargs for melspectrogram and mfcc are the same. We just follow the
        # function body in https://librosa.github.io/librosa/_modules/librosa/feature/spectral.html#melspectrogram
        # to mirror this function call with correct args:

#         librosa_mfcc = librosa.feature.mfcc(y=sound_librosa,
#                                             sr=sample_rate,
#                                             n_mfcc = n_mfcc,
#                                             hop_length=hop_length,
#                                             n_fft=n_fft,
#                                             htk=True,
#                                             norm=None,
#                                             n_mels=n_mels)

        librosa_mfcc = scipy.fftpack.dct(db_librosa, axis=0, type=2, norm='ortho')[:n_mfcc]
        torch_mfcc = mfcc_transform(sound).squeeze().cpu().numpy().T

        self.assertTrue(np.allclose(torch_mfcc, librosa_mfcc, atol=5e-3))


if __name__ == '__main__':
    unittest.main()