# autograd_impl.py
1
from functools import partial
2
3
from typing import Callable, Tuple

4
import torch
5
import torchaudio.functional as F
6
7
from parameterized import parameterized
from torch import Tensor
8
from torch.autograd import gradcheck, gradgradcheck
9
from torchaudio_unittest.common_utils import (
10
    get_spectrogram,
11
    get_whitenoise,
12
    rnnt_utils,
13
    TestBaseMixin,
14
15
16
17
18
)


class Autograd(TestBaseMixin):
    def assert_grad(
        self,
        transform: Callable[..., Tensor],
        inputs: Tuple[torch.Tensor],
        *,
        enable_all_grad: bool = True,
    ):
        """Run first- and second-order numerical gradient checks on ``transform``.

        Tensor inputs are moved to the test dtype/device first (complex tensors
        use ``self.complex_dtype``).  When ``enable_all_grad`` is True every
        tensor input is marked as requiring gradients; callers that want only
        specific inputs checked set the flag themselves and pass False.
        """
        prepared = []
        for item in inputs:
            if torch.is_tensor(item):
                target_dtype = self.complex_dtype if item.is_complex() else self.dtype
                item = item.to(dtype=target_dtype, device=self.device)
                if enable_all_grad:
                    item.requires_grad = True
            prepared.append(item)
        assert gradcheck(transform, prepared)
        assert gradgradcheck(transform, prepared)

    def test_lfilter_x(self):
        """Gradients flow through the waveform input of lfilter only."""
        torch.random.manual_seed(2434)
        waveform = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=2)
        a_coeffs = torch.tensor([0.7, 0.2, 0.6])
        b_coeffs = torch.tensor([0.4, 0.2, 0.9])
        waveform.requires_grad = True
        self.assert_grad(F.lfilter, (waveform, a_coeffs, b_coeffs), enable_all_grad=False)

    def test_lfilter_a(self):
        """Gradients flow through the denominator coefficients of lfilter only."""
        torch.random.manual_seed(2434)
        waveform = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=2)
        a_coeffs = torch.tensor([0.7, 0.2, 0.6])
        b_coeffs = torch.tensor([0.4, 0.2, 0.9])
        a_coeffs.requires_grad = True
        self.assert_grad(F.lfilter, (waveform, a_coeffs, b_coeffs), enable_all_grad=False)

    def test_lfilter_b(self):
        """Gradients flow through the numerator coefficients of lfilter only."""
        torch.random.manual_seed(2434)
        waveform = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=2)
        a_coeffs = torch.tensor([0.7, 0.2, 0.6])
        b_coeffs = torch.tensor([0.4, 0.2, 0.9])
        b_coeffs.requires_grad = True
        self.assert_grad(F.lfilter, (waveform, a_coeffs, b_coeffs), enable_all_grad=False)

    def test_lfilter_all_inputs(self):
        """Gradients flow through the waveform and both coefficient vectors."""
        torch.random.manual_seed(2434)
        waveform = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=2)
        a_coeffs = torch.tensor([0.7, 0.2, 0.6])
        b_coeffs = torch.tensor([0.4, 0.2, 0.9])
        self.assert_grad(F.lfilter, (waveform, a_coeffs, b_coeffs))

    def test_lfilter_filterbanks(self):
        """Gradients flow through lfilter applied as a filterbank (batching off)."""
        torch.random.manual_seed(2434)
        waveform = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=3)
        a_coeffs = torch.tensor([[0.7, 0.2, 0.6], [0.8, 0.2, 0.9]])
        b_coeffs = torch.tensor([[0.4, 0.2, 0.9], [0.7, 0.2, 0.6]])
        self.assert_grad(partial(F.lfilter, batching=False), (waveform, a_coeffs, b_coeffs))

    def test_lfilter_batching(self):
        """Gradients flow through lfilter with per-channel coefficient batches."""
        torch.random.manual_seed(2434)
        waveform = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=2)
        a_coeffs = torch.tensor([[0.7, 0.2, 0.6], [0.8, 0.2, 0.9]])
        b_coeffs = torch.tensor([[0.4, 0.2, 0.9], [0.7, 0.2, 0.6]])
        self.assert_grad(F.lfilter, (waveform, a_coeffs, b_coeffs))

    def test_filtfilt_a(self):
        """Gradients flow through the denominator coefficients of filtfilt only."""
        torch.random.manual_seed(2434)
        waveform = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=2)
        a_coeffs = torch.tensor([0.7, 0.2, 0.6])
        b_coeffs = torch.tensor([0.4, 0.2, 0.9])
        a_coeffs.requires_grad = True
        self.assert_grad(F.filtfilt, (waveform, a_coeffs, b_coeffs), enable_all_grad=False)

    def test_filtfilt_b(self):
        """Gradients flow through the numerator coefficients of filtfilt only."""
        torch.random.manual_seed(2434)
        waveform = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=2)
        a_coeffs = torch.tensor([0.7, 0.2, 0.6])
        b_coeffs = torch.tensor([0.4, 0.2, 0.9])
        b_coeffs.requires_grad = True
        self.assert_grad(F.filtfilt, (waveform, a_coeffs, b_coeffs), enable_all_grad=False)

    def test_filtfilt_all_inputs(self):
        """Gradients flow through the waveform and both filtfilt coefficient vectors."""
        torch.random.manual_seed(2434)
        waveform = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=2)
        a_coeffs = torch.tensor([0.7, 0.2, 0.6])
        b_coeffs = torch.tensor([0.4, 0.2, 0.9])
        self.assert_grad(F.filtfilt, (waveform, a_coeffs, b_coeffs))

    def test_filtfilt_batching(self):
        """Gradients flow through filtfilt with per-channel coefficient batches."""
        torch.random.manual_seed(2434)
        waveform = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=2)
        a_coeffs = torch.tensor([[0.7, 0.2, 0.6], [0.8, 0.2, 0.9]])
        b_coeffs = torch.tensor([[0.4, 0.2, 0.9], [0.7, 0.2, 0.6]])
        self.assert_grad(F.filtfilt, (waveform, a_coeffs, b_coeffs))

    def test_biquad(self):
        """Gradients flow through the waveform and all six biquad coefficients."""
        torch.random.manual_seed(2434)
        waveform = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=1)
        a0, a1, a2 = torch.tensor([0.7, 0.2, 0.6])
        b0, b1, b2 = torch.tensor([0.4, 0.2, 0.9])
        self.assert_grad(F.biquad, (waveform, b0, b1, b2, a0, a1, a2))

    @parameterized.expand(
        [
            (800, 0.7, True),
            (800, 0.7, False),
        ]
    )
    def test_band_biquad(self, central_freq, Q, noise):
        """Gradients flow through the tensor inputs of band_biquad."""
        torch.random.manual_seed(2434)
        sample_rate = 22050
        waveform = get_whitenoise(sample_rate=sample_rate, duration=0.01, n_channels=1)
        self.assert_grad(
            F.band_biquad,
            (waveform, sample_rate, torch.tensor(central_freq), torch.tensor(Q), noise),
        )

    @parameterized.expand(
        [
            (800, 0.7, 10),
            (800, 0.7, -10),
        ]
    )
    def test_bass_biquad(self, central_freq, Q, gain):
        """Gradients flow through the tensor inputs of bass_biquad."""
        torch.random.manual_seed(2434)
        sample_rate = 22050
        waveform = get_whitenoise(sample_rate=sample_rate, duration=0.01, n_channels=1)
        self.assert_grad(
            F.bass_biquad,
            (waveform, sample_rate, torch.tensor(gain), torch.tensor(central_freq), torch.tensor(Q)),
        )

    @parameterized.expand(
        [
            (3000, 0.7, 10),
            (3000, 0.7, -10),
        ]
    )
    def test_treble_biquad(self, central_freq, Q, gain):
        """Gradients flow through the tensor inputs of treble_biquad."""
        torch.random.manual_seed(2434)
        sample_rate = 22050
        waveform = get_whitenoise(sample_rate=sample_rate, duration=0.01, n_channels=1)
        self.assert_grad(
            F.treble_biquad,
            (waveform, sample_rate, torch.tensor(gain), torch.tensor(central_freq), torch.tensor(Q)),
        )

    @parameterized.expand(
        [
            (
                800,
                0.7,
            ),
        ]
    )
    def test_allpass_biquad(self, central_freq, Q):
        """Gradients flow through the tensor inputs of allpass_biquad."""
        torch.random.manual_seed(2434)
        sample_rate = 22050
        waveform = get_whitenoise(sample_rate=sample_rate, duration=0.01, n_channels=1)
        self.assert_grad(
            F.allpass_biquad,
            (waveform, sample_rate, torch.tensor(central_freq), torch.tensor(Q)),
        )

    @parameterized.expand(
        [
            (
                800,
                0.7,
            ),
        ]
    )
    def test_lowpass_biquad(self, cutoff_freq, Q):
        """Gradients flow through the tensor inputs of lowpass_biquad."""
        torch.random.manual_seed(2434)
        sample_rate = 22050
        waveform = get_whitenoise(sample_rate=sample_rate, duration=0.01, n_channels=1)
        self.assert_grad(
            F.lowpass_biquad,
            (waveform, sample_rate, torch.tensor(cutoff_freq), torch.tensor(Q)),
        )

    @parameterized.expand(
        [
            (
                800,
                0.7,
            ),
        ]
    )
    def test_highpass_biquad(self, cutoff_freq, Q):
        """Gradients flow through the tensor inputs of highpass_biquad."""
        torch.random.manual_seed(2434)
        sample_rate = 22050
        waveform = get_whitenoise(sample_rate=sample_rate, duration=0.01, n_channels=1)
        self.assert_grad(
            F.highpass_biquad,
            (waveform, sample_rate, torch.tensor(cutoff_freq), torch.tensor(Q)),
        )

    @parameterized.expand(
        [
            (800, 0.7, True),
            (800, 0.7, False),
        ]
    )
    def test_bandpass_biquad(self, central_freq, Q, const_skirt_gain):
        """Gradients flow through the tensor inputs of bandpass_biquad."""
        torch.random.manual_seed(2434)
        sample_rate = 22050
        waveform = get_whitenoise(sample_rate=sample_rate, duration=0.01, n_channels=1)
        self.assert_grad(
            F.bandpass_biquad,
            (waveform, sample_rate, torch.tensor(central_freq), torch.tensor(Q), const_skirt_gain),
        )

    @parameterized.expand(
        [
            (800, 0.7, 10),
            (800, 0.7, -10),
        ]
    )
    def test_equalizer_biquad(self, central_freq, Q, gain):
        """Gradients flow through the tensor inputs of equalizer_biquad."""
        torch.random.manual_seed(2434)
        sample_rate = 22050
        waveform = get_whitenoise(sample_rate=sample_rate, duration=0.01, n_channels=1)
        self.assert_grad(
            F.equalizer_biquad,
            (waveform, sample_rate, torch.tensor(central_freq), torch.tensor(gain), torch.tensor(Q)),
        )

    @parameterized.expand(
        [
            (
                800,
                0.7,
            ),
        ]
    )
    def test_bandreject_biquad(self, central_freq, Q):
        """Gradients flow through the tensor inputs of bandreject_biquad."""
        torch.random.manual_seed(2434)
        sample_rate = 22050
        waveform = get_whitenoise(sample_rate=sample_rate, duration=0.01, n_channels=1)
        self.assert_grad(
            F.bandreject_biquad,
            (waveform, sample_rate, torch.tensor(central_freq), torch.tensor(Q)),
        )

    def test_deemph_biquad(self):
        """Gradients flow through the waveform input of deemph_biquad."""
        torch.random.manual_seed(2434)
        waveform = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=1)
        # NOTE(review): the sample rate passed to deemph_biquad (44100) differs
        # from the noise generator's (22050) — presumably intentional, since
        # deemph_biquad accepts a fixed set of rates; confirm against F.deemph_biquad.
        self.assert_grad(F.deemph_biquad, (waveform, 44100))

    def test_flanger(self):
        """Gradients flow through the waveform input of flanger."""
        torch.random.manual_seed(2434)
        waveform = get_whitenoise(sample_rate=8000, duration=0.01, n_channels=1)
        self.assert_grad(F.flanger, (waveform, 44100))

    def test_gain(self):
        """Gradients flow through the waveform input of gain."""
        torch.random.manual_seed(2434)
        waveform = get_whitenoise(sample_rate=8000, duration=0.01, n_channels=1)
        self.assert_grad(F.gain, (waveform, 1.1))

    def test_overdrive(self):
        """Gradients flow through the waveform input of overdrive.

        Fix: the original body called ``F.gain`` (an apparent copy-paste from
        ``test_gain``), so ``F.overdrive`` was never gradient-checked.
        """
        torch.random.manual_seed(2434)
        waveform = get_whitenoise(sample_rate=8000, duration=0.01, n_channels=1)
        self.assert_grad(F.overdrive, (waveform,))

    @parameterized.expand([(True,), (False,)])
    def test_phaser(self, sinusoidal):
        """Gradients flow through the waveform input of phaser."""
        torch.random.manual_seed(2434)
        sample_rate = 8000
        waveform = get_whitenoise(sample_rate=sample_rate, duration=0.01, n_channels=1)
        self.assert_grad(F.phaser, (waveform, sample_rate, sinusoidal))

    @parameterized.expand(
        [
            (True,),
            (False,),
        ]
    )
    def test_psd(self, use_mask):
        """Gradients flow through psd, with and without a time-frequency mask."""
        torch.random.manual_seed(2434)
        specgram = torch.rand(4, 10, 5, dtype=torch.cfloat)
        mask = torch.rand(10, 5) if use_mask else None
        self.assert_grad(F.psd, (specgram, mask))

    def test_mvdr_weights_souden(self):
        """Gradients flow through the PSD inputs of mvdr_weights_souden."""
        torch.random.manual_seed(2434)
        num_channels = 4
        num_bins = 5
        psd_speech = torch.rand(num_bins, num_channels, num_channels, dtype=torch.cfloat)
        psd_noise = torch.rand(num_bins, num_channels, num_channels, dtype=torch.cfloat)
        self.assert_grad(F.mvdr_weights_souden, (psd_speech, psd_noise, 0))

    def test_mvdr_weights_souden_with_tensor(self):
        """Gradients flow through mvdr_weights_souden with a one-hot reference channel."""
        torch.random.manual_seed(2434)
        num_channels = 4
        num_bins = 5
        psd_speech = torch.rand(num_bins, num_channels, num_channels, dtype=torch.cfloat)
        psd_noise = torch.rand(num_bins, num_channels, num_channels, dtype=torch.cfloat)
        # One-hot vector selecting channel 0 as the reference.
        reference_channel = torch.zeros(num_channels)
        reference_channel[0].fill_(1)
        self.assert_grad(F.mvdr_weights_souden, (psd_speech, psd_noise, reference_channel))

    def test_mvdr_weights_rtf(self):
        """Gradients flow through the RTF and PSD inputs of mvdr_weights_rtf."""
        torch.random.manual_seed(2434)
        batch_size, num_channels, num_bins = 2, 4, 10
        rtf = torch.rand(batch_size, num_bins, num_channels, dtype=self.complex_dtype)
        psd_noise = torch.rand(batch_size, num_bins, num_channels, num_channels, dtype=self.complex_dtype)
        self.assert_grad(F.mvdr_weights_rtf, (rtf, psd_noise, 0))

    def test_mvdr_weights_rtf_with_tensor(self):
        """Gradients flow through mvdr_weights_rtf with a one-hot reference channel."""
        torch.random.manual_seed(2434)
        batch_size, num_channels, num_bins = 2, 4, 10
        rtf = torch.rand(batch_size, num_bins, num_channels, dtype=self.complex_dtype)
        psd_noise = torch.rand(batch_size, num_bins, num_channels, num_channels, dtype=self.complex_dtype)
        # One-hot vector (per batch) selecting channel 0 as the reference.
        reference_channel = torch.zeros(batch_size, num_channels)
        reference_channel[..., 0].fill_(1)
        self.assert_grad(F.mvdr_weights_rtf, (rtf, psd_noise, reference_channel))

    @parameterized.expand(
        [
            (1, True),
            (3, False),
        ]
    )
    def test_rtf_power(self, n_iter, diagonal_loading):
        """Gradients flow through the PSD inputs of rtf_power."""
        torch.random.manual_seed(2434)
        num_channels = 4
        num_bins = 5
        psd_speech = torch.rand(num_bins, num_channels, num_channels, dtype=torch.cfloat)
        psd_noise = torch.rand(num_bins, num_channels, num_channels, dtype=torch.cfloat)
        self.assert_grad(F.rtf_power, (psd_speech, psd_noise, 0, n_iter, diagonal_loading))

    @parameterized.expand(
        [
            (1, True),
            (3, False),
        ]
    )
    def test_rtf_power_with_tensor(self, n_iter, diagonal_loading):
        """Gradients flow through rtf_power with a one-hot reference channel."""
        torch.random.manual_seed(2434)
        num_channels = 4
        num_bins = 5
        psd_speech = torch.rand(num_bins, num_channels, num_channels, dtype=torch.cfloat)
        psd_noise = torch.rand(num_bins, num_channels, num_channels, dtype=torch.cfloat)
        # One-hot vector selecting channel 0 as the reference.
        reference_channel = torch.zeros(num_channels)
        reference_channel[0].fill_(1)
        self.assert_grad(F.rtf_power, (psd_speech, psd_noise, reference_channel, n_iter, diagonal_loading))

    def test_apply_beamforming(self):
        """Gradients flow through the weights and spectrogram of apply_beamforming."""
        torch.random.manual_seed(2434)
        sample_rate = 8000
        n_fft = 400
        batch_size, num_channels = 2, 3
        num_bins = n_fft // 2 + 1
        noise = get_whitenoise(sample_rate=sample_rate, duration=0.05, n_channels=batch_size * num_channels)
        specgram = get_spectrogram(noise, n_fft=n_fft, hop_length=100)
        # Fold the flat channel dimension into (batch, channel, freq, time).
        specgram = specgram.view(batch_size, num_channels, num_bins, specgram.size(-1))
        beamform_weights = torch.rand(num_bins, num_channels, dtype=torch.cfloat)
        self.assert_grad(F.apply_beamforming, (beamform_weights, specgram))

class AutogradFloat32(TestBaseMixin):
    def assert_grad(
        self,
        transform: Callable[..., Tensor],
        inputs: Tuple[torch.Tensor],
        enable_all_grad: bool = True,
    ):
        """Run a numerical gradient check on ``transform`` in float32.

        Tensor inputs are moved to the test dtype/device; when
        ``enable_all_grad`` is True every tensor input is marked as
        requiring gradients before the check.
        """
        inputs_ = []
        for i in inputs:
            if torch.is_tensor(i):
                i = i.to(dtype=self.dtype, device=self.device)
                if enable_all_grad:
                    i.requires_grad = True
            inputs_.append(i)
        # gradcheck with float32 requires higher atol and epsilon.
        # Fix: pass the prepared ``inputs_`` — the original passed the raw
        # ``inputs``, discarding the dtype/device conversion and the
        # requires_grad setup performed above.
        assert gradcheck(transform, inputs_, eps=1e-3, atol=1e-3, nondet_tol=0.0)

    @parameterized.expand(
        [
            (rnnt_utils.get_B1_T10_U3_D4_data,),
            (rnnt_utils.get_B2_T4_U3_D3_data,),
            (rnnt_utils.get_B1_T2_U3_D5_data,),
        ]
    )
    def test_rnnt_loss(self, data_func):
        """Gradients of rnnt_loss match numerical gradients on small fixtures.

        Fixes: use ``isinstance`` instead of ``type(...) ==`` for the tuple
        check, and drop the inner helper's unused ``device`` parameter.
        """

        def get_data(data_func):
            # Some fixture functions return ``(data, ...)``; unwrap to the dict.
            data = data_func()
            if isinstance(data, tuple):
                data = data[0]
            return data

        data = get_data(data_func)
        inputs = (
            data["logits"].to(torch.float32),  # logits
            data["targets"],  # targets
            data["logit_lengths"],  # logit_lengths
            data["target_lengths"],  # target_lengths
            data["blank"],  # blank
            -1,  # clamp
        )
        # Only the logits carry gradients; lengths/targets are integer tensors.
        self.assert_grad(F.rnnt_loss, inputs, enable_all_grad=False)
