import itertools
import math
from collections import namedtuple

import torch
from parameterized import parameterized

from torchaudio.models import ConvTasNet, DeepSpeech, Wav2Letter, WaveRNN
from torchaudio.models.wavernn import MelResNet, UpsampleNetwork
from torchaudio_unittest import common_utils
from torchaudio_unittest.common_utils import torch_script

class TestWav2Letter(common_utils.TorchaudioTestCase):
jimchen90's avatar
jimchen90 committed
13
14
15
16
17
18
19
20

    def test_waveform(self):
        batch_size = 2
        num_features = 1
        num_classes = 40
        input_length = 320

        model = Wav2Letter(num_classes=num_classes, num_features=num_features)
Tomás Osório's avatar
Tomás Osório committed
21
22
23
24
25
26

        x = torch.rand(batch_size, num_features, input_length)
        out = model(x)

        assert out.size() == (batch_size, num_classes, 2)

jimchen90's avatar
jimchen90 committed
27
28
29
30
31
32
33
    def test_mfcc(self):
        batch_size = 2
        num_features = 13
        num_classes = 40
        input_length = 2

        model = Wav2Letter(num_classes=num_classes, input_type="mfcc", num_features=num_features)
Tomás Osório's avatar
Tomás Osório committed
34
35
36
37
38

        x = torch.rand(batch_size, num_features, input_length)
        out = model(x)

        assert out.size() == (batch_size, num_classes, 2)
jimchen90's avatar
jimchen90 committed
39
40


41
class TestMelResNet(common_utils.TorchaudioTestCase):
jimchen90's avatar
jimchen90 committed
42
43

    def test_waveform(self):
44
        """Validate the output dimensions of a MelResNet block.
jimchen90's avatar
jimchen90 committed
45
        """
jimchen90's avatar
jimchen90 committed
46

jimchen90's avatar
jimchen90 committed
47
48
49
50
51
52
53
        n_batch = 2
        n_time = 200
        n_freq = 100
        n_output = 128
        n_res_block = 10
        n_hidden = 128
        kernel_size = 5
jimchen90's avatar
jimchen90 committed
54

55
        model = MelResNet(n_res_block, n_freq, n_hidden, n_output, kernel_size)
jimchen90's avatar
jimchen90 committed
56

jimchen90's avatar
jimchen90 committed
57
        x = torch.rand(n_batch, n_freq, n_time)
jimchen90's avatar
jimchen90 committed
58
59
        out = model(x)

jimchen90's avatar
jimchen90 committed
60
        assert out.size() == (n_batch, n_output, n_time - kernel_size + 1)
jimchen90's avatar
jimchen90 committed
61
62
63
64
65


class TestUpsampleNetwork(common_utils.TorchaudioTestCase):

    def test_waveform(self):
66
        """Validate the output dimensions of a UpsampleNetwork block.
jimchen90's avatar
jimchen90 committed
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
        """

        upsample_scales = [5, 5, 8]
        n_batch = 2
        n_time = 200
        n_freq = 100
        n_output = 256
        n_res_block = 10
        n_hidden = 128
        kernel_size = 5

        total_scale = 1
        for upsample_scale in upsample_scales:
            total_scale *= upsample_scale

82
83
84
85
86
87
        model = UpsampleNetwork(upsample_scales,
                                n_res_block,
                                n_freq,
                                n_hidden,
                                n_output,
                                kernel_size)
jimchen90's avatar
jimchen90 committed
88
89
90
91
92
93

        x = torch.rand(n_batch, n_freq, n_time)
        out1, out2 = model(x)

        assert out1.size() == (n_batch, n_freq, total_scale * (n_time - kernel_size + 1))
        assert out2.size() == (n_batch, n_output, total_scale * (n_time - kernel_size + 1))
jimchen90's avatar
jimchen90 committed
94
95
96
97
98


class TestWaveRNN(common_utils.TorchaudioTestCase):

    def test_waveform(self):
99
        """Validate the output dimensions of a WaveRNN model.
jimchen90's avatar
jimchen90 committed
100
101
102
103
104
        """

        upsample_scales = [5, 5, 8]
        n_rnn = 512
        n_fc = 512
105
        n_classes = 512
jimchen90's avatar
jimchen90 committed
106
107
108
109
110
111
112
113
114
        hop_length = 200
        n_batch = 2
        n_time = 200
        n_freq = 100
        n_output = 256
        n_res_block = 10
        n_hidden = 128
        kernel_size = 5

115
116
        model = WaveRNN(upsample_scales, n_classes, hop_length, n_res_block,
                        n_rnn, n_fc, kernel_size, n_freq, n_hidden, n_output)
jimchen90's avatar
jimchen90 committed
117
118
119
120
121

        x = torch.rand(n_batch, 1, hop_length * (n_time - kernel_size + 1))
        mels = torch.rand(n_batch, 1, n_freq, n_time)
        out = model(x, mels)

122
        assert out.size() == (n_batch, 1, hop_length * (n_time - kernel_size + 1), n_classes)
moto's avatar
moto committed
123

124
125
126
127
128
    def test_infer_waveform(self):
        """Validate the output dimensions of a WaveRNN model's infer method.
        """

        upsample_scales = [5, 5, 8]
129
130
131
        n_rnn = 128
        n_fc = 128
        n_classes = 128
132
133
        hop_length = 200
        n_batch = 2
134
135
136
137
138
        n_time = 50
        n_freq = 25
        n_output = 64
        n_res_block = 2
        n_hidden = 32
139
140
141
142
143
144
        kernel_size = 5

        model = WaveRNN(upsample_scales, n_classes, hop_length, n_res_block,
                        n_rnn, n_fc, kernel_size, n_freq, n_hidden, n_output)

        x = torch.rand(n_batch, n_freq, n_time)
145
146
        lengths = torch.tensor([n_time, n_time // 2])
        out, waveform_lengths = model.infer(x, lengths)
147

148
149
150
        assert out.size() == (n_batch, 1, hop_length * n_time)
        assert waveform_lengths[0] == hop_length * n_time
        assert waveform_lengths[1] == hop_length * n_time // 2
151

moto's avatar
moto committed
152
153
154
155
    def test_torchscript_infer(self):
        """Scripted model outputs the same as eager mode"""

        upsample_scales = [5, 5, 8]
156
157
158
        n_rnn = 128
        n_fc = 128
        n_classes = 128
moto's avatar
moto committed
159
160
        hop_length = 200
        n_batch = 2
161
162
163
164
165
        n_time = 50
        n_freq = 25
        n_output = 64
        n_res_block = 2
        n_hidden = 32
moto's avatar
moto committed
166
167
168
169
170
171
172
173
174
175
176
177
        kernel_size = 5

        model = WaveRNN(upsample_scales, n_classes, hop_length, n_res_block,
                        n_rnn, n_fc, kernel_size, n_freq, n_hidden, n_output)
        model.eval()
        x = torch.rand(n_batch, n_freq, n_time)
        torch.random.manual_seed(0)
        out_eager = model.infer(x)
        torch.random.manual_seed(0)
        out_script = torch_script(model).infer(x)
        self.assertEqual(out_eager, out_script)

moto's avatar
moto committed
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231

_ConvTasNetParams = namedtuple(
    '_ConvTasNetParams',
    [
        'enc_num_feats',
        'enc_kernel_size',
        'msk_num_feats',
        'msk_num_hidden_feats',
        'msk_kernel_size',
        'msk_num_layers',
        'msk_num_stacks',
    ]
)


class TestConvTasNet(common_utils.TorchaudioTestCase):
    @parameterized.expand(list(itertools.product(
        [2, 3],
        [
            _ConvTasNetParams(128, 40, 128, 256, 3, 7, 2),
            _ConvTasNetParams(256, 40, 128, 256, 3, 7, 2),
            _ConvTasNetParams(512, 40, 128, 256, 3, 7, 2),
            _ConvTasNetParams(512, 40, 128, 256, 3, 7, 2),
            _ConvTasNetParams(512, 40, 128, 512, 3, 7, 2),
            _ConvTasNetParams(512, 40, 128, 512, 3, 7, 2),
            _ConvTasNetParams(512, 40, 256, 256, 3, 7, 2),
            _ConvTasNetParams(512, 40, 256, 512, 3, 7, 2),
            _ConvTasNetParams(512, 40, 256, 512, 3, 7, 2),
            _ConvTasNetParams(512, 40, 128, 512, 3, 6, 4),
            _ConvTasNetParams(512, 40, 128, 512, 3, 4, 6),
            _ConvTasNetParams(512, 40, 128, 512, 3, 8, 3),
            _ConvTasNetParams(512, 32, 128, 512, 3, 8, 3),
            _ConvTasNetParams(512, 16, 128, 512, 3, 8, 3),
        ],
    )))
    def test_paper_configuration(self, num_sources, model_params):
        """ConvTasNet model works on the valid configurations in the paper"""
        batch_size = 32
        num_frames = 8000

        model = ConvTasNet(
            num_sources=num_sources,
            enc_kernel_size=model_params.enc_kernel_size,
            enc_num_feats=model_params.enc_num_feats,
            msk_kernel_size=model_params.msk_kernel_size,
            msk_num_feats=model_params.msk_num_feats,
            msk_num_hidden_feats=model_params.msk_num_hidden_feats,
            msk_num_layers=model_params.msk_num_layers,
            msk_num_stacks=model_params.msk_num_stacks,
        )
        tensor = torch.rand(batch_size, 1, num_frames)
        output = model(tensor)

        assert output.shape == (batch_size, num_sources, num_frames)
discort's avatar
discort committed
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248


class TestDeepSpeech(common_utils.TorchaudioTestCase):

    def test_deepspeech(self):
        n_batch = 2
        n_feature = 1
        n_channel = 1
        n_class = 40
        n_time = 320

        model = DeepSpeech(n_feature=n_feature, n_class=n_class)

        x = torch.rand(n_batch, n_channel, n_time, n_feature)
        out = model(x)

        assert out.size() == (n_batch, n_time, n_class)