import itertools
from collections import namedtuple

import torch
from parameterized import parameterized
from torchaudio.models import ConvTasNet, DeepSpeech, Wav2Letter, WaveRNN
from torchaudio.models.wavernn import MelResNet, UpsampleNetwork
from torchaudio_unittest import common_utils


class TestWav2Letter(common_utils.TorchaudioTestCase):

    def test_waveform(self):
        batch_size = 2
        num_features = 1
        num_classes = 40
        input_length = 320

        model = Wav2Letter(num_classes=num_classes, num_features=num_features)

        x = torch.rand(batch_size, num_features, input_length)
        out = model(x)

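        # Output layout is (batch, num_classes, num_frames); the strided
        # convolutions reduce the 320-sample waveform to 2 output frames.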
        assert out.size() == (batch_size, num_classes, 2)

    def test_mfcc(self):
        batch_size = 2
        num_features = 13
        num_classes = 40
        input_length = 2

        model = Wav2Letter(num_classes=num_classes, input_type="mfcc", num_features=num_features)

        x = torch.rand(batch_size, num_features, input_length)
        out = model(x)

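        # Same (batch, num_classes, num_frames) layout; the 2 MFCC input
        # frames map to 2 output frames.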
        assert out.size() == (batch_size, num_classes, 2)


class TestMelResNet(common_utils.TorchaudioTestCase):

    def test_waveform(self):
        """Validate the output dimensions of a MelResNet block.
        """

        n_batch = 2
        n_time = 200
        n_freq = 100
        n_output = 128
        n_res_block = 10
        n_hidden = 128
        kernel_size = 5

        model = MelResNet(n_res_block, n_freq, n_hidden, n_output, kernel_size)

        x = torch.rand(n_batch, n_freq, n_time)
        out = model(x)

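        # MelResNet starts with an unpadded Conv1d of width kernel_size, so
        # the time axis shrinks by kernel_size - 1.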
        assert out.size() == (n_batch, n_output, n_time - kernel_size + 1)


class TestUpsampleNetwork(common_utils.TorchaudioTestCase):

    def test_waveform(self):
        """Validate the output dimensions of a UpsampleNetwork block.
        """

        upsample_scales = [5, 5, 8]
        n_batch = 2
        n_time = 200
        n_freq = 100
        n_output = 256
        n_res_block = 10
        n_hidden = 128
        kernel_size = 5

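        # The overall upsampling factor is the product of the per-stage
        # scales: 5 * 5 * 8 = 200.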
        total_scale = 1
        for upsample_scale in upsample_scales:
            total_scale *= upsample_scale

        model = UpsampleNetwork(upsample_scales,
                                n_res_block,
                                n_freq,
                                n_hidden,
                                n_output,
                                kernel_size)

        x = torch.rand(n_batch, n_freq, n_time)
        out1, out2 = model(x)

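        # Both outputs are stretched by total_scale along time after the
        # valid-convolution trim of kernel_size - 1 frames: out1 carries the
        # upsampled spectrogram, out2 the upsampled MelResNet features.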
        assert out1.size() == (n_batch, n_freq, total_scale * (n_time - kernel_size + 1))
        assert out2.size() == (n_batch, n_output, total_scale * (n_time - kernel_size + 1))


class TestWaveRNN(common_utils.TorchaudioTestCase):

    def test_waveform(self):
        """Validate the output dimensions of a WaveRNN model.
        """

        upsample_scales = [5, 5, 8]
        n_rnn = 512
        n_fc = 512
        n_classes = 512
        hop_length = 200
        n_batch = 2
        n_time = 200
        n_freq = 100
        n_output = 256
        n_res_block = 10
        n_hidden = 128
        kernel_size = 5

        model = WaveRNN(upsample_scales, n_classes, hop_length, n_res_block,
                        n_rnn, n_fc, kernel_size, n_freq, n_hidden, n_output)

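        # The waveform must align with the upsampled spectrogram: one sample
        # per hop for each of the (n_time - kernel_size + 1) valid frames.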
        x = torch.rand(n_batch, 1, hop_length * (n_time - kernel_size + 1))
        mels = torch.rand(n_batch, 1, n_freq, n_time)
        out = model(x, mels)

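        # The model emits n_classes logits for every waveform sample.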
        assert out.size() == (n_batch, 1, hop_length * (n_time - kernel_size + 1), n_classes)

    def test_infer_waveform(self):
        """Validate the output dimensions of a WaveRNN model's infer method.
        """

        upsample_scales = [5, 5, 8]
        n_rnn = 512
        n_fc = 512
        n_classes = 512
        hop_length = 200
        n_batch = 2
        n_time = 200
        n_freq = 100
        n_output = 256
        n_res_block = 10
        n_hidden = 128
        kernel_size = 5

        model = WaveRNN(upsample_scales, n_classes, hop_length, n_res_block,
                        n_rnn, n_fc, kernel_size, n_freq, n_hidden, n_output)

        x = torch.rand(n_batch, n_freq, n_time)
        out = model.infer(x)

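        # infer() generates the waveform autoregressively: hop_length samples
        # for each of the (n_time - kernel_size + 1) valid frames.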
        assert out.size() == (n_batch, 1, hop_length * (n_time - kernel_size + 1))


_ConvTasNetParams = namedtuple(
    '_ConvTasNetParams',
    [
        'enc_num_feats',
        'enc_kernel_size',
        'msk_num_feats',
        'msk_num_hidden_feats',
        'msk_kernel_size',
        'msk_num_layers',
        'msk_num_stacks',
    ]
)


class TestConvTasNet(common_utils.TorchaudioTestCase):
    @parameterized.expand(list(itertools.product(
        [2, 3],
        [
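            # Each row is one configuration from the Conv-TasNet paper
            # (Luo & Mesgarani, 2019); repeated rows presumably correspond to
            # paper entries differing only in settings not exposed here.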
            _ConvTasNetParams(128, 40, 128, 256, 3, 7, 2),
            _ConvTasNetParams(256, 40, 128, 256, 3, 7, 2),
            _ConvTasNetParams(512, 40, 128, 256, 3, 7, 2),
            _ConvTasNetParams(512, 40, 128, 256, 3, 7, 2),
            _ConvTasNetParams(512, 40, 128, 512, 3, 7, 2),
            _ConvTasNetParams(512, 40, 128, 512, 3, 7, 2),
            _ConvTasNetParams(512, 40, 256, 256, 3, 7, 2),
            _ConvTasNetParams(512, 40, 256, 512, 3, 7, 2),
            _ConvTasNetParams(512, 40, 256, 512, 3, 7, 2),
            _ConvTasNetParams(512, 40, 128, 512, 3, 6, 4),
            _ConvTasNetParams(512, 40, 128, 512, 3, 4, 6),
            _ConvTasNetParams(512, 40, 128, 512, 3, 8, 3),
            _ConvTasNetParams(512, 32, 128, 512, 3, 8, 3),
            _ConvTasNetParams(512, 16, 128, 512, 3, 8, 3),
        ],
    )))
    def test_paper_configuration(self, num_sources, model_params):
        """ConvTasNet model works on the valid configurations in the paper"""
        batch_size = 32
        num_frames = 8000

        model = ConvTasNet(
            num_sources=num_sources,
            enc_kernel_size=model_params.enc_kernel_size,
            enc_num_feats=model_params.enc_num_feats,
            msk_kernel_size=model_params.msk_kernel_size,
            msk_num_feats=model_params.msk_num_feats,
            msk_num_hidden_feats=model_params.msk_num_hidden_feats,
            msk_num_layers=model_params.msk_num_layers,
            msk_num_stacks=model_params.msk_num_stacks,
        )
        tensor = torch.rand(batch_size, 1, num_frames)
        output = model(tensor)

        assert output.shape == (batch_size, num_sources, num_frames)


class TestDeepSpeech(common_utils.TorchaudioTestCase):

    def test_deepspeech(self):
        n_batch = 2
        n_feature = 1
        n_channel = 1
        n_class = 40
        n_time = 320

        model = DeepSpeech(n_feature=n_feature, n_class=n_class)

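        # DeepSpeech expects input of shape (batch, channel, time, feature).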
        x = torch.rand(n_batch, n_channel, n_time, n_feature)
        out = model(x)

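        # The time dimension is preserved; per frame, the model outputs one
        # score per class.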
        assert out.size() == (n_batch, n_time, n_class)