test_transforms.py 3.14 KB
Newer Older
David Pollack's avatar
David Pollack committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
import torch
import torchaudio
import torchaudio.transforms as transforms
import numpy as np
import unittest

class Tester(unittest.TestCase):
    """Unit tests for torchaudio.transforms.

    A 4-second, 440 Hz cosine tone at 16 kHz is synthesized once as a set of
    class attributes; every test clones it so the tests stay independent.
    """

    sr = 16000      # sample rate in Hz
    freq = 440      # test-tone frequency in Hz
    volume = 0.3    # peak amplitude as a fraction of full scale
    # Mono cosine of shape (4 * sr, 1), scaled into the 32-bit integer range.
    sig = (torch.cos(2*np.pi*torch.arange(0, 4*sr) * freq/sr)).float()
    sig.unsqueeze_(1)
    sig = (sig*volume*2**31).long()

    def test_scale(self):
        """Scale maps the integer signal into [-1., 1.]."""
        audio_orig = self.sig.clone()
        result = transforms.Scale()(audio_orig)
        # BUG FIX: the original passed print(...) as the assertion message;
        # print() runs unconditionally and returns None, so the diagnostic
        # appeared even on success and the failure message was always None.
        # Pass the formatted string itself so it is shown only on failure.
        self.assertTrue(result.min() >= -1. and result.max() <= 1.,
                        "min: {}, max: {}".format(result.min(), result.max()))

        # With factor equal to the absolute peak, the output must touch +/-1.
        # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin float is the documented replacement.
        maxminmax = np.abs([audio_orig.min(), audio_orig.max()]).max().astype(float)
        result = transforms.Scale(factor=maxminmax)(audio_orig)
        self.assertTrue((result.min() == -1. or result.max() == 1.) and
                        result.min() >= -1. and result.max() <= 1.,
                        "min: {}, max: {}".format(result.min(), result.max()))

    def test_pad_trim(self):
        """PadTrim pads shorter and trims longer signals to exactly max_len."""
        # Pad: target length 20% longer than the source.
        audio_orig = self.sig.clone()
        length_orig = audio_orig.size(0)
        length_new = int(length_orig * 1.2)

        result = transforms.PadTrim(max_len=length_new)(audio_orig)

        # BUG FIX (as in test_scale): message string instead of print(...).
        self.assertTrue(result.size(0) == length_new,
                        "old size: {}, new size: {}".format(audio_orig.size(0), result.size(0)))

        # Trim: target length 20% shorter than the source.
        audio_orig = self.sig.clone()
        length_orig = audio_orig.size(0)
        length_new = int(length_orig * 0.8)

        result = transforms.PadTrim(max_len=length_new)(audio_orig)

        self.assertTrue(result.size(0) == length_new,
                        "old size: {}, new size: {}".format(audio_orig.size(0), result.size(0)))

    def test_downmix_mono(self):
        """DownmixMono reduces a two-channel signal to a single channel."""
        audio_L = self.sig.clone()
        audio_R = self.sig.clone()
        # Rotate the right channel by 10% so the two channels differ.
        R_idx = int(audio_R.size(0) * 0.1)
        audio_R = torch.cat((audio_R[R_idx:], audio_R[:R_idx]))

        audio_Stereo = torch.cat((audio_L, audio_R), dim=1)

        self.assertTrue(audio_Stereo.size(1) == 2)

        result = transforms.DownmixMono()(audio_Stereo)

        self.assertTrue(result.size(1) == 1)

    def test_lc2cl(self):
        """LC2CL transposes (length, channels) to (channels, length)."""
        audio = self.sig.clone()
        result = transforms.LC2CL()(audio)
        self.assertTrue(result.size()[::-1] == audio.size())

    def test_mel(self):
        """MEL yields a 3D spectrogram; BLC2CBL keeps it 3D (axes reordered)."""
        audio = self.sig.clone()
        audio = transforms.Scale()(audio)
        self.assertTrue(len(audio.size()) == 2)
        result = transforms.MEL()(audio)
        self.assertTrue(len(result.size()) == 3)
        result = transforms.BLC2CBL()(result)
        self.assertTrue(len(result.size()) == 3)

    def test_compose(self):
        """Compose applies Scale then PadTrim in sequence."""
        audio_orig = self.sig.clone()
        length_orig = audio_orig.size(0)
        length_new = int(length_orig * 1.2)
        # BUG FIX: np.float removed in NumPy 1.24 — use the builtin float.
        maxminmax = np.abs([audio_orig.min(), audio_orig.max()]).max().astype(float)

        tset = (transforms.Scale(factor=maxminmax),
                transforms.PadTrim(max_len=length_new))
        result = transforms.Compose(tset)(audio_orig)

        # Scale with the exact peak factor must produce a +/-1 extremum,
        # and PadTrim must have stretched the signal to the new length.
        self.assertTrue(np.abs([result.min(), result.max()]).max() == 1.)

        self.assertTrue(result.size(0) == length_new)


# Run the full test suite when this file is executed as a script.
if __name__ == '__main__':
    unittest.main()