import os
from pathlib import Path

from torchaudio.datasets import vctk
from torchaudio_unittest.common_utils import get_whitenoise, normalize_wav, save_wav, TempDirMixin, TorchaudioTestCase


# Used to generate a unique transcript for each dummy audio file.
# Entry i corresponds to utterance id i + 1 (VCTK utterance ids start at 001).
_TRANSCRIPT = [
    "Please call Stella",
    "Ask her to bring these things",
    "with her from the store",
    "Six spoons of fresh snow peas, five thick slabs of blue cheese, and maybe a snack for her brother Bob",
    "We also need a small plastic snake and a big toy frog for the kids",
    "She can scoop these things into three red bags, and we will go meet her Wednesday at the train station",
    "When the sunlight strikes raindrops in the air, they act as a prism and form a rainbow",
    "The rainbow is a division of white light into many beautiful colors",
    # Implicit string concatenation instead of a backslash line-continuation,
    # which embedded the next line's leading indentation (a run of spaces)
    # inside the transcript text.
    "These take the shape of a long round arch, with its path high above, "
    "and its two ends apparently beyond the horizon",
    "There is, according to legend, a boiling pot of gold at one end",
]


def get_mock_dataset(root_dir):
    """Build a fake VCTK-Corpus-0.92 tree under *root_dir* and return the expected samples.

    Five speakers (p225-p229) each get ten short white-noise utterances plus
    matching transcript files.  Returns a list of
    (waveform, sample_rate, transcript, speaker_id, utterance_id) tuples in
    the order the dataset is expected to yield them.
    """
    expected = []
    corpus_dir = os.path.join(root_dir, "VCTK-Corpus-0.92")
    os.makedirs(corpus_dir, exist_ok=True)
    sample_rate = 48000
    seed = 0

    for speaker_num in range(225, 230):
        speaker_id = f"p{speaker_num}"
        wav_dir = os.path.join(corpus_dir, "wav48_silence_trimmed", speaker_id)
        txt_dir = os.path.join(corpus_dir, "txt", speaker_id)
        os.makedirs(wav_dir, exist_ok=True)
        os.makedirs(txt_dir, exist_ok=True)

        for utterance_id in range(1, 11):
            stem = f"{speaker_id}_{utterance_id:03d}_mic2"

            # A distinct seed per file makes every waveform unique.
            waveform = get_whitenoise(sample_rate=sample_rate, duration=0.01, n_channels=1, dtype="float32", seed=seed)
            save_wav(os.path.join(wav_dir, stem + ".wav"), waveform, sample_rate)

            # Transcript filenames drop the trailing "_mic2" from the stem.
            transcript = _TRANSCRIPT[utterance_id - 1]
            with open(os.path.join(txt_dir, stem[:-5] + ".txt"), "w") as f:
                f.write(transcript)

            expected.append((normalize_wav(waveform), sample_rate, transcript, speaker_id, utterance_id))
            seed += 1
    return expected


class TestVCTK(TempDirMixin, TorchaudioTestCase):
    """Checks that VCTK_092 yields exactly the mocked on-disk samples."""

    root_dir = None
    samples = []

    @classmethod
    def setUpClass(cls):
        # Build the fake corpus once; every test reads from the same tree.
        cls.root_dir = cls.get_base_temp_dir()
        cls.samples = get_mock_dataset(cls.root_dir)

    def _test_vctk(self, dataset):
        # Walk the dataset and compare each yielded sample, field by field,
        # against the mocked reference in yield order.
        n_checked = 0
        for idx, (waveform, sample_rate, transcript, speaker_id, utterance_id) in enumerate(dataset):
            expected = self.samples[idx]
            self.assertEqual(waveform, expected[0], atol=5e-5, rtol=1e-8)
            assert sample_rate == expected[1]
            assert transcript == expected[2]
            assert speaker_id == expected[3]
            assert int(utterance_id) == expected[4]
            n_checked += 1
        # The dataset must yield every mocked sample — no more, no fewer.
        assert n_checked == len(self.samples)

    def test_vctk_str(self):
        # Dataset root passed as a plain string.
        self._test_vctk(vctk.VCTK_092(self.root_dir, audio_ext=".wav"))

    def test_vctk_path(self):
        # Dataset root passed as a pathlib.Path.
        self._test_vctk(vctk.VCTK_092(Path(self.root_dir), audio_ext=".wav"))