# -*- coding: utf-8 -*-
"""
Audio Datasets
==============

**Author**: `Moto Hira <moto@meta.com>`__

``torchaudio`` provides easy access to common, publicly accessible
datasets. Please refer to the official documentation for the list of
available datasets.
"""

# When running this tutorial in Google Colab, install the required packages
# with the following.
# !pip install torchaudio

import torch
import torchaudio

print(torch.__version__)
print(torchaudio.__version__)
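

######################################################################
# As a quick, optional check (a minimal sketch using only the standard
# library), the following cell lists the dataset classes exposed by the
# installed ``torchaudio``; the exact set depends on your version.

import inspect

available_datasets = [
    name for name, obj in vars(torchaudio.datasets).items() if inspect.isclass(obj)
]
print(available_datasets)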

######################################################################
# Preparing data and utility functions (skip this section)
# --------------------------------------------------------
#

# @title Prepare data and utility functions. {display-mode: "form"}
# @markdown
# @markdown You do not need to look into this cell.
# @markdown Just execute once and you are good to go.

# -------------------------------------------------------------------------------
# Preparation of data and helper functions.
# -------------------------------------------------------------------------------
import os

import matplotlib.pyplot as plt
from IPython.display import Audio, display


_SAMPLE_DIR = "_assets"
YESNO_DATASET_PATH = os.path.join(_SAMPLE_DIR, "yes_no")
os.makedirs(YESNO_DATASET_PATH, exist_ok=True)


def plot_specgram(waveform, sample_rate, title="Spectrogram", xlim=None):
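    """Plot a spectrogram for each channel of the given waveform."""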
    waveform = waveform.numpy()

    num_channels, _ = waveform.shape

    figure, axes = plt.subplots(num_channels, 1)
    if num_channels == 1:
        axes = [axes]
    for c in range(num_channels):
        axes[c].specgram(waveform[c], Fs=sample_rate)
        if num_channels > 1:
            axes[c].set_ylabel(f"Channel {c+1}")
        if xlim:
            axes[c].set_xlim(xlim)
    figure.suptitle(title)
    plt.show(block=False)


def play_audio(waveform, sample_rate):
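    """Play the waveform in the notebook (mono or stereo only)."""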
    waveform = waveform.numpy()

    num_channels, _ = waveform.shape
    if num_channels == 1:
        display(Audio(waveform[0], rate=sample_rate))
    elif num_channels == 2:
        display(Audio((waveform[0], waveform[1]), rate=sample_rate))
    else:
        raise ValueError("Waveforms with more than 2 channels are not supported.")


######################################################################
# Here, we show how to use the
# :py:class:`torchaudio.datasets.YESNO` dataset.
#


dataset = torchaudio.datasets.YESNO(YESNO_DATASET_PATH, download=True)

for i in [1, 3, 5]:
    waveform, sample_rate, label = dataset[i]
    plot_specgram(waveform, sample_rate, title=f"Sample {i}: {label}")
    play_audio(waveform, sample_rate)
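

######################################################################
# The datasets in ``torchaudio.datasets`` are map-style
# :py:class:`torch.utils.data.Dataset` objects, so they can be combined with a
# :py:class:`torch.utils.data.DataLoader`. The following is a minimal sketch:
# it uses ``batch_size=1`` because the YESNO recordings differ in length, and
# batching several clips together would require a custom ``collate_fn`` that
# pads the waveforms.

from torch.utils.data import DataLoader

loader = DataLoader(dataset, batch_size=1, shuffle=True)
waveform, sample_rate, labels = next(iter(loader))
print(waveform.shape, sample_rate, labels)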