# -*- coding: utf-8 -*-
"""
Audio I/O
=========

**Author**: `Moto Hira <moto@meta.com>`__

This tutorial shows how to use TorchAudio's basic I/O API to inspect audio data,
load it into PyTorch Tensors, and save PyTorch Tensors to audio files.

.. warning::

   There are multiple changes planned/made to audio I/O in recent releases.
   For the details of these changes, please refer to
   :ref:`Introduction of Dispatcher <dispatcher_migration>`.

"""

import torch
import torchaudio

print(torch.__version__)
print(torchaudio.__version__)

######################################################################
# Preparation
# -----------
#
# First, we import the modules and download the audio assets we use in this tutorial.
#
# .. note::
#    When running this tutorial in Google Colab, install the required packages
#    with the following:
#
#    .. code::
#
#       !pip install boto3

import io
import os
import tarfile
import tempfile

import boto3
import matplotlib.pyplot as plt
import requests
from botocore import UNSIGNED
from botocore.config import Config
from IPython.display import Audio
from torchaudio.utils import download_asset

SAMPLE_GSM = download_asset("tutorial-assets/steam-train-whistle-daniel_simon.gsm")
SAMPLE_WAV = download_asset("tutorial-assets/Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.wav")
SAMPLE_WAV_8000 = download_asset("tutorial-assets/Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042-8000hz.wav")


def _hide_seek(obj):
    # Wrap a file-like object so that only ``read`` is exposed.
    # Hiding ``seek`` makes the object look like a non-seekable stream,
    # which is what the streaming examples below rely on.
    class _wrapper:
        def __init__(self, obj):
            self.obj = obj

        def read(self, n):
            return self.obj.read(n)

    return _wrapper(obj)


######################################################################
# Querying audio metadata
# -----------------------
#
# Function :py:func:`torchaudio.info` fetches audio metadata.
# You can provide a path-like object or file-like object.
#

metadata = torchaudio.info(SAMPLE_WAV)
print(metadata)

######################################################################
# Where
#
# -  ``sample_rate`` is the sampling rate of the audio
# -  ``num_channels`` is the number of channels
# -  ``num_frames`` is the number of frames per channel
# -  ``bits_per_sample`` is the bit depth
# -  ``encoding`` is the sample coding format
#
# These fields are also accessible as attributes of the returned object
# (see the short example after the list below).
#
# ``encoding`` can take on one of the following values:
#
# -  ``"PCM_S"``: Signed integer linear PCM
# -  ``"PCM_U"``: Unsigned integer linear PCM
# -  ``"PCM_F"``: Floating point linear PCM
# -  ``"FLAC"``: Flac, `Free Lossless Audio
#    Codec <https://xiph.org/flac/>`__
# -  ``"ULAW"``: Mu-law,
#    [`wikipedia <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`__]
# -  ``"ALAW"``: A-law
#    [`wikipedia <https://en.wikipedia.org/wiki/A-law_algorithm>`__]
# -  ``"MP3"`` : MP3, MPEG-1 Audio Layer III
# -  ``"VORBIS"``: OGG Vorbis [`xiph.org <https://xiph.org/vorbis/>`__]
# -  ``"AMR_NB"``: Adaptive Multi-Rate
#    [`wikipedia <https://en.wikipedia.org/wiki/Adaptive_Multi-Rate_audio_codec>`__]
# -  ``"AMR_WB"``: Adaptive Multi-Rate Wideband
#    [`wikipedia <https://en.wikipedia.org/wiki/Adaptive_Multi-Rate_Wideband>`__]
# -  ``"OPUS"``: Opus [`opus-codec.org <https://opus-codec.org/>`__]
# -  ``"GSM"``: GSM-FR
#    [`wikipedia <https://en.wikipedia.org/wiki/Full_Rate>`__]
# -  ``"HTK"``: Single channel 16-bit PCM
# -  ``"UNKNOWN"`` None of above
#
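
######################################################################
# The fields above can also be read as attributes of the returned
# ``AudioMetaData`` object. A minimal check on the metadata queried above
# (attribute names follow the documented ``AudioMetaData`` fields):

print(metadata.sample_rate, metadata.num_channels, metadata.num_frames)
print(metadata.bits_per_sample, metadata.encoding)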

######################################################################
# **Note**
#
# -  ``bits_per_sample`` can be ``0`` for formats with compression and/or
#    variable bit rate (such as MP3).
# -  ``num_frames`` can be ``0`` for GSM-FR format.
#

metadata = torchaudio.info(SAMPLE_GSM)
print(metadata)


######################################################################
# Querying file-like object
# -------------------------
#
# :py:func:`torchaudio.info` works on file-like objects.
#

url = "https://download.pytorch.org/torchaudio/tutorial-assets/steam-train-whistle-daniel_simon.wav"
with requests.get(url, stream=True) as response:
    metadata = torchaudio.info(_hide_seek(response.raw))
print(metadata)

######################################################################
# .. note::
#
#    When passing a file-like object, ``info`` does not read
#    all of the underlying data; rather, it reads only a portion
#    of the data from the beginning.
#    Therefore, for a given audio format, it may not be able to retrieve the
#    correct metadata, including the format itself. In such cases, you
#    can pass the ``format`` argument to specify the format of the audio.
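
######################################################################
# For instance, the stream queried above is WAV data, so the format can be
# stated explicitly. This is a minimal illustration reusing the same URL;
# the ``format`` value must match the actual data.

with requests.get(url, stream=True) as response:
    metadata = torchaudio.info(_hide_seek(response.raw), format="wav")
print(metadata)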

######################################################################
# Loading audio data
# ------------------
#
# To load audio data, you can use :py:func:`torchaudio.load`.
#
# This function accepts a path-like object or file-like object as input.
#
# The returned value is a tuple of waveform (``Tensor``) and sample rate
# (``int``).
#
# By default, the resulting tensor object has ``dtype=torch.float32`` and
# its value range is ``[-1.0, 1.0]``.
#
# For the list of supported formats, please refer to `the torchaudio
# documentation <https://pytorch.org/audio>`__.
#

waveform, sample_rate = torchaudio.load(SAMPLE_WAV)
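
######################################################################
# A quick check of the properties described above; the exact shape depends
# on the sample file.

print(waveform.dtype)  # torch.float32 by default
print(waveform.shape)  # (num_channels, num_frames)
print(waveform.min().item(), waveform.max().item())  # values lie within [-1.0, 1.0]
print(sample_rate)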


######################################################################
#
def plot_waveform(waveform, sample_rate):
    # Plot each channel's amplitude against time (in seconds).
    waveform = waveform.numpy()

    num_channels, num_frames = waveform.shape
    time_axis = torch.arange(0, num_frames) / sample_rate

    figure, axes = plt.subplots(num_channels, 1)
    if num_channels == 1:
        axes = [axes]
    for c in range(num_channels):
        axes[c].plot(time_axis, waveform[c], linewidth=1)
        axes[c].grid(True)
        if num_channels > 1:
            axes[c].set_ylabel(f"Channel {c+1}")
    figure.suptitle("waveform")
    plt.show(block=False)


######################################################################
#
plot_waveform(waveform, sample_rate)


######################################################################
#
def plot_specgram(waveform, sample_rate, title="Spectrogram"):
    # Plot a spectrogram for each channel.
    waveform = waveform.numpy()

    num_channels, num_frames = waveform.shape

    figure, axes = plt.subplots(num_channels, 1)
    if num_channels == 1:
        axes = [axes]
    for c in range(num_channels):
        axes[c].specgram(waveform[c], Fs=sample_rate)
        if num_channels > 1:
            axes[c].set_ylabel(f"Channel {c+1}")
    figure.suptitle(title)
    plt.show(block=False)


######################################################################
#
plot_specgram(waveform, sample_rate)


######################################################################
#
Audio(waveform.numpy()[0], rate=sample_rate)

######################################################################
# Loading from file-like object
# -----------------------------
#
# The I/O functions support file-like objects.
# This allows for fetching and decoding audio data from locations
# within and beyond the local file system.
# The following examples illustrate this.
#

######################################################################
#

# Load audio data via an HTTP request
url = "https://download.pytorch.org/torchaudio/tutorial-assets/Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.wav"
with requests.get(url, stream=True) as response:
    waveform, sample_rate = torchaudio.load(_hide_seek(response.raw))
plot_specgram(waveform, sample_rate, title="HTTP datasource")

######################################################################
#

# Load audio from a tar file
tar_path = download_asset("tutorial-assets/VOiCES_devkit.tar.gz")
tar_item = "VOiCES_devkit/source-16k/train/sp0307/Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.wav"
with tarfile.open(tar_path, mode="r") as tarfile_:
    fileobj = tarfile_.extractfile(tar_item)
    waveform, sample_rate = torchaudio.load(fileobj)
plot_specgram(waveform, sample_rate, title="TAR file")

######################################################################
#

# Load audio from S3
bucket = "pytorch-tutorial-assets"
key = "VOiCES_devkit/source-16k/train/sp0307/Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.wav"
client = boto3.client("s3", config=Config(signature_version=UNSIGNED))
response = client.get_object(Bucket=bucket, Key=key)
waveform, sample_rate = torchaudio.load(_hide_seek(response["Body"]))
plot_specgram(waveform, sample_rate, title="From S3")


######################################################################
# Tips on slicing
# ---------------
#
# Providing ``num_frames`` and ``frame_offset`` arguments restricts
# decoding to the corresponding segment of the input.
#
# The same result can be achieved using vanilla Tensor slicing
# (i.e. ``waveform[:, frame_offset:frame_offset+num_frames]``). However,
# providing the ``num_frames`` and ``frame_offset`` arguments is more
# efficient.
#
# This is because the function ends data acquisition and decoding
# once it finishes decoding the requested frames. This is advantageous
# when the audio data are transferred over a network, as the data transfer
# stops as soon as the necessary amount of data has been fetched.
#
# The following example illustrates this.
#

# Illustration of two different decoding methods.
# The first one will fetch all the data and decode them, while
# the second one will stop fetching data once it completes decoding.
# The resulting waveforms are identical.

frame_offset, num_frames = 16000, 16000  # Fetch and decode the 1-2 second segment (the file is 16 kHz)

url = "https://download.pytorch.org/torchaudio/tutorial-assets/Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.wav"
print("Fetching all the data...")
with requests.get(url, stream=True) as response:
    waveform1, sample_rate1 = torchaudio.load(_hide_seek(response.raw))
    waveform1 = waveform1[:, frame_offset : frame_offset + num_frames]
    print(f" - Fetched {response.raw.tell()} bytes")

print("Fetching until the requested frames are available...")
with requests.get(url, stream=True) as response:
    waveform2, sample_rate2 = torchaudio.load(
        _hide_seek(response.raw), frame_offset=frame_offset, num_frames=num_frames
    )
    print(f" - Fetched {response.raw.tell()} bytes")

print("Checking the resulting waveform ... ", end="")
assert (waveform1 == waveform2).all()
print("matched!")

######################################################################
# Saving audio to file
# --------------------
#
# To save audio data in formats interpretable by common applications,
# you can use :py:func:`torchaudio.save`.
#
# This function accepts a path-like object or file-like object.
#
# When passing a file-like object, you also need to provide the ``format``
# argument so that the function knows which format it should use. In the
# case of a path-like object, the function infers the format from the file
# extension. If you are saving to a path without an extension, you need to
# provide the ``format`` argument (a short example follows the default save
# below).
#
# When saving WAV-formatted data, the default encoding for a ``float32`` Tensor
# is 32-bit floating-point PCM. You can provide arguments ``encoding`` and
# ``bits_per_sample`` to change this behavior. For example, to save data
# in 16-bit signed integer PCM, you can do the following.
#
# .. note::
#
#    Saving data in encodings with a lower bit depth reduces the
#    resulting file size, but also the precision.
#

waveform, sample_rate = torchaudio.load(SAMPLE_WAV)


######################################################################
#


def inspect_file(path):
    # Print the file size and the metadata reported by torchaudio.info.
    print("-" * 10)
    print("Source:", path)
    print("-" * 10)
    print(f" - File size: {os.path.getsize(path)} bytes")
    print(f" - {torchaudio.info(path)}")
    print()


######################################################################
#
# Save without any encoding option.
# The function will pick the encoding that fits
# the provided data.
with tempfile.TemporaryDirectory() as tempdir:
    path = f"{tempdir}/save_example_default.wav"
    torchaudio.save(path, waveform, sample_rate)
    inspect_file(path)
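
######################################################################
# As mentioned earlier, when the target path has no extension, the
# ``format`` argument tells :py:func:`torchaudio.save` which container
# format to use. A minimal sketch (the file name below is arbitrary):

with tempfile.TemporaryDirectory() as tempdir:
    path = f"{tempdir}/save_example_no_extension"
    torchaudio.save(path, waveform, sample_rate, format="wav")
    print(f" - File size: {os.path.getsize(path)} bytes")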

######################################################################
#
# Save as 16-bit signed integer linear PCM.
# The resulting file occupies half the storage but loses precision.
with tempfile.TemporaryDirectory() as tempdir:
    path = f"{tempdir}/save_example_PCM_S16.wav"
    torchaudio.save(path, waveform, sample_rate, encoding="PCM_S", bits_per_sample=16)
    inspect_file(path)


######################################################################
# :py:func:`torchaudio.save` can also handle other formats.
# To name a few:
#

formats = [
    "flac",
    # "vorbis",
    # "sph",
    # "amb",
    # "amr-nb",
    # "gsm",
]

######################################################################
#
waveform, sample_rate = torchaudio.load(SAMPLE_WAV_8000)
with tempfile.TemporaryDirectory() as tempdir:
    for format in formats:
        path = f"{tempdir}/save_example.{format}"
        torchaudio.save(path, waveform, sample_rate, format=format)
        inspect_file(path)

######################################################################
# Saving to file-like object
# --------------------------
#
# Similar to the other I/O functions, you can save audio to file-like
# objects. When saving to a file-like object, the ``format`` argument is
# required.
#


waveform, sample_rate = torchaudio.load(SAMPLE_WAV)

# Saving to bytes buffer
buffer_ = io.BytesIO()
torchaudio.save(buffer_, waveform, sample_rate, format="wav")

buffer_.seek(0)
print(buffer_.read(16))
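
######################################################################
# As a quick check, the buffer written above can be read back with
# :py:func:`torchaudio.load`, which also accepts file-like objects
# (``format`` is given explicitly here for clarity).

buffer_.seek(0)
reloaded, reloaded_sample_rate = torchaudio.load(buffer_, format="wav")
print(reloaded.shape, reloaded_sample_rate)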