from __future__ import absolute_import, division, print_function, unicode_literals

import math

import torch

__all__ = [
    "istft",
    "spectrogram",
    "amplitude_to_DB",
    "create_fb_matrix",
    "create_dct",
    "mu_law_encoding",
    "mu_law_decoding",
    "complex_norm",
    "angle",
    "magphase",
    "phase_vocoder",
    "lfilter",
    "lowpass_biquad",
    "highpass_biquad",
    "equalizer_biquad",
    "biquad",
    "mask_along_axis",
    "mask_along_axis_iid",
    "compute_deltas",
    "detect_pitch_frequency",
]


# TODO: remove this once https://github.com/pytorch/pytorch/issues/21478 gets solved
@torch.jit.ignore
def _stft(
    waveform,
    n_fft,
    hop_length,
    win_length,
    window,
    center,
    pad_mode,
    normalized,
    onesided,
):
    # type: (Tensor, int, Optional[int], Optional[int], Optional[Tensor], bool, str, bool, bool) -> Tensor
    return torch.stft(
        waveform,
        n_fft,
        hop_length,
        win_length,
        window,
        center,
        pad_mode,
        normalized,
        onesided,
    )


def istft(
    stft_matrix,  # type: Tensor
    n_fft,  # type: int
    hop_length=None,  # type: Optional[int]
    win_length=None,  # type: Optional[int]
    window=None,  # type: Optional[Tensor]
    center=True,  # type: bool
    pad_mode="reflect",  # type: str
    normalized=False,  # type: bool
    onesided=True,  # type: bool
    length=None,  # type: Optional[int]
):
    # type: (...) -> Tensor
    r"""Inverse short time Fourier Transform. This is expected to be the inverse of ``torch.stft``.
    It has the same parameters (plus an additional optional parameter of ``length``) and it should return the
    least squares estimation of the original signal. The algorithm will check whether the NOLA condition
    (nonzero overlap add) is satisfied.

    The ``window`` and ``center`` parameters must be chosen so that the envelope
    created by the summation of all the windows is never zero at any point in time. Specifically,
    :math:`\sum_{t=-\infty}^{\infty} w^2[n-t\times hop\_length] \neq 0`.

    Since stft discards elements at the end of the signal if they do not fit in a frame,
    istft may return a shorter signal than the original signal (this can occur if ``center`` is ``False``,
    since the signal is not padded).

    If ``center`` is True, then there will be padding, e.g. 'constant', 'reflect', etc. Left padding
    can be trimmed off exactly because it can be calculated, but right padding cannot be calculated
    without additional information.

    Example: Suppose the last window is:
    [17, 18, 0, 0, 0] vs [18, 0, 0, 0, 0]

    The ``n_frame``, ``hop_length``, and ``win_length`` are all the same, which prevents the calculation of right padding.
    These additional values could be zeros or a reflection of the signal, so providing ``length``
    could be useful. If ``length`` is ``None``, then padding will be aggressively removed
    (some loss of signal).

    [1] D. W. Griffin and J. S. Lim, "Signal estimation from modified short-time Fourier transform,"
    IEEE Trans. ASSP, vol.32, no.2, pp.236-243, Apr. 1984.

    Args:
        stft_matrix (torch.Tensor): Output of stft where each row of a channel is a frequency and each
            column is a window. It has a size of (..., fft_size, n_frame, 2).
        n_fft (int): Size of Fourier transform
        hop_length (Optional[int]): The distance between neighboring sliding window frames.
            (Default: ``win_length // 4``)
        win_length (Optional[int]): The size of window frame and STFT filter. (Default: ``n_fft``)
        window (Optional[torch.Tensor]): The optional window function.
            (Default: ``torch.ones(win_length)``)
        center (bool): Whether ``input`` was padded on both sides so
            that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`.
            (Default: ``True``)
        pad_mode (str): Controls the padding method used when ``center`` is True. (Default:
            ``'reflect'``)
        normalized (bool): Whether the STFT was normalized. (Default: ``False``)
        onesided (bool): Whether the STFT is onesided. (Default: ``True``)
        length (Optional[int]): The amount to trim the signal by (i.e. the
            original signal length). (Default: whole signal)

    Returns:
        torch.Tensor: Least squares estimation of the original signal of size (..., signal_length)
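    Example
        >>> # illustrative round-trip sketch; the 16 kHz length and hann window are arbitrary choices
        >>> waveform = torch.rand(1, 16000) * 2 - 1
        >>> n_fft = 400
        >>> window = torch.hann_window(n_fft)
        >>> stft_matrix = torch.stft(waveform, n_fft, window=window)
        >>> reconstructed = istft(stft_matrix, n_fft, window=window, length=waveform.size(-1))
        >>> # reconstructed matches waveform up to numerical error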
    """
    stft_matrix_dim = stft_matrix.dim()
    assert 3 <= stft_matrix_dim, "Incorrect stft dimension: %d" % (stft_matrix_dim)
    assert stft_matrix.nelement() > 0

    if stft_matrix_dim == 3:
        # add a channel dimension
        stft_matrix = stft_matrix.unsqueeze(0)

    # pack batch
    shape = stft_matrix.size()
    stft_matrix = stft_matrix.reshape(-1, *shape[-3:])

    dtype = stft_matrix.dtype
    device = stft_matrix.device
    fft_size = stft_matrix.size(1)
    assert (onesided and n_fft // 2 + 1 == fft_size) or (
        not onesided and n_fft == fft_size
    ), (
        "one_sided implies that n_fft // 2 + 1 == fft_size and not one_sided implies n_fft == fft_size. "
        + "Given values were onesided: %s, n_fft: %d, fft_size: %d"
        % ("True" if onesided else "False", n_fft, fft_size)
    )

    # use stft defaults for Optionals
    if win_length is None:
        win_length = n_fft

    if hop_length is None:
        hop_length = int(win_length // 4)

    # There must be overlap
    assert 0 < hop_length <= win_length
    assert 0 < win_length <= n_fft

    if window is None:
        window = torch.ones(win_length, requires_grad=False, device=device, dtype=dtype)

    assert window.dim() == 1 and window.size(0) == win_length

    if win_length != n_fft:
        # center window with pad left and right zeros
        left = (n_fft - win_length) // 2
        window = torch.nn.functional.pad(window, (left, n_fft - win_length - left))
        assert window.size(0) == n_fft
    # win_length and n_fft are synonymous from here on

    stft_matrix = stft_matrix.transpose(1, 2)  # size (channel, n_frame, fft_size, 2)
    stft_matrix = torch.irfft(
        stft_matrix, 1, normalized, onesided, signal_sizes=(n_fft,)
    )  # size (channel, n_frame, n_fft)

    assert stft_matrix.size(2) == n_fft
    n_frame = stft_matrix.size(1)

    ytmp = stft_matrix * window.view(1, 1, n_fft)  # size (channel, n_frame, n_fft)
    # each column of a channel is a frame which needs to be overlap added at the right place
    ytmp = ytmp.transpose(1, 2)  # size (channel, n_fft, n_frame)

    eye = torch.eye(n_fft, requires_grad=False, device=device, dtype=dtype).unsqueeze(
        1
    )  # size (n_fft, 1, n_fft)

    # this does overlap add where the frames of ytmp are added such that the i'th frame of
    # ytmp is added starting at i*hop_length in the output
    y = torch.nn.functional.conv_transpose1d(
        ytmp, eye, stride=hop_length, padding=0
    )  # size (channel, 1, expected_signal_len)

    # do the same for the window function
    window_sq = (
        window.pow(2).view(n_fft, 1).repeat((1, n_frame)).unsqueeze(0)
    )  # size (1, n_fft, n_frame)
    window_envelop = torch.nn.functional.conv_transpose1d(
        window_sq, eye, stride=hop_length, padding=0
    )  # size (1, 1, expected_signal_len)

    expected_signal_len = n_fft + hop_length * (n_frame - 1)
    assert y.size(2) == expected_signal_len
    assert window_envelop.size(2) == expected_signal_len

    half_n_fft = n_fft // 2
    # we need to trim the front padding away if center
    start = half_n_fft if center else 0
    end = -half_n_fft if length is None else start + length

    y = y[:, :, start:end]
    window_envelop = window_envelop[:, :, start:end]

    # check NOLA non-zero overlap condition
    window_envelop_lowest = window_envelop.abs().min()
    assert window_envelop_lowest > 1e-11, "window overlap add min: %f" % (
        window_envelop_lowest
    )

    y = (y / window_envelop).squeeze(1)  # size (channel, expected_signal_len)

    # unpack batch
    y = y.reshape(shape[:-3] + y.shape[-1:])

    if stft_matrix_dim == 3:  # remove the channel dimension
        y = y.squeeze(0)

    return y


def spectrogram(
    waveform, pad, window, n_fft, hop_length, win_length, power, normalized
):
    # type: (Tensor, int, Tensor, int, int, int, Optional[int], bool) -> Tensor
    r"""
    spectrogram(waveform, pad, window, n_fft, hop_length, win_length, power, normalized)

    Create a spectrogram or a batch of spectrograms from a raw audio signal.
    The spectrogram can be either magnitude-only or complex.

    Args:
        waveform (torch.Tensor): Tensor of audio of dimension (..., channel, time)
        pad (int): Two sided padding of signal
        window (torch.Tensor): Window tensor that is applied/multiplied to each frame/window
        n_fft (int): Size of FFT
        hop_length (int): Length of hop between STFT windows
        win_length (int): Window size
        power (int): Exponent for the magnitude spectrogram,
            (must be > 0) e.g., 1 for energy, 2 for power, etc.
            If None, then the complex spectrum is returned instead.
        normalized (bool): Whether to normalize by magnitude after stft

    Returns:
        torch.Tensor: Dimension (..., channel, freq, time), where channel
        is unchanged, freq is ``n_fft // 2 + 1`` and ``n_fft`` is the number of
        Fourier bins, and time is the number of window hops (n_frame).
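    Example
        >>> # illustrative sketch; the hann window and hop length below are arbitrary choices
        >>> waveform = torch.randn(1, 16000)
        >>> n_fft = 400
        >>> window = torch.hann_window(n_fft)
        >>> power_spec = spectrogram(waveform, 0, window, n_fft, n_fft // 2, n_fft, 2, True)
        >>> power_spec.shape  # (channel, n_fft // 2 + 1, n_frame)
        torch.Size([1, 201, 81])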
    """

    if pad > 0:
        # TODO add "with torch.no_grad():" back when JIT supports it
        waveform = torch.nn.functional.pad(waveform, (pad, pad), "constant")

    # pack batch
    shape = waveform.size()
    waveform = waveform.reshape(-1, shape[-1])

    # default values are consistent with librosa.core.spectrum._spectrogram
    spec_f = _stft(
        waveform, n_fft, hop_length, win_length, window, True, "reflect", False, True
    )

    # unpack batch
    spec_f = spec_f.reshape(shape[:-1] + spec_f.shape[-3:])

    if normalized:
        spec_f /= window.pow(2).sum().sqrt()
    if power is not None:
        spec_f = spec_f.pow(power).sum(-1)  # get power of "complex" tensor

    return spec_f


def amplitude_to_DB(x, multiplier, amin, db_multiplier, top_db=None):
    # type: (Tensor, float, float, float, Optional[float]) -> Tensor
    r"""
    amplitude_to_DB(x, multiplier, amin, db_multiplier, top_db=None)

    Turns a tensor from the power/amplitude scale to the decibel scale.

    This output depends on the maximum value in the input tensor, and so
    may return different values for an audio clip split into snippets vs. a
    full clip.

    Args:
        x (torch.Tensor): Input tensor before being converted to decibel scale
        multiplier (float): Use 10. for power and 20. for amplitude
        amin (float): Number to clamp ``x``
        db_multiplier (float): Log10(max(reference value and amin))
        top_db (Optional[float]): Minimum negative cut-off in decibels. A reasonable number
            is 80. (Default: ``None``)

    Returns:
        torch.Tensor: Output tensor in decibel scale
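    Example
        >>> # sketch; db_multiplier is log10(max(amin, ref)) with a hypothetical reference value of 1.0
        >>> power_spec = torch.rand(1, 201, 81)  # stand-in for a power spectrogram
        >>> spec_db = amplitude_to_DB(power_spec, 10.0, 1e-10, 0.0, top_db=80.0)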
    """
    x_db = multiplier * torch.log10(torch.clamp(x, min=amin))
    x_db -= multiplier * db_multiplier

    if top_db is not None:
        new_x_db_max = torch.tensor(
            float(x_db.max()) - top_db, dtype=x_db.dtype, device=x_db.device
        )
        x_db = torch.max(x_db, new_x_db_max)

    return x_db


def create_fb_matrix(n_freqs, f_min, f_max, n_mels, sample_rate):
    # type: (int, float, float, int, int) -> Tensor
    r"""
    create_fb_matrix(n_freqs, f_min, f_max, n_mels, sample_rate)

    Create a frequency bin conversion matrix.

    Args:
        n_freqs (int): Number of frequencies to highlight/apply
        f_min (float): Minimum frequency (Hz)
        f_max (float): Maximum frequency (Hz)
        n_mels (int): Number of mel filterbanks
        sample_rate (int): Sample rate of the audio waveform

    Returns:
        torch.Tensor: Triangular filter banks (fb matrix) of size (``n_freqs``, ``n_mels``)
        meaning the number of frequencies to highlight/apply by the number of filterbanks.
        Each column is a filterbank so that assuming there is a matrix A of
        size (..., ``n_freqs``), the applied result would be
        ``A * create_fb_matrix(A.size(-1), ...)``.
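    Example
        >>> # sketch of applying the filterbank to a power spectrogram, as described above
        >>> n_freqs = 201  # e.g. n_fft // 2 + 1 with n_fft = 400
        >>> fb = create_fb_matrix(n_freqs, 0., 8000., 40, 16000)
        >>> power_spec = torch.rand(1, n_freqs, 81)  # hypothetical (channel, freq, time)
        >>> mel_spec = torch.matmul(power_spec.transpose(-2, -1), fb).transpose(-2, -1)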
    """
    # freq bins
    # Equivalent filterbank construction by Librosa
    all_freqs = torch.linspace(0, sample_rate // 2, n_freqs)
    i_freqs = all_freqs.ge(f_min) & all_freqs.le(f_max)
    freqs = all_freqs[i_freqs]

    # calculate mel freq bins
    # hertz to mel(f) is 2595. * math.log10(1. + (f / 700.))
    m_min = 2595.0 * math.log10(1.0 + (f_min / 700.0))
    m_max = 2595.0 * math.log10(1.0 + (f_max / 700.0))
    m_pts = torch.linspace(m_min, m_max, n_mels + 2)
    # mel to hertz(mel) is 700. * (10**(mel / 2595.) - 1.)
    f_pts = 700.0 * (10 ** (m_pts / 2595.0) - 1.0)
    # calculate the difference between each mel point and each stft freq point in hertz
    f_diff = f_pts[1:] - f_pts[:-1]  # (n_mels + 1)
    slopes = f_pts.unsqueeze(0) - all_freqs.unsqueeze(1)  # (n_freqs, n_mels + 2)
    # create overlapping triangles
    zero = torch.zeros(1)
    down_slopes = (-1.0 * slopes[:, :-2]) / f_diff[:-1]  # (n_freqs, n_mels)
    up_slopes = slopes[:, 2:] / f_diff[1:]  # (n_freqs, n_mels)
    fb = torch.max(zero, torch.min(down_slopes, up_slopes))
    return fb


def create_dct(n_mfcc, n_mels, norm):
    # type: (int, int, Optional[str]) -> Tensor
    r"""
    create_dct(n_mfcc, n_mels, norm)

    Creates a DCT transformation matrix with shape (``n_mels``, ``n_mfcc``),
    normalized depending on norm.

    Args:
        n_mfcc (int): Number of mfc coefficients to retain
        n_mels (int): Number of mel filterbanks
        norm (Optional[str]): Norm to use (either 'ortho' or None)

    Returns:
        torch.Tensor: The transformation matrix, to be right-multiplied to
        row-wise data of size (``n_mels``, ``n_mfcc``).
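    Example
        >>> # sketch; 13 coefficients from 40 mel bands is a common but arbitrary choice
        >>> dct_mat = create_dct(13, 40, 'ortho')  # size (n_mels, n_mfcc) = (40, 13)
        >>> mel_spec = torch.rand(1, 40, 81)  # hypothetical (channel, n_mels, time)
        >>> mfcc = torch.matmul(mel_spec.transpose(-2, -1), dct_mat).transpose(-2, -1)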
    """
    # http://en.wikipedia.org/wiki/Discrete_cosine_transform#DCT-II
    n = torch.arange(float(n_mels))
    k = torch.arange(float(n_mfcc)).unsqueeze(1)
    dct = torch.cos(math.pi / float(n_mels) * (n + 0.5) * k)  # size (n_mfcc, n_mels)
    if norm is None:
        dct *= 2.0
    else:
        assert norm == "ortho"
        dct[0] *= 1.0 / math.sqrt(2.0)
        dct *= math.sqrt(2.0 / float(n_mels))
    return dct.t()


def mu_law_encoding(x, quantization_channels):
    # type: (Tensor, int) -> Tensor
    r"""
    mu_law_encoding(x, quantization_channels)

    Encode signal based on mu-law companding.  For more info see the
    `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_

    This algorithm assumes the signal has been scaled to between -1 and 1 and
    returns a signal encoded with values from 0 to quantization_channels - 1.

    Args:
        x (torch.Tensor): Input tensor
        quantization_channels (int): Number of channels

    Returns:
        torch.Tensor: Input after mu-law encoding
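    Example
        >>> # round-trip sketch with the common choice of 256 quantization channels
        >>> x = torch.linspace(-1.0, 1.0, steps=5)
        >>> x_mu = mu_law_encoding(x, 256)  # integer codes in [0, 255]
        >>> x_hat = mu_law_decoding(x_mu, 256)  # approximately recovers x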
    """
    mu = quantization_channels - 1.0
    if not x.is_floating_point():
        x = x.to(torch.float)
    mu = torch.tensor(mu, dtype=x.dtype)
    x_mu = torch.sign(x) * torch.log1p(mu * torch.abs(x)) / torch.log1p(mu)
    x_mu = ((x_mu + 1) / 2 * mu + 0.5).to(torch.int64)
    return x_mu


def mu_law_decoding(x_mu, quantization_channels):
    # type: (Tensor, int) -> Tensor
    r"""
    mu_law_decoding(x_mu, quantization_channels)

    Decode mu-law encoded signal.  For more info see the
    `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_

    This expects an input with values between 0 and quantization_channels - 1
    and returns a signal scaled between -1 and 1.

    Args:
        x_mu (torch.Tensor): Input tensor
        quantization_channels (int): Number of channels

    Returns:
        torch.Tensor: Input after mu-law decoding
    """
    mu = quantization_channels - 1.0
    if not x_mu.is_floating_point():
        x_mu = x_mu.to(torch.float)
    mu = torch.tensor(mu, dtype=x_mu.dtype)
    x = (x_mu / mu) * 2 - 1.0
    x = torch.sign(x) * (torch.exp(torch.abs(x) * torch.log1p(mu)) - 1.0) / mu
    return x


def complex_norm(complex_tensor, power=1.0):
    # type: (Tensor, float) -> Tensor
    r"""Compute the norm of complex tensor input.

    Args:
        complex_tensor (torch.Tensor): Tensor shape of `(..., complex=2)`
        power (float): Power of the norm. (Default: `1.0`).

    Returns:
        torch.Tensor: Power of the normed input tensor. Shape of `(..., )`
    """
    if power == 1.0:
        return torch.norm(complex_tensor, 2, -1)
    return torch.norm(complex_tensor, 2, -1).pow(power)


def angle(complex_tensor):
    # type: (Tensor) -> Tensor
    r"""Compute the angle of complex tensor input.

    Args:
        complex_tensor (torch.Tensor): Tensor shape of `(..., complex=2)`

    Returns:
        torch.Tensor: Angle of a complex tensor. Shape of `(..., )`
    """
    return torch.atan2(complex_tensor[..., 1], complex_tensor[..., 0])


def magphase(complex_tensor, power=1.0):
    # type: (Tensor, float) -> Tuple[Tensor, Tensor]
    r"""Separate a complex-valued spectrogram with shape `(..., 2)` into its magnitude and phase.

    Args:
        complex_tensor (torch.Tensor): Tensor shape of `(..., complex=2)`
        power (float): Power of the norm. (Default: `1.0`)

    Returns:
        Tuple[torch.Tensor, torch.Tensor]: The magnitude and phase of the complex tensor
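    Example
        >>> # sketch on a random tensor standing in for a complex spectrogram
        >>> complex_tensor = torch.randn(1, 201, 81, 2)
        >>> mag, phase = magphase(complex_tensor, power=1.0)  # each of shape (1, 201, 81)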
    """
    mag = complex_norm(complex_tensor, power)
    phase = angle(complex_tensor)
    return mag, phase


def phase_vocoder(complex_specgrams, rate, phase_advance):
    # type: (Tensor, float, Tensor) -> Tensor
    r"""Given an STFT tensor, speed up in time without modifying pitch by a
    factor of ``rate``.
    Args:
        complex_specgrams (torch.Tensor): Dimension of `(channel, freq, time, complex=2)`
        rate (float): Speed-up factor
        phase_advance (torch.Tensor): Expected phase advance in each bin. Dimension
            of (freq, 1)
    Returns:
        complex_specgrams_stretch (torch.Tensor): Dimension of `(channel,
        freq, ceil(time/rate), complex=2)`
    Example
        >>> freq, hop_length = 1025, 512
        >>> # (channel, freq, time, complex=2)
        >>> complex_specgrams = torch.randn(2, freq, 300, 2)
        >>> rate = 1.3 # Speed up by 30%
        >>> phase_advance = torch.linspace(
        >>>    0, math.pi * hop_length, freq)[..., None]
        >>> x = phase_vocoder(complex_specgrams, rate, phase_advance)
        >>> x.shape # with 231 == ceil(300 / 1.3)
        torch.Size([2, 1025, 231, 2])
    """

    time_steps = torch.arange(0,
                              complex_specgrams.size(-2),
                              rate,
                              device=complex_specgrams.device,
                              dtype=complex_specgrams.dtype)

    alphas = time_steps % 1.0
    phase_0 = angle(complex_specgrams[..., :1, :])

    # Time Padding
    complex_specgrams = torch.nn.functional.pad(complex_specgrams, [0, 0, 0, 2])

    # (new_bins, freq, 2)
    complex_specgrams_0 = complex_specgrams.index_select(-2, time_steps.long())
    complex_specgrams_1 = complex_specgrams.index_select(-2, (time_steps + 1).long())

    angle_0 = angle(complex_specgrams_0)
    angle_1 = angle(complex_specgrams_1)

    norm_0 = torch.norm(complex_specgrams_0, p=2, dim=-1)
    norm_1 = torch.norm(complex_specgrams_1, p=2, dim=-1)

    phase = angle_1 - angle_0 - phase_advance
    phase = phase - 2 * math.pi * torch.round(phase / (2 * math.pi))

    # Compute Phase Accum
    phase = phase + phase_advance
    phase = torch.cat([phase_0, phase[..., :-1]], dim=-1)
    phase_acc = torch.cumsum(phase, -1)

    mag = alphas * norm_1 + (1 - alphas) * norm_0

    real_stretch = mag * torch.cos(phase_acc)
    imag_stretch = mag * torch.sin(phase_acc)

    complex_specgrams_stretch = torch.stack([real_stretch, imag_stretch], dim=-1)

    return complex_specgrams_stretch


def lfilter(waveform, a_coeffs, b_coeffs):
    # type: (Tensor, Tensor, Tensor) -> Tensor
    r"""
    Performs an IIR filter by evaluating difference equation.

    Args:
        waveform (torch.Tensor): audio waveform of dimension of `(..., time)`.  Must be normalized to -1 to 1.
        a_coeffs (torch.Tensor): denominator coefficients of difference equation of dimension of `(n_order + 1)`.
                                Lower delays coefficients are first, e.g. `[a0, a1, a2, ...]`.
                                Must be same size as b_coeffs (pad with 0's as necessary).
        b_coeffs (torch.Tensor): numerator coefficients of difference equation of dimension of `(n_order + 1)`.
                                 Lower delays coefficients are first, e.g. `[b0, b1, b2, ...]`.
                                 Must be same size as a_coeffs (pad with 0's as necessary).

    Returns:
        output_waveform (torch.Tensor): Dimension of `(..., time)`.  Output will be clipped to -1 to 1.
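    Example
        >>> # sketch of a simple two-tap averaging filter; the coefficients are hypothetical
        >>> waveform = torch.rand(1, 16000) * 2 - 1  # normalized to -1 to 1
        >>> b_coeffs = torch.tensor([0.5, 0.5])  # y[n] = 0.5 * x[n] + 0.5 * x[n-1]
        >>> a_coeffs = torch.tensor([1.0, 0.0])  # padded with a 0 to match b_coeffs
        >>> filtered = lfilter(waveform, a_coeffs, b_coeffs)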

    """

    dim = waveform.dim()

    # pack batch
    shape = waveform.size()
    waveform = waveform.reshape(-1, shape[-1])

    assert a_coeffs.size(0) == b_coeffs.size(0)
    assert len(waveform.size()) == 2
    assert waveform.device == a_coeffs.device
    assert b_coeffs.device == a_coeffs.device

    device = waveform.device
    dtype = waveform.dtype
    n_channel, n_sample = waveform.size()
    n_order = a_coeffs.size(0)
    assert n_order > 0

    # Pad the input and create output
    padded_waveform = torch.zeros(n_channel, n_sample + n_order - 1, dtype=dtype, device=device)
    padded_waveform[:, (n_order - 1):] = waveform
    padded_output_waveform = torch.zeros(n_channel, n_sample + n_order - 1, dtype=dtype, device=device)

    # Set up the coefficients matrix
    # Flip order, repeat, and transpose
    a_coeffs_filled = a_coeffs.flip(0).repeat(n_channel, 1).t()
    b_coeffs_filled = b_coeffs.flip(0).repeat(n_channel, 1).t()

    # Set up a few other utilities
    a0_repeated = torch.ones(n_channel, dtype=dtype, device=device) * a_coeffs[0]
    ones = torch.ones(n_channel, n_sample, dtype=dtype, device=device)

    for i_sample in range(n_sample):

        o0 = torch.zeros(n_channel, dtype=dtype, device=device)

        windowed_input_signal = padded_waveform[:, i_sample:(i_sample + n_order)]
        windowed_output_signal = padded_output_waveform[:, i_sample:(i_sample + n_order)]

        o0.add_(torch.diag(torch.mm(windowed_input_signal, b_coeffs_filled)))
        o0.sub_(torch.diag(torch.mm(windowed_output_signal, a_coeffs_filled)))

        o0.div_(a0_repeated)

        padded_output_waveform[:, i_sample + n_order - 1] = o0

    output = torch.min(
        ones, torch.max(ones * -1, padded_output_waveform[:, (n_order - 1):])
    )

    # unpack batch
    output = output.reshape(shape[:-1] + output.shape[-1:])

    return output


def biquad(waveform, b0, b1, b2, a0, a1, a2):
    # type: (Tensor, float, float, float, float, float, float) -> Tensor
    r"""Performs a biquad filter of input tensor.  Initial conditions set to 0.
    https://en.wikipedia.org/wiki/Digital_biquad_filter

    Args:
        waveform (torch.Tensor): audio waveform of dimension of `(channel, time)`
        b0 (float): numerator coefficient of current input, x[n]
        b1 (float): numerator coefficient of input one time step ago x[n-1]
        b2 (float): numerator coefficient of input two time steps ago x[n-2]
        a0 (float): denominator coefficient of current output y[n], typically 1
        a1 (float): denominator coefficient of output one time step ago y[n-1]
        a2 (float): denominator coefficient of output two time steps ago y[n-2]

    Returns:
        output_waveform (torch.Tensor): Dimension of `(channel, time)`
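    Example
        >>> # sketch; these hypothetical coefficients implement the identity filter y[n] = x[n]
        >>> waveform = torch.rand(1, 16000) * 2 - 1
        >>> output = biquad(waveform, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0)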
    """

    device = waveform.device
    dtype = waveform.dtype

    output_waveform = lfilter(
        waveform,
        torch.tensor([a0, a1, a2], dtype=dtype, device=device),
        torch.tensor([b0, b1, b2], dtype=dtype, device=device)
    )
    return output_waveform


def _dB2Linear(x):
    # type: (float) -> float
    return math.exp(x * math.log(10) / 20.0)


def highpass_biquad(waveform, sample_rate, cutoff_freq, Q=0.707):
    # type: (Tensor, int, float, float) -> Tensor
    r"""Designs biquad highpass filter and performs filtering.  Similar to SoX implementation.

    Args:
        waveform (torch.Tensor): audio waveform of dimension of `(channel, time)`
        sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz)
        cutoff_freq (float): filter cutoff frequency
        Q (float): Quality factor, https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``)

    Returns:
        output_waveform (torch.Tensor): Dimension of `(channel, time)`
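    Example
        >>> # illustrative; the 1 kHz cutoff and 44.1 kHz sample rate are arbitrary choices
        >>> waveform = torch.rand(2, 44100) * 2 - 1
        >>> filtered = highpass_biquad(waveform, 44100, 1000.)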
    """

    GAIN = 1.
    w0 = 2 * math.pi * cutoff_freq / sample_rate
    A = math.exp(GAIN / 40.0 * math.log(10))
    alpha = math.sin(w0) / 2. / Q
    mult = _dB2Linear(max(GAIN, 0))

    b0 = (1 + math.cos(w0)) / 2
    b1 = -1 - math.cos(w0)
    b2 = b0
    a0 = 1 + alpha
    a1 = -2 * math.cos(w0)
    a2 = 1 - alpha
    return biquad(waveform, b0, b1, b2, a0, a1, a2)


def lowpass_biquad(waveform, sample_rate, cutoff_freq, Q=0.707):
    # type: (Tensor, int, float, float) -> Tensor
    r"""Designs biquad lowpass filter and performs filtering.  Similar to SoX implementation.

    Args:
        waveform (torch.Tensor): audio waveform of dimension of `(channel, time)`
        sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz)
        cutoff_freq (float): filter cutoff frequency
        Q (float): Quality factor, https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``)

    Returns:
        output_waveform (torch.Tensor): Dimension of `(channel, time)`
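    Example
        >>> # illustrative; the 3 kHz cutoff and 44.1 kHz sample rate are arbitrary choices
        >>> waveform = torch.rand(2, 44100) * 2 - 1
        >>> filtered = lowpass_biquad(waveform, 44100, 3000.)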
    """

    GAIN = 1.
    w0 = 2 * math.pi * cutoff_freq / sample_rate
    A = math.exp(GAIN / 40.0 * math.log(10))
    alpha = math.sin(w0) / 2 / Q
    mult = _dB2Linear(max(GAIN, 0))

    b0 = (1 - math.cos(w0)) / 2
    b1 = 1 - math.cos(w0)
    b2 = b0
    a0 = 1 + alpha
    a1 = -2 * math.cos(w0)
    a2 = 1 - alpha
    return biquad(waveform, b0, b1, b2, a0, a1, a2)


def equalizer_biquad(waveform, sample_rate, center_freq, gain, Q=0.707):
    # type: (Tensor, int, float, float, float) -> Tensor
    r"""Designs biquad peaking equalizer filter and performs filtering.  Similar to SoX implementation.

    Args:
        waveform (torch.Tensor): audio waveform of dimension of `(channel, time)`
        sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz)
        center_freq (float): filter's central frequency
        gain (float): desired gain at the boost (or attenuation) in dB
        Q (float): Quality factor, https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``)

    Returns:
        output_waveform (torch.Tensor): Dimension of `(channel, time)`
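    Example
        >>> # illustrative; boost a hypothetical 1 kHz band by 3 dB at a 44.1 kHz sample rate
        >>> waveform = torch.rand(2, 44100) * 2 - 1
        >>> boosted = equalizer_biquad(waveform, 44100, 1000., 3.)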
    """
    w0 = 2 * math.pi * center_freq / sample_rate
    A = math.exp(gain / 40.0 * math.log(10))
    alpha = math.sin(w0) / 2 / Q

    b0 = 1 + alpha * A
    b1 = -2 * math.cos(w0)
    b2 = 1 - alpha * A
    a0 = 1 + alpha / A
    a1 = -2 * math.cos(w0)
    a2 = 1 - alpha / A
    return biquad(waveform, b0, b1, b2, a0, a1, a2)


def mask_along_axis_iid(specgrams, mask_param, mask_value, axis):
    # type: (Tensor, int, float, int) -> Tensor
    r"""
    Apply a mask along ``axis``. Mask will be applied from indices ``[v_0, v_0 + v)``, where
    ``v`` is sampled from ``uniform(0, mask_param)``, and ``v_0`` from ``uniform(0, max_v - v)``.
    The mask interval is sampled independently for every example and channel.

    Args:
        specgrams (Tensor): Real spectrograms (batch, channel, freq, time)
        mask_param (int): Number of columns to be masked will be uniformly sampled from [0, mask_param]
        mask_value (float): Value to assign to the masked columns
        axis (int): Axis to apply masking on (2 -> frequency, 3 -> time)

    Returns:
        torch.Tensor: Masked spectrograms of dimensions (batch, channel, freq, time)
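    Example
        >>> # sketch of SpecAugment-style masking on a hypothetical batch; masks differ per example
        >>> specgrams = torch.randn(4, 2, 201, 81)  # (batch, channel, freq, time)
        >>> masked = mask_along_axis_iid(specgrams, 20, 0., 3)  # mask along time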
    """

    if axis != 2 and axis != 3:
        raise ValueError('Only Frequency and Time masking are supported')

    # sample the mask size and position on the same device as the input
    value = torch.rand(specgrams.shape[:2], device=specgrams.device) * mask_param
    min_value = torch.rand(specgrams.shape[:2], device=specgrams.device) * (specgrams.size(axis) - value)

    # Create broadcastable mask
    mask_start = (min_value.long())[..., None, None].float()
    mask_end = (min_value.long() + value.long())[..., None, None].float()
    mask = torch.arange(0, specgrams.size(axis), device=specgrams.device).float()

    # Per batch example masking
    specgrams = specgrams.transpose(axis, -1)
    specgrams.masked_fill_((mask >= mask_start) & (mask < mask_end), mask_value)
    specgrams = specgrams.transpose(axis, -1)

    return specgrams


def mask_along_axis(specgram, mask_param, mask_value, axis):
    # type: (Tensor, int, float, int) -> Tensor
    r"""
    Apply a mask along ``axis``. Mask will be applied from indices ``[v_0, v_0 + v)``, where
    ``v`` is sampled from ``uniform(0, mask_param)``, and ``v_0`` from ``uniform(0, max_v - v)``.
    All examples will have the same mask interval.

    Args:
        specgram (Tensor): Real spectrogram (channel, freq, time)
        mask_param (int): Number of columns to be masked will be uniformly sampled from [0, mask_param]
        mask_value (float): Value to assign to the masked columns
        axis (int): Axis to apply masking on (1 -> frequency, 2 -> time)

    Returns:
        torch.Tensor: Masked spectrogram of dimensions (channel, freq, time)
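    Example
        >>> # sketch of SpecAugment-style time masking; the width bound of 20 is arbitrary
        >>> specgram = torch.randn(1, 201, 81)
        >>> masked = mask_along_axis(specgram.clone(), 20, 0., 2)  # masks the clone in place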
    """

    value = torch.rand(1) * mask_param
    min_value = torch.rand(1) * (specgram.size(axis) - value)

    mask_start = (min_value.long()).squeeze()
    mask_end = (min_value.long() + value.long()).squeeze()

    assert mask_end - mask_start < mask_param
    if axis == 1:
        specgram[:, mask_start:mask_end] = mask_value
    elif axis == 2:
        specgram[:, :, mask_start:mask_end] = mask_value
    else:
        raise ValueError('Only Frequency and Time masking are supported')

    return specgram


def compute_deltas(specgram, win_length=5, mode="replicate"):
    # type: (Tensor, int, str) -> Tensor
    r"""Compute delta coefficients of a tensor, usually a spectrogram:

    .. math::
        d_t = \frac{\sum_{n=1}^{\text{N}} n (c_{t+n} - c_{t-n})}{2 \sum_{n=1}^{\text{N}} n^2}

    where :math:`d_t` is the deltas at time :math:`t`,
    :math:`c_t` is the spectrogram coefficients at time :math:`t`,
    :math:`N` is ``(win_length - 1) // 2``.

    Args:
        specgram (torch.Tensor): Tensor of audio of dimension (..., freq, time)
        win_length (int): The window length used for computing delta
        mode (str): Mode parameter passed to padding

    Returns:
        deltas (torch.Tensor): Tensor of deltas of dimension (..., freq, time)

    Example
        >>> specgram = torch.randn(1, 40, 1000)
        >>> delta = compute_deltas(specgram)
        >>> delta2 = compute_deltas(delta)
    """

    # pack batch
    shape = specgram.size()
    specgram = specgram.reshape(1, -1, shape[-1])

    assert win_length >= 3

    n = (win_length - 1) // 2

    # twice sum of integer squared
    denom = n * (n + 1) * (2 * n + 1) / 3

    specgram = torch.nn.functional.pad(specgram, (n, n), mode=mode)

    kernel = (
        torch
        .arange(-n, n + 1, 1, device=specgram.device, dtype=specgram.dtype)
        .repeat(specgram.shape[1], 1, 1)
    )

    output = torch.nn.functional.conv1d(specgram, kernel, groups=specgram.shape[1]) / denom

    # unpack batch
    output = output.reshape(shape)

    return output


def _compute_nccf(waveform, sample_rate, frame_time, freq_low):
    # type: (Tensor, int, float, int) -> Tensor
    r"""
    Compute Normalized Cross-Correlation Function (NCCF).

    .. math::
        \phi_i(m) = \frac{\sum_{n=b_i}^{b_i + N-1} w(n) w(m+n)}{\sqrt{E(b_i) E(m+b_i)}},

    where
    :math:`\phi_i(m)` is the NCCF at frame :math:`i` with lag :math:`m`,
    :math:`w` is the waveform,
    :math:`N` is the length of a frame,
    :math:`b_i` is the beginning of frame :math:`i`,
    :math:`E(j)` is the energy :math:`\sum_{n=j}^{j+N-1} w^2(n)`.
    """

    EPSILON = 10 ** (-9)

    # Number of lags to check
    lags = math.ceil(sample_rate / freq_low)

    frame_size = int(math.ceil(sample_rate * frame_time))

    waveform_length = waveform.size()[-1]
    num_of_frames = math.ceil(waveform_length / frame_size)

    p = lags + num_of_frames * frame_size - waveform_length
    waveform = torch.nn.functional.pad(waveform, (0, p))

    # Compute lags
    output_lag = []
    for lag in range(1, lags + 1):
        s1 = waveform[..., :-lag].unfold(-1, frame_size, frame_size)[
            ..., :num_of_frames, :
        ]
        s2 = waveform[..., lag:].unfold(-1, frame_size, frame_size)[
            ..., :num_of_frames, :
        ]

        # normalize by the frame energies: the denominator in the formula above
        # is sqrt(E(b_i) E(m + b_i)), i.e. the product of the two frame norms
        output_frames = (
            (s1 * s2).sum(-1)
            / (EPSILON + torch.norm(s1, p=2, dim=-1))
            / (EPSILON + torch.norm(s2, p=2, dim=-1))
        )

        output_lag.append(output_frames.unsqueeze(-1))

    nccf = torch.cat(output_lag, -1)

    return nccf


def _combine_max(a, b, thresh=0.99):
    # type: (Tuple[Tensor, Tensor], Tuple[Tensor, Tensor], float) -> Tuple[Tensor, Tensor]
    """
    Take value from first if bigger than a multiplicative factor of the second, elementwise.
    """
    mask = (a[0] > thresh * b[0])
    values = mask * a[0] + ~mask * b[0]
    indices = mask * a[1] + ~mask * b[1]
    return values, indices


def _find_max_per_frame(nccf, sample_rate, freq_high):
    # type: (Tensor, int, int) -> Tensor
    r"""
    For each frame, take the highest value of NCCF,
    apply centered median smoothing, and convert to frequency.

    Note: If the max among all the lags is very close
    to the first half of lags, then the latter is taken.
    """

    lag_min = math.ceil(sample_rate / freq_high)

    # Find the smallest lag whose NCCF value is close enough to the global max

    best = torch.max(nccf[..., lag_min:], -1)

    half_size = nccf.shape[-1] // 2
    half = torch.max(nccf[..., lag_min:half_size], -1)

    best = _combine_max(half, best)
    indices = best[1]

    # Add back minimal lag
    indices += lag_min
    # Add 1 empirical calibration offset
    indices += 1

    return indices


def _median_smoothing(indices, win_length):
    # type: (Tensor, int) -> Tensor
    r"""
    Apply median smoothing to the 1D tensor over the given window.
    """

    # Centered windowed
    pad_length = (win_length - 1) // 2

    # "replicate" padding in any dimension
    indices = torch.nn.functional.pad(
        indices, (pad_length, 0), mode="constant", value=0.
    )

    indices[..., :pad_length] = torch.cat(pad_length * [indices[..., pad_length].unsqueeze(-1)], dim=-1)
    roll = indices.unfold(-1, win_length, 1)

    values, _ = torch.median(roll, -1)
    return values


def detect_pitch_frequency(
    waveform,
    sample_rate,
    frame_time=10 ** (-2),
    win_length=30,
    freq_low=85,
    freq_high=3400,
):
    # type: (Tensor, int, float, int, int, int) -> Tensor
    r"""Detect pitch frequency.

    It is implemented using normalized cross-correlation function and median smoothing.

    Args:
        waveform (torch.Tensor): Tensor of audio of dimension (..., time)
        sample_rate (int): The sample rate of the waveform (Hz)
        frame_time (float): Duration of a frame (in seconds)
        win_length (int): The window length for median smoothing (in number of frames)
        freq_low (int): Lowest frequency that can be detected (Hz)
        freq_high (int): Highest frequency that can be detected (Hz)

    Returns:
        freq (torch.Tensor): Tensor of pitch frequencies of dimension (..., frame)
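    Example
        >>> # illustrative; frames of a pure 440 Hz tone should come out near 440
        >>> sample_rate = 16000
        >>> t = torch.arange(sample_rate, dtype=torch.float) / sample_rate
        >>> waveform = torch.sin(2 * math.pi * 440 * t).unsqueeze(0)
        >>> freq = detect_pitch_frequency(waveform, sample_rate)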
    """

    dim = waveform.dim()

    # pack batch
    shape = list(waveform.size())
    waveform = waveform.reshape([-1] + shape[-1:])

    nccf = _compute_nccf(waveform, sample_rate, frame_time, freq_low)
    indices = _find_max_per_frame(nccf, sample_rate, freq_high)
    indices = _median_smoothing(indices, win_length)

    # Convert indices to frequency
    EPSILON = 10 ** (-9)
    freq = sample_rate / (EPSILON + indices.to(torch.float))

    # unpack batch
    freq = freq.reshape(shape[:-1] + list(freq.shape[-1:]))

    return freq