Commit c28073cc authored by hwangjeff, committed by Facebook GitHub Bot

Add speed and speed perturbation functions and transforms (#2829)

Summary:
Adds functions and transforms for speed and speed perturbation (https://www.isca-speech.org/archive/interspeech_2015/ko15_interspeech.html).
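For a quick sense of the API surface this adds, here is a minimal usage sketch (the tensor names and the 16 kHz rate are illustrative, not part of the change):

import torch
import torchaudio.prototype.functional as F
import torchaudio.prototype.transforms as T

waveform = torch.rand(2, 16000)          # two illustrative 16 kHz signals
lengths = torch.tensor([16000, 12000])   # valid samples per signal

# Functional form: a fixed factor. factor > 1.0 speeds up (shortens) the signal,
# factor < 1.0 slows it down (lengthens it).
sped_up, new_lengths = F.speed(waveform, lengths, orig_freq=16000, factor=1.1)

# Transform form: each call draws a factor uniformly at random from the list.
perturb = T.SpeedPerturbation(16000, [0.9, 1.0, 1.1])
perturbed, perturbed_lengths = perturb(waveform, lengths)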

Pull Request resolved: https://github.com/pytorch/audio/pull/2829

Reviewed By: xiaohui-zhang

Differential Revision: D41285114

Pulled By: hwangjeff

fbshipit-source-id: 114740507698e01f35d4beb2c568a2479e847506
parent aca61bc0
@@ -24,6 +24,11 @@ fftconvolve
.. autofunction:: fftconvolve
speed
~~~~~
.. autofunction:: speed
DSP
~~~
...
@@ -14,3 +14,5 @@ torchaudio.prototype.transforms
   BarkScale
   InverseBarkScale
   BarkSpectrogram
   Speed
   SpeedPerturbation
@@ -464,3 +464,11 @@ abstract = {End-to-end spoken language translation (SLT) has recently gained pop
year=2021,
author={Guoguo Chen and Shuzhou Chai and Guanbo Wang and Jiayu Du and Wei-Qiang Zhang and Chao Weng and Dan Su and Daniel Povey and Jan Trmal and Junbo Zhang and Mingjie Jin and Sanjeev Khudanpur and Shinji Watanabe and Shuaijiang Zhao and Wei Zou and Xiangang Li and Xuchen Yao and Yongqing Wang and Yujun Wang and Zhao You and Zhiyong Yan}
}
@inproceedings{ko15_interspeech,
author={Tom Ko and Vijayaditya Peddinti and Daniel Povey and Sanjeev Khudanpur},
title={{Audio augmentation for speech recognition}},
year=2015,
booktitle={Proc. Interspeech 2015},
pages={3586--3589},
doi={10.21437/Interspeech.2015-711}
}
@@ -67,3 +67,11 @@ class AutogradTestImpl(TestBaseMixin):
        cutoff = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype, requires_grad=True)
        assert gradcheck(F.sinc_impulse_response, (cutoff, 513, False))
        assert gradcheck(F.sinc_impulse_response, (cutoff, 513, True))
    def test_speed(self):
        leading_dims = (3, 2)
        T = 200
        waveform = torch.rand(*leading_dims, T, dtype=self.dtype, device=self.device, requires_grad=True)
        lengths = torch.randint(1, T, leading_dims, dtype=self.dtype, device=self.device)
        self.assertTrue(gradcheck(F.speed, (waveform, lengths, 1000, 1.1)))
        self.assertTrue(gradgradcheck(F.speed, (waveform, lengths, 1000, 1.1)))
@@ -44,3 +44,26 @@ class BatchConsistencyTest(TorchaudioTestCase):
                    expected.append(F.add_noise(waveform[i][j][k], noise[i][j][k], lengths[i][j][k], snr[i][j][k]))
        self.assertEqual(torch.stack(expected), actual.reshape(-1, L))
    def test_speed(self):
        B = 5
        orig_freq = 100
        factor = 0.8
        input_lengths = torch.randint(1, 1000, (B,), dtype=torch.int32)
        unbatched_input = [torch.ones((int(length),)) * 1.0 for length in input_lengths]
        batched_input = torch.nn.utils.rnn.pad_sequence(unbatched_input, batch_first=True)

        output, output_lengths = F.speed(batched_input, input_lengths, orig_freq=orig_freq, factor=factor)

        unbatched_output = []
        unbatched_output_lengths = []
        for idx in range(len(unbatched_input)):
            w, l = F.speed(unbatched_input[idx], input_lengths[idx], orig_freq=orig_freq, factor=factor)
            unbatched_output.append(w)
            unbatched_output_lengths.append(l)

        self.assertEqual(output_lengths, torch.stack(unbatched_output_lengths))
        for idx in range(len(unbatched_output)):
            w, l = output[idx], output_lengths[idx]
            self.assertEqual(unbatched_output[idx], w[:l])
import math

import numpy as np
import torch
import torchaudio.prototype.functional as F
@@ -411,6 +413,39 @@ class FunctionalTestImpl(TestBaseMixin):
        self.assertEqual(hyp, ref)
    def test_speed_identity(self):
        """speed of 1.0 does not alter input waveform and length"""
        leading_dims = (5, 4, 2)
        T = 1000
        waveform = torch.rand(*leading_dims, T)
        lengths = torch.randint(1, 1000, leading_dims)
        actual_waveform, actual_lengths = F.speed(waveform, lengths, orig_freq=1000, factor=1.0)
        self.assertEqual(waveform, actual_waveform)
        self.assertEqual(lengths, actual_lengths)

    @nested_params(
        [0.8, 1.1, 1.2],
    )
    def test_speed_accuracy(self, factor):
        """sinusoidal waveform is properly compressed by factor"""
        n_to_trim = 20

        sample_rate = 1000
        freq = 2
        times = torch.arange(0, 5, 1.0 / sample_rate)
        waveform = torch.cos(2 * math.pi * freq * times).unsqueeze(0).to(self.device, self.dtype)
        lengths = torch.tensor([waveform.size(1)])

        output, output_lengths = F.speed(waveform, lengths, orig_freq=sample_rate, factor=factor)
        self.assertEqual(output.size(1), output_lengths[0])

        new_times = torch.arange(0, 5 / factor, 1.0 / sample_rate)
        expected_waveform = torch.cos(2 * math.pi * freq * factor * new_times).unsqueeze(0).to(self.device, self.dtype)
        self.assertEqual(
            expected_waveform[..., n_to_trim:-n_to_trim], output[..., n_to_trim:-n_to_trim], atol=1e-1, rtol=1e-4
        )
class Functional64OnlyTestImpl(TestBaseMixin):
    @nested_params(
...
@@ -81,3 +81,10 @@ class TorchScriptConsistencyTestImpl(TestBaseMixin):
        cutoff = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype)
        self._assert_consistency(F.sinc_impulse_response, (cutoff, 513, False))
        self._assert_consistency(F.sinc_impulse_response, (cutoff, 513, True))
    def test_speed(self):
        leading_dims = (3, 2)
        T = 200
        waveform = torch.rand(*leading_dims, T, dtype=self.dtype, device=self.device, requires_grad=True)
        lengths = torch.randint(1, T, leading_dims, dtype=self.dtype, device=self.device)
        self._assert_consistency(F.speed, (waveform, lengths, 1000, 1.1))
@@ -56,3 +56,21 @@ class Autograd(TestBaseMixin):
            get_whitenoise(sample_rate=sample_rate, duration=0.05, n_channels=2), n_fft=n_fft, power=1
        )
        self.assert_grad(transform, [spec])
    def test_Speed(self):
        leading_dims = (3, 2)
        time = 200
        waveform = torch.rand(*leading_dims, time, dtype=torch.float64, device=self.device, requires_grad=True)
        lengths = torch.randint(1, time, leading_dims, dtype=torch.float64, device=self.device)
        speed = T.Speed(1000, 1.1).to(device=self.device, dtype=torch.float64)
        assert gradcheck(speed, (waveform, lengths))
        assert gradgradcheck(speed, (waveform, lengths))

    def test_SpeedPerturbation(self):
        leading_dims = (3, 2)
        time = 200
        waveform = torch.rand(*leading_dims, time, dtype=torch.float64, device=self.device, requires_grad=True)
        lengths = torch.randint(1, time, leading_dims, dtype=torch.float64, device=self.device)
        speed = T.SpeedPerturbation(1000, [0.9]).to(device=self.device, dtype=torch.float64)
        assert gradcheck(speed, (waveform, lengths))
        assert gradgradcheck(speed, (waveform, lengths))
@@ -63,3 +63,53 @@ class BatchConsistencyTest(TorchaudioTestCase):
        # Because InverseBarkScale runs SGD on randomly initialized values, it does not yield
        # exactly the same result. For this reason, the tolerance here is very relaxed.
        self.assert_batch_consistency(transform, bark_spec, atol=1.0, rtol=1e-5)
    def test_Speed(self):
        B = 5
        orig_freq = 100
        factor = 0.8
        input_lengths = torch.randint(1, 1000, (B,), dtype=torch.int32)
        speed = T.Speed(orig_freq, factor)

        unbatched_input = [torch.ones((int(length),)) * 1.0 for length in input_lengths]
        batched_input = torch.nn.utils.rnn.pad_sequence(unbatched_input, batch_first=True)

        output, output_lengths = speed(batched_input, input_lengths)

        unbatched_output = []
        unbatched_output_lengths = []
        for idx in range(len(unbatched_input)):
            w, l = speed(unbatched_input[idx], input_lengths[idx])
            unbatched_output.append(w)
            unbatched_output_lengths.append(l)

        self.assertEqual(output_lengths, torch.stack(unbatched_output_lengths))
        for idx in range(len(unbatched_output)):
            w, l = output[idx], output_lengths[idx]
            self.assertEqual(unbatched_output[idx], w[:l])

    def test_SpeedPerturbation(self):
        B = 5
        orig_freq = 100
        factor = 0.8
        input_lengths = torch.randint(1, 1000, (B,), dtype=torch.int32)
        speed = T.SpeedPerturbation(orig_freq, [factor])

        unbatched_input = [torch.ones((int(length),)) * 1.0 for length in input_lengths]
        batched_input = torch.nn.utils.rnn.pad_sequence(unbatched_input, batch_first=True)

        output, output_lengths = speed(batched_input, input_lengths)

        unbatched_output = []
        unbatched_output_lengths = []
        for idx in range(len(unbatched_input)):
            w, l = speed(unbatched_input[idx], input_lengths[idx])
            unbatched_output.append(w)
            unbatched_output_lengths.append(l)

        self.assertEqual(output_lengths, torch.stack(unbatched_output_lengths))
        for idx in range(len(unbatched_output)):
            w, l = output[idx], output_lengths[idx]
            self.assertEqual(unbatched_output[idx], w[:l])
@@ -18,3 +18,25 @@ class Transforms(TestBaseMixin):
        output = convolve(x, y)
        ts_output = torch_script(convolve)(x, y)
        self.assertEqual(ts_output, output)
    def test_Speed(self):
        leading_dims = (3, 2)
        time = 200
        waveform = torch.rand(*leading_dims, time, dtype=self.dtype, device=self.device, requires_grad=True)
        lengths = torch.randint(1, time, leading_dims, dtype=self.dtype, device=self.device)
        speed = T.Speed(1000, 0.9).to(self.device, self.dtype)
        output = speed(waveform, lengths)
        ts_output = torch_script(speed)(waveform, lengths)
        self.assertEqual(ts_output, output)

    def test_SpeedPerturbation(self):
        leading_dims = (3, 2)
        time = 200
        waveform = torch.rand(*leading_dims, time, dtype=self.dtype, device=self.device, requires_grad=True)
        lengths = torch.randint(1, time, leading_dims, dtype=self.dtype, device=self.device)
        speed = T.SpeedPerturbation(1000, [0.9]).to(self.device, self.dtype)
        output = speed(waveform, lengths)
        ts_output = torch_script(speed)(waveform, lengths)
        self.assertEqual(ts_output, output)
import math
import random
from unittest.mock import patch

import numpy as np
import torch
import torchaudio.prototype.transforms as T
@@ -99,3 +103,69 @@ class TransformsTestImpl(TestBaseMixin):
        print(f"Ratio of relative diff smaller than {tol:e} is " f"{_get_ratio(relative_diff < tol)}")
        assert _get_ratio(relative_diff < 1e-1) > 0.2
        assert _get_ratio(relative_diff < 1e-3) > 2e-3
    def test_Speed_identity(self):
        """speed of 1.0 does not alter input waveform and length"""
        leading_dims = (5, 4, 2)
        time = 1000
        waveform = torch.rand(*leading_dims, time)
        lengths = torch.randint(1, 1000, leading_dims)
        speed = T.Speed(1000, 1.0)
        actual_waveform, actual_lengths = speed(waveform, lengths)
        self.assertEqual(waveform, actual_waveform)
        self.assertEqual(lengths, actual_lengths)

    @nested_params(
        [0.8, 1.1, 1.2],
    )
    def test_Speed_accuracy(self, factor):
        """sinusoidal waveform is properly compressed by factor"""
        n_to_trim = 20

        sample_rate = 1000
        freq = 2
        times = torch.arange(0, 5, 1.0 / sample_rate)
        waveform = torch.cos(2 * math.pi * freq * times).unsqueeze(0).to(self.device, self.dtype)
        lengths = torch.tensor([waveform.size(1)])

        speed = T.Speed(sample_rate, factor).to(self.device, self.dtype)
        output, output_lengths = speed(waveform, lengths)
        self.assertEqual(output.size(1), output_lengths[0])

        new_times = torch.arange(0, 5 / factor, 1.0 / sample_rate)
        expected_waveform = torch.cos(2 * math.pi * freq * factor * new_times).unsqueeze(0).to(self.device, self.dtype)
        self.assertEqual(
            expected_waveform[..., n_to_trim:-n_to_trim], output[..., n_to_trim:-n_to_trim], atol=1e-1, rtol=1e-4
        )

    def test_SpeedPerturbation(self):
        """sinusoidal waveform is properly compressed by sampled factors"""
        n_to_trim = 20

        sample_rate = 1000
        freq = 2
        times = torch.arange(0, 5, 1.0 / sample_rate)
        waveform = torch.cos(2 * math.pi * freq * times).unsqueeze(0).to(self.device, self.dtype)
        lengths = torch.tensor([waveform.size(1)])

        factors = [0.8, 1.1, 1.0]
        indices = random.choices(range(len(factors)), k=5)
        speed_perturb = T.SpeedPerturbation(sample_rate, factors).to(self.device, self.dtype)

        with patch("torch.randint", side_effect=indices):
            for idx in indices:
                output, output_lengths = speed_perturb(waveform, lengths)
                self.assertEqual(output.size(1), output_lengths[0])

                factor = factors[idx]
                new_times = torch.arange(0, 5 / factor, 1.0 / sample_rate)
                expected_waveform = (
                    torch.cos(2 * math.pi * freq * factor * new_times).unsqueeze(0).to(self.device, self.dtype)
                )
                self.assertEqual(
                    expected_waveform[..., n_to_trim:-n_to_trim],
                    output[..., n_to_trim:-n_to_trim],
                    atol=1e-1,
                    rtol=1e-4,
                )
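Aside: the `patch("torch.randint", side_effect=indices)` construct above makes the otherwise random factor selection deterministic; each call to the patched `torch.randint` returns the next pre-sampled index. A standalone illustration of the mechanism (standard-library `unittest.mock` only; the numbers are arbitrary):

import torch
from unittest.mock import patch

with patch("torch.randint", side_effect=[2, 0, 1]):
    print(torch.randint(3, ()))  # -> 2
    print(torch.randint(3, ()))  # -> 0
    print(torch.randint(3, ()))  # -> 1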
from ._dsp import adsr_envelope, extend_pitch, oscillator_bank, sinc_impulse_response
from .functional import add_noise, barkscale_fbanks, convolve, fftconvolve, speed

__all__ = [
    "add_noise",
@@ -10,4 +10,5 @@ __all__ = [
    "fftconvolve",
    "oscillator_bank",
    "sinc_impulse_response",
    "speed",
]
import math
import warnings
from typing import Tuple

import torch
from torchaudio.functional import resample
from torchaudio.functional.functional import _create_triangular_filterbank

@@ -306,3 +309,39 @@ def barkscale_fbanks(
    )
    return fb
def speed(
    waveform: torch.Tensor, lengths: torch.Tensor, orig_freq: int, factor: float
) -> Tuple[torch.Tensor, torch.Tensor]:
    r"""Adjusts waveform speed.

    .. devices:: CPU CUDA

    .. properties:: Autograd TorchScript

    Args:
        waveform (torch.Tensor): Input signals, with shape `(..., time)`.
        lengths (torch.Tensor): Valid lengths of signals in ``waveform``, with shape `(...)`.
        orig_freq (int): Original frequency of the signals in ``waveform``.
        factor (float): Factor by which to adjust speed of input. Values greater than 1.0
            compress ``waveform`` in time, whereas values less than 1.0 stretch ``waveform`` in time.

    Returns:
        (torch.Tensor, torch.Tensor):
            torch.Tensor
                Speed-adjusted waveform, with shape `(..., new_time)`.
            torch.Tensor
                Valid lengths of signals in speed-adjusted waveform, with shape `(...)`.
    """
    source_sample_rate = int(factor * orig_freq)
    target_sample_rate = int(orig_freq)

    gcd = math.gcd(source_sample_rate, target_sample_rate)
    source_sample_rate = source_sample_rate // gcd
    target_sample_rate = target_sample_rate // gcd

    return resample(waveform, source_sample_rate, target_sample_rate), torch.ceil(
        lengths * target_sample_rate / source_sample_rate
    ).to(lengths.dtype)
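To make the arithmetic concrete, a worked example of the resampling-based implementation (the numbers are illustrative): with `orig_freq=16000` and `factor=1.05`, the source rate is `int(1.05 * 16000) = 16800`; `gcd(16800, 16000) = 800`, so the waveform is resampled from rate 21 down to rate 20, and a full-length 16000-sample signal comes back with length `ceil(16000 * 20 / 21) = 15239`.

import math
import torch
import torchaudio.prototype.functional as F

wave = torch.rand(4, 16000)             # illustrative 1-second batch at 16 kHz
wave_lengths = torch.full((4,), 16000)  # every sample is valid
out, out_lengths = F.speed(wave, wave_lengths, orig_freq=16000, factor=1.05)
assert out_lengths.tolist() == [math.ceil(16000 * 20 / 21)] * 4  # 15239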
from ._transforms import BarkScale, BarkSpectrogram, Convolve, FFTConvolve, InverseBarkScale, Speed, SpeedPerturbation

__all__ = [
    "BarkScale",
@@ -6,4 +6,6 @@ __all__ = [
    "Convolve",
    "FFTConvolve",
    "InverseBarkScale",
    "SpeedPerturbation",
    "Speed",
]
import math
from typing import Callable, Optional, Sequence, Tuple

import torch
from torchaudio.prototype.functional import barkscale_fbanks, convolve, fftconvolve
from torchaudio.prototype.functional.functional import _check_convolve_mode
from torchaudio.transforms import Resample, Spectrogram


class Convolve(torch.nn.Module):
@@ -384,3 +385,101 @@ class BarkSpectrogram(torch.nn.Module):
        specgram = self.spectrogram(waveform)
        bark_specgram = self.bark_scale(specgram)
        return bark_specgram
def _source_target_sample_rate(orig_freq: int, speed: float) -> Tuple[int, int]:
    source_sample_rate = int(speed * orig_freq)
    target_sample_rate = int(orig_freq)
    gcd = math.gcd(source_sample_rate, target_sample_rate)
    return source_sample_rate // gcd, target_sample_rate // gcd
class Speed(torch.nn.Module):
    r"""Adjusts waveform speed.

    .. devices:: CPU CUDA

    .. properties:: Autograd TorchScript

    Args:
        orig_freq (int): Original frequency of the signals in ``waveform``.
        factor (float): Factor by which to adjust speed of input. Values greater than 1.0
            compress ``waveform`` in time, whereas values less than 1.0 stretch ``waveform`` in time.
    """

    def __init__(self, orig_freq, factor) -> None:
        super().__init__()

        self.orig_freq = orig_freq
        self.factor = factor

        self.source_sample_rate, self.target_sample_rate = _source_target_sample_rate(orig_freq, factor)
        self.resampler = Resample(orig_freq=self.source_sample_rate, new_freq=self.target_sample_rate)

    def forward(self, waveform, lengths) -> Tuple[torch.Tensor, torch.Tensor]:
        r"""
        Args:
            waveform (torch.Tensor): Input signals, with shape `(..., time)`.
            lengths (torch.Tensor): Valid lengths of signals in ``waveform``, with shape `(...)`.

        Returns:
            (torch.Tensor, torch.Tensor):
                torch.Tensor
                    Speed-adjusted waveform, with shape `(..., new_time)`.
                torch.Tensor
                    Valid lengths of signals in speed-adjusted waveform, with shape `(...)`.
        """
        return (
            self.resampler(waveform),
            torch.ceil(lengths * self.target_sample_rate / self.source_sample_rate).to(lengths.dtype),
        )
class SpeedPerturbation(torch.nn.Module):
    r"""Applies the speed perturbation augmentation introduced in
    *Audio augmentation for speech recognition* :cite:`ko15_interspeech`. For a given input,
    the module samples a speed-up factor from ``factors`` uniformly at random and adjusts
    the speed of the input by that factor.

    .. devices:: CPU CUDA

    .. properties:: Autograd TorchScript

    Args:
        orig_freq (int): Original frequency of the signals in ``waveform``.
        factors (Sequence[float]): Factors by which to adjust speed of input. Values greater than 1.0
            compress ``waveform`` in time, whereas values less than 1.0 stretch ``waveform`` in time.

    Example
        >>> speed_perturb = SpeedPerturbation(16000, [0.9, 1.1, 1.0, 1.0, 1.0])
        >>> # waveform speed will be adjusted by factor 0.9 with 20% probability,
        >>> # 1.1 with 20% probability, and 1.0 (i.e. kept the same) with 60% probability.
        >>> perturbed_waveform, perturbed_lengths = speed_perturb(waveform, lengths)
    """

    def __init__(self, orig_freq: int, factors: Sequence[float]) -> None:
        super().__init__()

        self.speeders = torch.nn.ModuleList([Speed(orig_freq=orig_freq, factor=factor) for factor in factors])

    def forward(self, waveform: torch.Tensor, lengths: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        r"""
        Args:
            waveform (torch.Tensor): Input signals, with shape `(..., time)`.
            lengths (torch.Tensor): Valid lengths of signals in ``waveform``, with shape `(...)`.

        Returns:
            (torch.Tensor, torch.Tensor):
                torch.Tensor
                    Speed-adjusted waveform, with shape `(..., new_time)`.
                torch.Tensor
                    Valid lengths of signals in speed-adjusted waveform, with shape `(...)`.
        """
        idx = int(torch.randint(len(self.speeders), ()))
        # NOTE: we do this because TorchScript doesn't allow for
        # indexing ModuleList instances with non-literals.
        for speeder_idx, speeder in enumerate(self.speeders):
            if idx == speeder_idx:
                return speeder(waveform, lengths)
        raise RuntimeError("Speeder not found; execution should have never reached here.")
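One implementation note worth calling out: TorchScript cannot compile `self.speeders[idx]` when `idx` is computed at runtime, hence the enumerate-and-compare loop at the end of `forward`. A self-contained sketch of the same pattern (the `Scale` and `RandomScale` modules below are made up for illustration):

import torch

class Scale(torch.nn.Module):
    def __init__(self, gain: float):
        super().__init__()
        self.gain = gain

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.gain * x

class RandomScale(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.scalers = torch.nn.ModuleList([Scale(0.5), Scale(1.0), Scale(2.0)])

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        idx = int(torch.randint(len(self.scalers), ()))
        # `self.scalers[idx]` would fail to script; iterating and
        # comparing indices compiles cleanly.
        for i, scaler in enumerate(self.scalers):
            if i == idx:
                return scaler(x)
        raise RuntimeError("unreachable")

scripted = torch.jit.script(RandomScale())  # compiles
print(scripted(torch.ones(3)))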