"scripts/convert_zero123_to_diffusers.py" did not exist on "2625fb59dc7a3f03516f0c6c5c0cdda18ef0ef5b"
Unverified Commit 6d5e879c authored by Krishna Kalyan, committed by GitHub

BC-Breaking: Remove deprecated `normalized` argument from griffinlim (#1369)

parent f5ac116f
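For code that passed `normalized` positionally, migrating means dropping that argument from the `F.griffinlim` call; everything else keeps its position. A minimal before/after sketch (argument values are illustrative and mirror the tests below):

import torch
import torchaudio.functional as F

specgram = torch.rand(1, 201, 6)   # magnitude spectrogram of shape (..., freq, time)
window = torch.hann_window(400)

# Before this change: `normalized` (here False) sat between `power` and `n_iter`
# but had no effect on the result.
# waveform = F.griffinlim(specgram, window, 400, 200, 400, 2., False, 32, 0.99, 1000, False)

# After this change: the argument is removed.
waveform = F.griffinlim(specgram, window, 400, 200, 400, 2., 32, 0.99, 1000, False)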
@@ -52,14 +52,13 @@ class TestFunctional(common_utils.TorchaudioTestCase):
         hop = 200
         window = torch.hann_window(ws)
         power = 2
-        normalize = False
         momentum = 0.99
         n_iter = 32
         length = 1000
         torch.random.manual_seed(0)
         batch = torch.rand(self.batch_size, 1, 201, 6)
         self.assert_batch_consistency(
-            F.griffinlim, batch, window, n_fft, hop, ws, power, normalize,
+            F.griffinlim, batch, window, n_fft, hop, ws, power,
             n_iter, momentum, length, 0, atol=5e-5)

     @parameterized.expand(list(itertools.product(
......
@@ -38,7 +38,7 @@ class TestFunctional(common_utils.TorchaudioTestCase):
         init = 'random' if rand_init else None
         specgram = F.spectrogram(tensor, 0, window, n_fft, hop, ws, 2, normalize).sqrt()
-        ta_out = F.griffinlim(specgram, window, n_fft, hop, ws, 1, normalize,
+        ta_out = F.griffinlim(specgram, window, n_fft, hop, ws, 1,
                               n_iter, momentum, length, rand_init)
         lr_out = librosa.griffinlim(specgram.squeeze(0).numpy(), n_iter=n_iter, hop_length=hop,
                                     momentum=momentum, init=init, length=length)
......
@@ -41,12 +41,11 @@ class Functional(common_utils.TestBaseMixin):
             hop = 200
             window = torch.hann_window(ws, device=tensor.device, dtype=tensor.dtype)
             power = 2.
-            normalize = False
             momentum = 0.99
             n_iter = 32
             length = 1000
             rand_int = False
-            return F.griffinlim(tensor, window, n_fft, hop, ws, power, normalize, n_iter, momentum, length, rand_int)
+            return F.griffinlim(tensor, window, n_fft, hop, ws, power, n_iter, momentum, length, rand_int)

         tensor = torch.rand((1, 201, 6))
         self._assert_consistency(func, tensor)
......
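The TorchScript consistency check above amounts to scripting a small wrapper around `F.griffinlim` and comparing it with eager execution. A rough sketch under the same constants (the plain `torch.testing` comparison here stands in for the project's `_assert_consistency` helper):

import torch
import torchaudio.functional as F

def func(tensor: torch.Tensor) -> torch.Tensor:
    # Same positional order as the updated call above; `normalized` is gone.
    window = torch.hann_window(400, device=tensor.device, dtype=tensor.dtype)
    return F.griffinlim(tensor, window, 400, 200, 400, 2., 32, 0.99, 1000, False)

tensor = torch.rand((1, 201, 6))
eager = func(tensor)
scripted = torch.jit.script(func)(tensor)
torch.testing.assert_allclose(eager, scripted)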
@@ -118,7 +118,6 @@ def griffinlim(
         hop_length: int,
         win_length: int,
         power: float,
-        normalized: bool,
         n_iter: int,
         momentum: float,
         length: Optional[int],
@@ -148,7 +147,6 @@ def griffinlim(
         win_length (int): Window size. (Default: ``n_fft``)
         power (float): Exponent for the magnitude spectrogram,
             (must be > 0) e.g., 1 for energy, 2 for power, etc.
-        normalized (bool): Whether to normalize by magnitude after stft.
         n_iter (int): Number of iteration for phase recovery process.
         momentum (float): The momentum parameter for fast Griffin-Lim.
             Setting this to 0 recovers the original Griffin-Lim method.
@@ -162,12 +160,6 @@ def griffinlim(
     assert momentum < 1, 'momentum={} > 1 can be unstable'.format(momentum)
     assert momentum >= 0, 'momentum={} < 0'.format(momentum)

-    if normalized:
-        warnings.warn(
-            "The argument normalized is not used in Griffin-Lim, "
-            "and will be removed in v0.9.0 release. To suppress this warning, "
-            "please use `normalized=False`.")
-
     # pack batch
     shape = specgram.size()
     specgram = specgram.reshape([-1] + list(shape[-2:]))
......
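On the functional side the removal only touches `griffinlim` itself; `F.spectrogram` keeps its `normalized` parameter, as the librosa-compatibility test above shows. A minimal round-trip sketch after this change (constants mirror those tests):

import torch
import torchaudio.functional as F

waveform = torch.rand(1, 1000)
n_fft, hop, ws = 400, 200, 400
window = torch.hann_window(ws)

# F.spectrogram still takes `normalized` (the last positional argument here).
specgram = F.spectrogram(waveform, 0, window, n_fft, hop, ws, 2, False).sqrt()

# F.griffinlim no longer does: (specgram, window, n_fft, hop_length, win_length,
# power, n_iter, momentum, length, rand_init).
reconstructed = F.griffinlim(specgram, window, n_fft, hop, ws, 1, 32, 0.99, 1000, True)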
@@ -122,7 +122,6 @@ class GriffinLim(torch.nn.Module):
             that is applied/multiplied to each frame/window. (Default: ``torch.hann_window``)
         power (float, optional): Exponent for the magnitude spectrogram,
             (must be > 0) e.g., 1 for energy, 2 for power, etc. (Default: ``2``)
-        normalized (bool, optional): Whether to normalize by magnitude after stft. (Default: ``False``)
         wkwargs (dict or None, optional): Arguments for window function. (Default: ``None``)
         momentum (float, optional): The momentum parameter for fast Griffin-Lim.
             Setting this to 0 recovers the original Griffin-Lim method.
@@ -158,7 +157,6 @@ class GriffinLim(torch.nn.Module):
                  hop_length: Optional[int] = None,
                  window_fn: Callable[..., Tensor] = torch.hann_window,
                  power: float = 2.,
-                 normalized: bool = False,
                  wkwargs: Optional[dict] = None,
                  momentum: float = 0.99,
                  length: Optional[int] = None,
@@ -174,7 +172,6 @@ class GriffinLim(torch.nn.Module):
         self.hop_length = hop_length if hop_length is not None else self.win_length // 2
         window = window_fn(self.win_length) if wkwargs is None else window_fn(self.win_length, **wkwargs)
         self.register_buffer('window', window)
-        self.normalized = normalized
         self.length = length
         self.power = power
         self.momentum = momentum / (1 + momentum)
@@ -191,7 +188,7 @@ class GriffinLim(torch.nn.Module):
             Tensor: waveform of (..., time), where time equals the ``length`` parameter if given.
         """
         return F.griffinlim(specgram, self.window, self.n_fft, self.hop_length, self.win_length, self.power,
-                            self.normalized, self.n_iter, self.momentum, self.length, self.rand_init)
+                            self.n_iter, self.momentum, self.length, self.rand_init)


class AmplitudeToDB(torch.nn.Module):
......
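The transform mirrors the functional change: `torchaudio.transforms.GriffinLim` no longer accepts `normalized` in its constructor. A minimal sketch using keywords visible in the signature above (plus `n_fft`, assumed to keep its usual place in the constructor):

import torch
import torchaudio.transforms as T

specgram = torch.rand(1, 201, 6)   # (..., freq, time), freq = n_fft // 2 + 1

# Passing normalized=... here now raises a TypeError instead of a deprecation warning.
griffin_lim = T.GriffinLim(
    n_fft=400,
    hop_length=200,
    power=2.,
    momentum=0.99,
    length=1000,
)
waveform = griffin_lim(specgram)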