OpenDAS / Torchaudio
Commit 6d5e879c (unverified)
Authored Mar 08, 2021 by Krishna Kalyan; committed Mar 08, 2021 by GitHub

BC-Breaking: Remove deprecated `normalized` argument from griffinlim (#1369)

Parent: f5ac116f
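For anyone updating code across this breaking change, the fix is simply to stop passing `normalized` to `torchaudio.functional.griffinlim` (and to `torchaudio.transforms.GriffinLim`, below). A minimal before/after sketch, with illustrative parameter values borrowed from the tests in this commit; the positional order follows the updated signature shown in the diffs:

```python
import torch
import torchaudio.functional as F

specgram = torch.rand(1, 201, 6)      # (channel, freq, frames), shape borrowed from the tests
window = torch.hann_window(400)

# Before this commit the deprecated `normalized` flag had to be passed (typically False):
#   F.griffinlim(specgram, window, 400, 200, 400, 2., False, 32, 0.99, 1000, True)

# After this commit, drop `normalized`:
waveform = F.griffinlim(specgram, window, 400, 200, 400, 2., 32, 0.99, 1000, True)
```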
Changes: showing 5 changed files with 4 additions and 17 deletions (+4 / -17)
test/torchaudio_unittest/functional/batch_consistency_test.py        +1 -2
test/torchaudio_unittest/functional/librosa_compatibility_test.py    +1 -1
test/torchaudio_unittest/functional/torchscript_consistency_impl.py  +1 -2
torchaudio/functional/functional.py                                  +0 -8
torchaudio/transforms.py                                             +1 -4
test/torchaudio_unittest/functional/batch_consistency_test.py (view file @ 6d5e879c)

@@ -52,14 +52,13 @@ class TestFunctional(common_utils.TorchaudioTestCase):
         hop = 200
         window = torch.hann_window(ws)
         power = 2
-        normalize = False
         momentum = 0.99
         n_iter = 32
         length = 1000
         torch.random.manual_seed(0)
         batch = torch.rand(self.batch_size, 1, 201, 6)
         self.assert_batch_consistency(
-            F.griffinlim, batch, window, n_fft, hop, ws, power, normalize,
+            F.griffinlim, batch, window, n_fft, hop, ws, power,
             n_iter, momentum, length, 0, atol=5e-5)

     @parameterized.expand(list(itertools.product(
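The helper `assert_batch_consistency` used above checks that a batched `F.griffinlim` call matches running each item separately; only its argument list changes in this commit. A hedged, standalone sketch of the same idea with the new argument order (the helper itself is part of the test harness and is not reproduced here):

```python
import torch
import torchaudio.functional as F

torch.random.manual_seed(0)
batch = torch.rand(3, 1, 201, 6)      # (batch, channel, freq, frames)
window = torch.hann_window(400)

# Batched call vs. per-item calls; rand_init is False so the result is deterministic.
batched = F.griffinlim(batch, window, 400, 200, 400, 2., 32, 0.99, 1000, False)
per_item = torch.stack([
    F.griffinlim(item, window, 400, 200, 400, 2., 32, 0.99, 1000, False)
    for item in batch
])
assert torch.allclose(batched, per_item, atol=5e-5)
```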
test/torchaudio_unittest/functional/librosa_compatibility_test.py (view file @ 6d5e879c)

@@ -38,7 +38,7 @@ class TestFunctional(common_utils.TorchaudioTestCase):
         init = 'random' if rand_init else None
         specgram = F.spectrogram(tensor, 0, window, n_fft, hop, ws, 2, normalize).sqrt()
-        ta_out = F.griffinlim(specgram, window, n_fft, hop, ws, 1, normalize,
+        ta_out = F.griffinlim(specgram, window, n_fft, hop, ws, 1,
                               n_iter, momentum, length, rand_init)
         lr_out = librosa.griffinlim(specgram.squeeze(0).numpy(), n_iter=n_iter, hop_length=hop,
                                     momentum=momentum, init=init, length=length)
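This test compares torchaudio's Griffin-Lim against librosa's on the same magnitude spectrogram; the only change is dropping `normalize` from the `F.griffinlim` call. A hedged standalone sketch of that comparison (parameter values are illustrative, librosa must be installed, and the original test additionally asserts the two outputs are close):

```python
import torch
import librosa
import torchaudio.functional as F

n_fft, hop, ws, n_iter, momentum, length = 400, 200, 400, 32, 0.99, 1000
window = torch.hann_window(ws)
waveform = torch.rand(1, 2000)

# Magnitude spectrogram: power-2 spectrogram followed by sqrt, as in the test above.
specgram = F.spectrogram(waveform, 0, window, n_fft, hop, ws, 2, False).sqrt()

ta_out = F.griffinlim(specgram, window, n_fft, hop, ws, 1, n_iter, momentum, length, False)
lr_out = librosa.griffinlim(specgram.squeeze(0).numpy(), n_iter=n_iter, hop_length=hop,
                            momentum=momentum, init=None, length=length)
```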
test/torchaudio_unittest/functional/torchscript_consistency_impl.py (view file @ 6d5e879c)

@@ -41,12 +41,11 @@ class Functional(common_utils.TestBaseMixin):
             hop = 200
             window = torch.hann_window(ws, device=tensor.device, dtype=tensor.dtype)
             power = 2.
-            normalize = False
             momentum = 0.99
             n_iter = 32
             length = 1000
             rand_int = False
-            return F.griffinlim(tensor, window, n_fft, hop, ws, power, normalize, n_iter, momentum, length, rand_int)
+            return F.griffinlim(tensor, window, n_fft, hop, ws, power, n_iter, momentum, length, rand_int)

         tensor = torch.rand((1, 201, 6))
         self._assert_consistency(func, tensor)
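The TorchScript consistency test scripts a small wrapper around `F.griffinlim` and checks it against eager execution; again only the argument list changes. A hedged sketch of the same pattern outside the test harness (`run_griffinlim` is an illustrative name, not part of torchaudio):

```python
import torch
import torchaudio.functional as F

def run_griffinlim(tensor: torch.Tensor) -> torch.Tensor:
    n_fft = 400
    ws = 400
    hop = 200
    window = torch.hann_window(ws, device=tensor.device, dtype=tensor.dtype)
    # New argument order: no `normalize` between power and n_iter.
    return F.griffinlim(tensor, window, n_fft, hop, ws, 2., 32, 0.99, 1000, False)

scripted = torch.jit.script(run_griffinlim)
tensor = torch.rand((1, 201, 6))
assert torch.allclose(run_griffinlim(tensor), scripted(tensor))
```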
torchaudio/functional/functional.py (view file @ 6d5e879c)

@@ -118,7 +118,6 @@ def griffinlim(
         hop_length: int,
         win_length: int,
         power: float,
-        normalized: bool,
         n_iter: int,
         momentum: float,
         length: Optional[int],
@@ -148,7 +147,6 @@ def griffinlim(
         win_length (int): Window size. (Default: ``n_fft``)
         power (float): Exponent for the magnitude spectrogram,
             (must be > 0) e.g., 1 for energy, 2 for power, etc.
-        normalized (bool): Whether to normalize by magnitude after stft.
         n_iter (int): Number of iteration for phase recovery process.
         momentum (float): The momentum parameter for fast Griffin-Lim.
             Setting this to 0 recovers the original Griffin-Lim method.
@@ -162,12 +160,6 @@ def griffinlim(
     assert momentum < 1, 'momentum={} > 1 can be unstable'.format(momentum)
     assert momentum >= 0, 'momentum={} < 0'.format(momentum)

-    if normalized:
-        warnings.warn(
-            "The argument normalized is not used in Griffin-Lim, "
-            "and will be removed in v0.9.0 release. To suppress this warning, "
-            "please use `normalized=False`.")
-
     # pack batch
     shape = specgram.size()
     specgram = specgram.reshape([-1] + list(shape[-2:]))
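Aside from deleting the unused `normalized` parameter, its docstring entry, and the deprecation warning, the surrounding code is untouched. The `# pack batch` lines visible in the last hunk are torchaudio's idiom for supporting arbitrary leading batch dimensions: fold everything except the last two dimensions into one batch dimension, run the per-spectrogram computation, then restore the original shape. A generic sketch of that reshape round-trip (not torchaudio code, just the same pattern):

```python
import torch

x = torch.rand(3, 2, 201, 6)                  # arbitrary leading dims + (freq, frames)
shape = x.size()

# pack batch: collapse leading dims so the core op sees (batch, freq, frames)
packed = x.reshape([-1] + list(shape[-2:]))   # -> (6, 201, 6)

# ... the per-spectrogram computation would run on `packed` here ...

# unpack batch: restore the original leading dimensions
restored = packed.reshape(shape[:-2] + packed.shape[-2:])
assert restored.shape == x.shape
```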
torchaudio/transforms.py (view file @ 6d5e879c)

@@ -122,7 +122,6 @@ class GriffinLim(torch.nn.Module):
             that is applied/multiplied to each frame/window. (Default: ``torch.hann_window``)
         power (float, optional): Exponent for the magnitude spectrogram,
             (must be > 0) e.g., 1 for energy, 2 for power, etc. (Default: ``2``)
-        normalized (bool, optional): Whether to normalize by magnitude after stft. (Default: ``False``)
         wkwargs (dict or None, optional): Arguments for window function. (Default: ``None``)
         momentum (float, optional): The momentum parameter for fast Griffin-Lim.
             Setting this to 0 recovers the original Griffin-Lim method.
@@ -158,7 +157,6 @@ class GriffinLim(torch.nn.Module):
                  hop_length: Optional[int] = None,
                  window_fn: Callable[..., Tensor] = torch.hann_window,
                  power: float = 2.,
-                 normalized: bool = False,
                  wkwargs: Optional[dict] = None,
                  momentum: float = 0.99,
                  length: Optional[int] = None,
@@ -174,7 +172,6 @@ class GriffinLim(torch.nn.Module):
         self.hop_length = hop_length if hop_length is not None else self.win_length // 2
         window = window_fn(self.win_length) if wkwargs is None else window_fn(self.win_length, **wkwargs)
         self.register_buffer('window', window)
-        self.normalized = normalized
         self.length = length
         self.power = power
         self.momentum = momentum / (1 + momentum)
@@ -191,7 +188,7 @@ class GriffinLim(torch.nn.Module):
             Tensor: waveform of (..., time), where time equals the ``length`` parameter if given.
         """
         return F.griffinlim(specgram, self.window, self.n_fft, self.hop_length, self.win_length, self.power,
-                            self.normalized, self.n_iter, self.momentum, self.length, self.rand_init)
+                            self.n_iter, self.momentum, self.length, self.rand_init)


 class AmplitudeToDB(torch.nn.Module):
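On the transform side, the `normalized` constructor argument, its docstring entry, and the stored attribute are gone, and `forward` no longer forwards it to `F.griffinlim`. A minimal usage sketch after this change; keyword names follow the constructor shown above, while the `n_fft` and `n_iter` defaults are assumed rather than visible in this diff:

```python
import torch
import torchaudio

# No `normalized` argument anymore.
griffin_lim = torchaudio.transforms.GriffinLim(
    n_fft=400,
    power=2.,
    momentum=0.99,     # stored internally as momentum / (1 + momentum), per the hunk above
    length=None,
)

power_specgram = torch.rand(1, 201, 6)     # (channel, freq, frames)
waveform = griffin_lim(power_specgram)     # (channel, time)
```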