Torchaudio · Commit e4a0bd2c
Add autograd test for T.Spectrogram/T.MelSpectrogram (#1340)

Unverified commit, authored by moto on Mar 30, 2021; committed by GitHub on Mar 30, 2021.
Parent: c0bfb03a
Showing 3 changed files, with 78 additions and 0 deletions:

- test/torchaudio_unittest/transforms/autograd_cpu_test.py (+6, -0)
- test/torchaudio_unittest/transforms/autograd_cuda_test.py (+10, -0)
- test/torchaudio_unittest/transforms/autograd_test_impl.py (+62, -0)
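The tests below follow a single pattern: cast the transform and its inputs to float64, then verify gradients with torch.autograd.gradcheck and gradgradcheck, which compare analytical gradients against numerical finite differences. As a minimal standalone sketch of that pattern (the transform choice and tensor shape here are illustrative, not part of this diff):

```python
import torch
import torchaudio.transforms as T

# gradcheck needs double precision for numerically stable finite differences.
transform = T.Spectrogram(power=2.0).to(torch.float64)
waveform = torch.randn(2, 400, dtype=torch.float64, requires_grad=True)

# Compares the analytical Jacobian against a finite-difference estimate.
assert torch.autograd.gradcheck(transform, [waveform])
```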
test/torchaudio_unittest/transforms/autograd_cpu_test.py (new file, mode 100644)

```python
from torchaudio_unittest.common_utils import PytorchTestCase
from .autograd_test_impl import AutogradTestMixin


class AutogradCPUTest(AutogradTestMixin, PytorchTestCase):
    device = 'cpu'
```
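This file and the CUDA variant below are intentionally thin: all test logic lives in AutogradTestMixin, and each concrete class only pins a device attribute. A minimal sketch of the same mixin pattern in plain unittest terms (hypothetical names, independent of torchaudio's TestBaseMixin):

```python
import unittest


class DeviceMixin:
    # Concrete subclasses set this to 'cpu' or 'cuda'.
    device = None

    def test_device_is_set(self):
        self.assertIn(self.device, ('cpu', 'cuda'))


class CPUTest(DeviceMixin, unittest.TestCase):
    device = 'cpu'
```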
test/torchaudio_unittest/transforms/autograd_cuda_test.py (new file, mode 100644)

```python
from torchaudio_unittest.common_utils import (
    PytorchTestCase,
    skipIfNoCuda,
)
from .autograd_test_impl import AutogradTestMixin


@skipIfNoCuda
class AutogradCUDATest(AutogradTestMixin, PytorchTestCase):
    device = 'cuda'
```
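skipIfNoCuda is torchaudio's test helper for skipping CUDA tests on machines without a GPU; its actual implementation lives in torchaudio_unittest.common_utils. A minimal stand-in built on unittest.skipIf might look like this (an assumption for illustration, not the real helper):

```python
import unittest

import torch

# Hypothetical stand-in: skip the decorated test/class when CUDA is absent.
skipIfNoCuda = unittest.skipIf(
    not torch.cuda.is_available(), 'CUDA not available')
```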
test/torchaudio_unittest/transforms/autograd_test_impl.py (new file, mode 100644)

```python
from typing import List

from parameterized import parameterized
import torch
from torch.autograd import gradcheck, gradgradcheck
import torchaudio.transforms as T
from torchaudio_unittest.common_utils import (
    TestBaseMixin,
    get_whitenoise,
)


class AutogradTestMixin(TestBaseMixin):
    def assert_grad(
            self,
            transform: torch.nn.Module,
            inputs: List[torch.Tensor],
            *,
            nondet_tol: float = 0.0,
    ):
        transform = transform.to(dtype=torch.float64, device=self.device)
        inputs_ = []
        for i in inputs:
            i.requires_grad = True
            inputs_.append(i.to(dtype=torch.float64, device=self.device))
        assert gradcheck(transform, inputs_)
        assert gradgradcheck(transform, inputs_, nondet_tol=nondet_tol)

    @parameterized.expand([
        ({'pad': 0, 'normalized': False, 'power': None}, ),
        ({'pad': 3, 'normalized': False, 'power': None}, ),
        ({'pad': 0, 'normalized': True, 'power': None}, ),
        ({'pad': 3, 'normalized': True, 'power': None}, ),
        ({'pad': 0, 'normalized': False, 'power': 1.0}, ),
        ({'pad': 3, 'normalized': False, 'power': 1.0}, ),
        ({'pad': 0, 'normalized': True, 'power': 1.0}, ),
        ({'pad': 3, 'normalized': True, 'power': 1.0}, ),
        ({'pad': 0, 'normalized': False, 'power': 2.0}, ),
        ({'pad': 3, 'normalized': False, 'power': 2.0}, ),
        ({'pad': 0, 'normalized': True, 'power': 2.0}, ),
        ({'pad': 3, 'normalized': True, 'power': 2.0}, ),
    ])
    def test_spectrogram(self, kwargs):
        # replication_pad1d_backward_cuda is not deterministic and
        # gives a very small (~2.7756e-17) difference.
        #
        # See https://github.com/pytorch/pytorch/issues/54093
        transform = T.Spectrogram(**kwargs)
        waveform = get_whitenoise(sample_rate=8000, duration=0.05, n_channels=2)
        self.assert_grad(transform, [waveform], nondet_tol=1e-10)

    def test_melspectrogram(self):
        # replication_pad1d_backward_cuda is not deterministic and
        # gives a very small (~2.7756e-17) difference.
        #
        # See https://github.com/pytorch/pytorch/issues/54093
        sample_rate = 8000
        transform = T.MelSpectrogram(sample_rate=sample_rate)
        waveform = get_whitenoise(sample_rate=sample_rate, duration=0.05, n_channels=2)
        self.assert_grad(transform, [waveform], nondet_tol=1e-10)
```
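The nondet_tol=1e-10 in both tests exists because gradcheck re-evaluates the function and compares results across runs; as the inline comments note, replication_pad1d_backward_cuda is nondeterministic at roughly the 1e-17 level. A standalone illustration of the same knob applied to the padding op itself (shapes and tolerance are illustrative):

```python
import torch
from torch.autograd import gradcheck

# ReplicationPad1d's CUDA backward can differ slightly between runs;
# a small nondet_tol absorbs that (on CPU the default 0.0 suffices).
pad = torch.nn.ReplicationPad1d(3).to(torch.float64)
x = torch.randn(1, 1, 8, dtype=torch.float64, requires_grad=True)
assert gradcheck(pad, [x], nondet_tol=1e-10)
```

Running pytest on test/torchaudio_unittest/transforms/autograd_cpu_test.py (or the CUDA variant) exercises these checks.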