renzhc / diffusers_dcu · Commits · 5e3f8fff

Unverified commit 5e3f8fff, authored Jun 22, 2023 by Patrick von Platen, committed by GitHub on Jun 22, 2023.

Fix some audio tests (#3841)

* Fix some audio tests
* make style
* fix
* make style

Parent: 5df2acf7
Showing 2 changed files with 13 additions and 4 deletions.
tests/pipelines/audioldm/test_audioldm.py    +8 -2
tests/pipelines/test_pipelines_common.py     +5 -2
tests/pipelines/audioldm/test_audioldm.py
@@ -36,7 +36,7 @@ from diffusers import (
     PNDMScheduler,
     UNet2DConditionModel,
 )
-from diffusers.utils import slow, torch_device
+from diffusers.utils import is_xformers_available, slow, torch_device
 from diffusers.utils.testing_utils import enable_full_determinism
 from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS

@@ -361,9 +361,15 @@ class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
     def test_inference_batch_single_identical(self):
         self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

+    @unittest.skipIf(
+        torch_device != "cuda" or not is_xformers_available(),
+        reason="XFormers attention is only available with CUDA and `xformers` installed",
+    )
+    def test_xformers_attention_forwardGenerator_pass(self):
+        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)


 @slow
+# @require_torch_gpu
 class AudioLDMPipelineSlowTests(unittest.TestCase):
     def tearDown(self):
         super().tearDown()
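The test added above is gated with unittest.skipIf so it only executes when torch_device is "cuda" and the xformers package is importable. A minimal, self-contained sketch of the same pattern (not part of this commit; the class name and test body are placeholders):

import unittest

# Import paths as used in this commit's diff; in later diffusers releases the
# testing helpers may live elsewhere.
from diffusers.utils import is_xformers_available, torch_device


class ExampleXFormersGatedTests(unittest.TestCase):
    # The skip condition is evaluated once, when the decorator is applied at
    # import time, mirroring the decorator added to AudioLDMPipelineFastTests.
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_code_path(self):
        # Placeholder body: a real test would enable xformers attention on a
        # pipeline and compare its output with the default attention path.
        self.assertTrue(is_xformers_available())

Because the condition is evaluated at decoration time, both torch_device and is_xformers_available must be available at module level, which is what the import change in the first hunk provides.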
tests/pipelines/test_pipelines_common.py
@@ -640,7 +640,9 @@ class PipelineTesterMixin:
     def test_xformers_attention_forwardGenerator_pass(self):
         self._test_xformers_attention_forwardGenerator_pass()

-    def _test_xformers_attention_forwardGenerator_pass(self, test_max_difference=True, expected_max_diff=1e-4):
+    def _test_xformers_attention_forwardGenerator_pass(
+        self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-4
+    ):
         if not self.test_xformers_attention:
             return

@@ -660,6 +662,7 @@ class PipelineTesterMixin:
         max_diff = np.abs(output_with_offload - output_without_offload).max()
         self.assertLess(max_diff, expected_max_diff, "XFormers attention should not affect the inference results")

-        assert_mean_pixel_difference(output_with_offload[0], output_without_offload[0])
+        if test_mean_pixel_difference:
+            assert_mean_pixel_difference(output_with_offload[0], output_without_offload[0])

     def test_progress_bar(self):
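The new test_mean_pixel_difference keyword lets a pipeline test class keep the max-difference assertion while opting out of the pixel-level comparison; the AudioLDM fast tests above pass test_mean_pixel_difference=False, which makes sense for a pipeline whose output is an audio waveform rather than an image. The sketch below illustrates the resulting control flow with a stand-in helper: mean_abs_difference_ok is an assumption for illustration only, not the actual assert_mean_pixel_difference helper from the diffusers test utilities (whose threshold and exact semantics are not shown in this diff).

import numpy as np


def mean_abs_difference_ok(a, b, threshold=10.0):
    # Stand-in for assert_mean_pixel_difference: compare two arrays by their
    # mean absolute difference (threshold chosen arbitrarily for illustration).
    a = np.asarray(a, dtype=np.float32)
    b = np.asarray(b, dtype=np.float32)
    return float(np.abs(a - b).mean()) < threshold


def check_xformers_outputs(output_with_offload, output_without_offload,
                           test_mean_pixel_difference=True, expected_max_diff=1e-4):
    # The max-difference check always runs, as in
    # _test_xformers_attention_forwardGenerator_pass.
    max_diff = np.abs(output_with_offload - output_without_offload).max()
    assert max_diff < expected_max_diff, "XFormers attention should not affect the inference results"
    # The pixel-level check is now optional, so non-image pipelines can skip it.
    if test_mean_pixel_difference:
        assert mean_abs_difference_ok(output_with_offload[0], output_without_offload[0])


# Example usage with dummy arrays standing in for pipeline outputs:
out_a = np.zeros((1, 8, 8, 3), dtype=np.float32)
out_b = np.zeros((1, 8, 8, 3), dtype=np.float32)
check_xformers_outputs(out_a, out_b)                                    # image-style check
check_xformers_outputs(out_a, out_b, test_mean_pixel_difference=False)  # audio-style check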