Commit af13a90e (unverified) in renzhc/diffusers_dcu
Authored by Dhruv Nair on Feb 20, 2024; committed by GitHub on Feb 20, 2024

Remove `disable_full_determinism` from StableVideoDiffusion xformers test. (#7039)

* update
* update

parent 3067da12
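
For context: `enable_full_determinism` and `disable_full_determinism` are helpers in `diffusers.utils.testing_utils` that flip PyTorch's global determinism switches for the whole test process. A rough sketch of what such a pair of helpers does (an approximation, not the exact diffusers implementation):

    import os
    import torch

    def enable_full_determinism():
        # Make PyTorch pick deterministic kernels; ops without a
        # deterministic implementation raise instead of running silently.
        torch.use_deterministic_algorithms(True)
        # Required for deterministic cuBLAS matmuls on CUDA.
        os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8"
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

    def disable_full_determinism():
        # Restore the default, non-deterministic (faster) behavior.
        torch.use_deterministic_algorithms(False)

Because these switches are process-global, enabling determinism once at module import (as this commit does) is simpler and safer than toggling it off and back on inside a single test.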
Showing 1 changed file with 3 additions and 5 deletions.

tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py (+3, -5)
@@ -22,7 +22,6 @@ from diffusers.utils import is_accelerate_available, is_accelerate_version, load
 from diffusers.utils.import_utils import is_xformers_available
 from diffusers.utils.testing_utils import (
     CaptureLogger,
-    disable_full_determinism,
     enable_full_determinism,
     floats_tensor,
     numpy_cosine_similarity_distance,
@@ -34,6 +33,9 @@ from diffusers.utils.testing_utils import (
 from ..test_pipelines_common import PipelineTesterMixin
 
 
+enable_full_determinism()
+
+
 def to_np(tensor):
     if isinstance(tensor, torch.Tensor):
         tensor = tensor.detach().cpu().numpy()
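
The `to_np` helper shown above normalizes pipeline outputs before comparison; the hunk truncates it, but a trailing `return tensor` is implied by how it is used further down. A minimal usage sketch:

    import numpy as np
    import torch

    def to_np(tensor):
        # Move torch tensors to CPU numpy; pass numpy arrays through.
        if isinstance(tensor, torch.Tensor):
            tensor = tensor.detach().cpu().numpy()
        return tensor  # assumed from context; the diff cuts off here

    a = torch.ones(2, 3)
    b = a + 1e-5
    max_diff = np.abs(to_np(a) - to_np(b)).max()  # ~1e-5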
@@ -465,8 +467,6 @@ class StableVideoDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCa
         reason="XFormers attention is only available with CUDA and `xformers` installed",
     )
     def test_xformers_attention_forwardGenerator_pass(self):
-        disable_full_determinism()
-
         expected_max_diff = 9e-4
 
         if not self.test_xformers_attention:
@@ -496,8 +496,6 @@ class StableVideoDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCa
         max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max()
         self.assertLess(max_diff, expected_max_diff, "XFormers attention should not affect the inference results")
-
-        enable_full_determinism()
 
 
 @slow
 @require_torch_gpu
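
Net effect of the two test-body hunks: the xformers test no longer disables determinism on entry and re-enables it on exit; it runs under the module-level `enable_full_determinism()` and absorbs xformers' remaining numerical noise through the `9e-4` tolerance. A standalone sketch of that tolerance-based comparison pattern (illustrative values, not the repository's actual test):

    import numpy as np
    import torch

    torch.manual_seed(0)
    reference = torch.randn(1, 4, 8, 8)

    # Stand-in for re-running the same pipeline with xformers attention;
    # under full determinism both runs should agree to within tolerance.
    torch.manual_seed(0)
    candidate = torch.randn(1, 4, 8, 8)

    max_diff = np.abs(reference.numpy() - candidate.numpy()).max()
    assert max_diff < 9e-4, "XFormers attention should not affect the inference results"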