renzhc/diffusers_dcu, commit fbe2fe55 (unverified)
Authored Apr 30, 2025 by Yao Matrix; committed by GitHub on Apr 30, 2025.
enable consistency test cases on XPU, all passed (#11446)

Signed-off-by: Yao Matrix <matrix.yao@intel.com>
Parent: c8651158
Showing 1 changed file with 15 additions and 5 deletions:

tests/pipelines/consistency_models/test_consistency_models.py (+15, -5)
```diff
--- a/tests/pipelines/consistency_models/test_consistency_models.py
+++ b/tests/pipelines/consistency_models/test_consistency_models.py
@@ -11,10 +11,12 @@ from diffusers import (
     UNet2DModel,
 )
 from diffusers.utils.testing_utils import (
+    Expectations,
+    backend_empty_cache,
     enable_full_determinism,
     nightly,
     require_torch_2,
-    require_torch_gpu,
+    require_torch_accelerator,
     torch_device,
 )
 from diffusers.utils.torch_utils import randn_tensor
```
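The import changes swap the CUDA-only `require_torch_gpu` gate for `require_torch_accelerator`, and bring in `backend_empty_cache` and `Expectations` for device-agnostic cache clearing and per-backend reference values. For context, here is a minimal sketch of what an accelerator-agnostic skip decorator can look like; the name matches the real helper in `diffusers.utils.testing_utils`, but these internals are an assumption and the actual decorator recognizes more backends:

```python
import unittest

import torch


def require_torch_accelerator(test_case):
    # Sketch only: skip unless some PyTorch accelerator backend is available.
    # The real diffusers decorator also covers other backends (MPS, NPU, ...).
    has_accelerator = torch.cuda.is_available() or (
        hasattr(torch, "xpu") and torch.xpu.is_available()
    )
    return unittest.skipUnless(has_accelerator, "test requires a torch accelerator")(test_case)
```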
```diff
@@ -168,17 +170,17 @@ class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
 @nightly
-@require_torch_gpu
+@require_torch_accelerator
 class ConsistencyModelPipelineSlowTests(unittest.TestCase):
     def setUp(self):
         super().setUp()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)
 
     def tearDown(self):
         super().tearDown()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)
 
     def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
         generator = torch.manual_seed(seed)
```
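In `setUp`/`tearDown`, the hard-coded `torch.cuda.empty_cache()` becomes `backend_empty_cache(torch_device)`, which flushes whichever accelerator's cache `torch_device` names. A minimal sketch of that dispatch, assuming only CUDA and XPU branches; the real helper in `diffusers.utils.testing_utils` handles additional backends:

```python
import torch


def backend_empty_cache(device: str) -> None:
    # Route the cache flush to the active backend; CPU needs no flush.
    # Sketch only: the actual diffusers helper covers more backends.
    if device == "cuda":
        torch.cuda.empty_cache()
    elif device == "xpu":
        torch.xpu.empty_cache()
```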
```diff
@@ -264,11 +266,19 @@ class ConsistencyModelPipelineSlowTests(unittest.TestCase):
         # Ensure usage of flash attention in torch 2.0
         with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
             image = pipe(**inputs).images
         assert image.shape == (1, 64, 64, 3)
 
         image_slice = image[0, -3:, -3:, -1]
-        expected_slice = np.array([0.1845, 0.1371, 0.1211, 0.2035, 0.1954, 0.1323, 0.1773, 0.1593, 0.1314])
+        expected_slices = Expectations(
+            {
+                ("xpu", 3): np.array([0.0816, 0.0518, 0.0445, 0.0594, 0.0739, 0.0534, 0.0805, 0.0457, 0.0765]),
+                ("cuda", 7): np.array([0.1845, 0.1371, 0.1211, 0.2035, 0.1954, 0.1323, 0.1773, 0.1593, 0.1314]),
+                ("cuda", 8): np.array([0.0816, 0.0518, 0.0445, 0.0594, 0.0739, 0.0534, 0.0805, 0.0457, 0.0765]),
+            }
+        )
+        expected_slice = expected_slices.get_expectation()
 
         assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
```
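The single CUDA-tuned `expected_slice` becomes an `Expectations` table keyed by what looks like a (device type, version qualifier) pair: compute capability 7.x (Volta/Turing) keeps the original values, while 8.x (Ampere) and XPU share the new reference slice. A hypothetical sketch of how such a lookup might resolve; the real `Expectations` class in `diffusers.utils.testing_utils` may pick keys differently:

```python
import torch


class Expectations:
    # Hypothetical internals: map (device_type, version) keys to reference
    # arrays and return the entry matching the running hardware.
    def __init__(self, data):
        self._data = data

    def _current_key(self):
        if torch.cuda.is_available():
            major, _minor = torch.cuda.get_device_capability()
            return ("cuda", major)
        if hasattr(torch, "xpu") and torch.xpu.is_available():
            return ("xpu", 3)  # placeholder version qualifier, an assumption
        return ("cpu", None)

    def get_expectation(self):
        key = self._current_key()
        if key in self._data:
            return self._data[key]
        # Fall back to any entry registered for the same device type.
        for (device, _version), value in self._data.items():
            if device == key[0]:
                return value
        raise KeyError(f"no expectation registered for {key}")
```

Whichever entry is selected, the final assertion is unchanged: the max absolute deviation of the image slice from the reference must stay below 1e-3, so only the reference values differ per backend, not the strictness of the check.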