Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
renzhc
diffusers_dcu
Commits
a674914f
Unverified
Commit
a674914f
authored
May 05, 2025
by
Yao Matrix
Committed by
GitHub
May 05, 2025
Browse files
enable semantic diffusion and stable diffusion panorama cases on XPU (#11459)
Signed-off-by:
Yao Matrix
<
matrix.yao@intel.com
>
parent
ec3d5828
Changes
2
Hide whitespace changes
Inline
Side-by-side
Showing
2 changed files
with
26 additions
and
20 deletions
+26
-20
tests/pipelines/semantic_stable_diffusion/test_semantic_diffusion.py
...ines/semantic_stable_diffusion/test_semantic_diffusion.py
+8
-12
tests/pipelines/stable_diffusion_panorama/test_stable_diffusion_panorama.py
...able_diffusion_panorama/test_stable_diffusion_panorama.py
+18
-8
No files found.
tests/pipelines/semantic_stable_diffusion/test_semantic_diffusion.py
View file @
a674914f
...
@@ -25,11 +25,11 @@ from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
...
@@ -25,11 +25,11 @@ from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from
diffusers
import
AutoencoderKL
,
DDIMScheduler
,
LMSDiscreteScheduler
,
PNDMScheduler
,
UNet2DConditionModel
from
diffusers
import
AutoencoderKL
,
DDIMScheduler
,
LMSDiscreteScheduler
,
PNDMScheduler
,
UNet2DConditionModel
from
diffusers.pipelines.semantic_stable_diffusion
import
SemanticStableDiffusionPipeline
as
StableDiffusionPipeline
from
diffusers.pipelines.semantic_stable_diffusion
import
SemanticStableDiffusionPipeline
as
StableDiffusionPipeline
from
diffusers.utils.testing_utils
import
(
from
diffusers.utils.testing_utils
import
(
backend_empty_cache
,
enable_full_determinism
,
enable_full_determinism
,
floats_tensor
,
floats_tensor
,
nightly
,
nightly
,
require_accelerator
,
require_torch_accelerator
,
require_torch_gpu
,
torch_device
,
torch_device
,
)
)
...
@@ -42,13 +42,13 @@ class SafeDiffusionPipelineFastTests(unittest.TestCase):
...
@@ -42,13 +42,13 @@ class SafeDiffusionPipelineFastTests(unittest.TestCase):
# clean up the VRAM before each test
# clean up the VRAM before each test
super
().
setUp
()
super
().
setUp
()
gc
.
collect
()
gc
.
collect
()
torch
.
cuda
.
empty_cache
()
backend_
empty_cache
(
torch_device
)
def
tearDown
(
self
):
def
tearDown
(
self
):
# clean up the VRAM after each test
# clean up the VRAM after each test
super
().
tearDown
()
super
().
tearDown
()
gc
.
collect
()
gc
.
collect
()
torch
.
cuda
.
empty_cache
()
backend_
empty_cache
(
torch_device
)
@
property
@
property
def
dummy_image
(
self
):
def
dummy_image
(
self
):
...
@@ -238,7 +238,7 @@ class SafeDiffusionPipelineFastTests(unittest.TestCase):
...
@@ -238,7 +238,7 @@ class SafeDiffusionPipelineFastTests(unittest.TestCase):
image
=
pipe
(
"example prompt"
,
num_inference_steps
=
2
).
images
[
0
]
image
=
pipe
(
"example prompt"
,
num_inference_steps
=
2
).
images
[
0
]
assert
image
is
not
None
assert
image
is
not
None
@
require_accelerator
@
require_
torch_
accelerator
def
test_semantic_diffusion_fp16
(
self
):
def
test_semantic_diffusion_fp16
(
self
):
"""Test that stable diffusion works with fp16"""
"""Test that stable diffusion works with fp16"""
unet
=
self
.
dummy_cond_unet
unet
=
self
.
dummy_cond_unet
...
@@ -272,22 +272,21 @@ class SafeDiffusionPipelineFastTests(unittest.TestCase):
...
@@ -272,22 +272,21 @@ class SafeDiffusionPipelineFastTests(unittest.TestCase):
@
nightly
@
nightly
@
require_torch_
gpu
@
require_torch_
accelerator
class
SemanticDiffusionPipelineIntegrationTests
(
unittest
.
TestCase
):
class
SemanticDiffusionPipelineIntegrationTests
(
unittest
.
TestCase
):
def
setUp
(
self
):
def
setUp
(
self
):
# clean up the VRAM before each test
# clean up the VRAM before each test
super
().
setUp
()
super
().
setUp
()
gc
.
collect
()
gc
.
collect
()
torch
.
cuda
.
empty_cache
()
backend_
empty_cache
(
torch_device
)
def
tearDown
(
self
):
def
tearDown
(
self
):
# clean up the VRAM after each test
# clean up the VRAM after each test
super
().
tearDown
()
super
().
tearDown
()
gc
.
collect
()
gc
.
collect
()
torch
.
cuda
.
empty_cache
()
backend_
empty_cache
(
torch_device
)
def
test_positive_guidance
(
self
):
def
test_positive_guidance
(
self
):
torch_device
=
"cuda"
pipe
=
StableDiffusionPipeline
.
from_pretrained
(
"stable-diffusion-v1-5/stable-diffusion-v1-5"
)
pipe
=
StableDiffusionPipeline
.
from_pretrained
(
"stable-diffusion-v1-5/stable-diffusion-v1-5"
)
pipe
=
pipe
.
to
(
torch_device
)
pipe
=
pipe
.
to
(
torch_device
)
pipe
.
set_progress_bar_config
(
disable
=
None
)
pipe
.
set_progress_bar_config
(
disable
=
None
)
...
@@ -370,7 +369,6 @@ class SemanticDiffusionPipelineIntegrationTests(unittest.TestCase):
...
@@ -370,7 +369,6 @@ class SemanticDiffusionPipelineIntegrationTests(unittest.TestCase):
assert
np
.
abs
(
image_slice
.
flatten
()
-
expected_slice
).
max
()
<
1e-2
assert
np
.
abs
(
image_slice
.
flatten
()
-
expected_slice
).
max
()
<
1e-2
def
test_negative_guidance
(
self
):
def
test_negative_guidance
(
self
):
torch_device
=
"cuda"
pipe
=
StableDiffusionPipeline
.
from_pretrained
(
"stable-diffusion-v1-5/stable-diffusion-v1-5"
)
pipe
=
StableDiffusionPipeline
.
from_pretrained
(
"stable-diffusion-v1-5/stable-diffusion-v1-5"
)
pipe
=
pipe
.
to
(
torch_device
)
pipe
=
pipe
.
to
(
torch_device
)
pipe
.
set_progress_bar_config
(
disable
=
None
)
pipe
.
set_progress_bar_config
(
disable
=
None
)
...
@@ -453,7 +451,6 @@ class SemanticDiffusionPipelineIntegrationTests(unittest.TestCase):
...
@@ -453,7 +451,6 @@ class SemanticDiffusionPipelineIntegrationTests(unittest.TestCase):
assert
np
.
abs
(
image_slice
.
flatten
()
-
expected_slice
).
max
()
<
1e-2
assert
np
.
abs
(
image_slice
.
flatten
()
-
expected_slice
).
max
()
<
1e-2
def
test_multi_cond_guidance
(
self
):
def
test_multi_cond_guidance
(
self
):
torch_device
=
"cuda"
pipe
=
StableDiffusionPipeline
.
from_pretrained
(
"stable-diffusion-v1-5/stable-diffusion-v1-5"
)
pipe
=
StableDiffusionPipeline
.
from_pretrained
(
"stable-diffusion-v1-5/stable-diffusion-v1-5"
)
pipe
=
pipe
.
to
(
torch_device
)
pipe
=
pipe
.
to
(
torch_device
)
pipe
.
set_progress_bar_config
(
disable
=
None
)
pipe
.
set_progress_bar_config
(
disable
=
None
)
...
@@ -536,7 +533,6 @@ class SemanticDiffusionPipelineIntegrationTests(unittest.TestCase):
...
@@ -536,7 +533,6 @@ class SemanticDiffusionPipelineIntegrationTests(unittest.TestCase):
assert
np
.
abs
(
image_slice
.
flatten
()
-
expected_slice
).
max
()
<
1e-2
assert
np
.
abs
(
image_slice
.
flatten
()
-
expected_slice
).
max
()
<
1e-2
def
test_guidance_fp16
(
self
):
def
test_guidance_fp16
(
self
):
torch_device
=
"cuda"
pipe
=
StableDiffusionPipeline
.
from_pretrained
(
pipe
=
StableDiffusionPipeline
.
from_pretrained
(
"stable-diffusion-v1-5/stable-diffusion-v1-5"
,
torch_dtype
=
torch
.
float16
"stable-diffusion-v1-5/stable-diffusion-v1-5"
,
torch_dtype
=
torch
.
float16
)
)
...
...
tests/pipelines/stable_diffusion_panorama/test_stable_diffusion_panorama.py
View file @
a674914f
...
@@ -29,7 +29,17 @@ from diffusers import (
...
@@ -29,7 +29,17 @@ from diffusers import (
StableDiffusionPanoramaPipeline
,
StableDiffusionPanoramaPipeline
,
UNet2DConditionModel
,
UNet2DConditionModel
,
)
)
from
diffusers.utils.testing_utils
import
enable_full_determinism
,
nightly
,
require_torch_gpu
,
skip_mps
,
torch_device
from
diffusers.utils.testing_utils
import
(
backend_empty_cache
,
backend_max_memory_allocated
,
backend_reset_max_memory_allocated
,
backend_reset_peak_memory_stats
,
enable_full_determinism
,
nightly
,
require_torch_accelerator
,
skip_mps
,
torch_device
,
)
from
..pipeline_params
import
TEXT_TO_IMAGE_BATCH_PARAMS
,
TEXT_TO_IMAGE_IMAGE_PARAMS
,
TEXT_TO_IMAGE_PARAMS
from
..pipeline_params
import
TEXT_TO_IMAGE_BATCH_PARAMS
,
TEXT_TO_IMAGE_IMAGE_PARAMS
,
TEXT_TO_IMAGE_PARAMS
from
..test_pipelines_common
import
(
from
..test_pipelines_common
import
(
...
@@ -267,17 +277,17 @@ class StableDiffusionPanoramaPipelineFastTests(
...
@@ -267,17 +277,17 @@ class StableDiffusionPanoramaPipelineFastTests(
@
nightly
@
nightly
@
require_torch_
gpu
@
require_torch_
accelerator
class
StableDiffusionPanoramaNightlyTests
(
unittest
.
TestCase
):
class
StableDiffusionPanoramaNightlyTests
(
unittest
.
TestCase
):
def
setUp
(
self
):
def
setUp
(
self
):
super
().
setUp
()
super
().
setUp
()
gc
.
collect
()
gc
.
collect
()
torch
.
cuda
.
empty_cache
()
backend_
empty_cache
(
torch_device
)
def
tearDown
(
self
):
def
tearDown
(
self
):
super
().
tearDown
()
super
().
tearDown
()
gc
.
collect
()
gc
.
collect
()
torch
.
cuda
.
empty_cache
()
backend_
empty_cache
(
torch_device
)
def
get_inputs
(
self
,
seed
=
0
):
def
get_inputs
(
self
,
seed
=
0
):
generator
=
torch
.
manual_seed
(
seed
)
generator
=
torch
.
manual_seed
(
seed
)
...
@@ -415,9 +425,9 @@ class StableDiffusionPanoramaNightlyTests(unittest.TestCase):
...
@@ -415,9 +425,9 @@ class StableDiffusionPanoramaNightlyTests(unittest.TestCase):
assert
number_of_steps
==
3
assert
number_of_steps
==
3
def
test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading
(
self
):
def
test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading
(
self
):
torch
.
cuda
.
empty_cache
()
backend_
empty_cache
(
torch_device
)
torch
.
cuda
.
reset_max_memory_allocated
()
backend_
reset_max_memory_allocated
(
torch_device
)
torch
.
cuda
.
reset_peak_memory_stats
()
backend_
reset_peak_memory_stats
(
torch_device
)
model_ckpt
=
"stabilityai/stable-diffusion-2-base"
model_ckpt
=
"stabilityai/stable-diffusion-2-base"
scheduler
=
DDIMScheduler
.
from_pretrained
(
model_ckpt
,
subfolder
=
"scheduler"
)
scheduler
=
DDIMScheduler
.
from_pretrained
(
model_ckpt
,
subfolder
=
"scheduler"
)
...
@@ -429,6 +439,6 @@ class StableDiffusionPanoramaNightlyTests(unittest.TestCase):
...
@@ -429,6 +439,6 @@ class StableDiffusionPanoramaNightlyTests(unittest.TestCase):
inputs
=
self
.
get_inputs
()
inputs
=
self
.
get_inputs
()
_
=
pipe
(
**
inputs
)
_
=
pipe
(
**
inputs
)
mem_bytes
=
torch
.
cuda
.
max_memory_allocated
()
mem_bytes
=
backend_
max_memory_allocated
(
torch_device
)
# make sure that less than 5.5 GB is allocated
# make sure that less than 5.5 GB is allocated
assert
mem_bytes
<
5.5
*
10
**
9
assert
mem_bytes
<
5.5
*
10
**
9
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment