renzhc / diffusers_dcu · Commits

Commit 6e2a93de (unverified), authored Mar 06, 2025 by Sayak Paul, committed via GitHub on Mar 06, 2025. Parent: 37b8edfb

[tests] fix tests for save load components (#10977)

fix tests
Changes: 6 changed files with 270 additions and 4 deletions (+270 -4).
tests/pipelines/hunyuandit/test_hunyuan_dit.py  +94 -0
tests/pipelines/latte/test_latte.py  +69 -1
tests/pipelines/pag/test_pag_hunyuan_dit.py  +95 -3
tests/pipelines/pag/test_pag_pixart_sigma.py  +4 -0
tests/pipelines/pixart_alpha/test_pixart.py  +4 -0
tests/pipelines/pixart_sigma/test_pixart.py  +4 -0
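Of the six files, the first three reimplement test_save_load_optional_components per pipeline and the last three skip it. The reimplemented tests all follow one round-trip pattern: run the pipeline with the prompt pre-converted to embeddings and every optional component set to None, save it, reload it from disk, and assert that the optional components stayed None and that the output is unchanged. The sketch below condenses that pattern; it is illustrative only and not part of the commit (the helper name and its parameters are hypothetical stand-ins for each test's fixtures).

    # Illustrative sketch, not part of this commit: the round-trip check the
    # new tests share. `pipeline_class`, `pipe`, and `inputs` stand in for
    # each test's dummy fixtures.
    import tempfile

    import numpy as np


    def check_optional_components_round_trip(testcase, pipeline_class, pipe, inputs, atol=1e-4):
        # Null out every optional component before running and saving.
        for name in pipe._optional_components:
            setattr(pipe, name, None)
        output = pipe(**inputs)[0]

        # Round-trip the pipeline through disk.
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = pipeline_class.from_pretrained(tmpdir)

        # Optional components must still be None after loading...
        for name in pipe._optional_components:
            testcase.assertIsNone(getattr(pipe_loaded, name))

        # ...and the reloaded pipeline must reproduce the original output.
        output_loaded = pipe_loaded(**inputs)[0]
        max_diff = np.abs(np.asarray(output) - np.asarray(output_loaded)).max()
        testcase.assertLess(max_diff, atol)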
tests/pipelines/hunyuandit/test_hunyuan_dit.py (view file @ 6e2a93de)
@@ -14,6 +14,7 @@
 # limitations under the License.
 import gc
+import tempfile
 import unittest
 
 import numpy as np
@@ -212,6 +213,99 @@ class HunyuanDiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
     def test_encode_prompt_works_in_isolation(self):
         pass
 
+    def test_save_load_optional_components(self):
+        components = self.get_dummy_components()
+        pipe = self.pipeline_class(**components)
+        pipe.to(torch_device)
+        pipe.set_progress_bar_config(disable=None)
+
+        inputs = self.get_dummy_inputs(torch_device)
+
+        prompt = inputs["prompt"]
+        generator = inputs["generator"]
+        num_inference_steps = inputs["num_inference_steps"]
+        output_type = inputs["output_type"]
+
+        (
+            prompt_embeds,
+            negative_prompt_embeds,
+            prompt_attention_mask,
+            negative_prompt_attention_mask,
+        ) = pipe.encode_prompt(prompt, device=torch_device, dtype=torch.float32, text_encoder_index=0)
+
+        (
+            prompt_embeds_2,
+            negative_prompt_embeds_2,
+            prompt_attention_mask_2,
+            negative_prompt_attention_mask_2,
+        ) = pipe.encode_prompt(
+            prompt,
+            device=torch_device,
+            dtype=torch.float32,
+            text_encoder_index=1,
+        )
+
+        # inputs with prompt converted to embeddings
+        inputs = {
+            "prompt_embeds": prompt_embeds,
+            "prompt_attention_mask": prompt_attention_mask,
+            "negative_prompt_embeds": negative_prompt_embeds,
+            "negative_prompt_attention_mask": negative_prompt_attention_mask,
+            "prompt_embeds_2": prompt_embeds_2,
+            "prompt_attention_mask_2": prompt_attention_mask_2,
+            "negative_prompt_embeds_2": negative_prompt_embeds_2,
+            "negative_prompt_attention_mask_2": negative_prompt_attention_mask_2,
+            "generator": generator,
+            "num_inference_steps": num_inference_steps,
+            "output_type": output_type,
+            "use_resolution_binning": False,
+        }
+
+        # set all optional components to None
+        for optional_component in pipe._optional_components:
+            setattr(pipe, optional_component, None)
+
+        output = pipe(**inputs)[0]
+
+        with tempfile.TemporaryDirectory() as tmpdir:
+            pipe.save_pretrained(tmpdir)
+            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
+            pipe_loaded.to(torch_device)
+            pipe_loaded.set_progress_bar_config(disable=None)
+
+        for optional_component in pipe._optional_components:
+            self.assertTrue(
+                getattr(pipe_loaded, optional_component) is None,
+                f"`{optional_component}` did not stay set to None after loading.",
+            )
+
+        inputs = self.get_dummy_inputs(torch_device)
+
+        generator = inputs["generator"]
+        num_inference_steps = inputs["num_inference_steps"]
+        output_type = inputs["output_type"]
+
+        # inputs with prompt converted to embeddings
+        inputs = {
+            "prompt_embeds": prompt_embeds,
+            "prompt_attention_mask": prompt_attention_mask,
+            "negative_prompt_embeds": negative_prompt_embeds,
+            "negative_prompt_attention_mask": negative_prompt_attention_mask,
+            "prompt_embeds_2": prompt_embeds_2,
+            "prompt_attention_mask_2": prompt_attention_mask_2,
+            "negative_prompt_embeds_2": negative_prompt_embeds_2,
+            "negative_prompt_attention_mask_2": negative_prompt_attention_mask_2,
+            "generator": generator,
+            "num_inference_steps": num_inference_steps,
+            "output_type": output_type,
+            "use_resolution_binning": False,
+        }
+
+        output_loaded = pipe_loaded(**inputs)[0]
+
+        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
+        self.assertLess(max_diff, 1e-4)
+
     @slow
     @require_torch_accelerator
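Note that the prompt is encoded up front, once per text encoder (text_encoder_index=0 and 1), because HunyuanDiT's tokenizers and text encoders are among the optional components the test then sets to None; by the time the pipeline is called, only embeddings are available. To see which components a pipeline class actually declares optional, a quick check like the following should work (illustrative; `_optional_components` is a class-level attribute, so no weights are loaded):

    # Illustrative: inspect the optional components declared by the pipeline class.
    from diffusers import HunyuanDiTPipeline

    print(HunyuanDiTPipeline._optional_components)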
tests/pipelines/latte/test_latte.py (view file @ 6e2a93de)
@@ -15,6 +15,7 @@
 import gc
 import inspect
+import tempfile
 import unittest
 
 import numpy as np
@@ -39,7 +40,7 @@ from diffusers.utils.testing_utils import (
 )
 
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
-from ..test_pipelines_common import PipelineTesterMixin, PyramidAttentionBroadcastTesterMixin
+from ..test_pipelines_common import PipelineTesterMixin, PyramidAttentionBroadcastTesterMixin, to_np
 
 
 enable_full_determinism()
@@ -217,6 +218,73 @@ class LattePipelineFastTests(PipelineTesterMixin, PyramidAttentionBroadcastTesterMixin, unittest.TestCase):
     def test_encode_prompt_works_in_isolation(self):
         pass
 
+    def test_save_load_optional_components(self):
+        if not hasattr(self.pipeline_class, "_optional_components"):
+            return
+
+        components = self.get_dummy_components()
+        pipe = self.pipeline_class(**components)
+
+        for component in pipe.components.values():
+            if hasattr(component, "set_default_attn_processor"):
+                component.set_default_attn_processor()
+        pipe.to(torch_device)
+        pipe.set_progress_bar_config(disable=None)
+
+        inputs = self.get_dummy_inputs(torch_device)
+
+        prompt = inputs["prompt"]
+        generator = inputs["generator"]
+
+        (
+            prompt_embeds,
+            negative_prompt_embeds,
+        ) = pipe.encode_prompt(prompt)
+
+        # inputs with prompt converted to embeddings
+        inputs = {
+            "prompt_embeds": prompt_embeds,
+            "negative_prompt": None,
+            "negative_prompt_embeds": negative_prompt_embeds,
+            "generator": generator,
+            "num_inference_steps": 2,
+            "guidance_scale": 5.0,
+            "height": 8,
+            "width": 8,
+            "video_length": 1,
+            "mask_feature": False,
+            "output_type": "pt",
+            "clean_caption": False,
+        }
+
+        # set all optional components to None
+        for optional_component in pipe._optional_components:
+            setattr(pipe, optional_component, None)
+
+        output = pipe(**inputs)[0]
+
+        with tempfile.TemporaryDirectory() as tmpdir:
+            pipe.save_pretrained(tmpdir, safe_serialization=False)
+            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
+            pipe_loaded.to(torch_device)
+            for component in pipe_loaded.components.values():
+                if hasattr(component, "set_default_attn_processor"):
+                    component.set_default_attn_processor()
+            pipe_loaded.set_progress_bar_config(disable=None)
+
+        for optional_component in pipe._optional_components:
+            self.assertTrue(
+                getattr(pipe_loaded, optional_component) is None,
+                f"`{optional_component}` did not stay set to None after loading.",
+            )
+
+        output_loaded = pipe_loaded(**inputs)[0]
+
+        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
+        self.assertLess(max_diff, 1.0)
+
     @slow
     @require_torch_accelerator
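The Latte variant differs from the HunyuanDiT one in a few deliberate ways: it returns early if the pipeline class has no `_optional_components`, it resets every component that supports `set_default_attn_processor()` both before and after the round trip so the two runs use the same attention processors, it saves with `safe_serialization=False`, and it compares with a much looser tolerance (1.0 instead of 1e-4), presumably because the output here is a short video rather than a single image.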
tests/pipelines/pag/test_pag_hunyuan_dit.py (view file @ 6e2a93de)
@@ -14,6 +14,7 @@
 # limitations under the License.
 import inspect
+import tempfile
 import unittest
 
 import numpy as np
@@ -27,9 +28,7 @@ from diffusers import (
     HunyuanDiTPAGPipeline,
     HunyuanDiTPipeline,
 )
-from diffusers.utils.testing_utils import (
-    enable_full_determinism,
-)
+from diffusers.utils.testing_utils import enable_full_determinism, torch_device
 
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin, to_np
@@ -269,3 +268,96 @@ class HunyuanDiTPAGPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
     )
     def test_encode_prompt_works_in_isolation(self):
         pass
+
+    def test_save_load_optional_components(self):
+        components = self.get_dummy_components()
+        pipe = self.pipeline_class(**components)
+        pipe.to(torch_device)
+        pipe.set_progress_bar_config(disable=None)
+
+        inputs = self.get_dummy_inputs(torch_device)
+
+        prompt = inputs["prompt"]
+        generator = inputs["generator"]
+        num_inference_steps = inputs["num_inference_steps"]
+        output_type = inputs["output_type"]
+
+        (
+            prompt_embeds,
+            negative_prompt_embeds,
+            prompt_attention_mask,
+            negative_prompt_attention_mask,
+        ) = pipe.encode_prompt(prompt, device=torch_device, dtype=torch.float32, text_encoder_index=0)
+
+        (
+            prompt_embeds_2,
+            negative_prompt_embeds_2,
+            prompt_attention_mask_2,
+            negative_prompt_attention_mask_2,
+        ) = pipe.encode_prompt(
+            prompt,
+            device=torch_device,
+            dtype=torch.float32,
+            text_encoder_index=1,
+        )
+
+        # inputs with prompt converted to embeddings
+        inputs = {
+            "prompt_embeds": prompt_embeds,
+            "prompt_attention_mask": prompt_attention_mask,
+            "negative_prompt_embeds": negative_prompt_embeds,
+            "negative_prompt_attention_mask": negative_prompt_attention_mask,
+            "prompt_embeds_2": prompt_embeds_2,
+            "prompt_attention_mask_2": prompt_attention_mask_2,
+            "negative_prompt_embeds_2": negative_prompt_embeds_2,
+            "negative_prompt_attention_mask_2": negative_prompt_attention_mask_2,
+            "generator": generator,
+            "num_inference_steps": num_inference_steps,
+            "output_type": output_type,
+            "use_resolution_binning": False,
+        }
+
+        # set all optional components to None
+        for optional_component in pipe._optional_components:
+            setattr(pipe, optional_component, None)
+
+        output = pipe(**inputs)[0]
+
+        with tempfile.TemporaryDirectory() as tmpdir:
+            pipe.save_pretrained(tmpdir)
+            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
+            pipe_loaded.to(torch_device)
+            pipe_loaded.set_progress_bar_config(disable=None)
+
+        for optional_component in pipe._optional_components:
+            self.assertTrue(
+                getattr(pipe_loaded, optional_component) is None,
+                f"`{optional_component}` did not stay set to None after loading.",
+            )
+
+        inputs = self.get_dummy_inputs(torch_device)
+
+        generator = inputs["generator"]
+        num_inference_steps = inputs["num_inference_steps"]
+        output_type = inputs["output_type"]
+
+        # inputs with prompt converted to embeddings
+        inputs = {
+            "prompt_embeds": prompt_embeds,
+            "prompt_attention_mask": prompt_attention_mask,
+            "negative_prompt_embeds": negative_prompt_embeds,
+            "negative_prompt_attention_mask": negative_prompt_attention_mask,
+            "prompt_embeds_2": prompt_embeds_2,
+            "prompt_attention_mask_2": prompt_attention_mask_2,
+            "negative_prompt_embeds_2": negative_prompt_embeds_2,
+            "negative_prompt_attention_mask_2": negative_prompt_attention_mask_2,
+            "generator": generator,
+            "num_inference_steps": num_inference_steps,
+            "output_type": output_type,
+            "use_resolution_binning": False,
+        }
+
+        output_loaded = pipe_loaded(**inputs)[0]
+
+        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
+        self.assertLess(max_diff, 1e-4)
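Apart from the pipeline class under test, this PAG test body is identical to the one added in tests/pipelines/hunyuandit/test_hunyuan_dit.py above.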
tests/pipelines/pag/test_pag_pixart_sigma.py (view file @ 6e2a93de)
@@ -343,3 +343,7 @@ class PixArtSigmaPAGPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
         self.assertTrue(hasattr(pipe, "components"))
         self.assertTrue(set(pipe.components.keys()) == set(init_components.keys()))
+
+    @unittest.skip("Test is already covered through encode_prompt isolation.")
+    def test_save_load_optional_components(self):
+        pass
tests/pipelines/pixart_alpha/test_pixart.py (view file @ 6e2a93de)
@@ -144,6 +144,10 @@ class PixArtAlphaPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
         max_diff = np.abs(image_slice.flatten() - expected_slice).max()
         self.assertLessEqual(max_diff, 1e-3)
 
+    @unittest.skip("Test is already covered through encode_prompt isolation.")
+    def test_save_load_optional_components(self):
+        pass
+
     def test_inference_with_embeddings_and_multiple_images(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
tests/pipelines/pixart_sigma/test_pixart.py (view file @ 6e2a93de)
@@ -239,6 +239,10 @@ class PixArtSigmaPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
         max_diff = np.abs(image_slice.flatten() - expected_slice).max()
         self.assertLessEqual(max_diff, 1e-3)
 
+    @unittest.skip("Test is already covered through encode_prompt isolation.")
+    def test_save_load_optional_components(self):
+        pass
+
     def test_inference_batch_single_identical(self):
         self._test_inference_batch_single_identical(expected_max_diff=1e-3)
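The three PixArt-family files take the opposite approach: rather than reimplementing the mixin test, they skip it outright, since the embedding round trip is already covered by the encode_prompt isolation tests. To exercise just the tests touched by this commit, an invocation along the lines of `pytest tests/pipelines -k test_save_load_optional_components` should select them.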