renzhc / diffusers_dcu

Commit 851dfa30 (unverified), authored Dec 23, 2024 by Sayak Paul, committed by GitHub on Dec 23, 2024

[Tests] Fix more tests sayak (#10359)

* fixes to tests
* fixture
* fixes
parent ea1ba0ba
Showing 7 changed files with 42 additions and 126 deletions
tests/lora/test_lora_layers_cogvideox.py                     +1  −41
tests/lora/test_lora_layers_hunyuanvideo.py                  +1  −45
tests/lora/test_lora_layers_mochi.py                         +1  −39
tests/lora/utils.py                                          +1  −1
tests/models/test_attention_processor.py                     +11 −0
tests/models/transformers/test_models_transformer_mochi.py   +2  −0
tests/models/transformers/test_models_transformer_sana.py    +25 −0
tests/lora/test_lora_layers_cogvideox.py
```diff
@@ -15,8 +15,6 @@
 import sys
 import unittest
 
-import numpy as np
-import pytest
 import torch
 from transformers import AutoTokenizer, T5EncoderModel
@@ -29,16 +27,13 @@ from diffusers import (
 )
 from diffusers.utils.testing_utils import (
     floats_tensor,
-    is_torch_version,
     require_peft_backend,
-    skip_mps,
-    torch_device,
 )
 
 sys.path.append(".")
 
-from utils import PeftLoraLoaderMixinTests, check_if_lora_correctly_set  # noqa: E402
+from utils import PeftLoraLoaderMixinTests  # noqa: E402
 
 
 @require_peft_backend
@@ -123,41 +118,6 @@ class CogVideoXLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
         return noise, input_ids, pipeline_inputs
 
-    @skip_mps
-    @pytest.mark.xfail(
-        condition=torch.device(torch_device).type == "cpu" and is_torch_version(">=", "2.5"),
-        reason="Test currently fails on CPU and PyTorch 2.5.1 but not on PyTorch 2.4.1.",
-        strict=True,
-    )
-    def test_lora_fuse_nan(self):
-        for scheduler_cls in self.scheduler_classes:
-            components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls)
-            pipe = self.pipeline_class(**components)
-            pipe = pipe.to(torch_device)
-            pipe.set_progress_bar_config(disable=None)
-            _, _, inputs = self.get_dummy_inputs(with_generator=False)
-
-            pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1")
-            self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in denoiser")
-
-            # corrupt one LoRA weight with `inf` values
-            with torch.no_grad():
-                pipe.transformer.transformer_blocks[0].attn1.to_q.lora_A["adapter-1"].weight += float("inf")
-
-            # with `safe_fusing=True` we should see an Error
-            with self.assertRaises(ValueError):
-                pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules, safe_fusing=True)
-
-            # without we should not see an error, but every image will be black
-            pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules, safe_fusing=False)
-
-            out = pipe(
-                "test",
-                num_inference_steps=2,
-                max_sequence_length=inputs["max_sequence_length"],
-                output_type="np",
-            )[0]
-
-            self.assertTrue(np.isnan(out).all())
-
     def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
         super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3)
```
tests/lora/test_lora_layers_hunyuanvideo.py
```diff
@@ -15,8 +15,6 @@
 import sys
 import unittest
 
-import numpy as np
-import pytest
 import torch
 from transformers import CLIPTextModel, CLIPTokenizer, LlamaModel, LlamaTokenizerFast
@@ -28,16 +26,14 @@ from diffusers import (
 )
 from diffusers.utils.testing_utils import (
     floats_tensor,
-    is_torch_version,
     require_peft_backend,
     skip_mps,
-    torch_device,
 )
 
 sys.path.append(".")
 
-from utils import PeftLoraLoaderMixinTests, check_if_lora_correctly_set  # noqa: E402
+from utils import PeftLoraLoaderMixinTests  # noqa: E402
 
 
 @require_peft_backend
@@ -144,46 +140,6 @@ class HunyuanVideoLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
         return noise, input_ids, pipeline_inputs
 
-    @pytest.mark.xfail(
-        condition=torch.device(torch_device).type == "cpu" and is_torch_version(">=", "2.5"),
-        reason="Test currently fails on CPU and PyTorch 2.5.1 but not on PyTorch 2.4.1.",
-        strict=True,
-    )
-    def test_lora_fuse_nan(self):
-        for scheduler_cls in self.scheduler_classes:
-            components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls)
-            pipe = self.pipeline_class(**components)
-            pipe = pipe.to(torch_device)
-            pipe.set_progress_bar_config(disable=None)
-            _, _, inputs = self.get_dummy_inputs(with_generator=False)
-
-            pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1")
-            self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in denoiser")
-
-            # corrupt one LoRA weight with `inf` values
-            with torch.no_grad():
-                pipe.transformer.transformer_blocks[0].attn.to_q.lora_A["adapter-1"].weight += float("inf")
-
-            # with `safe_fusing=True` we should see an Error
-            with self.assertRaises(ValueError):
-                pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules, safe_fusing=True)
-
-            # without we should not see an error, but every image will be black
-            pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules, safe_fusing=False)
-
-            out = pipe(
-                prompt=inputs["prompt"],
-                height=inputs["height"],
-                width=inputs["width"],
-                num_frames=inputs["num_frames"],
-                num_inference_steps=inputs["num_inference_steps"],
-                max_sequence_length=inputs["max_sequence_length"],
-                output_type="np",
-            )[0]
-
-            self.assertTrue(np.isnan(out).all())
-
     def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
         super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3)
```
tests/lora/test_lora_layers_mochi.py
```diff
@@ -15,24 +15,20 @@
 import sys
 import unittest
 
-import numpy as np
-import pytest
 import torch
 from transformers import AutoTokenizer, T5EncoderModel
 
 from diffusers import AutoencoderKLMochi, FlowMatchEulerDiscreteScheduler, MochiPipeline, MochiTransformer3DModel
 from diffusers.utils.testing_utils import (
     floats_tensor,
-    is_torch_version,
     require_peft_backend,
     skip_mps,
-    torch_device,
 )
 
 sys.path.append(".")
 
-from utils import PeftLoraLoaderMixinTests, check_if_lora_correctly_set  # noqa: E402
+from utils import PeftLoraLoaderMixinTests  # noqa: E402
 
 
 @require_peft_backend
@@ -103,40 +99,6 @@ class MochiLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
         return noise, input_ids, pipeline_inputs
 
-    @pytest.mark.xfail(
-        condition=torch.device(torch_device).type == "cpu" and is_torch_version(">=", "2.5"),
-        reason="Test currently fails on CPU and PyTorch 2.5.1 but not on PyTorch 2.4.1.",
-        strict=True,
-    )
-    def test_lora_fuse_nan(self):
-        for scheduler_cls in self.scheduler_classes:
-            components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls)
-            pipe = self.pipeline_class(**components)
-            pipe = pipe.to(torch_device)
-            pipe.set_progress_bar_config(disable=None)
-            _, _, inputs = self.get_dummy_inputs(with_generator=False)
-
-            pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1")
-            self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in denoiser")
-
-            # corrupt one LoRA weight with `inf` values
-            with torch.no_grad():
-                pipe.transformer.transformer_blocks[0].attn1.to_q.lora_A["adapter-1"].weight += float("inf")
-
-            # with `safe_fusing=True` we should see an Error
-            with self.assertRaises(ValueError):
-                pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules, safe_fusing=True)
-
-            # without we should not see an error, but every image will be black
-            pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules, safe_fusing=False)
-
-            out = pipe(
-                "test",
-                num_inference_steps=2,
-                max_sequence_length=inputs["max_sequence_length"],
-                output_type="np",
-            )[0]
-
-            self.assertTrue(np.isnan(out).all())
-
     def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
         super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3)
```
tests/lora/utils.py
```diff
@@ -1528,7 +1528,7 @@ class PeftLoraLoaderMixinTests:
     @pytest.mark.xfail(
         condition=torch.device(torch_device).type == "cpu" and is_torch_version(">=", "2.5"),
         reason="Test currently fails on CPU and PyTorch 2.5.1 but not on PyTorch 2.4.1.",
-        strict=True,
+        strict=False,
     )
     def test_lora_fuse_nan(self):
         for scheduler_cls in self.scheduler_classes:
```
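The one-line change above relaxes the `xfail` marker from strict to non-strict. With `strict=True`, pytest turns an unexpected pass (XPASS) into a hard failure, so the marked test breaks the suite on configurations where the failure does not reproduce; with `strict=False`, an unexpected pass is only reported as XPASS. A minimal self-contained sketch of the difference (test names are hypothetical, not from this commit):

```python
# Sketch of pytest.mark.xfail strict semantics. Run with: pytest -rxX sketch.py
import pytest


@pytest.mark.xfail(strict=True, reason="expected to fail")
def test_strict():
    # This assertion passes, so pytest reports FAILED ([XPASS(strict)]).
    assert 1 + 1 == 2


@pytest.mark.xfail(strict=False, reason="expected to fail")
def test_non_strict():
    # This assertion also passes, but pytest only reports XPASS
    # and the run stays green.
    assert 1 + 1 == 2
```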
tests/models/test_attention_processor.py
```diff
@@ -2,10 +2,12 @@ import tempfile
 import unittest
 
 import numpy as np
+import pytest
 import torch
 
 from diffusers import DiffusionPipeline
 from diffusers.models.attention_processor import Attention, AttnAddedKVProcessor
+from diffusers.utils.testing_utils import torch_device
 
 
 class AttnAddedKVProcessorTests(unittest.TestCase):
@@ -79,6 +81,15 @@ class AttnAddedKVProcessorTests(unittest.TestCase):
 
 
 class DeprecatedAttentionBlockTests(unittest.TestCase):
+    @pytest.fixture(scope="session")
+    def is_dist_enabled(pytestconfig):
+        return pytestconfig.getoption("dist") == "loadfile"
+
+    @pytest.mark.xfail(
+        condition=torch.device(torch_device).type == "cuda" and is_dist_enabled,
+        reason="Test currently fails on our GPU CI because of `loadfile`. Note that it only fails when the tests are distributed from `pytest ... tests/models`. If the tests are run individually, even with `loadfile` it won't fail.",
+        strict=True,
+    )
     def test_conversion_when_using_device_map(self):
         pipe = DiffusionPipeline.from_pretrained(
             "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None
```
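The new `is_dist_enabled` fixture reads the `--dist` option registered by the pytest-xdist plugin, so the xfail applies only when whole test files are distributed to workers (`--dist loadfile`). A self-contained sketch of reading a plugin option through `pytestconfig`, assuming pytest-xdist is installed (fixture and test names here are illustrative):

```python
import pytest


@pytest.fixture(scope="session")
def dist_mode(pytestconfig):
    # "--dist" exists only when pytest-xdist is installed; fall back to
    # "no" so this sketch also runs without the plugin.
    return pytestconfig.getoption("dist", default="no")


def test_only_under_loadfile(dist_mode):
    # Hypothetical guard: only meaningful when invoked as
    # `pytest -n auto --dist loadfile`.
    if dist_mode != "loadfile":
        pytest.skip("only meaningful when xdist distributes whole files")
    assert dist_mode == "loadfile"
```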
tests/models/transformers/test_models_transformer_mochi.py
```diff
@@ -30,6 +30,8 @@ class MochiTransformerTests(ModelTesterMixin, unittest.TestCase):
     model_class = MochiTransformer3DModel
     main_input_name = "hidden_states"
     uses_custom_attn_processor = True
+    # Overriding it because of the transformer size.
+    model_split_percents = [0.7, 0.6, 0.6]
 
     @property
     def dummy_input(self):
```
tests/models/transformers/test_models_transformer_sana.py
```diff
@@ -14,6 +14,7 @@
 
 import unittest
 
+import pytest
 import torch
 
 from diffusers import SanaTransformer2DModel
@@ -80,3 +81,27 @@ class SanaTransformerTests(ModelTesterMixin, unittest.TestCase):
     def test_gradient_checkpointing_is_applied(self):
         expected_set = {"SanaTransformer2DModel"}
         super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
+
+    @pytest.mark.xfail(
+        condition=torch.device(torch_device).type == "cuda",
+        reason="Test currently fails.",
+        strict=True,
+    )
+    def test_cpu_offload(self):
+        return super().test_cpu_offload()
+
+    @pytest.mark.xfail(
+        condition=torch.device(torch_device).type == "cuda",
+        reason="Test currently fails.",
+        strict=True,
+    )
+    def test_disk_offload_with_safetensors(self):
+        return super().test_disk_offload_with_safetensors()
+
+    @pytest.mark.xfail(
+        condition=torch.device(torch_device).type == "cuda",
+        reason="Test currently fails.",
+        strict=True,
+    )
+    def test_disk_offload_without_safetensors(self):
+        return super().test_disk_offload_without_safetensors()
```
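The same three-argument `xfail` marker is repeated verbatim on all three offload tests above. A hedged refactoring sketch, not part of this commit, that binds the marker once and reuses it (the class name below is a hypothetical stand-in for `SanaTransformerTests`):

```python
import pytest
import torch
from diffusers.utils.testing_utils import torch_device

# Build the marker once; apply it to each offload test that fails on CUDA.
xfail_on_cuda = pytest.mark.xfail(
    condition=torch.device(torch_device).type == "cuda",
    reason="Test currently fails.",
    strict=True,
)


class SanaTransformerTestsSketch:
    @xfail_on_cuda
    def test_cpu_offload(self):
        ...

    @xfail_on_cuda
    def test_disk_offload_with_safetensors(self):
        ...
```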