renzhc / diffusers_dcu · Commits · 4f05058b

Commit 4f05058b (unverified), authored Aug 24, 2023 by Dhruv Nair, committed via GitHub on Aug 24, 2023
Parent: 5d441300

Clean up flaky behaviour on Slow CUDA Pytorch Push Tests (#4759)

use max diff to compare model outputs

Showing 10 changed files with 19 additions and 19 deletions (+19 -19).
Changed files:

tests/models/test_modeling_common.py  +6 -6
tests/models/test_models_unet_2d_condition.py  +2 -2
tests/pipelines/controlnet/test_controlnet.py  +1 -1
tests/pipelines/controlnet/test_controlnet_img2img.py  +1 -1
tests/pipelines/controlnet/test_controlnet_inpaint.py  +1 -1
tests/pipelines/test_pipelines.py  +4 -4
tests/pipelines/test_pipelines_common.py  +1 -1
tests/pipelines/versatile_diffusion/test_versatile_diffusion_dual_guided.py  +1 -1
tests/pipelines/versatile_diffusion/test_versatile_diffusion_mega.py  +1 -1
tests/pipelines/versatile_diffusion/test_versatile_diffusion_text_to_image.py  +1 -1
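Every change below follows the pattern in the commit message: comparisons that previously summed absolute differences over an entire output tensor now check the maximum absolute difference, and fixed thresholds become overridable defaults. A summed difference scales with the number of elements, so ordinary CUDA numerical noise on a large image tensor can trip a fixed threshold even when every individual value is essentially identical; the maximum difference does not grow with tensor size. A minimal sketch of the effect (shapes and noise level are illustrative, not taken from these tests):

import numpy as np

rng = np.random.default_rng(0)
image = rng.random((2, 512, 512, 3), dtype=np.float32)                     # reference pipeline output
new_image = image + rng.normal(0.0, 1e-6, image.shape).astype(np.float32)  # same output plus tiny GPU noise

sum_diff = np.abs(image - new_image).sum()  # grows with element count, roughly 1.2 here
max_diff = np.abs(image - new_image).max()  # worst single element, roughly 5e-6 here

assert max_diff < 1e-3                            # stable: the noise stays far below the threshold
print(f"sum={sum_diff:.3e} max={max_diff:.3e}")   # the summed metric would blow past a 1e-3 check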
tests/models/test_modeling_common.py

@@ -195,7 +195,7 @@ class ModelTesterMixin:
     main_input_name = None  # overwrite in model specific tester class
     base_precision = 1e-3
 
-    def test_from_save_pretrained(self):
+    def test_from_save_pretrained(self, expected_max_diff=5e-5):
         init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
 
         model = self.model_class(**init_dict)
@@ -221,8 +221,8 @@ class ModelTesterMixin:
             if isinstance(new_image, dict):
                 new_image = new_image.to_tuple()[0]
 
-        max_diff = (image - new_image).abs().sum().item()
-        self.assertLessEqual(max_diff, 5e-5, "Models give different forward passes")
+        max_diff = (image - new_image).abs().max().item()
+        self.assertLessEqual(max_diff, expected_max_diff, "Models give different forward passes")
 
     def test_getattr_is_correct(self):
         init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
@@ -316,7 +316,7 @@ class ModelTesterMixin:
         assert torch.allclose(output_2, output_5, atol=self.base_precision)
         assert torch.allclose(output_2, output_6, atol=self.base_precision)
 
-    def test_from_save_pretrained_variant(self):
+    def test_from_save_pretrained_variant(self, expected_max_diff=5e-5):
         init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
 
         model = self.model_class(**init_dict)
@@ -351,8 +351,8 @@ class ModelTesterMixin:
             if isinstance(new_image, dict):
                 new_image = new_image.to_tuple()[0]
 
-        max_diff = (image - new_image).abs().sum().item()
-        self.assertLessEqual(max_diff, 5e-5, "Models give different forward passes")
+        max_diff = (image - new_image).abs().max().item()
+        self.assertLessEqual(max_diff, expected_max_diff, "Models give different forward passes")
 
     @require_torch_2
     def test_from_save_pretrained_dynamo(self):
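The two `expected_max_diff=5e-5` parameters added above let a model-specific tester relax the tolerance without editing the shared assertion. A hypothetical example (the subclass, the model class, and the exact import path are placeholders, not part of this commit):

import unittest

# ModelTesterMixin is the shared tester edited above; the import path is illustrative
# and depends on how the test suite is laid out on disk.
from test_modeling_common import ModelTesterMixin


class MyModelTests(ModelTesterMixin, unittest.TestCase):
    model_class = MyModel  # placeholder: the model class under test

    def test_from_save_pretrained(self):
        # loosen the default 5e-5 max-diff tolerance for this model only
        super().test_from_save_pretrained(expected_max_diff=1e-4)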
tests/models/test_models_unet_2d_condition.py

@@ -589,10 +589,10 @@ class UNet2DConditionModelTests(ModelTesterMixin, UNetTesterMixin, unittest.Test
         with torch.no_grad():
             new_sample = new_model(**inputs_dict, cross_attention_kwargs={"scale": 0.5}).sample
 
-        assert (sample - new_sample).abs().max() < 1e-4
+        assert (sample - new_sample).abs().max() < 5e-4
 
         # LoRA and no LoRA should NOT be the same
-        assert (sample - old_sample).abs().max() > 1e-4
+        assert (sample - old_sample).abs().max() > 5e-4
 
     def test_lora_save_load_safetensors(self):
         # enable deterministic behavior for gradient checkpointing
tests/pipelines/controlnet/test_controlnet.py

@@ -959,7 +959,7 @@ class ControlNetPipelineSlowTests(unittest.TestCase):
         gc.collect()
         torch.cuda.empty_cache()
 
-        assert np.abs(images[0] - images[1]).sum() < 1e-3
+        assert np.abs(images[0] - images[1]).max() < 1e-3
 
 
 @slow
tests/pipelines/controlnet/test_controlnet_img2img.py

@@ -446,4 +446,4 @@ class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
         gc.collect()
         torch.cuda.empty_cache()
 
-        assert np.abs(images[0] - images[1]).sum() < 1e-3
+        assert np.abs(images[0] - images[1]).max() < 1e-3
tests/pipelines/controlnet/test_controlnet_inpaint.py

@@ -593,4 +593,4 @@ class ControlNetInpaintPipelineSlowTests(unittest.TestCase):
         gc.collect()
         torch.cuda.empty_cache()
 
-        assert np.abs(images[0] - images[1]).sum() < 1e-3
+        assert np.abs(images[0] - images[1]).max() < 1e-3
tests/pipelines/test_pipelines.py

@@ -122,7 +122,7 @@ def _test_from_save_pretrained_dynamo(in_queue, out_queue, timeout):
         generator = torch.Generator(device=torch_device).manual_seed(0)
         new_image = new_ddpm(generator=generator, num_inference_steps=5, output_type="numpy").images
 
-        assert np.abs(image - new_image).sum() < 1e-5, "Models don't give the same forward pass"
+        assert np.abs(image - new_image).max() < 1e-5, "Models don't give the same forward pass"
     except Exception:
         error = f"{traceback.format_exc()}"
@@ -1543,7 +1543,7 @@ class PipelineSlowTests(unittest.TestCase):
         generator = torch.Generator(device=torch_device).manual_seed(0)
         new_image = new_ddpm(generator=generator, num_inference_steps=5, output_type="numpy").images
 
-        assert np.abs(image - new_image).sum() < 1e-5, "Models don't give the same forward pass"
+        assert np.abs(image - new_image).max() < 1e-5, "Models don't give the same forward pass"
 
     @require_torch_2
     def test_from_save_pretrained_dynamo(self):
@@ -1568,7 +1568,7 @@ class PipelineSlowTests(unittest.TestCase):
         generator = torch.Generator(device=torch_device).manual_seed(0)
         new_image = ddpm_from_hub(generator=generator, num_inference_steps=5, output_type="numpy").images
 
-        assert np.abs(image - new_image).sum() < 1e-5, "Models don't give the same forward pass"
+        assert np.abs(image - new_image).max() < 1e-5, "Models don't give the same forward pass"
 
     def test_from_pretrained_hub_pass_model(self):
         model_path = "google/ddpm-cifar10-32"
@@ -1591,7 +1591,7 @@ class PipelineSlowTests(unittest.TestCase):
         generator = torch.Generator(device=torch_device).manual_seed(0)
         new_image = ddpm_from_hub(generator=generator, num_inference_steps=5, output_type="numpy").images
 
-        assert np.abs(image - new_image).sum() < 1e-5, "Models don't give the same forward pass"
+        assert np.abs(image - new_image).max() < 1e-5, "Models don't give the same forward pass"
 
     def test_output_format(self):
         model_path = "google/ddpm-cifar10-32"
tests/pipelines/test_pipelines_common.py

@@ -296,7 +296,7 @@ class PipelineTesterMixin:
         gc.collect()
         torch.cuda.empty_cache()
 
-    def test_save_load_local(self, expected_max_difference=1e-4):
+    def test_save_load_local(self, expected_max_difference=5e-4):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
         pipe.to(torch_device)
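The same parameterization applies on the pipeline side: test_save_load_local now defaults to a looser 5e-4 tolerance but still accepts an explicit value. A hypothetical concrete pipeline test class (names and import path are placeholders, not from this commit) could relax it further for a pipeline that reloads less precisely on CUDA:

import unittest

# PipelineTesterMixin is the shared pipeline tester edited above; the import path is illustrative.
from test_pipelines_common import PipelineTesterMixin


class MyPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = MyPipeline  # placeholder: the DiffusionPipeline subclass under test

    def test_save_load_local(self):
        # allow a larger max difference for this pipeline only
        super().test_save_load_local(expected_max_difference=1e-3)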
tests/pipelines/versatile_diffusion/test_versatile_diffusion_dual_guided.py

@@ -76,7 +76,7 @@ class VersatileDiffusionDualGuidedPipelineIntegrationTests(unittest.TestCase):
             output_type="numpy",
         ).images
 
-        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
+        assert np.abs(image - new_image).max() < 1e-5, "Models don't have the same forward pass"
 
     def test_inference_dual_guided(self):
         pipe = VersatileDiffusionDualGuidedPipeline.from_pretrained("shi-labs/versatile-diffusion")
tests/pipelines/versatile_diffusion/test_versatile_diffusion_mega.py

@@ -77,7 +77,7 @@ class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
             output_type="numpy",
         ).images
 
-        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
+        assert np.abs(image - new_image).max() < 1e-5, "Models don't have the same forward pass"
 
     def test_inference_dual_guided_then_text_to_image(self):
         pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
tests/pipelines/versatile_diffusion/test_versatile_diffusion_text_to_image.py

@@ -64,7 +64,7 @@ class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
             prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
         ).images
 
-        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
+        assert np.abs(image - new_image).max() < 1e-5, "Models don't have the same forward pass"
 
     def test_inference_text2img(self):
         pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(