Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
renzhc
diffusers_dcu
Commits
c2787c11
Unverified
Commit
c2787c11
authored
Sep 19, 2023
by
Dhruv Nair
Committed by
GitHub
Sep 19, 2023
Browse files
Fixes for Float16 inference Fast CUDA Tests (#5097)
* wip * fix tests
parent
79a3f39e
Changes
14
Hide whitespace changes
Inline
Side-by-side
Showing
14 changed files
with
67 additions
and
3 deletions
+67
-3
tests/pipelines/controlnet/test_controlnet_inpaint_sdxl.py
tests/pipelines/controlnet/test_controlnet_inpaint_sdxl.py
+3
-0
tests/pipelines/kandinsky/test_kandinsky_combined.py
tests/pipelines/kandinsky/test_kandinsky_combined.py
+9
-0
tests/pipelines/kandinsky/test_kandinsky_inpaint.py
tests/pipelines/kandinsky/test_kandinsky_inpaint.py
+3
-0
tests/pipelines/kandinsky_v22/test_kandinsky.py
tests/pipelines/kandinsky_v22/test_kandinsky.py
+3
-0
tests/pipelines/kandinsky_v22/test_kandinsky_combined.py
tests/pipelines/kandinsky_v22/test_kandinsky_combined.py
+9
-0
tests/pipelines/kandinsky_v22/test_kandinsky_controlnet.py
tests/pipelines/kandinsky_v22/test_kandinsky_controlnet.py
+3
-0
tests/pipelines/kandinsky_v22/test_kandinsky_controlnet_img2img.py
...elines/kandinsky_v22/test_kandinsky_controlnet_img2img.py
+3
-0
tests/pipelines/kandinsky_v22/test_kandinsky_img2img.py
tests/pipelines/kandinsky_v22/test_kandinsky_img2img.py
+3
-0
tests/pipelines/kandinsky_v22/test_kandinsky_inpaint.py
tests/pipelines/kandinsky_v22/test_kandinsky_inpaint.py
+3
-0
tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py
...pelines/stable_diffusion/test_stable_diffusion_img2img.py
+3
-0
tests/pipelines/stable_diffusion_2/test_stable_diffusion_latent_upscale.py
...table_diffusion_2/test_stable_diffusion_latent_upscale.py
+3
-0
tests/pipelines/test_pipelines_common.py
tests/pipelines/test_pipelines_common.py
+14
-3
tests/pipelines/unclip/test_unclip.py
tests/pipelines/unclip/test_unclip.py
+4
-0
tests/pipelines/unclip/test_unclip_image_variation.py
tests/pipelines/unclip/test_unclip_image_variation.py
+4
-0
No files found.
tests/pipelines/controlnet/test_controlnet_inpaint_sdxl.py
View file @
c2787c11
...
...
@@ -299,3 +299,6 @@ class ControlNetPipelineSDXLFastTests(
# TODO(Patrick, Sayak) - skip for now as this requires more refiner tests
def
test_save_load_optional_components
(
self
):
pass
def
test_float16_inference
(
self
):
super
().
test_float16_inference
(
expected_max_diff
=
5e-1
)
tests/pipelines/kandinsky/test_kandinsky_combined.py
View file @
c2787c11
...
...
@@ -133,6 +133,9 @@ class KandinskyPipelineCombinedFastTests(PipelineTesterMixin, unittest.TestCase)
def
test_inference_batch_single_identical
(
self
):
super
().
test_inference_batch_single_identical
(
expected_max_diff
=
1e-2
)
def
test_float16_inference
(
self
):
super
().
test_float16_inference
(
expected_max_diff
=
1e-1
)
def
test_dict_tuple_outputs_equivalent
(
self
):
super
().
test_dict_tuple_outputs_equivalent
(
expected_max_difference
=
5e-4
)
...
...
@@ -236,6 +239,9 @@ class KandinskyPipelineImg2ImgCombinedFastTests(PipelineTesterMixin, unittest.Te
def
test_inference_batch_single_identical
(
self
):
super
().
test_inference_batch_single_identical
(
expected_max_diff
=
1e-2
)
def
test_float16_inference
(
self
):
super
().
test_float16_inference
(
expected_max_diff
=
5e-1
)
def
test_dict_tuple_outputs_equivalent
(
self
):
super
().
test_dict_tuple_outputs_equivalent
(
expected_max_difference
=
5e-4
)
...
...
@@ -339,5 +345,8 @@ class KandinskyPipelineInpaintCombinedFastTests(PipelineTesterMixin, unittest.Te
def
test_inference_batch_single_identical
(
self
):
super
().
test_inference_batch_single_identical
(
expected_max_diff
=
1e-2
)
def
test_float16_inference
(
self
):
super
().
test_float16_inference
(
expected_max_diff
=
5e-1
)
def
test_dict_tuple_outputs_equivalent
(
self
):
super
().
test_dict_tuple_outputs_equivalent
(
expected_max_difference
=
5e-4
)
tests/pipelines/kandinsky/test_kandinsky_inpaint.py
View file @
c2787c11
...
...
@@ -290,6 +290,9 @@ class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
assert
np
.
abs
(
image_slices
[
0
]
-
image_slices
[
1
]).
max
()
<
1e-3
assert
np
.
abs
(
image_slices
[
0
]
-
image_slices
[
2
]).
max
()
<
1e-3
def
test_float16_inference
(
self
):
super
().
test_float16_inference
(
expected_max_diff
=
5e-1
)
@
nightly
@
require_torch_gpu
...
...
tests/pipelines/kandinsky_v22/test_kandinsky.py
View file @
c2787c11
...
...
@@ -215,6 +215,9 @@ class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase):
np
.
abs
(
image_from_tuple_slice
.
flatten
()
-
expected_slice
).
max
()
<
1e-2
),
f
" expected_slice
{
expected_slice
}
, but got
{
image_from_tuple_slice
.
flatten
()
}
"
def
test_float16_inference
(
self
):
super
().
test_float16_inference
(
expected_max_diff
=
1e-1
)
@
slow
@
require_torch_gpu
...
...
tests/pipelines/kandinsky_v22/test_kandinsky_combined.py
View file @
c2787c11
...
...
@@ -137,6 +137,9 @@ class KandinskyV22PipelineCombinedFastTests(PipelineTesterMixin, unittest.TestCa
def
test_inference_batch_single_identical
(
self
):
super
().
test_inference_batch_single_identical
(
expected_max_diff
=
1e-2
)
def
test_float16_inference
(
self
):
super
().
test_float16_inference
(
expected_max_diff
=
1e-1
)
def
test_dict_tuple_outputs_equivalent
(
self
):
super
().
test_dict_tuple_outputs_equivalent
(
expected_max_difference
=
5e-4
)
...
...
@@ -243,6 +246,9 @@ class KandinskyV22PipelineImg2ImgCombinedFastTests(PipelineTesterMixin, unittest
def
test_inference_batch_single_identical
(
self
):
super
().
test_inference_batch_single_identical
(
expected_max_diff
=
1e-2
)
def
test_float16_inference
(
self
):
super
().
test_float16_inference
(
expected_max_diff
=
1e-1
)
def
test_dict_tuple_outputs_equivalent
(
self
):
super
().
test_dict_tuple_outputs_equivalent
(
expected_max_difference
=
5e-4
)
...
...
@@ -349,6 +355,9 @@ class KandinskyV22PipelineInpaintCombinedFastTests(PipelineTesterMixin, unittest
def
test_inference_batch_single_identical
(
self
):
super
().
test_inference_batch_single_identical
(
expected_max_diff
=
1e-2
)
def
test_float16_inference
(
self
):
super
().
test_float16_inference
(
expected_max_diff
=
5e-1
)
def
test_dict_tuple_outputs_equivalent
(
self
):
super
().
test_dict_tuple_outputs_equivalent
(
expected_max_difference
=
5e-4
)
...
...
tests/pipelines/kandinsky_v22/test_kandinsky_controlnet.py
View file @
c2787c11
...
...
@@ -218,6 +218,9 @@ class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.Test
np
.
abs
(
image_from_tuple_slice
.
flatten
()
-
expected_slice
).
max
()
<
1e-2
),
f
" expected_slice
{
expected_slice
}
, but got
{
image_from_tuple_slice
.
flatten
()
}
"
def
test_float16_inference
(
self
):
super
().
test_float16_inference
(
expected_max_diff
=
1e-1
)
@
nightly
@
require_torch_gpu
...
...
tests/pipelines/kandinsky_v22/test_kandinsky_controlnet_img2img.py
View file @
c2787c11
...
...
@@ -228,6 +228,9 @@ class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unitte
def
test_inference_batch_single_identical
(
self
):
super
().
test_inference_batch_single_identical
(
expected_max_diff
=
1.75e-3
)
def
test_float16_inference
(
self
):
super
().
test_float16_inference
(
expected_max_diff
=
2e-1
)
@
slow
@
require_torch_gpu
...
...
tests/pipelines/kandinsky_v22/test_kandinsky_img2img.py
View file @
c2787c11
...
...
@@ -232,6 +232,9 @@ class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCas
np
.
abs
(
image_from_tuple_slice
.
flatten
()
-
expected_slice
).
max
()
<
1e-2
),
f
" expected_slice
{
expected_slice
}
, but got
{
image_from_tuple_slice
.
flatten
()
}
"
def
test_float16_inference
(
self
):
super
().
test_float16_inference
(
expected_max_diff
=
2e-1
)
@
slow
@
require_torch_gpu
...
...
tests/pipelines/kandinsky_v22/test_kandinsky_inpaint.py
View file @
c2787c11
...
...
@@ -240,6 +240,9 @@ class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCas
def
test_inference_batch_single_identical
(
self
):
super
().
test_inference_batch_single_identical
(
expected_max_diff
=
3e-3
)
def
test_float16_inference
(
self
):
super
().
test_float16_inference
(
expected_max_diff
=
5e-1
)
def
test_model_cpu_offload_forward_pass
(
self
):
super
().
test_inference_batch_single_identical
(
expected_max_diff
=
5e-4
)
...
...
tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py
View file @
c2787c11
...
...
@@ -254,6 +254,9 @@ class StableDiffusionImg2ImgPipelineFastTests(
def
test_inference_batch_single_identical
(
self
):
super
().
test_inference_batch_single_identical
(
expected_max_diff
=
3e-3
)
def
test_float16_inference
(
self
):
super
().
test_float16_inference
(
expected_max_diff
=
5e-1
)
@
slow
@
require_torch_gpu
...
...
tests/pipelines/stable_diffusion_2/test_stable_diffusion_latent_upscale.py
View file @
c2787c11
...
...
@@ -235,6 +235,9 @@ class StableDiffusionLatentUpscalePipelineFastTests(
assert
check_same_shape
(
outputs
)
def
test_float16_inference
(
self
):
super
().
test_float16_inference
(
expected_max_diff
=
5e-1
)
@
require_torch_gpu
@
slow
...
...
tests/pipelines/test_pipelines_common.py
View file @
c2787c11
...
...
@@ -544,7 +544,7 @@ class PipelineTesterMixin:
self
.
assertTrue
(
set
(
pipe
.
components
.
keys
())
==
set
(
init_components
.
keys
()))
@
unittest
.
skipIf
(
torch_device
!=
"cuda"
,
reason
=
"float16 requires CUDA"
)
def
test_float16_inference
(
self
,
expected_max_diff
=
1
e-2
):
def
test_float16_inference
(
self
,
expected_max_diff
=
5
e-2
):
components
=
self
.
get_dummy_components
()
pipe
=
self
.
pipeline_class
(
**
components
)
for
component
in
pipe
.
components
.
values
():
...
...
@@ -563,8 +563,19 @@ class PipelineTesterMixin:
pipe_fp16
.
to
(
torch_device
,
torch
.
float16
)
pipe_fp16
.
set_progress_bar_config
(
disable
=
None
)
output
=
pipe
(
**
self
.
get_dummy_inputs
(
torch_device
))[
0
]
output_fp16
=
pipe_fp16
(
**
self
.
get_dummy_inputs
(
torch_device
))[
0
]
inputs
=
self
.
get_dummy_inputs
(
torch_device
)
# Reset generator in case it is used inside dummy inputs
if
"generator"
in
inputs
:
inputs
[
"generator"
]
=
self
.
get_generator
(
0
)
output
=
pipe
(
**
inputs
)[
0
]
fp16_inputs
=
self
.
get_dummy_inputs
(
torch_device
)
# Reset generator in case it is used inside dummy inputs
if
"generator"
in
fp16_inputs
:
fp16_inputs
[
"generator"
]
=
self
.
get_generator
(
0
)
output_fp16
=
pipe_fp16
(
**
fp16_inputs
)[
0
]
max_diff
=
np
.
abs
(
to_np
(
output
)
-
to_np
(
output_fp16
)).
max
()
self
.
assertLess
(
max_diff
,
expected_max_diff
,
"The outputs of the fp16 and fp32 pipelines are too different."
)
...
...
tests/pipelines/unclip/test_unclip.py
View file @
c2787c11
...
...
@@ -418,6 +418,10 @@ class UnCLIPPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
def
test_save_load_optional_components
(
self
):
return
super
().
test_save_load_optional_components
()
@
unittest
.
skip
(
"UnCLIP produces very large differences in fp16 vs fp32. Test is not useful."
)
def
test_float16_inference
(
self
):
super
().
test_float16_inference
(
expected_max_diff
=
1.0
)
@
nightly
class
UnCLIPPipelineCPUIntegrationTests
(
unittest
.
TestCase
):
...
...
tests/pipelines/unclip/test_unclip_image_variation.py
View file @
c2787c11
...
...
@@ -491,6 +491,10 @@ class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCa
def
test_save_load_optional_components
(
self
):
return
super
().
test_save_load_optional_components
()
@
unittest
.
skip
(
"UnCLIP produces very large difference in fp16 vs fp32. Test is not useful."
)
def
test_float16_inference
(
self
):
super
().
test_float16_inference
(
expected_max_diff
=
1.0
)
@
nightly
@
require_torch_gpu
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment