Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
renzhc
diffusers_dcu
Commits
3bba44d7
Unverified
Commit
3bba44d7
authored
Aug 25, 2023
by
Dhruv Nair
Committed by
GitHub
Aug 25, 2023
Browse files
[WIP] Proposal to address precision issues in CI (#4775)
* proposal for flaky tests * clean up
parent
b1290d3f
Changes
2
Hide whitespace changes
Inline
Side-by-side
Showing
2 changed files
with
15 additions
and
2 deletions
+15
-2
src/diffusers/utils/testing_utils.py
src/diffusers/utils/testing_utils.py
+8
-0
tests/pipelines/test_pipelines_common.py
tests/pipelines/test_pipelines_common.py
+7
-2
No files found.
src/diffusers/utils/testing_utils.py
View file @
3bba44d7
...
@@ -19,6 +19,7 @@ import numpy as np
...
@@ -19,6 +19,7 @@ import numpy as np
import
PIL.Image
import
PIL.Image
import
PIL.ImageOps
import
PIL.ImageOps
import
requests
import
requests
from
numpy.linalg
import
norm
from
packaging
import
version
from
packaging
import
version
from
.import_utils
import
(
from
.import_utils
import
(
...
@@ -72,6 +73,13 @@ def torch_all_close(a, b, *args, **kwargs):
...
@@ -72,6 +73,13 @@ def torch_all_close(a, b, *args, **kwargs):
return
True
return
True
def numpy_cosine_similarity_distance(a, b):
    """Return the cosine *distance* between two arrays.

    Computes ``1 - mean(cosine_similarity)`` where the similarity is
    ``dot(a, b) / (||a|| * ||b||)``. A distance of 0 means the inputs point
    in the same direction; larger values mean they diverge.

    Args:
        a: First array (typically a flattened 1-D vector).
        b: Second array of the same shape as ``a``.

    Returns:
        The cosine distance, ``1.0 - similarity.mean()``.
    """
    # Product of the Euclidean norms forms the normalization term.
    denominator = norm(a) * norm(b)
    cosine_similarity = np.dot(a, b) / denominator
    # .mean() collapses to a scalar if the dot product produced an array
    # (e.g. for 2-D inputs); for 1-D inputs it is a no-op on the scalar.
    return 1.0 - cosine_similarity.mean()
def
print_tensor_test
(
tensor
,
filename
=
"test_corrections.txt"
,
expected_tensor_name
=
"expected_slice"
):
def
print_tensor_test
(
tensor
,
filename
=
"test_corrections.txt"
,
expected_tensor_name
=
"expected_slice"
):
test_name
=
os
.
environ
.
get
(
"PYTEST_CURRENT_TEST"
)
test_name
=
os
.
environ
.
get
(
"PYTEST_CURRENT_TEST"
)
if
not
torch
.
is_tensor
(
tensor
):
if
not
torch
.
is_tensor
(
tensor
):
...
...
tests/pipelines/test_pipelines_common.py
View file @
3bba44d7
...
@@ -22,7 +22,12 @@ from diffusers.image_processor import VaeImageProcessor
...
@@ -22,7 +22,12 @@ from diffusers.image_processor import VaeImageProcessor
from
diffusers.schedulers
import
KarrasDiffusionSchedulers
from
diffusers.schedulers
import
KarrasDiffusionSchedulers
from
diffusers.utils
import
logging
from
diffusers.utils
import
logging
from
diffusers.utils.import_utils
import
is_accelerate_available
,
is_accelerate_version
,
is_xformers_available
from
diffusers.utils.import_utils
import
is_accelerate_available
,
is_accelerate_version
,
is_xformers_available
from
diffusers.utils.testing_utils
import
CaptureLogger
,
require_torch
,
torch_device
from
diffusers.utils.testing_utils
import
(
CaptureLogger
,
numpy_cosine_similarity_distance
,
require_torch
,
torch_device
,
)
from
..others.test_utils
import
TOKEN
,
USER
,
is_staging_test
from
..others.test_utils
import
TOKEN
,
USER
,
is_staging_test
...
@@ -543,7 +548,7 @@ class PipelineTesterMixin:
...
@@ -543,7 +548,7 @@ class PipelineTesterMixin:
output
=
pipe
(
**
self
.
get_dummy_inputs
(
torch_device
))[
0
]
output
=
pipe
(
**
self
.
get_dummy_inputs
(
torch_device
))[
0
]
output_fp16
=
pipe_fp16
(
**
self
.
get_dummy_inputs
(
torch_device
))[
0
]
output_fp16
=
pipe_fp16
(
**
self
.
get_dummy_inputs
(
torch_device
))[
0
]
max_diff
=
n
p
.
abs
(
to_np
(
output
)
-
to_np
(
output_fp16
)
).
max
()
max_diff
=
n
umpy_cosine_similarity_distance
(
to_np
(
output
)
.
flatten
(),
to_np
(
output_fp16
)
.
flatten
()
)
self
.
assertLess
(
max_diff
,
expected_max_diff
,
"The outputs of the fp16 and fp32 pipelines are too different."
)
self
.
assertLess
(
max_diff
,
expected_max_diff
,
"The outputs of the fp16 and fp32 pipelines are too different."
)
@
unittest
.
skipIf
(
torch_device
!=
"cuda"
,
reason
=
"float16 requires CUDA"
)
@
unittest
.
skipIf
(
torch_device
!=
"cuda"
,
reason
=
"float16 requires CUDA"
)
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment