Unverified Commit 3bba44d7 authored by Dhruv Nair's avatar Dhruv Nair Committed by GitHub
Browse files

[WIP] Proposal to address precision issues in CI (#4775)

* proposal for flaky tests

* clean up
parent b1290d3f
...@@ -19,6 +19,7 @@ import numpy as np ...@@ -19,6 +19,7 @@ import numpy as np
import PIL.Image import PIL.Image
import PIL.ImageOps import PIL.ImageOps
import requests import requests
from numpy.linalg import norm
from packaging import version from packaging import version
from .import_utils import ( from .import_utils import (
...@@ -72,6 +73,13 @@ def torch_all_close(a, b, *args, **kwargs): ...@@ -72,6 +73,13 @@ def torch_all_close(a, b, *args, **kwargs):
return True return True
def numpy_cosine_similarity_distance(a, b):
    """Return one minus the mean cosine similarity between *a* and *b*.

    A value near 0.0 means the two arrays point in the same direction;
    larger values indicate greater angular difference. Inputs are expected
    to be 1-D numpy arrays (callers flatten before passing).
    """
    denom = norm(a) * norm(b)
    cos_sim = np.dot(a, b) / denom
    # .mean() collapses the (scalar) similarity so a plain float-like value comes back
    return 1.0 - cos_sim.mean()
def print_tensor_test(tensor, filename="test_corrections.txt", expected_tensor_name="expected_slice"): def print_tensor_test(tensor, filename="test_corrections.txt", expected_tensor_name="expected_slice"):
test_name = os.environ.get("PYTEST_CURRENT_TEST") test_name = os.environ.get("PYTEST_CURRENT_TEST")
if not torch.is_tensor(tensor): if not torch.is_tensor(tensor):
......
...@@ -22,7 +22,12 @@ from diffusers.image_processor import VaeImageProcessor ...@@ -22,7 +22,12 @@ from diffusers.image_processor import VaeImageProcessor
from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import logging from diffusers.utils import logging
from diffusers.utils.import_utils import is_accelerate_available, is_accelerate_version, is_xformers_available from diffusers.utils.import_utils import is_accelerate_available, is_accelerate_version, is_xformers_available
from diffusers.utils.testing_utils import CaptureLogger, require_torch, torch_device from diffusers.utils.testing_utils import (
CaptureLogger,
numpy_cosine_similarity_distance,
require_torch,
torch_device,
)
from ..others.test_utils import TOKEN, USER, is_staging_test from ..others.test_utils import TOKEN, USER, is_staging_test
...@@ -543,7 +548,7 @@ class PipelineTesterMixin: ...@@ -543,7 +548,7 @@ class PipelineTesterMixin:
output = pipe(**self.get_dummy_inputs(torch_device))[0] output = pipe(**self.get_dummy_inputs(torch_device))[0]
output_fp16 = pipe_fp16(**self.get_dummy_inputs(torch_device))[0] output_fp16 = pipe_fp16(**self.get_dummy_inputs(torch_device))[0]
max_diff = np.abs(to_np(output) - to_np(output_fp16)).max() max_diff = numpy_cosine_similarity_distance(to_np(output).flatten(), to_np(output_fp16).flatten())
self.assertLess(max_diff, expected_max_diff, "The outputs of the fp16 and fp32 pipelines are too different.") self.assertLess(max_diff, expected_max_diff, "The outputs of the fp16 and fp32 pipelines are too different.")
@unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA") @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment