Unverified Commit 1a6fa69a authored by Will Berman, committed by GitHub

PipelineTesterMixin parameter configuration refactor (#2502)

* attend and excite batch test causing timeouts

* PipelineTesterMixin argument configuration refactor

* error message text re: @yiyixuxu

* remove eta re: @patrickvonplaten
parent 664b4de9
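
In short, this refactor replaces the mixin-wide `allowed_required_args` / `num_inference_steps_args` lists with per-pipeline `params` / `batch_params` frozensets that every child test class must declare. A minimal sketch of the resulting pattern (all names below are illustrative stand-ins, not code from this commit):

import unittest

# Illustrative stand-ins for the shared frozensets in pipeline_params.py.
EXAMPLE_TEXT_TO_IMAGE_PARAMS = frozenset(
    ["prompt", "height", "width", "num_inference_steps", "guidance_scale"]
)
EXAMPLE_TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(["prompt"])


class ExamplePipelineFastTests(unittest.TestCase):
    # A pipeline whose __call__ has no height/width arguments derives its
    # parameter set by subtraction instead of copying the common set.
    params = EXAMPLE_TEXT_TO_IMAGE_PARAMS - {"height", "width"}
    batch_params = EXAMPLE_TEXT_TO_IMAGE_BATCH_PARAMS

    def test_declared_params(self):
        self.assertIsInstance(self.params, frozenset)
        self.assertNotIn("height", self.params)


if __name__ == "__main__":
    unittest.main()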
@@ -26,6 +26,7 @@ from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline
 from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
 from diffusers.utils.testing_utils import require_torch_gpu, slow

+from ...pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
 from ...test_pipelines_common import PipelineTesterMixin
@@ -34,6 +35,8 @@ torch.backends.cuda.matmul.allow_tf32 = False
 class StableDiffusion2InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
     pipeline_class = StableDiffusionInpaintPipeline
+    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
+    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS

     def get_dummy_components(self):
         torch.manual_seed(0)
......
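
The `...pipeline_params` imports above refer to a module of shared frozensets. A plausible shape for one of these constants (membership here is illustrative only; the authoritative definition lives in the real `pipeline_params.py`):

# Illustrative sketch of a shared parameter set, not copied from the module.
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        "prompt",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

# The batch variant keeps only the arguments that vary per batch element.
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["prompt", "image", "mask_image", "negative_prompt"])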
@@ -31,6 +31,7 @@ from diffusers import (
 from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
 from diffusers.utils.testing_utils import require_torch_gpu

+from ...pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
 from ...test_pipelines_common import PipelineTesterMixin
@@ -39,6 +40,15 @@ torch.backends.cuda.matmul.allow_tf32 = False
 class StableDiffusionLatentUpscalePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
     pipeline_class = StableDiffusionLatentUpscalePipeline
+    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
+        "height",
+        "width",
+        "cross_attention_kwargs",
+        "negative_prompt_embeds",
+        "prompt_embeds",
+    }
+    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
+    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
     test_cpu_offload = True

     @property
......
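
Note the subtraction on the mixin default above: because `required_optional_params` is a frozenset, `-` builds a new set, and the result is itself a frozenset. A quick self-contained check (constant membership is illustrative):

# frozenset - set returns a new frozenset; the shared default is untouched.
REQUIRED_OPTIONAL = frozenset(["generator", "num_inference_steps", "num_images_per_prompt", "return_dict"])

trimmed = REQUIRED_OPTIONAL - {"num_images_per_prompt"}

assert isinstance(trimmed, frozenset)
assert "num_images_per_prompt" in REQUIRED_OPTIONAL  # original unchanged
assert "num_images_per_prompt" not in trimmed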
@@ -15,11 +15,14 @@ from diffusers import (
 from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
 from diffusers.utils.testing_utils import load_numpy, require_torch_gpu, slow, torch_device

+from ...pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ...test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


 class StableUnCLIPPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
     pipeline_class = StableUnCLIPPipeline
+    params = TEXT_TO_IMAGE_PARAMS
+    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS

     # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
     test_xformers_attention = False
......
@@ -18,11 +18,17 @@ from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import
 from diffusers.utils.import_utils import is_xformers_available
 from diffusers.utils.testing_utils import floats_tensor, load_image, load_numpy, require_torch_gpu, slow, torch_device

-from ...test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
+from ...pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
+from ...test_pipelines_common import (
+    PipelineTesterMixin,
+    assert_mean_pixel_difference,
+)


 class StableUnCLIPImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
     pipeline_class = StableUnCLIPImg2ImgPipeline
+    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
+    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS

     def get_dummy_components(self):
         embedder_hidden_size = 32
......
@@ -25,13 +25,22 @@ from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
 from diffusers.utils import load_numpy, nightly, slow, torch_device
 from diffusers.utils.testing_utils import require_torch_gpu, skip_mps

+from ...pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ...test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


 class UnCLIPPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
     pipeline_class = UnCLIPPipeline
-    test_xformers_attention = False
+    params = TEXT_TO_IMAGE_PARAMS - {
+        "negative_prompt",
+        "height",
+        "width",
+        "negative_prompt_embeds",
+        "guidance_scale",
+        "prompt_embeds",
+        "cross_attention_kwargs",
+    }
+    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS

     required_optional_params = [
         "generator",
         "return_dict",
@@ -39,11 +48,7 @@ class UnCLIPPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
         "decoder_num_inference_steps",
         "super_res_num_inference_steps",
     ]
-    num_inference_steps_args = [
-        "prior_num_inference_steps",
-        "decoder_num_inference_steps",
-        "super_res_num_inference_steps",
-    ]
+    test_xformers_attention = False

     @property
     def text_embedder_hidden_size(self):
@@ -361,18 +366,36 @@ class UnCLIPPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
     def test_inference_batch_single_identical(self):
         test_max_difference = torch_device == "cpu"
         relax_max_difference = True
+        additional_params_copy_to_batched_inputs = [
+            "prior_num_inference_steps",
+            "decoder_num_inference_steps",
+            "super_res_num_inference_steps",
+        ]

         self._test_inference_batch_single_identical(
-            test_max_difference=test_max_difference, relax_max_difference=relax_max_difference
+            test_max_difference=test_max_difference,
+            relax_max_difference=relax_max_difference,
+            additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
         )

     def test_inference_batch_consistent(self):
+        additional_params_copy_to_batched_inputs = [
+            "prior_num_inference_steps",
+            "decoder_num_inference_steps",
+            "super_res_num_inference_steps",
+        ]
+
         if torch_device == "mps":
             # TODO: MPS errors with larger batch sizes
             batch_sizes = [2, 3]
-            self._test_inference_batch_consistent(batch_sizes=batch_sizes)
+            self._test_inference_batch_consistent(
+                batch_sizes=batch_sizes,
+                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
+            )
         else:
-            self._test_inference_batch_consistent()
+            self._test_inference_batch_consistent(
+                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs
+            )

     @skip_mps
     def test_dict_tuple_outputs_equivalent(self):
......
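
UnCLIP's three step-count arguments are per-call scalars that apply to the whole batch, which is why they go through `additional_params_copy_to_batched_inputs` instead of being replicated per element. A condensed sketch of what the mixin helpers do with them (simplified; the real helper also varies prompt lengths per element):

# Entries in batch_params are expanded to one value per sample, while names
# listed in additional_params_copy_to_batched_inputs are copied through as-is.
inputs = {"prompt": "horse", "prior_num_inference_steps": 2, "decoder_num_inference_steps": 2}
batch_params = {"prompt"}
additional_params_copy_to_batched_inputs = ["prior_num_inference_steps", "decoder_num_inference_steps"]

batch_size = 3
batched_inputs = {}
for name, value in inputs.items():
    if name in batch_params:
        batched_inputs[name] = batch_size * [value]  # replicate per sample
    else:
        batched_inputs[name] = value

for arg in additional_params_copy_to_batched_inputs:
    batched_inputs[arg] = inputs[arg]  # scalar, shared by every sample

print(batched_inputs["prompt"])                     # ['horse', 'horse', 'horse']
print(batched_inputs["prior_num_inference_steps"])  # 2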
@@ -39,11 +39,14 @@ from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
 from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
 from diffusers.utils.testing_utils import load_image, require_torch_gpu, skip_mps

+from ...pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
 from ...test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


 class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
     pipeline_class = UnCLIPImageVariationPipeline
+    params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
+    batch_params = IMAGE_VARIATION_BATCH_PARAMS

     required_optional_params = [
         "generator",
@@ -51,10 +54,6 @@ class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
         "decoder_num_inference_steps",
         "super_res_num_inference_steps",
     ]
-    num_inference_steps_args = [
-        "decoder_num_inference_steps",
-        "super_res_num_inference_steps",
-    ]

     @property
     def text_embedder_hidden_size(self):
@@ -482,18 +481,34 @@ class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
     def test_inference_batch_single_identical(self):
         test_max_difference = torch_device == "cpu"
         relax_max_difference = True
+        additional_params_copy_to_batched_inputs = [
+            "decoder_num_inference_steps",
+            "super_res_num_inference_steps",
+        ]

         self._test_inference_batch_single_identical(
-            test_max_difference=test_max_difference, relax_max_difference=relax_max_difference
+            test_max_difference=test_max_difference,
+            relax_max_difference=relax_max_difference,
+            additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
        )

     def test_inference_batch_consistent(self):
+        additional_params_copy_to_batched_inputs = [
+            "decoder_num_inference_steps",
+            "super_res_num_inference_steps",
+        ]
+
         if torch_device == "mps":
             # TODO: MPS errors with larger batch sizes
             batch_sizes = [2, 3]
-            self._test_inference_batch_consistent(batch_sizes=batch_sizes)
+            self._test_inference_batch_consistent(
+                batch_sizes=batch_sizes,
+                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
+            )
         else:
-            self._test_inference_batch_consistent()
+            self._test_inference_batch_consistent(
+                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs
+            )

     @skip_mps
     def test_dict_tuple_outputs_equivalent(self):
......
@@ -28,17 +28,21 @@ class PipelineTesterMixin:
     equivalence of dict and tuple outputs, etc.
     """

-    allowed_required_args = [
-        "source_prompt",
-        "prompt",
-        "image",
-        "mask_image",
-        "example_image",
-        "class_labels",
-        "token_indices",
-    ]
-    required_optional_params = ["generator", "num_inference_steps", "return_dict"]
-    num_inference_steps_args = ["num_inference_steps"]
+    # Canonical parameters that are passed to `__call__` regardless
+    # of the type of pipeline. They are always optional and have common
+    # sense default values.
+    required_optional_params = frozenset(
+        [
+            "num_inference_steps",
+            "num_images_per_prompt",
+            "generator",
+            "latents",
+            "output_type",
+            "return_dict",
+            "callback",
+            "callback_steps",
+        ]
+    )

     # set these parameters to False in the child class if the pipeline does not support the corresponding functionality
     test_attention_slicing = True
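
Moving `required_optional_params` from a list to a frozenset also closes off accidental cross-test mutation. A toy illustration of the hazard the frozenset avoids (classes below are hypothetical, not from this commit):

# With a mutable list on the mixin, an in-place edit in one subclass leaks
# into every other test class; a frozenset makes subtraction the only option.
class ListMixin:
    required_optional_params = ["generator", "num_inference_steps"]


class FrozenMixin:
    required_optional_params = frozenset(["generator", "num_inference_steps"])


class ListChild(ListMixin):
    pass


ListChild.required_optional_params.remove("num_inference_steps")
print(ListMixin.required_optional_params)  # ['generator'] -- shared state mutated!

trimmed = FrozenMixin.required_optional_params - {"num_inference_steps"}
print(sorted(FrozenMixin.required_optional_params))  # ['generator', 'num_inference_steps']
print(sorted(trimmed))                               # ['generator']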
@@ -69,6 +73,35 @@ class PipelineTesterMixin:
             "See existing pipeline tests for reference."
         )

+    @property
+    def params(self) -> frozenset:
+        raise NotImplementedError(
+            "You need to set the attribute `params` in the child test class. "
+            "`params` is checked to ensure all of its values are present in the `__call__` signature. "
+            "You can set `params` using one of the common sets of parameters defined in `pipeline_params.py`, "
+            "e.g., `TEXT_TO_IMAGE_PARAMS` defines the common parameters used in text-to-image pipelines, "
+            "including prompts and prompt embedding overrides. "
+            "If your pipeline's set of arguments differs only slightly from one of the common sets, "
+            "do not modify the existing common sets; subtract the difference instead. E.g., a text-to-image "
+            "pipeline with non-configurable height and width arguments should set the attribute as "
+            "`params = TEXT_TO_IMAGE_PARAMS - {'height', 'width'}`. "
+            "See existing pipeline tests for reference."
+        )
+
+    @property
+    def batch_params(self) -> frozenset:
+        raise NotImplementedError(
+            "You need to set the attribute `batch_params` in the child test class. "
+            "`batch_params` are the parameters that must be batched when passed to the pipeline's "
+            "`__call__` method. `pipeline_params.py` provides some common sets of parameters, such as "
+            "`TEXT_TO_IMAGE_BATCH_PARAMS` and `IMAGE_VARIATION_BATCH_PARAMS`. If your pipeline's "
+            "set of batch arguments differs only slightly from one of the common sets, do not modify "
+            "the existing common sets; subtract the difference instead. E.g., a text-to-image pipeline "
+            "whose `negative_prompt` is not batched should set the attribute as "
+            "`batch_params = TEXT_TO_IMAGE_BATCH_PARAMS - {'negative_prompt'}`. "
+            "See existing pipeline tests for reference."
+        )
+
     def tearDown(self):
         # clean up the VRAM after each test in case of CUDA runtime errors
         super().tearDown()
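
The two properties above exist purely to fail loudly: a plain class attribute defined on the child shadows the property earlier in the MRO, so tests that declare `params`/`batch_params` never hit the `NotImplementedError`. A minimal demonstration of the pattern:

# The mixin property raises until a child class shadows it with a plain
# class attribute, which wins in MRO-ordered attribute lookup.
class Mixin:
    @property
    def params(self) -> frozenset:
        raise NotImplementedError("set `params` in the child test class")


class ConfiguredTests(Mixin):
    params = frozenset(["prompt"])  # shadows Mixin.params


class UnconfiguredTests(Mixin):
    pass


print(ConfiguredTests().params)  # frozenset({'prompt'})
try:
    UnconfiguredTests().params
except NotImplementedError as err:
    print(f"raised as intended: {err}")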
@@ -100,29 +133,51 @@ class PipelineTesterMixin:
         max_diff = np.abs(output - output_loaded).max()
         self.assertLess(max_diff, 1e-4)

-    def test_pipeline_call_implements_required_args(self):
-        assert hasattr(self.pipeline_class, "__call__"), f"{self.pipeline_class} should have a `__call__` method"
+    def test_pipeline_call_signature(self):
+        self.assertTrue(
+            hasattr(self.pipeline_class, "__call__"), f"{self.pipeline_class} should have a `__call__` method"
+        )

         parameters = inspect.signature(self.pipeline_class.__call__).parameters

-        required_parameters = {k: v for k, v in parameters.items() if v.default == inspect._empty}
-        required_parameters.pop("self")
-        required_parameters = set(required_parameters)
-        optional_parameters = set({k for k, v in parameters.items() if v.default != inspect._empty})
-
-        for param in required_parameters:
-            if param == "kwargs":
-                # kwargs can be added if arguments of pipeline call function are deprecated
-                continue
-            assert param in self.allowed_required_args
+        optional_parameters = set()
+
+        for k, v in parameters.items():
+            if v.default != inspect._empty:
+                optional_parameters.add(k)
+
+        parameters = set(parameters.keys())
+        parameters.remove("self")
+        parameters.discard("kwargs")  # kwargs can be added if arguments of pipeline call function are deprecated
+
+        remaining_required_parameters = set()
+
+        for param in self.params:
+            if param not in parameters:
+                remaining_required_parameters.add(param)
+
+        self.assertTrue(
+            len(remaining_required_parameters) == 0,
+            f"Required parameters not present: {remaining_required_parameters}",
+        )
+
+        remaining_required_optional_parameters = set()

         for param in self.required_optional_params:
-            assert param in optional_parameters
+            if param not in optional_parameters:
+                remaining_required_optional_parameters.add(param)
+
+        self.assertTrue(
+            len(remaining_required_optional_parameters) == 0,
+            f"Required optional parameters not present: {remaining_required_optional_parameters}",
+        )

     def test_inference_batch_consistent(self):
         self._test_inference_batch_consistent()

-    def _test_inference_batch_consistent(self, batch_sizes=[2, 4, 13]):
+    def _test_inference_batch_consistent(
+        self, batch_sizes=[2, 4, 13], additional_params_copy_to_batched_inputs=["num_inference_steps"]
+    ):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
         pipe.to(torch_device)
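
The rewritten check above leans on `inspect.signature`. A small standalone repro of the same derivation (toy pipeline with illustrative names; the public `inspect.Parameter.empty` is the same sentinel as `inspect._empty`):

import inspect

class ToyPipeline:
    # One required arg, three optional args, plus **kwargs for deprecations.
    def __call__(self, prompt, num_inference_steps=50, generator=None, return_dict=True, **kwargs):
        ...

parameters = inspect.signature(ToyPipeline.__call__).parameters

# Optional = has a default; the full name set drops self and kwargs.
optional = {k for k, v in parameters.items() if v.default is not inspect.Parameter.empty}
names = set(parameters) - {"self", "kwargs"}

print(sorted(names))     # ['generator', 'num_inference_steps', 'prompt', 'return_dict']
print(sorted(optional))  # ['generator', 'num_inference_steps', 'return_dict']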
@@ -137,7 +192,7 @@ class PipelineTesterMixin:
         for batch_size in batch_sizes:
             batched_inputs = {}
             for name, value in inputs.items():
-                if name in self.allowed_required_args:
+                if name in self.batch_params:
                     # prompt is string
                     if name == "prompt":
                         len_prompt = len(value)
@@ -154,7 +209,7 @@ class PipelineTesterMixin:
                 else:
                     batched_inputs[name] = value

-            for arg in self.num_inference_steps_args:
+            for arg in additional_params_copy_to_batched_inputs:
                 batched_inputs[arg] = inputs[arg]

             batched_inputs["output_type"] = None
@@ -181,7 +236,11 @@ class PipelineTesterMixin:
         self._test_inference_batch_single_identical()

     def _test_inference_batch_single_identical(
-        self, test_max_difference=None, test_mean_pixel_difference=None, relax_max_difference=False
+        self,
+        test_max_difference=None,
+        test_mean_pixel_difference=None,
+        relax_max_difference=False,
+        additional_params_copy_to_batched_inputs=["num_inference_steps"],
     ):
         if test_max_difference is None:
             # TODO(Pedro) - not sure why, but not at all reproducible at the moment it seems
@@ -206,7 +265,7 @@ class PipelineTesterMixin:
         batched_inputs = {}
         batch_size = 3
         for name, value in inputs.items():
-            if name in self.allowed_required_args:
+            if name in self.batch_params:
                 # prompt is string
                 if name == "prompt":
                     len_prompt = len(value)
......
             else:
                 batched_inputs[name] = value

-        for arg in self.num_inference_steps_args:
+        for arg in additional_params_copy_to_batched_inputs:
             batched_inputs[arg] = inputs[arg]

         if self.pipeline_class.__name__ != "DanceDiffusionPipeline":
......