Unverified Commit 7aa6af11 authored by Dhruv Nair's avatar Dhruv Nair Committed by GitHub
Browse files

[Refactor] Move testing utils out of src (#12238)

* update

* update

* update

* update

* update

* merge main

* Revert "merge main"

This reverts commit 65efbcead58644b31596ed2d714f7cee0e0238d3.
parent 87b800e1
......@@ -11,10 +11,10 @@ from diffusers import (
FluxControlInpaintPipeline,
FluxTransformer2DModel,
)
from diffusers.utils.testing_utils import (
from ...testing_utils import (
torch_device,
)
from ..test_pipelines_common import PipelineTesterMixin, check_qkv_fused_layers_exist
......
......@@ -6,12 +6,12 @@ import torch
from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, T5EncoderModel
from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, FluxFillPipeline, FluxTransformer2DModel
from diffusers.utils.testing_utils import (
from ...testing_utils import (
enable_full_determinism,
floats_tensor,
torch_device,
)
from ..test_pipelines_common import PipelineTesterMixin
......
......@@ -6,12 +6,12 @@ import torch
from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, T5EncoderModel
from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, FluxImg2ImgPipeline, FluxTransformer2DModel
from diffusers.utils.testing_utils import (
from ...testing_utils import (
enable_full_determinism,
floats_tensor,
torch_device,
)
from ..test_pipelines_common import FluxIPAdapterTesterMixin, PipelineTesterMixin
......
......@@ -6,12 +6,12 @@ import torch
from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, T5EncoderModel
from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, FluxInpaintPipeline, FluxTransformer2DModel
from diffusers.utils.testing_utils import (
from ...testing_utils import (
enable_full_determinism,
floats_tensor,
torch_device,
)
from ..test_pipelines_common import FluxIPAdapterTesterMixin, PipelineTesterMixin
......
......@@ -12,8 +12,8 @@ from diffusers import (
FluxKontextPipeline,
FluxTransformer2DModel,
)
from diffusers.utils.testing_utils import torch_device
from ...testing_utils import torch_device
from ..test_pipelines_common import (
FasterCacheTesterMixin,
FluxIPAdapterTesterMixin,
......
......@@ -12,8 +12,8 @@ from diffusers import (
FluxKontextInpaintPipeline,
FluxTransformer2DModel,
)
from diffusers.utils.testing_utils import floats_tensor, torch_device
from ...testing_utils import floats_tensor, torch_device
from ..test_pipelines_common import (
FasterCacheTesterMixin,
FluxIPAdapterTesterMixin,
......
......@@ -6,7 +6,8 @@ import torch
from diffusers import FluxPipeline, FluxPriorReduxPipeline
from diffusers.utils import load_image
from diffusers.utils.testing_utils import (
from ...testing_utils import (
Expectations,
backend_empty_cache,
numpy_cosine_similarity_distance,
......
......@@ -32,8 +32,8 @@ from diffusers import (
HiDreamImagePipeline,
HiDreamImageTransformer2DModel,
)
from diffusers.utils.testing_utils import enable_full_determinism
from ...testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
......
......@@ -36,8 +36,8 @@ from diffusers import (
HunyuanVideoImageToVideoPipeline,
HunyuanVideoTransformer3DModel,
)
from diffusers.utils.testing_utils import enable_full_determinism, torch_device
from ...testing_utils import enable_full_determinism, torch_device
from ..test_pipelines_common import PipelineTesterMixin, PyramidAttentionBroadcastTesterMixin, to_np
......
......@@ -26,8 +26,8 @@ from diffusers import (
HunyuanSkyreelsImageToVideoPipeline,
HunyuanVideoTransformer3DModel,
)
from diffusers.utils.testing_utils import enable_full_determinism, torch_device
from ...testing_utils import enable_full_determinism, torch_device
from ..test_pipelines_common import PipelineTesterMixin, PyramidAttentionBroadcastTesterMixin, to_np
......
......@@ -26,8 +26,8 @@ from diffusers import (
HunyuanVideoPipeline,
HunyuanVideoTransformer3DModel,
)
from diffusers.utils.testing_utils import enable_full_determinism, torch_device
from ...testing_utils import enable_full_determinism, torch_device
from ..test_pipelines_common import (
FasterCacheTesterMixin,
FirstBlockCacheTesterMixin,
......
......@@ -36,11 +36,11 @@ from diffusers import (
HunyuanVideoFramepackPipeline,
HunyuanVideoFramepackTransformer3DModel,
)
from diffusers.utils.testing_utils import (
from ...testing_utils import (
enable_full_determinism,
torch_device,
)
from ..test_pipelines_common import (
FasterCacheTesterMixin,
PipelineTesterMixin,
......
......@@ -22,7 +22,8 @@ import torch
from transformers import AutoTokenizer, BertModel, T5EncoderModel
from diffusers import AutoencoderKL, DDPMScheduler, HunyuanDiT2DModel, HunyuanDiTPipeline
from diffusers.utils.testing_utils import (
from ...testing_utils import (
backend_empty_cache,
enable_full_determinism,
numpy_cosine_similarity_distance,
......@@ -30,7 +31,6 @@ from diffusers.utils.testing_utils import (
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineTesterMixin,
......
......@@ -33,7 +33,8 @@ from diffusers import (
)
from diffusers.image_processor import IPAdapterMaskProcessor
from diffusers.utils import load_image
from diffusers.utils.testing_utils import (
from ...testing_utils import (
Expectations,
backend_empty_cache,
enable_full_determinism,
......
......@@ -23,7 +23,8 @@ from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils.testing_utils import (
from ...testing_utils import (
backend_empty_cache,
enable_full_determinism,
floats_tensor,
......@@ -32,7 +33,6 @@ from diffusers.utils.testing_utils import (
slow,
torch_device,
)
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
......
......@@ -18,8 +18,8 @@ import unittest
import numpy as np
from diffusers import KandinskyCombinedPipeline, KandinskyImg2ImgCombinedPipeline, KandinskyInpaintCombinedPipeline
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_accelerator, torch_device
from ...testing_utils import enable_full_determinism, require_torch_accelerator, torch_device
from ..test_pipelines_common import PipelineTesterMixin
from .test_kandinsky import Dummies
from .test_kandinsky_img2img import Dummies as Img2ImgDummies
......
......@@ -31,7 +31,8 @@ from diffusers import (
VQModel,
)
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils.testing_utils import (
from ...testing_utils import (
backend_empty_cache,
enable_full_determinism,
floats_tensor,
......@@ -42,7 +43,6 @@ from diffusers.utils.testing_utils import (
slow,
torch_device,
)
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
......
......@@ -24,7 +24,8 @@ from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils.testing_utils import (
from ...testing_utils import (
backend_empty_cache,
enable_full_determinism,
floats_tensor,
......@@ -34,7 +35,6 @@ from diffusers.utils.testing_utils import (
require_torch_accelerator,
torch_device,
)
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
......
......@@ -28,8 +28,8 @@ from transformers import (
)
from diffusers import KandinskyPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps, torch_device
from ...testing_utils import enable_full_determinism, skip_mps, torch_device
from ..test_pipelines_common import PipelineTesterMixin
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment