Unverified Commit 7aa6af11 authored by Dhruv Nair, committed by GitHub

[Refactor] Move testing utils out of src (#12238)

* update

* update

* update

* update

* update

* merge main

* Revert "merge main"

This reverts commit 65efbcead58644b31596ed2d714f7cee0e0238d3.
parent 87b800e1
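The hunks below apply the same mechanical change across the test suite: test modules stop importing shared helpers from diffusers.utils.testing_utils (which lives under src/) and instead pull them from a testing_utils module in the tests tree, using a relative import whose depth matches where the test file sits. As a minimal sketch of the pattern, assuming a test file two package levels below tests/ (for example tests/models/transformers/) and a tests/testing_utils.py exposing the same helpers; both paths are illustrative assumptions, not read from this diff:

# Before: helpers came from the installed package under src/diffusers/.
# from diffusers.utils.testing_utils import enable_full_determinism, torch_device

# After: helpers come from the tests tree itself. Three leading dots walk up two
# package levels (tests.models.transformers -> tests), so this resolves to
# tests.testing_utils when the file is imported as part of the tests package.
from ...testing_utils import enable_full_determinism, torch_device

enable_full_determinism()  # call sites are unchanged; only the import path moves

Test files that sit one level shallower, such as those directly under tests/models/, use the two-dot form (from ..testing_utils import ...), as the hunks below show.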
@@ -18,13 +18,13 @@ import unittest
 import torch
 from diffusers import VQModel
-from diffusers.utils.testing_utils import (
+from ...testing_utils import (
     backend_manual_seed,
     enable_full_determinism,
     floats_tensor,
     torch_device,
 )
 from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
...
@@ -7,7 +7,8 @@ import torch
 from diffusers import DiffusionPipeline
 from diffusers.models.attention_processor import Attention, AttnAddedKVProcessor
-from diffusers.utils.testing_utils import torch_device
+from ..testing_utils import torch_device
 class AttnAddedKVProcessorTests(unittest.TestCase):
...
@@ -24,7 +24,8 @@ from diffusers.models.attention import GEGLU, AdaLayerNorm, ApproximateGELU
 from diffusers.models.embeddings import get_timestep_embedding
 from diffusers.models.resnet import Downsample2D, ResnetBlock2D, Upsample2D
 from diffusers.models.transformers.transformer_2d import Transformer2DModel
-from diffusers.utils.testing_utils import (
+from ..testing_utils import (
     backend_manual_seed,
     require_torch_accelerator_with_fp64,
     require_torch_version_greater_equal,
...
@@ -59,7 +59,10 @@ from diffusers.utils import (
     logging,
 )
 from diffusers.utils.hub_utils import _add_variant
-from diffusers.utils.testing_utils import (
+from diffusers.utils.torch_utils import get_torch_cuda_device_capability
+from ..others.test_utils import TOKEN, USER, is_staging_test
+from ..testing_utils import (
     CaptureLogger,
     _check_safetensors_serialization,
     backend_empty_cache,
@@ -82,9 +85,6 @@ from diffusers.utils.testing_utils import (
     torch_all_close,
     torch_device,
 )
-from diffusers.utils.torch_utils import get_torch_cuda_device_capability
-from ..others.test_utils import TOKEN, USER, is_staging_test
 if is_peft_available():
...
@@ -18,13 +18,13 @@ import unittest
 import torch
 from diffusers import DiTTransformer2DModel, Transformer2DModel
-from diffusers.utils.testing_utils import (
+from ...testing_utils import (
     enable_full_determinism,
     floats_tensor,
     slow,
     torch_device,
 )
 from ..test_modeling_common import ModelTesterMixin
...
@@ -18,13 +18,13 @@ import unittest
 import torch
 from diffusers import PixArtTransformer2DModel, Transformer2DModel
-from diffusers.utils.testing_utils import (
+from ...testing_utils import (
     enable_full_determinism,
     floats_tensor,
     slow,
     torch_device,
 )
 from ..test_modeling_common import ModelTesterMixin
...
@@ -21,7 +21,8 @@ import torch
 from parameterized import parameterized
 from diffusers import PriorTransformer
-from diffusers.utils.testing_utils import (
+from ...testing_utils import (
     backend_empty_cache,
     enable_full_determinism,
     floats_tensor,
@@ -29,7 +30,6 @@ from diffusers.utils.testing_utils import (
     torch_all_close,
     torch_device,
 )
 from ..test_modeling_common import ModelTesterMixin
...
@@ -17,11 +17,11 @@ import unittest
 import torch
 from diffusers import AllegroTransformer3DModel
-from diffusers.utils.testing_utils import (
+from ...testing_utils import (
     enable_full_determinism,
     torch_device,
 )
 from ..test_modeling_common import ModelTesterMixin
...
@@ -18,8 +18,8 @@ import unittest
 import torch
 from diffusers import AuraFlowTransformer2DModel
-from diffusers.utils.testing_utils import enable_full_determinism, torch_device
+from ...testing_utils import enable_full_determinism, torch_device
 from ..test_modeling_common import ModelTesterMixin
...
@@ -20,8 +20,8 @@ import torch
 from diffusers import BriaTransformer2DModel
 from diffusers.models.attention_processor import FluxIPAdapterJointAttnProcessor2_0
 from diffusers.models.embeddings import ImageProjection
-from diffusers.utils.testing_utils import enable_full_determinism, torch_device
+from ...testing_utils import enable_full_determinism, torch_device
 from ..test_modeling_common import LoraHotSwappingForModelTesterMixin, ModelTesterMixin, TorchCompileTesterMixin
...
@@ -20,8 +20,8 @@ import torch
 from diffusers import ChromaTransformer2DModel
 from diffusers.models.attention_processor import FluxIPAdapterJointAttnProcessor2_0
 from diffusers.models.embeddings import ImageProjection
-from diffusers.utils.testing_utils import enable_full_determinism, torch_device
+from ...testing_utils import enable_full_determinism, torch_device
 from ..test_modeling_common import LoraHotSwappingForModelTesterMixin, ModelTesterMixin, TorchCompileTesterMixin
...
@@ -18,11 +18,11 @@ import unittest
 import torch
 from diffusers import CogVideoXTransformer3DModel
-from diffusers.utils.testing_utils import (
+from ...testing_utils import (
     enable_full_determinism,
     torch_device,
 )
 from ..test_modeling_common import ModelTesterMixin
...
@@ -18,11 +18,11 @@ import unittest
 import torch
 from diffusers import CogView3PlusTransformer2DModel
-from diffusers.utils.testing_utils import (
+from ...testing_utils import (
     enable_full_determinism,
     torch_device,
 )
 from ..test_modeling_common import ModelTesterMixin
...
@@ -17,8 +17,8 @@ import unittest
 import torch
 from diffusers import CogView4Transformer2DModel
-from diffusers.utils.testing_utils import enable_full_determinism, torch_device
+from ...testing_utils import enable_full_determinism, torch_device
 from ..test_modeling_common import ModelTesterMixin
...
@@ -18,11 +18,11 @@ import unittest
 import torch
 from diffusers import ConsisIDTransformer3DModel
-from diffusers.utils.testing_utils import (
+from ...testing_utils import (
     enable_full_determinism,
     torch_device,
 )
 from ..test_modeling_common import ModelTesterMixin
...
@@ -17,8 +17,8 @@ import unittest
 import torch
 from diffusers import CosmosTransformer3DModel
-from diffusers.utils.testing_utils import enable_full_determinism, torch_device
+from ...testing_utils import enable_full_determinism, torch_device
 from ..test_modeling_common import ModelTesterMixin
...
@@ -18,8 +18,8 @@ import unittest
 import torch
 from diffusers import EasyAnimateTransformer3DModel
-from diffusers.utils.testing_utils import enable_full_determinism, torch_device
+from ...testing_utils import enable_full_determinism, torch_device
 from ..test_modeling_common import ModelTesterMixin
...
@@ -20,8 +20,8 @@ import torch
 from diffusers import FluxTransformer2DModel
 from diffusers.models.attention_processor import FluxIPAdapterJointAttnProcessor2_0
 from diffusers.models.embeddings import ImageProjection
-from diffusers.utils.testing_utils import enable_full_determinism, is_peft_available, torch_device
+from ...testing_utils import enable_full_determinism, is_peft_available, torch_device
 from ..test_modeling_common import LoraHotSwappingForModelTesterMixin, ModelTesterMixin, TorchCompileTesterMixin
...
@@ -18,11 +18,11 @@ import unittest
 import torch
 from diffusers import HiDreamImageTransformer2DModel
-from diffusers.utils.testing_utils import (
+from ...testing_utils import (
     enable_full_determinism,
     torch_device,
 )
 from ..test_modeling_common import ModelTesterMixin
...
@@ -18,11 +18,11 @@ import unittest
 import torch
 from diffusers import HunyuanDiT2DModel
-from diffusers.utils.testing_utils import (
+from ...testing_utils import (
     enable_full_determinism,
     torch_device,
 )
 from ..test_modeling_common import ModelTesterMixin
...