Unverified Commit 843355f8 authored by Sayak Paul, committed by GitHub

[tests] xfail some kandinsky tests. (#12364)

xfail some kandinsky tests.
parent c006a95d
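Every affected test module gets the same treatment: a conditional `pytest.mark.xfail` marker gated on `is_transformers_version(">=", "4.56.2")`, so the slice-comparison tests are expected to fail once transformers 4.56.2+ changes the reference slices, plus `strict=True` so an unexpected pass is surfaced. A minimal, self-contained sketch of the pattern (the test body and reference slice below are hypothetical placeholders, not code from this commit):

```python
import numpy as np
import pytest

from diffusers.utils import is_transformers_version


@pytest.mark.xfail(
    condition=is_transformers_version(">=", "4.56.2"),
    reason="Latest transformers changes the slices",
    strict=True,  # with strict, an unexpected pass is reported as a failure, so stale markers get noticed
)
def test_expected_slice():
    # Hypothetical placeholder for a real pipeline call whose output slice drifts on newer transformers.
    image = np.zeros((64, 64, 3))
    expected_slice = np.array([0.0, 0.0, 0.0])
    assert np.abs(image[-1, -3:, -1] - expected_slice).max() < 1e-2
```

On transformers older than 4.56.2 the test runs normally; on 4.56.2 or newer it is collected as an expected failure instead of breaking CI.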
@@ -27,8 +27,8 @@ else:
     _import_structure["pipeline_qwenimage_controlnet"] = ["QwenImageControlNetPipeline"]
     _import_structure["pipeline_qwenimage_controlnet_inpaint"] = ["QwenImageControlNetInpaintPipeline"]
     _import_structure["pipeline_qwenimage_edit"] = ["QwenImageEditPipeline"]
-    _import_structure["pipeline_qwenimage_edit_plus"] = ["QwenImageEditPlusPipeline"]
     _import_structure["pipeline_qwenimage_edit_inpaint"] = ["QwenImageEditInpaintPipeline"]
+    _import_structure["pipeline_qwenimage_edit_plus"] = ["QwenImageEditPlusPipeline"]
     _import_structure["pipeline_qwenimage_img2img"] = ["QwenImageImg2ImgPipeline"]
     _import_structure["pipeline_qwenimage_inpaint"] = ["QwenImageInpaintPipeline"]
...
@@ -18,11 +18,13 @@ import random
 import unittest
 
 import numpy as np
+import pytest
 import torch
 from transformers import XLMRobertaTokenizerFast
 
 from diffusers import DDIMScheduler, KandinskyPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
 from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
+from diffusers.utils import is_transformers_version
 
 from ...testing_utils import (
     backend_empty_cache,
@@ -215,6 +217,9 @@ class KandinskyPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
         dummy = Dummies()
         return dummy.get_dummy_inputs(device=device, seed=seed)
 
+    @pytest.mark.xfail(
+        condition=is_transformers_version(">=", "4.56.2"), reason="Latest transformers changes the slices", strict=True
+    )
     def test_kandinsky(self):
         device = "cpu"
...
@@ -16,8 +16,10 @@
 import unittest
 
 import numpy as np
+import pytest
 
 from diffusers import KandinskyCombinedPipeline, KandinskyImg2ImgCombinedPipeline, KandinskyInpaintCombinedPipeline
+from diffusers.utils import is_transformers_version
 
 from ...testing_utils import enable_full_determinism, require_torch_accelerator, torch_device
 from ..test_pipelines_common import PipelineTesterMixin
@@ -73,6 +75,9 @@ class KandinskyPipelineCombinedFastTests(PipelineTesterMixin, unittest.TestCase):
         )
 
         return inputs
 
+    @pytest.mark.xfail(
+        condition=is_transformers_version(">=", "4.56.2"), reason="Latest transformers changes the slices", strict=True
+    )
     def test_kandinsky(self):
         device = "cpu"
@@ -181,6 +186,9 @@ class KandinskyPipelineImg2ImgCombinedFastTests(PipelineTesterMixin, unittest.TestCase):
         inputs.pop("negative_image_embeds")
         return inputs
 
+    @pytest.mark.xfail(
+        condition=is_transformers_version(">=", "4.56.2"), reason="Latest transformers changes the slices", strict=True
+    )
     def test_kandinsky(self):
         device = "cpu"
@@ -292,6 +300,9 @@ class KandinskyPipelineInpaintCombinedFastTests(PipelineTesterMixin, unittest.TestCase):
         inputs.pop("negative_image_embeds")
         return inputs
 
+    @pytest.mark.xfail(
+        condition=is_transformers_version(">=", "4.56.2"), reason="Latest transformers changes the slices", strict=True
+    )
     def test_kandinsky(self):
         device = "cpu"
...
@@ -18,6 +18,7 @@ import random
 import unittest
 
 import numpy as np
+import pytest
 import torch
 from PIL import Image
 from transformers import XLMRobertaTokenizerFast
@@ -31,6 +32,7 @@ from diffusers import (
     VQModel,
 )
 from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
+from diffusers.utils import is_transformers_version
 
 from ...testing_utils import (
     backend_empty_cache,
@@ -237,6 +239,9 @@ class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
         dummies = Dummies()
         return dummies.get_dummy_inputs(device=device, seed=seed)
 
+    @pytest.mark.xfail(
+        condition=is_transformers_version(">=", "4.56.2"), reason="Latest transformers changes the slices", strict=True
+    )
     def test_kandinsky_img2img(self):
         device = "cpu"
...
@@ -18,12 +18,14 @@ import random
 import unittest
 
 import numpy as np
+import pytest
 import torch
 from PIL import Image
 from transformers import XLMRobertaTokenizerFast
 
 from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
 from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
+from diffusers.utils import is_transformers_version
 
 from ...testing_utils import (
     backend_empty_cache,
@@ -231,6 +233,9 @@ class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
         dummies = Dummies()
         return dummies.get_dummy_inputs(device=device, seed=seed)
 
+    @pytest.mark.xfail(
+        condition=is_transformers_version(">=", "4.56.2"), reason="Latest transformers changes the slices", strict=True
+    )
     def test_kandinsky_inpaint(self):
         device = "cpu"
...