"src/vscode:/vscode.git/clone" did not exist on "826f43505d07cac92504bfc0637227d72f477f30"
Unverified commit 5551506b authored by hlky, committed by GitHub

Rename Lumina(2)Text2ImgPipeline -> Lumina(2)Pipeline (#10827)



* Rename Lumina(2)Text2ImgPipeline -> Lumina(2)Pipeline


---------
Co-authored-by: YiYi Xu <yixu310@gmail.com>
parent 20e4b6a6
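
In practice the rename is backwards compatible: as the diff below shows, the old classes are kept as thin subclasses of the new ones that warn on construction. A minimal migration sketch (assuming a diffusers build that includes this commit):

```python
import torch
from diffusers import LuminaPipeline  # new, preferred name

pipe = LuminaPipeline.from_pretrained(
    "Alpha-VLLM/Lumina-Next-SFT-diffusers", torch_dtype=torch.bfloat16
)

# The old name still imports and behaves identically, but constructing it
# emits a FutureWarning; removal is scheduled for diffusers 0.34.
from diffusers import LuminaText2ImgPipeline  # deprecated alias
```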
@@ -58,10 +58,10 @@ Use [`torch.compile`](https://huggingface.co/docs/diffusers/main/en/tutorials/fa
First, load the pipeline:

```python
-from diffusers import LuminaText2ImgPipeline
+from diffusers import LuminaPipeline
import torch

-pipeline = LuminaText2ImgPipeline.from_pretrained(
+pipeline = LuminaPipeline.from_pretrained(
    "Alpha-VLLM/Lumina-Next-SFT-diffusers", torch_dtype=torch.bfloat16
).to("cuda")
```
@@ -86,11 +86,11 @@ image = pipeline(prompt="Upper body of a young woman in a Victorian-era outfit w
Quantization helps reduce the memory requirements of very large models by storing model weights in a lower precision data type. However, quantization may have varying impact on video quality depending on the video model.

-Refer to the [Quantization](../../quantization/overview) overview to learn more about supported quantization backends and selecting a quantization backend that supports your use case. The example below demonstrates how to load a quantized [`LuminaText2ImgPipeline`] for inference with bitsandbytes.
+Refer to the [Quantization](../../quantization/overview) overview to learn more about supported quantization backends and selecting a quantization backend that supports your use case. The example below demonstrates how to load a quantized [`LuminaPipeline`] for inference with bitsandbytes.

```py
import torch
-from diffusers import BitsAndBytesConfig as DiffusersBitsAndBytesConfig, Transformer2DModel, LuminaText2ImgPipeline
+from diffusers import BitsAndBytesConfig as DiffusersBitsAndBytesConfig, Transformer2DModel, LuminaPipeline
from transformers import BitsAndBytesConfig as BitsAndBytesConfig, T5EncoderModel

quant_config = BitsAndBytesConfig(load_in_8bit=True)
@@ -109,7 +109,7 @@ transformer_8bit = Transformer2DModel.from_pretrained(
    torch_dtype=torch.float16,
)

-pipeline = LuminaText2ImgPipeline.from_pretrained(
+pipeline = LuminaPipeline.from_pretrained(
    "Alpha-VLLM/Lumina-Next-SFT-diffusers",
    text_encoder=text_encoder_8bit,
    transformer=transformer_8bit,
@@ -122,9 +122,9 @@ image = pipeline(prompt).images[0]
image.save("lumina.png")
```

-## LuminaText2ImgPipeline
+## LuminaPipeline

-[[autodoc]] LuminaText2ImgPipeline
+[[autodoc]] LuminaPipeline
  - all
  - __call__
@@ -36,14 +36,14 @@ Single file loading for Lumina Image 2.0 is available for the `Lumina2Transforme
```python
import torch
-from diffusers import Lumina2Transformer2DModel, Lumina2Text2ImgPipeline
+from diffusers import Lumina2Transformer2DModel, Lumina2Pipeline

ckpt_path = "https://huggingface.co/Alpha-VLLM/Lumina-Image-2.0/blob/main/consolidated.00-of-01.pth"
transformer = Lumina2Transformer2DModel.from_single_file(
    ckpt_path, torch_dtype=torch.bfloat16
)

-pipe = Lumina2Text2ImgPipeline.from_pretrained(
+pipe = Lumina2Pipeline.from_pretrained(
    "Alpha-VLLM/Lumina-Image-2.0", transformer=transformer, torch_dtype=torch.bfloat16
)
pipe.enable_model_cpu_offload()
@@ -60,7 +60,7 @@ image.save("lumina-single-file.png")
GGUF Quantized checkpoints for the `Lumina2Transformer2DModel` can be loaded via `from_single_file` with the `GGUFQuantizationConfig`

```python
-from diffusers import Lumina2Transformer2DModel, Lumina2Text2ImgPipeline, GGUFQuantizationConfig
+from diffusers import Lumina2Transformer2DModel, Lumina2Pipeline, GGUFQuantizationConfig

ckpt_path = "https://huggingface.co/calcuis/lumina-gguf/blob/main/lumina2-q4_0.gguf"
transformer = Lumina2Transformer2DModel.from_single_file(
@@ -69,7 +69,7 @@ transformer = Lumina2Transformer2DModel.from_single_file(
    torch_dtype=torch.bfloat16,
)

-pipe = Lumina2Text2ImgPipeline.from_pretrained(
+pipe = Lumina2Pipeline.from_pretrained(
    "Alpha-VLLM/Lumina-Image-2.0", transformer=transformer, torch_dtype=torch.bfloat16
)
pipe.enable_model_cpu_offload()
@@ -80,8 +80,8 @@ image = pipe(
image.save("lumina-gguf.png")
```

-## Lumina2Text2ImgPipeline
+## Lumina2Pipeline

-[[autodoc]] Lumina2Text2ImgPipeline
+[[autodoc]] Lumina2Pipeline
  - all
  - __call__
@@ -5,7 +5,7 @@ import torch
from safetensors.torch import load_file
from transformers import AutoModel, AutoTokenizer

-from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, LuminaNextDiT2DModel, LuminaText2ImgPipeline
+from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, LuminaNextDiT2DModel, LuminaPipeline

def main(args):
@@ -115,7 +115,7 @@ def main(args):
    tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b")
    text_encoder = AutoModel.from_pretrained("google/gemma-2b")

-    pipeline = LuminaText2ImgPipeline(
+    pipeline = LuminaPipeline(
        tokenizer=tokenizer, text_encoder=text_encoder, transformer=transformer, vae=vae, scheduler=scheduler
    )
    pipeline.save_pretrained(args.dump_path)
...
@@ -403,7 +403,9 @@ else:
        "LEditsPPPipelineStableDiffusionXL",
        "LTXImageToVideoPipeline",
        "LTXPipeline",
+        "Lumina2Pipeline",
        "Lumina2Text2ImgPipeline",
+        "LuminaPipeline",
        "LuminaText2ImgPipeline",
        "MarigoldDepthPipeline",
        "MarigoldIntrinsicsPipeline",
@@ -945,7 +947,9 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
        LEditsPPPipelineStableDiffusionXL,
        LTXImageToVideoPipeline,
        LTXPipeline,
+        Lumina2Pipeline,
        Lumina2Text2ImgPipeline,
+        LuminaPipeline,
        LuminaText2ImgPipeline,
        MarigoldDepthPipeline,
        MarigoldIntrinsicsPipeline,
...
@@ -265,8 +265,8 @@ else:
    )
    _import_structure["latte"] = ["LattePipeline"]
    _import_structure["ltx"] = ["LTXPipeline", "LTXImageToVideoPipeline"]
-    _import_structure["lumina"] = ["LuminaText2ImgPipeline"]
-    _import_structure["lumina2"] = ["Lumina2Text2ImgPipeline"]
+    _import_structure["lumina"] = ["LuminaPipeline", "LuminaText2ImgPipeline"]
+    _import_structure["lumina2"] = ["Lumina2Pipeline", "Lumina2Text2ImgPipeline"]
    _import_structure["marigold"].extend(
        [
            "MarigoldDepthPipeline",
@@ -619,8 +619,8 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
        LEditsPPPipelineStableDiffusionXL,
    )
    from .ltx import LTXImageToVideoPipeline, LTXPipeline
-    from .lumina import LuminaText2ImgPipeline
-    from .lumina2 import Lumina2Text2ImgPipeline
+    from .lumina import LuminaPipeline, LuminaText2ImgPipeline
+    from .lumina2 import Lumina2Pipeline, Lumina2Text2ImgPipeline
    from .marigold import (
        MarigoldDepthPipeline,
        MarigoldIntrinsicsPipeline,
...
@@ -69,8 +69,8 @@ from .kandinsky2_2 import (
)
from .kandinsky3 import Kandinsky3Img2ImgPipeline, Kandinsky3Pipeline
from .latent_consistency_models import LatentConsistencyModelImg2ImgPipeline, LatentConsistencyModelPipeline
-from .lumina import LuminaText2ImgPipeline
-from .lumina2 import Lumina2Text2ImgPipeline
+from .lumina import LuminaPipeline
+from .lumina2 import Lumina2Pipeline
from .pag import (
    HunyuanDiTPAGPipeline,
    PixArtSigmaPAGPipeline,
@@ -141,8 +141,8 @@ AUTO_TEXT2IMAGE_PIPELINES_MAPPING = OrderedDict(
        ("flux", FluxPipeline),
        ("flux-control", FluxControlPipeline),
        ("flux-controlnet", FluxControlNetPipeline),
-        ("lumina", LuminaText2ImgPipeline),
-        ("lumina2", Lumina2Text2ImgPipeline),
+        ("lumina", LuminaPipeline),
+        ("lumina2", Lumina2Pipeline),
        ("cogview3", CogView3PlusPipeline),
        ("cogview4", CogView4Pipeline),
    ]
...
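
Because the auto-pipeline mapping above now points at the new class names, `AutoPipelineForText2Image` resolves Lumina checkpoints to the renamed pipelines. A quick sketch of the expected behavior (assuming the checkpoint's `model_index.json` references a Lumina pipeline class):

```python
import torch
from diffusers import AutoPipelineForText2Image

# The "lumina2" entry in AUTO_TEXT2IMAGE_PIPELINES_MAPPING now maps to Lumina2Pipeline.
pipe = AutoPipelineForText2Image.from_pretrained(
    "Alpha-VLLM/Lumina-Image-2.0", torch_dtype=torch.bfloat16
)
print(type(pipe).__name__)  # expected: Lumina2Pipeline
```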
@@ -22,7 +22,7 @@ except OptionalDependencyNotAvailable:
    _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
else:
-    _import_structure["pipeline_lumina"] = ["LuminaText2ImgPipeline"]
+    _import_structure["pipeline_lumina"] = ["LuminaPipeline", "LuminaText2ImgPipeline"]

if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
    try:
@@ -32,7 +32,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
    except OptionalDependencyNotAvailable:
        from ...utils.dummy_torch_and_transformers_objects import *
    else:
-        from .pipeline_lumina import LuminaText2ImgPipeline
+        from .pipeline_lumina import LuminaPipeline, LuminaText2ImgPipeline
else:
    import sys
...
@@ -30,6 +30,7 @@ from ...models.transformers.lumina_nextdit2d import LuminaNextDiT2DModel
from ...schedulers import FlowMatchEulerDiscreteScheduler
from ...utils import (
    BACKENDS_MAPPING,
+    deprecate,
    is_bs4_available,
    is_ftfy_available,
    is_torch_xla_available,
@@ -60,11 +61,9 @@ EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
-        >>> from diffusers import LuminaText2ImgPipeline
+        >>> from diffusers import LuminaPipeline

-        >>> pipe = LuminaText2ImgPipeline.from_pretrained(
-        ...     "Alpha-VLLM/Lumina-Next-SFT-diffusers", torch_dtype=torch.bfloat16
-        ... )
+        >>> pipe = LuminaPipeline.from_pretrained("Alpha-VLLM/Lumina-Next-SFT-diffusers", torch_dtype=torch.bfloat16)
        >>> # Enable memory optimizations.
        >>> pipe.enable_model_cpu_offload()
@@ -134,7 +133,7 @@ def retrieve_timesteps(
    return timesteps, num_inference_steps

-class LuminaText2ImgPipeline(DiffusionPipeline):
+class LuminaPipeline(DiffusionPipeline):
    r"""
    Pipeline for text-to-image generation using Lumina-T2I.
@@ -932,3 +931,23 @@ class LuminaText2ImgPipeline(DiffusionPipeline):
            return (image,)

        return ImagePipelineOutput(images=image)
+
+
+class LuminaText2ImgPipeline(LuminaPipeline):
+    def __init__(
+        self,
+        transformer: LuminaNextDiT2DModel,
+        scheduler: FlowMatchEulerDiscreteScheduler,
+        vae: AutoencoderKL,
+        text_encoder: GemmaPreTrainedModel,
+        tokenizer: Union[GemmaTokenizer, GemmaTokenizerFast],
+    ):
+        deprecation_message = "`LuminaText2ImgPipeline` has been renamed to `LuminaPipeline` and will be removed in a future version. Please use `LuminaPipeline` instead."
+        deprecate("diffusers.pipelines.lumina.pipeline_lumina.LuminaText2ImgPipeline", "0.34", deprecation_message)
+        super().__init__(
+            transformer=transformer,
+            scheduler=scheduler,
+            vae=vae,
+            text_encoder=text_encoder,
+            tokenizer=tokenizer,
+        )
@@ -22,7 +22,7 @@ except OptionalDependencyNotAvailable:
    _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
else:
-    _import_structure["pipeline_lumina2"] = ["Lumina2Text2ImgPipeline"]
+    _import_structure["pipeline_lumina2"] = ["Lumina2Pipeline", "Lumina2Text2ImgPipeline"]

if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
    try:
@@ -32,7 +32,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
    except OptionalDependencyNotAvailable:
        from ...utils.dummy_torch_and_transformers_objects import *
    else:
-        from .pipeline_lumina2 import Lumina2Text2ImgPipeline
+        from .pipeline_lumina2 import Lumina2Pipeline, Lumina2Text2ImgPipeline
else:
    import sys
...
@@ -25,6 +25,7 @@ from ...models import AutoencoderKL
from ...models.transformers.transformer_lumina2 import Lumina2Transformer2DModel
from ...schedulers import FlowMatchEulerDiscreteScheduler
from ...utils import (
+    deprecate,
    is_torch_xla_available,
    logging,
    replace_example_docstring,
@@ -47,9 +48,9 @@ EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
-        >>> from diffusers import Lumina2Text2ImgPipeline
+        >>> from diffusers import Lumina2Pipeline

-        >>> pipe = Lumina2Text2ImgPipeline.from_pretrained("Alpha-VLLM/Lumina-Image-2.0", torch_dtype=torch.bfloat16)
+        >>> pipe = Lumina2Pipeline.from_pretrained("Alpha-VLLM/Lumina-Image-2.0", torch_dtype=torch.bfloat16)
        >>> # Enable memory optimizations.
        >>> pipe.enable_model_cpu_offload()
@@ -133,7 +134,7 @@ def retrieve_timesteps(
    return timesteps, num_inference_steps

-class Lumina2Text2ImgPipeline(DiffusionPipeline, Lumina2LoraLoaderMixin):
+class Lumina2Pipeline(DiffusionPipeline, Lumina2LoraLoaderMixin):
    r"""
    Pipeline for text-to-image generation using Lumina-T2I.
@@ -767,3 +768,23 @@ class Lumina2Text2ImgPipeline(DiffusionPipeline, Lumina2LoraLoaderMixin):
            return (image,)

        return ImagePipelineOutput(images=image)
+
+
+class Lumina2Text2ImgPipeline(Lumina2Pipeline):
+    def __init__(
+        self,
+        transformer: Lumina2Transformer2DModel,
+        scheduler: FlowMatchEulerDiscreteScheduler,
+        vae: AutoencoderKL,
+        text_encoder: Gemma2PreTrainedModel,
+        tokenizer: Union[GemmaTokenizer, GemmaTokenizerFast],
+    ):
+        deprecation_message = "`Lumina2Text2ImgPipeline` has been renamed to `Lumina2Pipeline` and will be removed in a future version. Please use `Lumina2Pipeline` instead."
+        deprecate("diffusers.pipelines.lumina2.pipeline_lumina2.Lumina2Text2ImgPipeline", "0.34", deprecation_message)
+        super().__init__(
+            transformer=transformer,
+            scheduler=scheduler,
+            vae=vae,
+            text_encoder=text_encoder,
+            tokenizer=tokenizer,
+        )
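
Both deprecation shims delegate to diffusers' `deprecate` utility, called with a dotted name, the removal version, and a user-facing message. A sketch of the call the shims make and its effect (my reading of the utility's behavior, not part of this diff):

```python
from diffusers.utils import deprecate

# Emits a FutureWarning carrying the message below. Once the installed
# diffusers version reaches the stated removal version ("0.34"), the call
# raises instead, which is what forces the alias to be deleted.
deprecate(
    "diffusers.pipelines.lumina2.pipeline_lumina2.Lumina2Text2ImgPipeline",
    "0.34",
    "`Lumina2Text2ImgPipeline` has been renamed to `Lumina2Pipeline` and will be removed "
    "in a future version. Please use `Lumina2Pipeline` instead.",
)
```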
@@ -1232,6 +1232,21 @@ class LTXPipeline(metaclass=DummyObject):
        requires_backends(cls, ["torch", "transformers"])


+class Lumina2Pipeline(metaclass=DummyObject):
+    _backends = ["torch", "transformers"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch", "transformers"])
+
+    @classmethod
+    def from_config(cls, *args, **kwargs):
+        requires_backends(cls, ["torch", "transformers"])
+
+    @classmethod
+    def from_pretrained(cls, *args, **kwargs):
+        requires_backends(cls, ["torch", "transformers"])
+
+
class Lumina2Text2ImgPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers"]
@@ -1247,6 +1262,21 @@ class Lumina2Text2ImgPipeline(metaclass=DummyObject):
        requires_backends(cls, ["torch", "transformers"])


+class LuminaPipeline(metaclass=DummyObject):
+    _backends = ["torch", "transformers"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch", "transformers"])
+
+    @classmethod
+    def from_config(cls, *args, **kwargs):
+        requires_backends(cls, ["torch", "transformers"])
+
+    @classmethod
+    def from_pretrained(cls, *args, **kwargs):
+        requires_backends(cls, ["torch", "transformers"])
+
+
class LuminaText2ImgPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers"]
...
@@ -5,7 +5,13 @@ import numpy as np
import torch
from transformers import AutoTokenizer, GemmaConfig, GemmaForCausalLM

-from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, LuminaNextDiT2DModel, LuminaText2ImgPipeline
+from diffusers import (
+    AutoencoderKL,
+    FlowMatchEulerDiscreteScheduler,
+    LuminaNextDiT2DModel,
+    LuminaPipeline,
+    LuminaText2ImgPipeline,
+)
from diffusers.utils.testing_utils import (
    backend_empty_cache,
    numpy_cosine_similarity_distance,
@@ -17,8 +23,8 @@ from diffusers.utils.testing_utils import (
from ..test_pipelines_common import PipelineTesterMixin


-class LuminaText2ImgPipelinePipelineFastTests(unittest.TestCase, PipelineTesterMixin):
-    pipeline_class = LuminaText2ImgPipeline
+class LuminaPipelineFastTests(unittest.TestCase, PipelineTesterMixin):
+    pipeline_class = LuminaPipeline
    params = frozenset(
        [
            "prompt",
@@ -99,11 +105,17 @@ class LuminaText2ImgPipelinePipelineFastTests(unittest.TestCase, PipelineTesterM
    def test_xformers_attention_forwardGenerator_pass(self):
        pass

+    def test_deprecation_raises_warning(self):
+        with self.assertWarns(FutureWarning) as warning:
+            _ = LuminaText2ImgPipeline(**self.get_dummy_components()).to(torch_device)
+        warning_message = str(warning.warnings[0].message)
+        assert "renamed to `LuminaPipeline`" in warning_message
+

@slow
@require_torch_accelerator
-class LuminaText2ImgPipelineSlowTests(unittest.TestCase):
-    pipeline_class = LuminaText2ImgPipeline
+class LuminaPipelineSlowTests(unittest.TestCase):
+    pipeline_class = LuminaPipeline
    repo_id = "Alpha-VLLM/Lumina-Next-SFT-diffusers"

    def setUp(self):
...
@@ -6,15 +6,17 @@ from transformers import AutoTokenizer, Gemma2Config, Gemma2Model
from diffusers import (
    AutoencoderKL,
    FlowMatchEulerDiscreteScheduler,
+    Lumina2Pipeline,
    Lumina2Text2ImgPipeline,
    Lumina2Transformer2DModel,
)
+from diffusers.utils.testing_utils import torch_device

from ..test_pipelines_common import PipelineTesterMixin


-class Lumina2Text2ImgPipelinePipelineFastTests(unittest.TestCase, PipelineTesterMixin):
-    pipeline_class = Lumina2Text2ImgPipeline
+class Lumina2PipelineFastTests(unittest.TestCase, PipelineTesterMixin):
+    pipeline_class = Lumina2Pipeline
    params = frozenset(
        [
            "prompt",
@@ -115,3 +117,9 @@ class Lumina2Text2ImgPipelinePipelineFastTests(unittest.TestCase, PipelineTester
            "output_type": "np",
        }
        return inputs
+
+    def test_deprecation_raises_warning(self):
+        with self.assertWarns(FutureWarning) as warning:
+            _ = Lumina2Text2ImgPipeline(**self.get_dummy_components()).to(torch_device)
+        warning_message = str(warning.warnings[0].message)
+        assert "renamed to `Lumina2Pipeline`" in warning_message