# Copyright 2025 The Kandinsky Team and The HuggingFace Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import torch
from transformers import (
    AutoProcessor,
    CLIPTextConfig,
    CLIPTextModel,
    CLIPTokenizer,
    Qwen2_5_VLConfig,
    Qwen2_5_VLForConditionalGeneration,
)

from diffusers import (
    AutoencoderKLHunyuanVideo,
    FlowMatchEulerDiscreteScheduler,
    Kandinsky5T2VPipeline,
    Kandinsky5Transformer3DModel,
)

from ...testing_utils import (
    enable_full_determinism,
)
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class Kandinsky5T2VPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = Kandinsky5T2VPipeline

    batch_params = ["prompt", "negative_prompt"]

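    # Call-signature arguments exercised by PipelineTesterMixin's shared tests.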
    params = frozenset(["prompt", "height", "width", "num_frames", "num_inference_steps", "guidance_scale"])

    required_optional_params = {
        "num_inference_steps",
        "generator",
        "latents",
        "return_dict",
        "callback_on_step_end",
        "callback_on_step_end_tensor_inputs",
        "max_sequence_length",
    }
    test_xformers_attention = False
    supports_optional_components = True
    supports_dduf = False
    test_attention_slicing = False

    def get_dummy_components(self):
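        # Tiny AutoencoderKLHunyuanVideo (8x spatial / 4x temporal compression) keeps the test latents small.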
        torch.manual_seed(0)
        vae = AutoencoderKLHunyuanVideo(
            act_fn="silu",
            block_out_channels=[32, 64],
            down_block_types=[
                "HunyuanVideoDownBlock3D",
                "HunyuanVideoDownBlock3D",
            ],
            in_channels=3,
            latent_channels=16,
            layers_per_block=1,
            mid_block_add_attention=False,
            norm_num_groups=32,
            out_channels=3,
            scaling_factor=0.476986,
            spatial_compression_ratio=8,
            temporal_compression_ratio=4,
            up_block_types=[
                "HunyuanVideoUpBlock3D",
                "HunyuanVideoUpBlock3D",
            ],
        )

        scheduler = FlowMatchEulerDiscreteScheduler(shift=7.0)

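        # Tiny Qwen2.5-VL configuration; the pipeline uses this model as its primary text encoder.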
        qwen_hidden_size = 32
        torch.manual_seed(0)
        qwen_config = Qwen2_5_VLConfig(
            text_config={
                "hidden_size": qwen_hidden_size,
                "intermediate_size": qwen_hidden_size,
                "num_hidden_layers": 2,
                "num_attention_heads": 2,
                "num_key_value_heads": 2,
                "rope_scaling": {
                    "mrope_section": [2, 2, 4],
                    "rope_type": "default",
                    "type": "default",
                },
                "rope_theta": 1000000.0,
            },
            vision_config={
                "depth": 2,
                "hidden_size": qwen_hidden_size,
                "intermediate_size": qwen_hidden_size,
                "num_heads": 2,
                "out_hidden_size": qwen_hidden_size,
            },
            hidden_size=qwen_hidden_size,
            vocab_size=152064,
            vision_end_token_id=151653,
            vision_start_token_id=151652,
            vision_token_id=151654,
        )
        text_encoder = Qwen2_5_VLForConditionalGeneration(qwen_config)
        tokenizer = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-Qwen2VLForConditionalGeneration")

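        # Tiny CLIP text model used as the secondary text encoder (text_encoder_2).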
        clip_hidden_size = 16
        torch.manual_seed(0)
        clip_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=clip_hidden_size,
            intermediate_size=16,
            layer_norm_eps=1e-05,
            num_attention_heads=2,
            num_hidden_layers=2,
            pad_token_id=1,
            vocab_size=1000,
            projection_dim=clip_hidden_size,
        )
        text_encoder_2 = CLIPTextModel(clip_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
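        # Tiny Kandinsky5 transformer; its text dims are wired to the two text encoder hidden sizes above.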
        transformer = Kandinsky5Transformer3DModel(
            in_visual_dim=16,
            in_text_dim=qwen_hidden_size,
            in_text_dim2=clip_hidden_size,
            time_dim=16,
            out_visual_dim=16,
            patch_size=(1, 2, 2),
            model_dim=16,
            ff_dim=32,
            num_text_blocks=1,
            num_visual_blocks=2,
            axes_dims=(1, 1, 2),
            visual_cond=False,
            attention_type="regular",
        )

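        # Keys must match the component names expected by the pipeline's __init__.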
        return {
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            "transformer": transformer,
            "scheduler": scheduler,
        }

    def get_dummy_inputs(self, device, seed=0):
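        # Device-bound generators are not supported on mps, so fall back to seeding the global RNG there.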
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        return {
            "prompt": "a red square",
            "height": 32,
            "width": 32,
            "num_frames": 5,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "generator": generator,
            "output_type": "pt",
            "max_sequence_length": 8,
        }

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        video = output.frames[0]

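        # Only the output tensor shape is checked in this fast test.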
        self.assertEqual(video.shape, (3, 3, 16, 16))

    def test_attention_slicing_forward_pass(self):
        # Attention slicing is disabled for this pipeline (test_attention_slicing = False above).
        pass

    @unittest.skip("Only SDPA or NABLA (flex)")
    def test_xformers_memory_efficient_attention(self):
        pass

    @unittest.skip("TODO:Test does not work")
    def test_encode_prompt_works_in_isolation(self):
        pass

    @unittest.skip("TODO: revisit")
    def test_inference_batch_single_identical(self):
        pass