# qwen_image_runner.py
import gc
import math

import torch
from PIL import Image
from loguru import logger

from lightx2v.models.input_encoders.hf.qwen25.qwen25_vlforconditionalgeneration import Qwen25_VLForConditionalGeneration_TextEncoder
from lightx2v.models.networks.qwen_image.model import QwenImageTransformerModel
from lightx2v.models.runners.default_runner import DefaultRunner
from lightx2v.models.schedulers.qwen_image.scheduler import QwenImageScheduler
from lightx2v.models.video_encoders.hf.qwen_image.vae import AutoencoderKLQwenImageVAE
from lightx2v.utils.profiler import ProfilingContext4DebugL1, ProfilingContext4DebugL2
from lightx2v.utils.registry_factory import RUNNER_REGISTER


def calculate_dimensions(target_area, ratio):
    width = math.sqrt(target_area * ratio)
    height = width / ratio

    width = round(width / 32) * 32
    height = round(height / 32) * 32

    return width, height, None
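
# Worked example (a sketch): calculate_dimensions(1024 * 1024, 16 / 9) gives
#   width  = sqrt(1048576 * 16 / 9) ≈ 1365.33 -> round(1365.33 / 32) * 32 = 1376
#   height = 1365.33 / (16 / 9)     = 768.0   -> round(768.0 / 32) * 32 = 768
# so it returns (1376, 768, None); both sides snap to multiples of 32. The
# trailing None keeps the three-value unpacking used by callers intact.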


@RUNNER_REGISTER("qwen_image")
class QwenImageRunner(DefaultRunner):
    model_cpu_offload_seq = "text_encoder->transformer->vae"
    _callback_tensor_inputs = ["latents", "prompt_embeds"]

    def __init__(self, config):
        super().__init__(config)

    def load_transformer(self):
        model = QwenImageTransformerModel(self.config)
        return model

    def load_text_encoder(self):
        text_encoder = Qwen25_VLForConditionalGeneration_TextEncoder(self.config)
        text_encoders = [text_encoder]
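        # Wrapped in a list to mirror runners that combine several text encoders;
        # callers index it as self.text_encoders[0].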
        return text_encoders

    def load_image_encoder(self):
        # Qwen-Image uses no standalone image encoder; conditioning images are
        # handled by the Qwen2.5-VL text encoder and the VAE (see the i2i path).
        pass

    def load_vae(self):
        vae = AutoencoderKLQwenImageVAE(self.config)
        return vae

    def init_modules(self):
        logger.info("Initializing runner modules...")
        if not self.config.get("lazy_load", False) and not self.config.get("unload_modules", False):
            self.load_model()
        elif self.config.get("lazy_load", False):
            # Lazy loading only makes sense together with CPU offload.
            assert self.config.get("cpu_offload", False), "lazy_load requires cpu_offload"

        self.run_dit = self._run_dit_local
        self.run_vae_decoder = self._run_vae_decoder_local
        if self.config["task"] == "t2i":
            self.run_input_encoder = self._run_input_encoder_local_t2i
        elif self.config["task"] == "i2i":
            self.run_input_encoder = self._run_input_encoder_local_i2i
        else:
            raise NotImplementedError(f"Unsupported task: {self.config['task']}")

    @ProfilingContext4DebugL2("Run DiT")
    def _run_dit_local(self, total_steps=None):
        if self.config.get("lazy_load", False) or self.config.get("unload_modules", False):
            self.model = self.load_transformer()
        self.init_scheduler()
        self.model.scheduler.prepare(self.inputs["image_encoder_output"])
        if self.config.get("model_cls") == "wan2.2" and self.config["task"] == "i2v":
            self.inputs["image_encoder_output"]["vae_encoder_out"] = None
        latents, generator = self.run(total_steps)
        self.end_run()
        return latents, generator

    @ProfilingContext4DebugL2("Run Encoders")
    def _run_input_encoder_local_t2i(self):
        prompt = self.config["prompt_enhanced"] if self.config["use_prompt_enhancer"] else self.config["prompt"]
        text_encoder_output = self.run_text_encoder(prompt)
        torch.cuda.empty_cache()
        gc.collect()
        return {
            "text_encoder_output": text_encoder_output,
            "image_encoder_output": None,
        }

    @ProfilingContext4DebugL2("Run Encoders")
    def _run_input_encoder_local_i2i(self):
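        # The conditioning image is used twice: Qwen2.5-VL consumes it while
        # building multimodal prompt embeddings, and the VAE encodes the
        # preprocessed copy into image_latents for the scheduler.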
        image = Image.open(self.config["image_path"])
        prompt = self.config["prompt_enhanced"] if self.config["use_prompt_enhancer"] else self.config["prompt"]
        text_encoder_output = self.run_text_encoder(prompt, image)
        image_encoder_output = self.run_vae_encoder(image=text_encoder_output["preprocessed_image"])
        image_encoder_output["image_info"] = text_encoder_output["image_info"]
        torch.cuda.empty_cache()
        gc.collect()
        return {
            "text_encoder_output": text_encoder_output,
            "image_encoder_output": image_encoder_output,
        }

    def run_text_encoder(self, text, image=None):
        text_encoder_output = {}
        if self.config["task"] == "t2i":
            prompt_embeds, prompt_embeds_mask, _, _ = self.text_encoders[0].infer([text])
            text_encoder_output["prompt_embeds"] = prompt_embeds
            text_encoder_output["prompt_embeds_mask"] = prompt_embeds_mask
        elif self.config["task"] == "i2i":
            prompt_embeds, prompt_embeds_mask, preprocessed_image, image_info = self.text_encoders[0].infer([text], image)
            text_encoder_output["prompt_embeds"] = prompt_embeds
            text_encoder_output["prompt_embeds_mask"] = prompt_embeds_mask
            text_encoder_output["preprocessed_image"] = preprocessed_image
            text_encoder_output["image_info"] = image_info
        return text_encoder_output

    def run_vae_encoder(self, image):
        image_latents = self.vae.encode_vae_image(image)
        return {"image_latents": image_latents}

    def run(self, total_steps=None):
        if total_steps is None:
            total_steps = self.model.scheduler.infer_steps
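        # Each step runs three profiled phases: scheduler.step_pre (per-step
        # setup), model.infer (the DiT forward pass), and scheduler.step_post
        # (the scheduler-side latent update).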
        for step_index in range(total_steps):
            logger.info(f"==> step_index: {step_index + 1} / {total_steps}")

            with ProfilingContext4DebugL1("step_pre"):
                self.model.scheduler.step_pre(step_index=step_index)

            with ProfilingContext4DebugL1("🚀 infer_main"):
                self.model.infer(self.inputs)

            with ProfilingContext4DebugL1("step_post"):
                self.model.scheduler.step_post()

            if self.progress_callback:
                self.progress_callback(((step_index + 1) / total_steps) * 100, 100)

        return self.model.scheduler.latents, self.model.scheduler.generator

    def set_target_shape(self):
        if not self.config._auto_resize:
            width, height = self.config.aspect_ratios[self.config.aspect_ratio]
        else:
            image = Image.open(self.config.image_path).convert("RGB")
            width, height = image.size
            # Note: image.size components are always non-zero, so the `or`
            # fallbacks below never fire; the ~1 MP auto-resize target is
            # computed but effectively unused here.
            calculated_width, calculated_height, _ = calculate_dimensions(1024 * 1024, width / height)
            height = height or calculated_height
            width = width or calculated_width
            multiple_of = self.vae.vae_scale_factor * 2
            width = width // multiple_of * multiple_of
            height = height // multiple_of * multiple_of
            self.config.auto_width = width
            self.config.auto_hight = height

        # VAE applies 8x compression on images but we must also account for packing which requires
        # latent height and width to be divisible by 2.
        height = 2 * (int(height) // (self.vae.vae_scale_factor * 2))
        width = 2 * (int(width) // (self.vae.vae_scale_factor * 2))
        num_channels_latents = self.model.in_channels // 4
        self.config.target_shape = (self.config.batchsize, 1, num_channels_latents, height, width)
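        # Worked example (a sketch assuming vae_scale_factor == 8 and
        # in_channels == 64, as in the reference Qwen-Image configs): a
        # 1024x1024 target gives height = width = 2 * (1024 // 16) = 128 and
        # num_channels_latents = 64 // 4 = 16, so target_shape becomes
        # (batchsize, 1, 16, 128, 128).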

    def init_scheduler(self):
        scheduler = QwenImageScheduler(self.config)
        self.model.set_scheduler(scheduler)
        self.model.pre_infer.set_scheduler(scheduler)
        self.model.transformer_infer.set_scheduler(scheduler)
        self.model.post_infer.set_scheduler(scheduler)
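        # A single scheduler instance is shared by the pre-, transformer-, and
        # post-inference stages so they all observe the same step state.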

    def get_encoder_output_i2v(self):
        # Not needed for image generation; kept to satisfy the DefaultRunner interface.
        pass

    def run_image_encoder(self):
        # Qwen-Image has no separate image encoder (see load_image_encoder above).
        pass

    @ProfilingContext4DebugL2("Load models")
    def load_model(self):
        self.model = self.load_transformer()
        self.text_encoders = self.load_text_encoder()
        self.image_encoder = self.load_image_encoder()
        self.vae = self.load_vae()
        self.vfi_model = self.load_vfi_model() if "video_frame_interpolation" in self.config else None

    @ProfilingContext4DebugL1("Run VAE Decoder")
    def _run_vae_decoder_local(self, latents, generator):
        if self.config.get("lazy_load", False) or self.config.get("unload_modules", False):
            self.vae_decoder = self.load_vae()
        images = self.vae.decode(latents)
        if self.config.get("lazy_load", False) or self.config.get("unload_modules", False):
            del self.vae_decoder
            torch.cuda.empty_cache()
            gc.collect()
        return images

    def run_pipeline(self, save_image=True):
        if self.config["use_prompt_enhancer"]:
            self.config["prompt_enhanced"] = self.post_prompt_enhancer()

        self.inputs = self.run_input_encoder()
        self.set_target_shape()
        latents, generator = self.run_dit()

        images = self.run_vae_decoder(latents, generator)
        if save_image:
            images[0].save(self.config.save_video_path)

        del latents, generator
        torch.cuda.empty_cache()
        gc.collect()

        # Return (images, audio) - audio is None for default runner
        return images, None
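
# Minimal usage sketch (hypothetical; the real config object comes from
# lightx2v's config loading and carries many more keys than shown):
#
#     config = ...  # must provide task ("t2i"/"i2i"), prompt, use_prompt_enhancer,
#                   # save_video_path, and the model paths
#     runner = QwenImageRunner(config)
#     runner.init_modules()
#     images, _ = runner.run_pipeline()  # the audio slot is always None here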