import gc
import math

import torch
from PIL import Image
from loguru import logger

from lightx2v.models.input_encoders.hf.qwen25.qwen25_vlforconditionalgeneration import Qwen25_VLForConditionalGeneration_TextEncoder
from lightx2v.models.networks.qwen_image.model import QwenImageTransformerModel
from lightx2v.models.runners.default_runner import DefaultRunner
from lightx2v.models.schedulers.qwen_image.scheduler import QwenImageScheduler
from lightx2v.models.video_encoders.hf.qwen_image.vae import AutoencoderKLQwenImageVAE
from lightx2v.server.metrics import monitor_cli
from lightx2v.utils.envs import *
from lightx2v.utils.profiler import *
from lightx2v.utils.registry_factory import RUNNER_REGISTER


def calculate_dimensions(target_area, ratio):
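    """Pick a (width, height) whose product is roughly ``target_area`` at the given
    aspect ratio, snapped to multiples of 32.

    Worked example: target_area=1024 * 1024, ratio=16 / 9 gives width ≈ 1365.3 and
    height ≈ 768.0 before rounding, i.e. (1376, 768) after snapping. The third
    return value is always None; callers in this file discard it.
    """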
    width = math.sqrt(target_area * ratio)
    height = width / ratio

    width = round(width / 32) * 32
    height = round(height / 32) * 32

    return width, height, None


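# Registered under the "qwen_image" key so the lightx2v runner factory can
# construct this runner from config.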
@RUNNER_REGISTER("qwen_image")
class QwenImageRunner(DefaultRunner):
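    # Offload ordering and callback tensor names follow the diffusers pipeline convention.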
    model_cpu_offload_seq = "text_encoder->transformer->vae"
    _callback_tensor_inputs = ["latents", "prompt_embeds"]

    def __init__(self, config):
        super().__init__(config)

    @ProfilingContext4DebugL2("Load models")
    def load_model(self):
        self.model = self.load_transformer()
        self.text_encoders = self.load_text_encoder()
        self.image_encoder = self.load_image_encoder()
        self.vae = self.load_vae()
        self.vfi_model = self.load_vfi_model() if "video_frame_interpolation" in self.config else None

    def load_transformer(self):
        model = QwenImageTransformerModel(self.config)
        return model

    def load_text_encoder(self):
        text_encoder = Qwen25_VLForConditionalGeneration_TextEncoder(self.config)
        text_encoders = [text_encoder]
        return text_encoders

    def load_image_encoder(self):
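        # Qwen-Image has no standalone image encoder; for i2i the Qwen2.5-VL text
        # encoder consumes the reference image directly (see run_text_encoder).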
        pass

    def load_vae(self):
        vae = AutoencoderKLQwenImageVAE(self.config)
        return vae

    def init_modules(self):
        logger.info("Initializing runner modules...")
        if not self.config.get("lazy_load", False) and not self.config.get("unload_modules", False):
            self.load_model()
        elif self.config.get("lazy_load", False):
            assert self.config.get("cpu_offload", False)
        self.run_dit = self._run_dit_local
        self.run_vae_decoder = self._run_vae_decoder_local
        if self.config["task"] == "t2i":
            self.run_input_encoder = self._run_input_encoder_local_t2i
        elif self.config["task"] == "i2i":
            self.run_input_encoder = self._run_input_encoder_local_i2i
        else:
            raise NotImplementedError(f"Unsupported task: {self.config['task']}")


    @ProfilingContext4DebugL2("Run DiT")
    def _run_dit_local(self, total_steps=None):
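        # Under lazy loading the transformer is materialized just-in-time for this run.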
        if self.config.get("lazy_load", False) or self.config.get("unload_modules", False):
            self.model = self.load_transformer()
        self.init_scheduler()
        self.model.scheduler.prepare(self.inputs["image_encoder_output"])
        latents, generator = self.run(total_steps)
        self.end_run()
        return latents, generator

    @ProfilingContext4DebugL2("Run Encoders")
    def _run_input_encoder_local_t2i(self):
        prompt = self.config["prompt_enhanced"] if self.config["use_prompt_enhancer"] else self.config["prompt"]
        text_encoder_output = self.run_text_encoder(prompt)
        torch.cuda.empty_cache()
        gc.collect()
        return {
            "text_encoder_output": text_encoder_output,
            "image_encoder_output": None,
        }

    @ProfilingContext4DebugL2("Run Encoders")
    def _run_input_encoder_local_i2i(self):
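        # i2i conditions on both the prompt and a reference image: the VL encoder
        # preprocesses the image, then the VAE encodes it into conditioning latents.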
        _, image = self.read_image_input(self.config["image_path"])
        prompt = self.config["prompt_enhanced"] if self.config["use_prompt_enhancer"] else self.config["prompt"]
        text_encoder_output = self.run_text_encoder(prompt, image)
        image_encoder_output = self.run_vae_encoder(image=text_encoder_output["preprocessed_image"])
        image_encoder_output["image_info"] = text_encoder_output["image_info"]
        torch.cuda.empty_cache()
        gc.collect()
        return {
            "text_encoder_output": text_encoder_output,
            "image_encoder_output": image_encoder_output,
        }

    @ProfilingContext4DebugL1("Run Text Encoder", recorder_mode=GET_RECORDER_MODE(), metrics_func=monitor_cli.lightx2v_run_text_encode_duration, metrics_labels=["QwenImageRunner"])
    def run_text_encoder(self, text, image=None):
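        # Qwen2.5-VL doubles as the text encoder; for i2i it also returns the
        # preprocessed reference image plus its size metadata.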
        if GET_RECORDER_MODE():
            monitor_cli.lightx2v_input_prompt_len.observe(len(text))
        text_encoder_output = {}
        if self.config["task"] == "t2i":
            prompt_embeds, prompt_embeds_mask, _, _ = self.text_encoders[0].infer([text])
            text_encoder_output["prompt_embeds"] = prompt_embeds
            text_encoder_output["prompt_embeds_mask"] = prompt_embeds_mask
        elif self.config["task"] == "i2i":
            prompt_embeds, prompt_embeds_mask, preprocessed_image, image_info = self.text_encoders[0].infer([text], image)
            text_encoder_output["prompt_embeds"] = prompt_embeds
            text_encoder_output["prompt_embeds_mask"] = prompt_embeds_mask
            text_encoder_output["preprocessed_image"] = preprocessed_image
            text_encoder_output["image_info"] = image_info
        return text_encoder_output

    @ProfilingContext4DebugL1("Run VAE Encoder", recorder_mode=GET_RECORDER_MODE(), metrics_func=monitor_cli.lightx2v_run_vae_encode_duration, metrics_labels=["QwenImageRunner"])
    def run_vae_encoder(self, image):
        image_latents = self.vae.encode_vae_image(image)
        return {"image_latents": image_latents}

    def run(self, total_steps=None):
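        """Denoising loop: scheduler pre-step, transformer forward, scheduler post-step."""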
        if total_steps is None:
            total_steps = self.model.scheduler.infer_steps
        for step_index in range(total_steps):
            logger.info(f"==> step_index: {step_index + 1} / {total_steps}")

            with ProfilingContext4DebugL1("step_pre"):
                self.model.scheduler.step_pre(step_index=step_index)

            with ProfilingContext4DebugL1("🚀 infer_main"):
                self.model.infer(self.inputs)

            with ProfilingContext4DebugL1("step_post"):
                self.model.scheduler.step_post()

            if self.progress_callback:
                self.progress_callback(((step_index + 1) / total_steps) * 100, 100)

        return self.model.scheduler.latents, self.model.scheduler.generator

    def set_target_shape(self):
        if not self.config._auto_resize:
            width, height = self.config.aspect_ratios[self.config.aspect_ratio]
        else:
            image = Image.open(self.config.image_path).convert("RGB")
            orig_width, orig_height = image.size
            # Auto-resize to roughly 1024x1024 pixels while preserving the aspect ratio.
            width, height, _ = calculate_dimensions(1024 * 1024, orig_width / orig_height)
            # Snap to the VAE compression granularity (x2 for latent packing).
            multiple_of = self.vae.vae_scale_factor * 2
            width = width // multiple_of * multiple_of
            height = height // multiple_of * multiple_of
            self.config.auto_width = width
            self.config.auto_height = height

        # VAE applies 8x compression on images but we must also account for packing which requires
        # latent height and width to be divisible by 2.
        height = 2 * (int(height) // (self.vae.vae_scale_factor * 2))
        width = 2 * (int(width) // (self.vae.vae_scale_factor * 2))
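        # The transformer consumes 2x2-packed latents, so its in_channels is 4x the
        # true latent channel count; dividing by 4 recovers the unpacked channels.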
        num_channels_latents = self.model.in_channels // 4
        self.config.target_shape = (self.config.batchsize, 1, num_channels_latents, height, width)

    def init_scheduler(self):
        self.scheduler = QwenImageScheduler(self.config)
        self.model.set_scheduler(self.scheduler)

    def get_encoder_output_i2v(self):
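        # i2v-specific hook from the DefaultRunner interface; image tasks leave it empty.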
        pass

    def run_image_encoder(self):
        pass

    @ProfilingContext4DebugL1(
        "Run VAE Decoder",
        recorder_mode=GET_RECORDER_MODE(),
        metrics_func=monitor_cli.lightx2v_run_vae_decode_duration,
        metrics_labels=["QwenImageRunner"],
    )
    def _run_vae_decoder_local(self, latents, generator):
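        # Mirrors the lazy-load pattern used for the transformer: load the VAE
        # just-in-time, decode, then free it to keep peak GPU memory low.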
        if self.config.get("lazy_load", False) or self.config.get("unload_modules", False):
            self.vae = self.load_vae()
        images = self.vae.decode(latents)
        if self.config.get("lazy_load", False) or self.config.get("unload_modules", False):
            del self.vae
            torch.cuda.empty_cache()
            gc.collect()
        return images

    def run_pipeline(self, save_image=True):
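        # Full pipeline: optional prompt enhancement -> input encoding ->
        # DiT denoising -> VAE decode.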
        if self.config["use_prompt_enhancer"]:
            self.config["prompt_enhanced"] = self.post_prompt_enhancer()

        self.inputs = self.run_input_encoder()
        self.set_target_shape()
        latents, generator = self.run_dit()

        images = self.run_vae_decoder(latents, generator)
        image = images[0]
        if save_image:
            image.save(self.config.save_result_path)

        del latents, generator
        torch.cuda.empty_cache()
        gc.collect()

        # Return (images, audio) - audio is None for default runner
        return images, None
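

# Usage sketch (hypothetical: the real entry point, config schema, and runner
# lifecycle live in lightx2v's framework; only the attribute and key names read
# above are taken from this file):
#
#     config = ...  # must provide task ("t2i"/"i2i"), prompt, image_path, save_result_path, ...
#     runner = QwenImageRunner(config)
#     runner.init_modules()              # binds run_input_encoder / run_dit / run_vae_decoder
#     images, _ = runner.run_pipeline()  # also saves images[0] to config.save_result_path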