wan_runner.py 22.3 KB
Newer Older
1
import gc
PengGao's avatar
PengGao committed
2
3
import os

helloyongyang's avatar
helloyongyang committed
4
5
import numpy as np
import torch
6
import torch.distributed as dist
PengGao's avatar
PengGao committed
7
import torchvision.transforms.functional as TF
helloyongyang's avatar
helloyongyang committed
8
from PIL import Image
PengGao's avatar
PengGao committed
9
10
11
12
13
from loguru import logger

from lightx2v.models.input_encoders.hf.t5.model import T5EncoderModel
from lightx2v.models.input_encoders.hf.xlm_roberta.model import CLIPModel
from lightx2v.models.networks.wan.lora_adapter import WanLoraWrapper
helloyongyang's avatar
helloyongyang committed
14
from lightx2v.models.networks.wan.model import WanModel
helloyongyang's avatar
helloyongyang committed
15
from lightx2v.models.runners.default_runner import DefaultRunner
gushiqiao's avatar
gushiqiao committed
16
from lightx2v.models.schedulers.wan.changing_resolution.scheduler import (
17
    WanScheduler4ChangingResolutionInterface,
gushiqiao's avatar
gushiqiao committed
18
)
19
from lightx2v.models.schedulers.wan.feature_caching.scheduler import (
20
    WanSchedulerCaching,
21
    WanSchedulerTaylorCaching,
22
)
PengGao's avatar
PengGao committed
23
from lightx2v.models.schedulers.wan.scheduler import WanScheduler
helloyongyang's avatar
helloyongyang committed
24
from lightx2v.models.video_encoders.hf.wan.vae import WanVAE
25
from lightx2v.models.video_encoders.hf.wan.vae_2_2 import Wan2_2_VAE
26
from lightx2v.models.video_encoders.hf.wan.vae_tiny import Wan2_2_VAE_tiny, WanVAE_tiny
27
from lightx2v.utils.envs import *
28
from lightx2v.utils.profiler import *
PengGao's avatar
PengGao committed
29
30
31
from lightx2v.utils.registry_factory import RUNNER_REGISTER
from lightx2v.utils.utils import *
from lightx2v.utils.utils import best_output_size, cache_video
helloyongyang's avatar
helloyongyang committed
32
33
34
35
36
37


@RUNNER_REGISTER("wan2.1")
class WanRunner(DefaultRunner):
    """Runner for Wan2.1: wires together the DiT transformer, text/image
    encoders and VAE, and drives scheduling and video saving."""

    def __init__(self, config):
        super().__init__(config)
        # VAE implementation classes and checkpoint filenames; subclasses
        # (e.g. the Wan2.2 runners) override these to swap VAE versions.
        self.vae_cls = WanVAE
        self.tiny_vae_cls = WanVAE_tiny
        self.vae_name = "Wan2.1_VAE.pth"
        self.tiny_vae_name = "taew2_1.pth"
helloyongyang's avatar
helloyongyang committed
42

43
44
45
46
47
48
    def load_transformer(self):
        """Build the Wan DiT model and optionally merge LoRA weights into it.

        Returns:
            WanModel: the (possibly LoRA-patched) transformer.
        """
        model = WanModel(
            self.config.model_path,
            self.config,
            self.init_device,
        )
        # `config.get("lora_configs") and config.lora_configs` evaluated the
        # same value twice; a single truthiness check is equivalent.
        lora_configs = self.config.get("lora_configs")
        if lora_configs:
            # LoRA weights cannot be merged into already-quantized DiT weights
            # unless the matmul config re-quantizes on the fly.
            assert not self.config.get("dit_quantized", False) or self.config.mm_config.get("weight_auto_quant", False), "lora_configs requires a non-quantized DiT or mm_config.weight_auto_quant"
            lora_wrapper = WanLoraWrapper(model)
            for lora_config in lora_configs:
                lora_path = lora_config["path"]
                strength = lora_config.get("strength", 1.0)
                lora_name = lora_wrapper.load_lora(lora_path)
                lora_wrapper.apply_lora(lora_name, strength)
                logger.info(f"Loaded LoRA: {lora_name} with strength: {strength}")
        return model

60
    def load_image_encoder(self):
        """Create the CLIP image encoder for image-conditioned tasks.

        Returns:
            CLIPModel | None: None for tasks that never consume an image
            embedding, or when `use_image_encoder` is disabled.
        """
        needs_clip = self.config.task in ["i2v", "flf2v", "animate"] and self.config.get("use_image_encoder", True)
        if not needs_clip:
            return None

        # Offload config: keep weights on CPU when offloading is requested.
        clip_offload = self.config.get("clip_cpu_offload", self.config.get("cpu_offload", False))
        clip_device = torch.device("cpu") if clip_offload else torch.device("cuda")

        # Quant config: resolve either the quantized or the original checkpoint.
        clip_quantized = self.config.get("clip_quantized", False)
        if clip_quantized:
            clip_quant_scheme = self.config.get("clip_quant_scheme", None)
            assert clip_quant_scheme is not None
            clip_model_name = f"clip-{clip_quant_scheme.split('-')[0]}.pth"
            clip_quantized_ckpt = find_torch_model_path(self.config, "clip_quantized_ckpt", clip_model_name)
            clip_original_ckpt = None
        else:
            clip_quant_scheme = None
            clip_quantized_ckpt = None
            clip_model_name = "models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth"
            clip_original_ckpt = find_torch_model_path(self.config, "clip_original_ckpt", clip_model_name)

        return CLIPModel(
            dtype=torch.float16,
            device=clip_device,
            checkpoint_path=clip_original_ckpt,
            clip_quantized=clip_quantized,
            clip_quantized_ckpt=clip_quantized_ckpt,
            quant_scheme=clip_quant_scheme,
            cpu_offload=clip_offload,
            use_31_block=self.config.get("use_31_block", True),
            load_from_rank0=self.config.get("load_from_rank0", False),
        )
helloyongyang's avatar
helloyongyang committed
97

98
    def load_text_encoder(self):
        """Create the umT5 text encoder.

        Returns:
            list: a single-element list containing the T5EncoderModel
            (kept as a list so runners can iterate uniformly).
        """
        # Offload config: default the global cpu_offload fallback to False,
        # matching the CLIP and VAE loaders.
        t5_offload = self.config.get("t5_cpu_offload", self.config.get("cpu_offload", False))
        t5_device = torch.device("cpu") if t5_offload else torch.device("cuda")

        # Quant config: resolve either the quantized or the original checkpoint;
        # the tokenizer lives next to whichever checkpoint is used.
        t5_quantized = self.config.get("t5_quantized", False)
        if t5_quantized:
            t5_quant_scheme = self.config.get("t5_quant_scheme", None)
            assert t5_quant_scheme is not None
            tmp_t5_quant_scheme = t5_quant_scheme.split("-")[0]
            t5_model_name = f"models_t5_umt5-xxl-enc-{tmp_t5_quant_scheme}.pth"
            t5_quantized_ckpt = find_torch_model_path(self.config, "t5_quantized_ckpt", t5_model_name)
            t5_original_ckpt = None
            tokenizer_path = os.path.join(os.path.dirname(t5_quantized_ckpt), "google/umt5-xxl")
        else:
            t5_quant_scheme = None
            t5_quantized_ckpt = None
            t5_model_name = "models_t5_umt5-xxl-enc-bf16.pth"
            t5_original_ckpt = find_torch_model_path(self.config, "t5_original_ckpt", t5_model_name)
            tokenizer_path = os.path.join(os.path.dirname(t5_original_ckpt), "google/umt5-xxl")

        text_encoder = T5EncoderModel(
            text_len=self.config["text_len"],
            dtype=torch.bfloat16,
            device=t5_device,
            checkpoint_path=t5_original_ckpt,
            tokenizer_path=tokenizer_path,
            shard_fn=None,
            cpu_offload=t5_offload,
            offload_granularity=self.config.get("t5_offload_granularity", "model"),  # support ['model', 'block']
            t5_quantized=t5_quantized,
            t5_quantized_ckpt=t5_quantized_ckpt,
            quant_scheme=t5_quant_scheme,
            load_from_rank0=self.config.get("load_from_rank0", False),
        )
        return [text_encoder]
helloyongyang's avatar
helloyongyang committed
139

140
    def load_vae_encoder(self):
        """Create the VAE encoder, or None for tasks that never encode images.

        Returns early for text-only tasks so the checkpoint path lookup
        (`find_torch_model_path`) is not performed for an unused encoder.
        """
        if self.config.task not in ["i2v", "flf2v", "animate", "vace"]:
            return None

        # Offload config
        vae_offload = self.config.get("vae_cpu_offload", self.config.get("cpu_offload"))
        vae_device = torch.device("cpu") if vae_offload else torch.device("cuda")

        return self.vae_cls(
            vae_pth=find_torch_model_path(self.config, "vae_pth", self.vae_name),
            device=vae_device,
            parallel=self.config.parallel,
            use_tiling=self.config.get("use_tiling_vae", False),
            cpu_offload=vae_offload,
            dtype=GET_DTYPE(),
            load_from_rank0=self.config.get("load_from_rank0", False),
        )
161
162

    def load_vae_decoder(self):
        """Create the VAE decoder: the tiny distilled VAE when enabled,
        otherwise the full VAE.

        The full-VAE config (including its checkpoint path lookup) is only
        built when it is actually used, instead of unconditionally.
        """
        if self.config.get("use_tiny_vae", False):
            tiny_vae_path = find_torch_model_path(self.config, "tiny_vae_path", self.tiny_vae_name)
            return self.tiny_vae_cls(vae_pth=tiny_vae_path, device=self.init_device, need_scaled=self.config.get("need_scaled", False)).to("cuda")

        # Offload config
        vae_offload = self.config.get("vae_cpu_offload", self.config.get("cpu_offload"))
        vae_device = torch.device("cpu") if vae_offload else torch.device("cuda")

        return self.vae_cls(
            vae_pth=find_torch_model_path(self.config, "vae_pth", self.vae_name),
            device=vae_device,
            parallel=self.config.parallel,
            use_tiling=self.config.get("use_tiling_vae", False),
            cpu_offload=vae_offload,
            dtype=GET_DTYPE(),
            load_from_rank0=self.config.get("load_from_rank0", False),
        )
helloyongyang's avatar
helloyongyang committed
185

186
    def load_vae(self):
        """Load the VAE pair, sharing one instance for encode and decode
        when the encoder exists and the tiny decoder is not requested."""
        vae_encoder = self.load_vae_encoder()
        can_share = vae_encoder is not None and not self.config.get("use_tiny_vae", False)
        vae_decoder = vae_encoder if can_share else self.load_vae_decoder()
        return vae_encoder, vae_decoder
helloyongyang's avatar
helloyongyang committed
193
194

    def init_scheduler(self):
        """Instantiate the scheduler matching the configured feature-caching
        mode, optionally wrapped for changing-resolution sampling."""
        caching_mode = self.config.feature_caching
        if caching_mode == "NoCaching":
            scheduler_class = WanScheduler
        elif caching_mode == "TaylorSeer":
            scheduler_class = WanSchedulerTaylorCaching
        elif caching_mode in ["Tea", "Ada", "Custom", "FirstBlock", "DualBlock", "DynamicBlock", "Mag"]:
            scheduler_class = WanSchedulerCaching
        else:
            raise NotImplementedError(f"Unsupported feature_caching type: {caching_mode}")

        if self.config.get("changing_resolution", False):
            # The interface re-instantiates the scheduler per resolution stage.
            self.scheduler = WanScheduler4ChangingResolutionInterface(scheduler_class, self.config)
        else:
            self.scheduler = scheduler_class(self.config)
helloyongyang's avatar
helloyongyang committed
208

gushiqiao's avatar
gushiqiao committed
209
    def run_text_encoder(self, text, img=None):
        """Encode the prompt (and negative prompt) with T5, zero-padding each
        sequence up to config["text_len"].

        Under cfg parallelism each rank computes only the context it needs:
        rank 0 the positive prompt, other ranks the negative prompt.
        """
        lazy = self.config.get("lazy_load", False) or self.config.get("unload_modules", False)
        if lazy:
            self.text_encoders = self.load_text_encoder()
        n_prompt = self.config.get("negative_prompt", "")

        def _encode_padded(prompt):
            # Pad every returned sequence with zeros to the configured length.
            seqs = self.text_encoders[0].infer([prompt])
            return torch.stack([torch.cat([u, u.new_zeros(self.config["text_len"] - u.size(0), u.size(1))]) for u in seqs])

        if self.config["cfg_parallel"]:
            cfg_p_group = self.config["device_mesh"].get_group(mesh_dim="cfg_p")
            if dist.get_rank(cfg_p_group) == 0:
                text_encoder_output = {"context": _encode_padded(text)}
            else:
                text_encoder_output = {"context_null": _encode_padded(n_prompt)}
        else:
            text_encoder_output = {
                "context": _encode_padded(text),
                "context_null": _encode_padded(n_prompt),
            }

        if lazy:
            # Free the encoder immediately when running in low-memory mode.
            del self.text_encoders[0]
            torch.cuda.empty_cache()
            gc.collect()

        return text_encoder_output

gushiqiao's avatar
gushiqiao committed
242
    def run_image_encoder(self, first_frame, last_frame=None):
        """Run the CLIP visual encoder over the first (and optional last) frame."""
        lazy = self.config.get("lazy_load", False) or self.config.get("unload_modules", False)
        if lazy:
            self.image_encoder = self.load_image_encoder()

        frames = [first_frame] if last_frame is None else [first_frame, last_frame]
        clip_encoder_out = self.image_encoder.visual(frames).squeeze(0).to(GET_DTYPE())

        if lazy:
            # Drop the encoder right away in low-memory mode.
            del self.image_encoder
            torch.cuda.empty_cache()
            gc.collect()
        return clip_encoder_out

gushiqiao's avatar
gushiqiao committed
255
    def run_vae_encoder(self, first_frame, last_frame=None):
        """VAE-encode the conditioning frame(s), deriving the latent H/W from
        the input aspect ratio and the configured target area.

        Args:
            first_frame: image tensor; spatial dims are read from shape[2:]
                (assumes a (..., H, W) layout — confirm with callers).
            last_frame: optional last frame (flf2v-style conditioning);
                incompatible with changing_resolution mode.

        Returns:
            A single VAE-encoder output, or (changing_resolution mode) a list
            of outputs, one per intermediate resolution plus the final one.
        """
        h, w = first_frame.shape[2:]
        aspect_ratio = h / w
        max_area = self.config.target_height * self.config.target_width
        # Latent size: preserve target area and input aspect ratio, floored to
        # the VAE stride and snapped to multiples of the patch size.
        lat_h = round(np.sqrt(max_area * aspect_ratio) // self.config.vae_stride[1] // self.config.patch_size[1] * self.config.patch_size[1])
        lat_w = round(np.sqrt(max_area / aspect_ratio) // self.config.vae_stride[2] // self.config.patch_size[2] * self.config.patch_size[2])
        if self.config.get("changing_resolution", False):
            assert last_frame is None
            self.config.lat_h, self.config.lat_w = lat_h, lat_w
            # Encode once per intermediate resolution (rounded down to even),
            # then once more at the final resolution.
            vae_encode_out_list = []
            for i in range(len(self.config["resolution_rate"])):
                lat_h, lat_w = (
                    int(self.config.lat_h * self.config.resolution_rate[i]) // 2 * 2,
                    int(self.config.lat_w * self.config.resolution_rate[i]) // 2 * 2,
                )
                vae_encode_out_list.append(self.get_vae_encoder_output(first_frame, lat_h, lat_w))
            vae_encode_out_list.append(self.get_vae_encoder_output(first_frame, self.config.lat_h, self.config.lat_w))
            return vae_encode_out_list
        else:
            if last_frame is not None:
                first_frame_size = first_frame.shape[2:]
                last_frame_size = last_frame.shape[2:]
                if first_frame_size != last_frame_size:
                    # Scale the last frame up so it covers the first frame's
                    # size, then center-crop to match exactly.
                    last_frame_resize_ratio = max(first_frame_size[0] / last_frame_size[0], first_frame_size[1] / last_frame_size[1])
                    last_frame_size = [
                        round(last_frame_size[0] * last_frame_resize_ratio),
                        round(last_frame_size[1] * last_frame_resize_ratio),
                    ]
                    last_frame = TF.center_crop(last_frame, last_frame_size)
            # Publish the chosen latent size for set_target_shape() to consume.
            self.config.lat_h, self.config.lat_w = lat_h, lat_w
            vae_encoder_out = self.get_vae_encoder_output(first_frame, lat_h, lat_w, last_frame)
            return vae_encoder_out
288

gushiqiao's avatar
gushiqiao committed
289
    def get_vae_encoder_output(self, first_frame, lat_h, lat_w, last_frame=None):
        """Build masked VAE conditioning latents for i2v/flf2v.

        Constructs a temporal mask marking which frames are known (first, and
        optionally last), VAE-encodes a video made of the known frame(s) plus
        zero padding, and concatenates mask and latents along channels.

        Args:
            first_frame: first conditioning frame tensor.
            lat_h, lat_w: target latent height/width.
            last_frame: optional last conditioning frame.

        Returns:
            Tensor: mask channels concatenated with the VAE latents, cast to
            the runtime dtype.
        """
        # Pixel-space size corresponding to the requested latent size.
        h = lat_h * self.config.vae_stride[1]
        w = lat_w * self.config.vae_stride[2]
        msk = torch.ones(
            1,
            self.config.target_video_length,
            lat_h,
            lat_w,
            device=torch.device("cuda"),
        )
        # Zero out unknown frames: with a last frame both ends stay 1,
        # otherwise only the first frame does.
        if last_frame is not None:
            msk[:, 1:-1] = 0
        else:
            msk[:, 1:] = 0

        # Repeat the first-frame mask 4x along time, then fold time into
        # groups of 4 to align with the VAE's temporal compression.
        msk = torch.concat([torch.repeat_interleave(msk[:, 0:1], repeats=4, dim=1), msk[:, 1:]], dim=1)
        msk = msk.view(1, msk.shape[1] // 4, 4, lat_h, lat_w)
        msk = msk.transpose(1, 2)[0]

        if self.config.get("lazy_load", False) or self.config.get("unload_modules", False):
            self.vae_encoder = self.load_vae_encoder()

        # Assemble the conditioning video: known frame(s) resized to (h, w),
        # with zeros for every frame to be generated.
        if last_frame is not None:
            vae_input = torch.concat(
                [
                    torch.nn.functional.interpolate(first_frame.cpu(), size=(h, w), mode="bicubic").transpose(0, 1),
                    torch.zeros(3, self.config.target_video_length - 2, h, w),
                    torch.nn.functional.interpolate(last_frame.cpu(), size=(h, w), mode="bicubic").transpose(0, 1),
                ],
                dim=1,
            ).cuda()
        else:
            vae_input = torch.concat(
                [
                    torch.nn.functional.interpolate(first_frame.cpu(), size=(h, w), mode="bicubic").transpose(0, 1),
                    torch.zeros(3, self.config.target_video_length - 1, h, w),
                ],
                dim=1,
            ).cuda()

        vae_encoder_out = self.vae_encoder.encode(vae_input.unsqueeze(0).to(GET_DTYPE()))

        if self.config.get("lazy_load", False) or self.config.get("unload_modules", False):
            # Low-memory mode: free the encoder as soon as encoding is done.
            del self.vae_encoder
            torch.cuda.empty_cache()
            gc.collect()
        # Prepend the mask channels to the latents.
        vae_encoder_out = torch.concat([msk, vae_encoder_out]).to(GET_DTYPE())
        return vae_encoder_out
337

gushiqiao's avatar
gushiqiao committed
338
    def get_encoder_output_i2v(self, clip_encoder_out, vae_encoder_out, text_encoder_output, img=None):
        """Bundle the text- and image-encoder outputs into the input dict
        consumed by the DiT inference loop."""
        return {
            "text_encoder_output": text_encoder_output,
            "image_encoder_output": {
                "clip_encoder_out": clip_encoder_out,
                "vae_encoder_out": vae_encoder_out,
            },
        }
helloyongyang's avatar
helloyongyang committed
347
348

    def set_target_shape(self):
        """Set config.target_shape = (C, T, H, W) in latent space for the task."""
        num_channels_latents = self.config.get("num_channels_latents", 16)
        # Temporal latent length after VAE temporal striding.
        temporal_len = (self.config.target_video_length - 1) // self.config.vae_stride[0] + 1
        if self.config.task in ["i2v", "flf2v", "animate"]:
            # Image-conditioned tasks reuse the latent size derived from the
            # conditioning frame (set by run_vae_encoder).
            self.config.target_shape = (
                num_channels_latents,
                temporal_len,
                self.config.lat_h,
                self.config.lat_w,
            )
        elif self.config.task == "t2v":
            self.config.target_shape = (
                num_channels_latents,
                temporal_len,
                int(self.config.target_height) // self.config.vae_stride[1],
                int(self.config.target_width) // self.config.vae_stride[2],
            )
364
365

    def save_video_func(self, images):
        """Write the decoded frames to config.save_video_path, denormalizing
        from [-1, 1]."""
        save_kwargs = dict(
            tensor=images,
            save_file=self.config.save_video_path,
            fps=self.config.get("fps", 16),
            nrow=1,
            normalize=True,
            value_range=(-1, 1),
        )
        cache_video(**save_kwargs)
helloyongyang's avatar
helloyongyang committed
374
375
376
377
378
379
380
381
382
383
384
385


class MultiModelStruct:
    """A pair of Wan DiT models (high-noise / low-noise) switched by timestep.

    Steps whose timestep is at or above `boundary * num_train_timesteps` use
    model 0 (high noise); later steps use model 1 (low noise). Under
    model-granularity CPU offload, only the active model is kept on GPU.
    """

    def __init__(self, model_list, config, boundary=0.875, num_train_timesteps=1000):
        self.model = model_list  # [high_noise_model, low_noise_model]
        assert len(self.model) == 2, "MultiModelStruct only supports 2 models now."
        self.config = config
        self.boundary = boundary
        self.boundary_timestep = self.boundary * num_train_timesteps
        self.cur_model_index = -1  # -1: no model selected yet
        logger.info(f"boundary: {self.boundary}, boundary_timestep: {self.boundary_timestep}")

    @property
    def device(self):
        return self.model[self.cur_model_index].device

    def set_scheduler(self, shared_scheduler):
        # Both sub-models share a single scheduler instance.
        self.scheduler = shared_scheduler
        for model in self.model:
            model.set_scheduler(shared_scheduler)

    def infer(self, inputs):
        self.get_current_model_index()
        self.model[self.cur_model_index].infer(inputs)

    @ProfilingContext4DebugL2("Switch models in infer_main costs")  # fixed "Swtich" typo
    def get_current_model_index(self):
        """Select the high- or low-noise model for the current scheduler step."""
        if self.scheduler.timesteps[self.scheduler.step_index] >= self.boundary_timestep:
            logger.info(f"using - HIGH - noise model at step_index {self.scheduler.step_index + 1}")
            self._activate(0)
        else:
            logger.info(f"using - LOW - noise model at step_index {self.scheduler.step_index + 1}")
            self._activate(1)

    def _activate(self, model_index):
        # Apply the per-phase guidance scale, then (under model-granularity
        # CPU offload) swap the inactive model to CPU and the active one to GPU.
        self.scheduler.sample_guide_scale = self.config.sample_guide_scale[model_index]
        if self.config.get("cpu_offload", False) and self.config.get("offload_granularity", "block") == "model":
            other_index = 1 - model_index
            if self.cur_model_index == -1:
                self.to_cuda(model_index=model_index)
            elif self.cur_model_index == other_index:
                self.offload_cpu(model_index=other_index)
                self.to_cuda(model_index=model_index)
        self.cur_model_index = model_index

    def offload_cpu(self, model_index):
        self.model[model_index].to_cpu()

    def to_cuda(self, model_index):
        self.model[model_index].to_cuda()


@RUNNER_REGISTER("wan2.2_moe")
class Wan22MoeRunner(WanRunner):
    """Wan2.2 MoE runner: two DiTs (high/low noise) behind MultiModelStruct."""

    def __init__(self, config):
        super().__init__(config)

    def load_transformer(self):
        """Build both noise-phase DiTs and route LoRA files to them by
        filename prefix ("high*" / "low*").

        Returns:
            MultiModelStruct wrapping [high_noise_model, low_noise_model].
        """
        # encoder -> high_noise_model -> low_noise_model -> vae -> video_output
        high_noise_model = WanModel(
            os.path.join(self.config.model_path, "high_noise_model"),
            self.config,
            self.init_device,
        )
        low_noise_model = WanModel(
            os.path.join(self.config.model_path, "low_noise_model"),
            self.config,
            self.init_device,
        )

        lora_configs = self.config.get("lora_configs")
        if lora_configs:
            # LoRA cannot merge into pre-quantized weights without on-the-fly
            # re-quantization.
            assert not self.config.get("dit_quantized", False) or self.config.mm_config.get("weight_auto_quant", False)
            for lora_config in lora_configs:
                lora_path = lora_config["path"]
                strength = lora_config.get("strength", 1.0)
                base_name = os.path.basename(lora_path)
                # Route by filename prefix; the load/apply path is identical
                # for both models (previously duplicated per branch).
                if base_name.startswith("high"):
                    target_model = high_noise_model
                elif base_name.startswith("low"):
                    target_model = low_noise_model
                else:
                    raise ValueError(f"Unsupported LoRA path: {lora_path}")
                lora_wrapper = WanLoraWrapper(target_model)
                lora_name = lora_wrapper.load_lora(lora_path)
                lora_wrapper.apply_lora(lora_name, strength)
                logger.info(f"Loaded LoRA: {lora_name} with strength: {strength}")

        return MultiModelStruct([high_noise_model, low_noise_model], self.config, self.config.boundary)
468
469
470
471
472
473


@RUNNER_REGISTER("wan2.2")
class Wan22DenseRunner(WanRunner):
    """Wan2.2 dense runner: single DiT paired with the Wan2.2 VAE."""

    def __init__(self, config):
        super().__init__(config)
        self.vae_encoder_need_img_original = True
        # Swap in the Wan2.2 VAE classes and checkpoint names.
        self.vae_cls = Wan2_2_VAE
        self.tiny_vae_cls = Wan2_2_VAE_tiny
        self.vae_name = "Wan2.2_VAE.pth"
        self.tiny_vae_name = "taew2_2.pth"

    def run_vae_encoder(self, img):
        """Resize/center-crop a PIL image to the best output size for the
        configured target area, then VAE-encode it."""
        max_area = self.config.target_height * self.config.target_width
        ih, iw = img.height, img.width
        dh = self.config.patch_size[1] * self.config.vae_stride[1]
        dw = self.config.patch_size[2] * self.config.vae_stride[2]
        ow, oh = best_output_size(iw, ih, dw, dh, max_area)

        # Scale so the image fully covers the output size...
        scale = max(ow / iw, oh / ih)
        img = img.resize((round(iw * scale), round(ih * scale)), Image.LANCZOS)

        # ...then center-crop to exactly (ow, oh).
        x1 = (img.width - ow) // 2
        y1 = (img.height - oh) // 2
        img = img.crop((x1, y1, x1 + ow, y1 + oh))
        assert img.width == ow and img.height == oh

        # To tensor in [-1, 1], with a singleton time axis, on GPU.
        img = TF.to_tensor(img).sub_(0.5).div_(0.5).cuda().unsqueeze(1)
        vae_encoder_out = self.get_vae_encoder_output(img)
        self.config.lat_w, self.config.lat_h = ow // self.config.vae_stride[2], oh // self.config.vae_stride[1]

        return vae_encoder_out

    def get_vae_encoder_output(self, img):
        """Encode a single image tensor; unlike the 2.1 path this adds no
        mask channels."""
        z = self.vae_encoder.encode(img.to(GET_DTYPE()))
        return z