import gc
import json
import os
import warnings
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple

import numpy as np
import torch
import torch.distributed as dist
import torch.nn.functional as F
import torchaudio as ta
import torchvision.transforms.functional as TF
from PIL import Image
from einops import rearrange
from loguru import logger
from torchvision.transforms import InterpolationMode
from torchvision.transforms.functional import resize

from lightx2v.deploy.common.va_reader import VAReader
from lightx2v.deploy.common.va_recorder import VARecorder
from lightx2v.deploy.common.va_recorder_x264 import X264VARecorder
from lightx2v.models.input_encoders.hf.seko_audio.audio_adapter import AudioAdapter
from lightx2v.models.input_encoders.hf.seko_audio.audio_encoder import SekoAudioEncoderModel
from lightx2v.models.networks.wan.audio_model import WanAudioModel
from lightx2v.models.networks.wan.lora_adapter import WanLoraWrapper
from lightx2v.models.runners.wan.wan_runner import WanRunner
from lightx2v.models.schedulers.wan.audio.scheduler import EulerScheduler
from lightx2v.models.video_encoders.hf.wan.vae_2_2 import Wan2_2_VAE
from lightx2v.server.metrics import monitor_cli
from lightx2v.utils.envs import *
from lightx2v.utils.profiler import *
from lightx2v.utils.registry_factory import RUNNER_REGISTER
from lightx2v.utils.utils import find_torch_model_path, load_weights, vae_to_comfyui_image_inplace

warnings.filterwarnings("ignore", category=UserWarning, module="torchaudio")
warnings.filterwarnings("ignore", category=UserWarning, module="torchvision.io")

def get_optimal_patched_size_with_sp(patched_h, patched_w, sp_size):
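    """Adjust (patched_h, patched_w) so the patch grid splits evenly across sp_size
    sequence-parallel ranks: halve whichever dimension is even (preferring height,
    falling back to the larger one with floor division) once per factor of 2 in sp_size.
    """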
    assert sp_size > 0 and (sp_size & (sp_size - 1)) == 0, "sp_size must be a power of 2"

    h_ratio, w_ratio = 1, 1
    while sp_size != 1:
        sp_size //= 2
        if patched_h % 2 == 0:
            patched_h //= 2
            h_ratio *= 2
        elif patched_w % 2 == 0:
            patched_w //= 2
            w_ratio *= 2
        else:
            if patched_h > patched_w:
                patched_h //= 2
                h_ratio *= 2
            else:
                patched_w //= 2
                w_ratio *= 2
    return patched_h * h_ratio, patched_w * w_ratio


def get_crop_bbox(ori_h, ori_w, tgt_h, tgt_w):
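    """Return (y0, y1, x0, x1) of the largest centered crop of an (ori_h, ori_w) image
    that matches the target aspect ratio tgt_h / tgt_w."""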
    tgt_ar = tgt_h / tgt_w
    ori_ar = ori_h / ori_w
    if abs(ori_ar - tgt_ar) < 0.01:
        return 0, ori_h, 0, ori_w
    if ori_ar > tgt_ar:
        crop_h = int(tgt_ar * ori_w)
        y0 = (ori_h - crop_h) // 2
        y1 = y0 + crop_h
        return y0, y1, 0, ori_w
    else:
        crop_w = int(ori_h / tgt_ar)
        x0 = (ori_w - crop_w) // 2
        x1 = x0 + crop_w
        return 0, ori_h, x0, x1


def isotropic_crop_resize(frames: torch.Tensor, size: tuple):
    """
    frames: (C, H, W) or (T, C, H, W) or (N, C, H, W)
    size: (H, W)
    """
    original_shape = frames.shape

    if len(frames.shape) == 3:
        frames = frames.unsqueeze(0)

    ori_h, ori_w = frames.shape[2:]
    h, w = size
    y0, y1, x0, x1 = get_crop_bbox(ori_h, ori_w, h, w)
    cropped_frames = frames[:, :, y0:y1, x0:x1]
    resized_frames = resize(cropped_frames, [h, w], InterpolationMode.BICUBIC, antialias=True)

    if len(original_shape) == 3:
        resized_frames = resized_frames.squeeze(0)

    return resized_frames


def fixed_shape_resize(img, target_height, target_width):
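    """Center-crop img to the target aspect ratio, then resize to (target_height, target_width);
    returns (resized_img, h, w)."""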
    orig_height, orig_width = img.shape[-2:]

    target_ratio = target_height / target_width
    orig_ratio = orig_height / orig_width

    if orig_ratio > target_ratio:
        crop_width = orig_width
        crop_height = int(crop_width * target_ratio)
    else:
        crop_height = orig_height
        crop_width = int(crop_height / target_ratio)

    cropped_img = TF.center_crop(img, [crop_height, crop_width])

    resized_img = TF.resize(cropped_img, [target_height, target_width], antialias=True)

    h, w = resized_img.shape[-2:]
    return resized_img, h, w


def resize_image(img, resize_mode="adaptive", bucket_shape=None, fixed_area=None, fixed_shape=None):
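    """Crop/resize img for generation and return (resized_img, target_h, target_w).

    resize_mode selects the strategy: "fixed_shape" center-crops to fixed_shape;
    "adaptive", "fixed_min_area" and "fixed_max_area" pick a bucket resolution close
    to the image aspect ratio; "keep_ratio_fixed_area" and "fixed_min_side" derive
    the size from fixed_area ("480p" or "720p").
    """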
    assert resize_mode in ["adaptive", "keep_ratio_fixed_area", "fixed_min_area", "fixed_max_area", "fixed_shape", "fixed_min_side"]

    if resize_mode == "fixed_shape":
        assert fixed_shape is not None
        logger.info(f"[wan_audio] fixed_shape_resize fixed_height: {fixed_shape[0]}, fixed_width: {fixed_shape[1]}")
        return fixed_shape_resize(img, fixed_shape[0], fixed_shape[1])

    if bucket_shape is not None:
        """
        "adaptive_shape": {
            "0.667": [[480, 832], [544, 960], [720, 1280]],
            "1.500": [[832, 480], [960, 544], [1280, 720]],
            "1.000": [[480, 480], [576, 576], [704, 704], [960, 960]]
        }
        """
        bucket_config = {}
        for ratio, resolutions in bucket_shape.items():
            bucket_config[float(ratio)] = np.array(resolutions, dtype=np.int64)
        # logger.info(f"[wan_audio] use custom bucket_shape: {bucket_config}")
    else:
        bucket_config = {
            0.667: np.array([[480, 832], [544, 960], [720, 1280]], dtype=np.int64),
            1.500: np.array([[832, 480], [960, 544], [1280, 720]], dtype=np.int64),
            1.000: np.array([[480, 480], [576, 576], [704, 704], [960, 960]], dtype=np.int64),
        }
        # logger.info(f"[wan_audio] use default bucket_shape: {bucket_config}")

    ori_height = img.shape[-2]
    ori_width = img.shape[-1]
    ori_ratio = ori_height / ori_width

    if resize_mode == "adaptive":
        aspect_ratios = np.array(list(bucket_config.keys()))
        closest_aspect_idx = np.argmin(np.abs(aspect_ratios - ori_ratio))
        closest_ratio = aspect_ratios[closest_aspect_idx]
        if ori_ratio < 1.0:
            target_h, target_w = 480, 832
        elif ori_ratio == 1.0:
            target_h, target_w = 480, 480
        else:
            target_h, target_w = 832, 480
        # Pick the largest bucket whose area the original image still covers
        for resolution in bucket_config[closest_ratio]:
            if ori_height * ori_width >= resolution[0] * resolution[1]:
                target_h, target_w = resolution
    elif resize_mode == "keep_ratio_fixed_area":
        assert fixed_area in ["480p", "720p"], f"fixed_area must be in ['480p', '720p'], but got {fixed_area}, please set fixed_area in config."
        fixed_area = 480 * 832 if fixed_area == "480p" else 720 * 1280
        target_h = round(np.sqrt(fixed_area * ori_ratio))
        target_w = round(np.sqrt(fixed_area / ori_ratio))
    elif resize_mode == "fixed_min_area":
        aspect_ratios = np.array(list(bucket_config.keys()))
        closest_aspect_idx = np.argmin(np.abs(aspect_ratios - ori_ratio))
        closest_ratio = aspect_ratios[closest_aspect_idx]
        target_h, target_w = bucket_config[closest_ratio][0]
    elif resize_mode == "fixed_min_side":
        assert fixed_area in ["480p", "720p"], f"fixed_min_side mode requires fixed_area to be '480p' or '720p', got {fixed_area}"

        min_side = 720 if fixed_area == "720p" else 480
        if ori_ratio < 1.0:
            target_h = min_side
            target_w = round(target_h / ori_ratio)
        else:
            target_w = min_side
            target_h = round(target_w * ori_ratio)
    elif resize_mode == "fixed_max_area":
        aspect_ratios = np.array(list(bucket_config.keys()))
        closest_aspect_idx = np.argmin(np.abs(aspect_ratios - ori_ratio))
        closest_ratio = aspect_ratios[closest_aspect_idx]
        target_h, target_w = bucket_config[closest_ratio][-1]

    cropped_img = isotropic_crop_resize(img, (target_h, target_w))
    return cropped_img, target_h, target_w


@dataclass
class AudioSegment:
    """Data class for audio segment information"""

    audio_array: torch.Tensor
    start_frame: int
    end_frame: int


class FramePreprocessorTorchVersion:
    """Handles frame preprocessing including noise and masking"""

    def __init__(self, noise_mean: float = -3.0, noise_std: float = 0.5, mask_rate: float = 0.1):
        self.noise_mean = noise_mean
        self.noise_std = noise_std
        self.mask_rate = mask_rate

    def add_noise(self, frames: torch.Tensor, generator: Optional[torch.Generator] = None) -> torch.Tensor:
        """Add noise to frames"""
        device = frames.device
        shape = frames.shape
        bs = 1 if len(shape) == 4 else shape[0]

        # Generate sigma values on the same device
        sigma = torch.normal(mean=self.noise_mean, std=self.noise_std, size=(bs,), device=device, generator=generator)
        sigma = torch.exp(sigma)

        for _ in range(1, len(shape)):
            sigma = sigma.unsqueeze(-1)

        # Generate noise on the same device
        noise = torch.randn(*shape, device=device, generator=generator) * sigma
        return frames + noise

    def add_mask(self, frames: torch.Tensor, generator: Optional[torch.Generator] = None) -> torch.Tensor:
        """Add mask to frames"""
        device = frames.device
        h, w = frames.shape[-2:]

        # Generate mask on the same device
        mask = torch.rand(h, w, device=device, generator=generator) > self.mask_rate
        return frames * mask

    def process_prev_frames(self, frames: torch.Tensor) -> torch.Tensor:
        """Process previous frames with noise and masking"""
        frames = self.add_noise(frames, torch.Generator(device=frames.device))
        frames = self.add_mask(frames, torch.Generator(device=frames.device))
        return frames


class AudioProcessor:
    """Handles audio loading and segmentation"""

    def __init__(self, audio_sr: int = 16000, target_fps: int = 16):
        self.audio_sr = audio_sr
        self.target_fps = target_fps
        self.audio_frame_rate = audio_sr // target_fps

    def load_audio(self, audio_path: str):
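        """Load an audio file, downmix to mono, and resample to self.audio_sr."""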
        audio_array, ori_sr = ta.load(audio_path)
        audio_array = ta.functional.resample(audio_array.mean(0), orig_freq=ori_sr, new_freq=self.audio_sr)
        return audio_array

    def load_multi_person_audio(self, audio_paths: List[str]):
        audio_arrays = []
        max_len = 0

        for audio_path in audio_paths:
            audio_array = self.load_audio(audio_path)
            audio_arrays.append(audio_array)
            max_len = max(max_len, audio_array.numel())

        num_files = len(audio_arrays)
        padded = torch.zeros(num_files, max_len, dtype=torch.float32)

        for i, arr in enumerate(audio_arrays):
            length = arr.numel()
            padded[i, :length] = arr

        return padded

    def get_audio_range(self, start_frame: int, end_frame: int) -> Tuple[int, int]:
        """Calculate audio range for given frame range"""
        return round(start_frame * self.audio_frame_rate), round(end_frame * self.audio_frame_rate)

    def segment_audio(self, audio_array: torch.Tensor, expected_frames: int, max_num_frames: int, prev_frame_length: int = 5) -> List[AudioSegment]:
        """
        Segment audio based on frame requirements
        audio_array is (N, T) tensor
        """
        segments = []
        segments_idx = self.init_segments_idx(expected_frames, max_num_frames, prev_frame_length)

        audio_start, audio_end = self.get_audio_range(0, expected_frames)
        audio_array_ori = audio_array[:, audio_start:audio_end]

        for idx, (start_idx, end_idx) in enumerate(segments_idx):
            audio_start, audio_end = self.get_audio_range(start_idx, end_idx)
            audio_array = audio_array_ori[:, audio_start:audio_end]

            if idx < len(segments_idx) - 1:
                end_idx = segments_idx[idx + 1][0]
            else:  # for last segments
                if audio_array.shape[1] < audio_end - audio_start:
                    padding_len = audio_end - audio_start - audio_array.shape[1]
                    audio_array = F.pad(audio_array, (0, padding_len))
                    # Adjust end_idx to account for the frames added by padding
                    end_idx = end_idx - padding_len // self.audio_frame_rate

            segments.append(AudioSegment(audio_array, start_idx, end_idx))
        del audio_array, audio_array_ori
        return segments

    def init_segments_idx(self, total_frame: int, clip_frame: int = 81, overlap_frame: int = 5) -> list[tuple[int, int]]:
        """Initialize segment indices with overlap"""
        start_end_list = []
        min_frame = clip_frame
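        # Illustrative example (values assumed for this comment, not from the config):
        # total_frame=160, clip_frame=81, overlap_frame=5 yields
        # [(0, 81), (76, 157), (152, 233)]; the last segment may run past total_frame
        # and the missing audio is zero-padded later in segment_audio.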
        for start in range(0, total_frame, clip_frame - overlap_frame):
            is_last = start + clip_frame >= total_frame
            end = min(start + clip_frame, total_frame)
            if end - start < min_frame:
                end = start + min_frame
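            # Keep each clip length of the form 4k + 1 so it matches the latent
            # temporal packing used elsewhere (4 pixel frames per latent frame).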
            if ((end - start) - 1) % 4 != 0:
                end = start + (((end - start) - 1) // 4) * 4 + 1
            start_end_list.append((start, end))
            if is_last:
                break
        return start_end_list


@RUNNER_REGISTER("seko_talk")
class WanAudioRunner(WanRunner):  # type:ignore
    def __init__(self, config):
        super().__init__(config)
        self.prev_frame_length = self.config.get("prev_frame_length", 5)
        self.frame_preprocessor = FramePreprocessorTorchVersion()

    def init_scheduler(self):
        """Initialize consistency model scheduler"""
        self.scheduler = EulerScheduler(self.config)

    def read_audio_input(self, audio_path):
        """Read audio input - handles both single and multi-person scenarios"""
        audio_sr = self.config.get("audio_sr", 16000)
        target_fps = self.config.get("target_fps", 16)
        self._audio_processor = AudioProcessor(audio_sr, target_fps)

        if not isinstance(audio_path, str):
            return [], 0, None, 0

        # Get audio files from person objects or legacy format
        audio_files, mask_files = self.get_audio_files_from_audio_path(audio_path)

        # Load audio based on single or multi-person mode
        if len(audio_files) == 1:
            audio_array = self._audio_processor.load_audio(audio_files[0])
            audio_array = audio_array.unsqueeze(0)  # Add batch dimension for consistency
        else:
            audio_array = self._audio_processor.load_multi_person_audio(audio_files)

        video_duration = self.config.get("video_duration", 5)
        audio_len = int(audio_array.shape[1] / audio_sr * target_fps)
        if GET_RECORDER_MODE():
            monitor_cli.lightx2v_input_audio_len.observe(audio_len)

        expected_frames = min(max(1, int(video_duration * target_fps)), audio_len)

        # Segment audio
        audio_segments = self._audio_processor.segment_audio(audio_array, expected_frames, self.config.get("target_video_length", 81), self.prev_frame_length)

        # Mask latent for multi-person s2v
        if mask_files is not None:
            mask_latents = [self.process_single_mask(mask_file) for mask_file in mask_files]
            mask_latents = torch.cat(mask_latents, dim=0)
        else:
            mask_latents = None

        return audio_segments, expected_frames, mask_latents, len(audio_files)

    def get_audio_files_from_audio_path(self, audio_path):
        if os.path.isdir(audio_path):
            audio_files = []
            mask_files = []
            logger.info(f"audio_path is a directory, loading config.json from {audio_path}")
            audio_config_path = os.path.join(audio_path, "config.json")
            assert os.path.exists(audio_config_path), "config.json not found in audio_path"
            with open(audio_config_path, "r") as f:
                audio_config = json.load(f)
            for talk_object in audio_config["talk_objects"]:
                audio_files.append(os.path.join(audio_path, talk_object["audio"]))
                mask_files.append(os.path.join(audio_path, talk_object["mask"]))
        else:
            logger.info(f"audio_path is a file without mask: {audio_path}")
            audio_files = [audio_path]
            mask_files = None

        return audio_files, mask_files

    def process_single_mask(self, mask_file):
        mask_img = Image.open(mask_file).convert("RGB")
        mask_img = TF.to_tensor(mask_img).sub_(0.5).div_(0.5).unsqueeze(0).cuda()

        if mask_img.shape[1] == 3:  # If it is an RGB three-channel image
            mask_img = mask_img[:, :1]  # Only take the first channel

        mask_img, h, w = resize_image(
            mask_img,
            resize_mode=self.config.get("resize_mode", "adaptive"),
            bucket_shape=self.config.get("bucket_shape", None),
            fixed_area=self.config.get("fixed_area", None),
            fixed_shape=self.config.get("fixed_shape", None),
        )

        mask_latent = torch.nn.functional.interpolate(
            mask_img,  # (1, 1, H, W)
            size=(h // 16, w // 16),
            mode="bicubic",
        )

        mask_latent = (mask_latent > 0).to(torch.int8)
        return mask_latent

    def read_image_input(self, img_path):
        if isinstance(img_path, Image.Image):
            ref_img = img_path
        else:
            ref_img = Image.open(img_path).convert("RGB")
        ref_img = TF.to_tensor(ref_img).sub_(0.5).div_(0.5).unsqueeze(0).cuda()

        ref_img, h, w = resize_image(
            ref_img,
            resize_mode=self.config.get("resize_mode", "adaptive"),
            bucket_shape=self.config.get("bucket_shape", None),
            fixed_area=self.config.get("fixed_area", None),
            fixed_shape=self.config.get("fixed_shape", None),
        )
        logger.info(f"[wan_audio] resize_image target_h: {h}, target_w: {w}")

        patched_h = h // self.config["vae_stride"][1] // self.config["patch_size"][1]
        patched_w = w // self.config["vae_stride"][2] // self.config["patch_size"][2]

        patched_h, patched_w = get_optimal_patched_size_with_sp(patched_h, patched_w, 1)

        latent_h = patched_h * self.config["patch_size"][1]
        latent_w = patched_w * self.config["patch_size"][2]

        latent_shape = self.get_latent_shape_with_lat_hw(latent_h, latent_w)
        target_shape = [latent_h * self.config["vae_stride"][1], latent_w * self.config["vae_stride"][2]]

        logger.info(f"[wan_audio] target_h: {target_shape[0]}, target_w: {target_shape[1]}, latent_h: {latent_h}, latent_w: {latent_w}")

        ref_img = torch.nn.functional.interpolate(ref_img, size=(target_shape[0], target_shape[1]), mode="bicubic")
        return ref_img, latent_shape, target_shape

    @ProfilingContext4DebugL1(
        "Run Image Encoder",
        recorder_mode=GET_RECORDER_MODE(),
        metrics_func=monitor_cli.lightx2v_run_img_encode_duration,
        metrics_labels=["WanAudioRunner"],
    )
    def run_image_encoder(self, first_frame, last_frame=None):
        if self.config.get("lazy_load", False) or self.config.get("unload_modules", False):
            self.image_encoder = self.load_image_encoder()
        clip_encoder_out = self.image_encoder.visual([first_frame]).squeeze(0).to(GET_DTYPE()) if self.config.get("use_image_encoder", True) else None
        if self.config.get("lazy_load", False) or self.config.get("unload_modules", False):
            del self.image_encoder
            torch.cuda.empty_cache()
            gc.collect()
        return clip_encoder_out

    @ProfilingContext4DebugL1(
        "Run VAE Encoder",
        recorder_mode=GET_RECORDER_MODE(),
        metrics_func=monitor_cli.lightx2v_run_vae_encoder_image_duration,
        metrics_labels=["WanAudioRunner"],
    )
    def run_vae_encoder(self, img):
        if self.config.get("lazy_load", False) or self.config.get("unload_modules", False):
            self.vae_encoder = self.load_vae_encoder()

        img = rearrange(img, "1 C H W -> 1 C 1 H W")
        vae_encoder_out = self.vae_encoder.encode(img.to(GET_DTYPE()))

        if self.config.get("lazy_load", False) or self.config.get("unload_modules", False):
            del self.vae_encoder
            torch.cuda.empty_cache()
            gc.collect()
        return vae_encoder_out

    @ProfilingContext4DebugL2("Run Encoders")
    def _run_input_encoder_local_s2v(self):
        img, latent_shape, target_shape = self.read_image_input(self.input_info.image_path)
        self.input_info.latent_shape = latent_shape  # Important: set latent_shape in input_info
        self.input_info.target_shape = target_shape  # Important: set target_shape in input_info
        clip_encoder_out = self.run_image_encoder(img) if self.config.get("use_image_encoder", True) else None
        vae_encode_out = self.run_vae_encoder(img)

        audio_segments, expected_frames, person_mask_latens, audio_num = self.read_audio_input(self.input_info.audio_path)
        self.input_info.audio_num = audio_num
        self.input_info.with_mask = person_mask_latens is not None
        text_encoder_output = self.run_text_encoder(self.input_info)
        torch.cuda.empty_cache()
        gc.collect()
        return {
            "text_encoder_output": text_encoder_output,
            "image_encoder_output": {
                "clip_encoder_out": clip_encoder_out,
                "vae_encoder_out": vae_encode_out,
            },
            "audio_segments": audio_segments,
            "expected_frames": expected_frames,
            "person_mask_latens": person_mask_latens,
        }

    def prepare_prev_latents(self, prev_video: Optional[torch.Tensor], prev_frame_length: int) -> Optional[Dict[str, torch.Tensor]]:
        """Prepare previous latents for conditioning"""
        # The last `prev_frame_length` pixel frames of the previous segment are re-encoded
        # and used to condition the next segment; prev_len is the matching latent length.
        device = torch.device("cuda")
        dtype = GET_DTYPE()

        tgt_h, tgt_w = self.input_info.target_shape[0], self.input_info.target_shape[1]
        prev_frames = torch.zeros((1, 3, self.config["target_video_length"], tgt_h, tgt_w), device=device)

        if prev_video is not None:
            # Extract and process last frames
            last_frames = prev_video[:, :, -prev_frame_length:].clone().to(device)
            if self.config["model_cls"] != "wan2.2_audio":
                last_frames = self.frame_preprocessor.process_prev_frames(last_frames)
            prev_frames[:, :, :prev_frame_length] = last_frames
            prev_len = (prev_frame_length - 1) // 4 + 1
        else:
            prev_len = 0

        if self.config.get("lazy_load", False) or self.config.get("unload_modules", False):
            self.vae_encoder = self.load_vae_encoder()

        _, nframe, height, width = self.model.scheduler.latents.shape
        with ProfilingContext4DebugL1(
            "vae_encoder in init run segment",
            recorder_mode=GET_RECORDER_MODE(),
            metrics_func=monitor_cli.lightx2v_run_vae_encoder_pre_latent_duration,
            metrics_labels=["WanAudioRunner"],
        ):
            if self.config["model_cls"] == "wan2.2_audio":
                if prev_video is not None:
                    prev_latents = self.vae_encoder.encode(prev_frames.to(dtype))
                else:
                    prev_latents = None
                # wan2.2 uses the mask prepared by the scheduler
                prev_mask = self.model.scheduler.mask
            else:
                prev_latents = self.vae_encoder.encode(prev_frames.to(dtype))

                # Build the temporal mask: the first prev_len latent frames (the
                # re-encoded previous frames) stay conditioned, the rest are generated.
                frames_n = (nframe - 1) * 4 + 1
                prev_mask = torch.ones((1, frames_n, height, width), device=device, dtype=dtype)
                prev_frame_len = max((prev_len - 1) * 4 + 1, 0)
                prev_mask[:, prev_frame_len:] = 0
                prev_mask = self._wan_mask_rearrange(prev_mask)

        if prev_latents is not None:
            if prev_latents.shape[-2:] != (height, width):
                logger.warning(f"Size mismatch: prev_latents {prev_latents.shape} vs scheduler latents (H={height}, W={width}). Config tgt_h={tgt_h}, tgt_w={tgt_w}")
                prev_latents = torch.nn.functional.interpolate(prev_latents, size=(height, width), mode="bilinear", align_corners=False)

        if self.config.get("lazy_load", False) or self.config.get("unload_modules", False):
            del self.vae_encoder
            torch.cuda.empty_cache()
            gc.collect()

        return {"prev_latents": prev_latents, "prev_mask": prev_mask, "prev_len": prev_len}

    def _wan_mask_rearrange(self, mask: torch.Tensor) -> torch.Tensor:
        """Rearrange mask for WAN model"""
        if mask.ndim == 3:
            mask = mask[None]
        assert mask.ndim == 4
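        # A pixel-frame mask of length 4k + 1 is packed to latent resolution: the first
        # frame is repeated 4x, then every 4 consecutive frames form one latent step,
        # giving an output of shape (4, t_latent, h, w).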
        _, t, h, w = mask.shape
        assert t == ((t - 1) // 4 * 4 + 1)
        mask_first_frame = torch.repeat_interleave(mask[:, 0:1], repeats=4, dim=1)
        mask = torch.concat([mask_first_frame, mask[:, 1:]], dim=1)
        mask = mask.view(mask.shape[1] // 4, 4, h, w)
        return mask.transpose(0, 1).contiguous()

    def get_video_segment_num(self):
        self.video_segment_num = len(self.inputs["audio_segments"])

    def init_run(self):
        super().init_run()
        self.scheduler.set_audio_adapter(self.audio_adapter)
        self.prev_video = None
        if self.input_info.return_result_tensor:
            self.gen_video_final = torch.zeros((self.inputs["expected_frames"], self.input_info.target_shape[0], self.input_info.target_shape[1], 3), dtype=torch.float32, device="cpu")
            self.cut_audio_final = torch.zeros((self.inputs["expected_frames"] * self._audio_processor.audio_frame_rate), dtype=torch.float32, device="cpu")
        else:
            self.gen_video_final = None
            self.cut_audio_final = None

    @ProfilingContext4DebugL1(
        "Init run segment",
        recorder_mode=GET_RECORDER_MODE(),
        metrics_func=monitor_cli.lightx2v_run_init_run_segment_duration,
        metrics_labels=["WanAudioRunner"],
    )
    def init_run_segment(self, segment_idx, audio_array=None):
        self.segment_idx = segment_idx
        if audio_array is not None:
            end_idx = audio_array.shape[0] // self._audio_processor.audio_frame_rate - self.prev_frame_length
            audio_tensor = torch.Tensor(audio_array).float().unsqueeze(0)
            self.segment = AudioSegment(audio_tensor, 0, end_idx)
        else:
            self.segment = self.inputs["audio_segments"][segment_idx]

        self.input_info.seed = self.input_info.seed + segment_idx
        torch.manual_seed(self.input_info.seed)
        # logger.info(f"Processing segment {segment_idx + 1}/{self.video_segment_num}, seed: {self.config.seed}")

        if (self.config.get("lazy_load", False) or self.config.get("unload_modules", False)) and not hasattr(self, "audio_encoder"):
            self.audio_encoder = self.load_audio_encoder()

        # Encode each speaker's audio track and project it to per-latent-frame features
        features_list = []
        for i in range(self.segment.audio_array.shape[0]):
            feat = self.audio_encoder.infer(self.segment.audio_array[i])
            feat = self.audio_adapter.forward_audio_proj(feat, self.model.scheduler.latents.shape[1])
            features_list.append(feat.squeeze(0))
        audio_features = torch.stack(features_list, dim=0)

        self.inputs["audio_encoder_output"] = audio_features
        self.inputs["previmg_encoder_output"] = self.prepare_prev_latents(self.prev_video, prev_frame_length=self.prev_frame_length)

        # Reset scheduler for non-first segments
        if segment_idx > 0:
            self.model.scheduler.reset(self.input_info.seed, self.input_info.latent_shape, self.inputs["previmg_encoder_output"])

    @ProfilingContext4DebugL1(
        "End run segment",
        recorder_mode=GET_RECORDER_MODE(),
        metrics_func=monitor_cli.lightx2v_run_end_run_segment_duration,
        metrics_labels=["WanAudioRunner"],
    )
    def end_run_segment(self, segment_idx):
        self.gen_video = torch.clamp(self.gen_video, -1, 1).to(torch.float)
        useful_length = self.segment.end_frame - self.segment.start_frame
        video_seg = self.gen_video[:, :, :useful_length].cpu()
        audio_seg = self.segment.audio_array[:, : useful_length * self._audio_processor.audio_frame_rate]
        audio_seg = audio_seg.sum(dim=0)  # Multiple audio tracks, mixed into one track
        video_seg = vae_to_comfyui_image_inplace(video_seg)

        # [Warning] Need check whether video segment interpolation works...
        if "video_frame_interpolation" in self.config and self.vfi_model is not None:
            target_fps = self.config["video_frame_interpolation"]["target_fps"]
            logger.info(f"Interpolating frames from {self.config.get('fps', 16)} to {target_fps}")
            video_seg = self.vfi_model.interpolate_frames(
                video_seg,
                source_fps=self.config.get("fps", 16),
                target_fps=target_fps,
            )

        if "video_super_resolution" in self.config and self.vsr_model is not None:
            logger.info(f"Applying video super resolution with scale {self.config['video_super_resolution']['scale']}")
            video_seg = self.vsr_model.super_resolve_frames(
                video_seg,
                seed=self.config["video_super_resolution"]["seed"],
                scale=self.config["video_super_resolution"]["scale"],
            )

        if self.va_recorder:
            self.va_recorder.pub_livestream(video_seg, audio_seg)
        elif self.input_info.return_result_tensor:
            self.gen_video_final[self.segment.start_frame : self.segment.end_frame].copy_(video_seg)
            self.cut_audio_final[self.segment.start_frame * self._audio_processor.audio_frame_rate : self.segment.end_frame * self._audio_processor.audio_frame_rate].copy_(audio_seg)

        # Update prev_video for next iteration
        self.prev_video = self.gen_video

        del video_seg, audio_seg
        torch.cuda.empty_cache()
    def get_rank_and_world_size(self):
        rank = 0
        world_size = 1
        if dist.is_initialized():
            rank = dist.get_rank()
            world_size = dist.get_world_size()
        return rank, world_size

    def init_va_recorder(self):
        output_video_path = self.input_info.save_result_path
        self.va_recorder = None
        if isinstance(output_video_path, dict):
            output_video_path = output_video_path["data"]
        logger.info(f"init va_recorder with output_video_path: {output_video_path}")
        rank, world_size = self.get_rank_and_world_size()
        if output_video_path and rank == world_size - 1:
            record_fps = self.config.get("target_fps", 16)
            audio_sr = self.config.get("audio_sr", 16000)
            if "video_frame_interpolation" in self.config and self.vfi_model is not None:
                record_fps = self.config["video_frame_interpolation"]["target_fps"]

            whip_shared_path = os.getenv("WHIP_SHARED_LIB", None)
            if whip_shared_path and output_video_path.startswith("http"):
                self.va_recorder = X264VARecorder(
                    whip_shared_path=whip_shared_path,
                    livestream_url=output_video_path,
                    fps=record_fps,
                    sample_rate=audio_sr,
                )
            else:
                self.va_recorder = VARecorder(
                    livestream_url=output_video_path,
                    fps=record_fps,
                    sample_rate=audio_sr,
                )

    def init_va_reader(self):
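        """Create a VAReader when audio_path is a stream descriptor (a dict with
        type == "stream"); otherwise leave self.va_reader as None."""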
        audio_path = self.input_info.audio_path
        self.va_reader = None
        if isinstance(audio_path, dict):
            assert audio_path["type"] == "stream", f"unexcept audio_path: {audio_path}"
            rank, world_size = self.get_rank_and_world_size()
            target_fps = self.config.get("target_fps", 16)
            max_num_frames = self.config.get("target_video_length", 81)
            audio_sr = self.config.get("audio_sr", 16000)
            prev_frames = self.config.get("prev_frame_length", 5)
            self.va_reader = VAReader(
                rank=rank,
                world_size=world_size,
                stream_url=audio_path["data"],
                sample_rate=audio_sr,
                segment_duration=max_num_frames / target_fps,
                prev_duration=prev_frames / target_fps,
                target_rank=1,
            )

    def run_main(self, total_steps=None):
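        """Run generation. With a stream audio reader configured, loop over live audio
        segments and hand each finished segment to the recorder; otherwise fall back to
        the offline path in the parent runner."""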
        try:
            self.init_va_recorder()
            self.init_va_reader()
            logger.info(f"init va_recorder: {self.va_recorder} and va_reader: {self.va_reader}")

            if self.va_reader is None:
                return super().run_main(total_steps)

            self.va_reader.start()
            rank, world_size = self.get_rank_and_world_size()
            if rank == world_size - 1:
                assert self.va_recorder is not None, "va_recorder is required for stream audio input on the last rank"
                self.va_recorder.start(self.input_info.target_shape[1], self.input_info.target_shape[0])
            if world_size > 1:
                dist.barrier()

            self.init_run()
            if self.config.get("compile", False):
                self.model.select_graph_for_compile(self.input_info)
            self.video_segment_num = "unlimited"

            fetch_timeout = self.va_reader.segment_duration + 1
            segment_idx = 0
            fail_count = 0
            max_fail_count = 10

            while True:
                with ProfilingContext4DebugL1(f"stream segment get audio segment {segment_idx}"):
                    self.check_stop()
                    audio_array = self.va_reader.get_audio_segment(timeout=fetch_timeout)
                    if audio_array is None:
                        fail_count += 1
                        logger.warning(f"Failed to get audio chunk {fail_count} times")
                        if fail_count > max_fail_count:
                            raise Exception(f"Failed to get audio chunk {fail_count} times, stop reader")
                        continue

                with ProfilingContext4DebugL1(f"stream segment end2end {segment_idx}"):
                    fail_count = 0
                    self.init_run_segment(segment_idx, audio_array)
                    latents = self.run_segment(total_steps=None)
                    self.gen_video = self.run_vae_decoder(latents)
                    self.end_run_segment(segment_idx)
                    segment_idx += 1

        finally:
            if hasattr(self.model, "inputs"):
                self.end_run()
            if self.va_reader:
                self.va_reader.stop()
                self.va_reader = None
            if self.va_recorder:
                self.va_recorder.stop()
                self.va_recorder = None

    @ProfilingContext4DebugL1("Process after vae decoder")
    def process_images_after_vae_decoder(self):
        if self.input_info.return_result_tensor:
            audio_waveform = self.cut_audio_final.unsqueeze(0).unsqueeze(0)
            comfyui_audio = {"waveform": audio_waveform, "sample_rate": self._audio_processor.audio_sr}
            return {"video": self.gen_video_final, "audio": comfyui_audio}
        return {"video": None, "audio": None}

    def load_transformer(self):
        """Load transformer with LoRA support"""
        base_model = WanAudioModel(self.config["model_path"], self.config, self.init_device)
        if self.config.get("lora_configs") and self.config["lora_configs"]:
            assert not self.config.get("dit_quantized", False)
            lora_wrapper = WanLoraWrapper(base_model)
            for lora_config in self.config["lora_configs"]:
                lora_path = lora_config["path"]
                strength = lora_config.get("strength", 1.0)
                lora_name = lora_wrapper.load_lora(lora_path)
                lora_wrapper.apply_lora(lora_name, strength)
                logger.info(f"Loaded LoRA: {lora_name} with strength: {strength}")

        return base_model

    def load_audio_encoder(self):
        audio_encoder_path = self.config.get("audio_encoder_path", os.path.join(self.config["model_path"], "TencentGameMate-chinese-hubert-large"))
        audio_encoder_offload = self.config.get("audio_encoder_cpu_offload", self.config.get("cpu_offload", False))
        model = SekoAudioEncoderModel(audio_encoder_path, self.config["audio_sr"], audio_encoder_offload)
        return model

    def load_audio_adapter(self):
        audio_adapter_offload = self.config.get("audio_adapter_cpu_offload", self.config.get("cpu_offload", False))
        if audio_adapter_offload:
            device = torch.device("cpu")
        else:
            device = torch.device("cuda")
        audio_adapter = AudioAdapter(
            attention_head_dim=self.config["dim"] // self.config["num_heads"],
            num_attention_heads=self.config["num_heads"],
            base_num_layers=self.config["num_layers"],
            interval=1,
            audio_feature_dim=1024,
            time_freq_dim=256,
            projection_transformer_layers=4,
            mlp_dims=(1024, 1024, 32 * 1024),
            quantized=self.config.get("adapter_quantized", False),
            quant_scheme=self.config.get("adapter_quant_scheme", None),
            cpu_offload=audio_adapter_offload,
        )

        audio_adapter.to(device)
        load_from_rank0 = self.config.get("load_from_rank0", False)
        weights_dict = load_weights(self.config["adapter_model_path"], cpu_offload=audio_adapter_offload, remove_key="ca", load_from_rank0=load_from_rank0)
        audio_adapter.load_state_dict(weights_dict, strict=False)
        return audio_adapter.to(dtype=GET_DTYPE())

    def load_model(self):
        super().load_model()
        with ProfilingContext4DebugL2("Load audio encoder and adapter"):
            self.audio_encoder = self.load_audio_encoder()
            self.audio_adapter = self.load_audio_adapter()

    def get_latent_shape_with_lat_hw(self, latent_h, latent_w):
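        """Latent shape [C, T_lat, H_lat, W_lat], with T_lat = (target_video_length - 1) // vae_stride[0] + 1."""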
        latent_shape = [
            self.config.get("num_channels_latents", 16),
            (self.config["target_video_length"] - 1) // self.config["vae_stride"][0] + 1,
            latent_h,
            latent_w,
        ]
        return latent_shape


@RUNNER_REGISTER("wan2.2_audio")
class Wan22AudioRunner(WanAudioRunner):
    def __init__(self, config):
        super().__init__(config)

    def load_vae_decoder(self):
        # offload config
        vae_offload = self.config.get("vae_cpu_offload", self.config.get("cpu_offload"))
        if vae_offload:
            vae_device = torch.device("cpu")
        else:
            vae_device = torch.device("cuda")
        vae_config = {
            "vae_pth": find_torch_model_path(self.config, "vae_pth", "Wan2.2_VAE.pth"),
            "device": vae_device,
            "cpu_offload": vae_offload,
            "offload_cache": self.config.get("vae_offload_cache", False),
        }
        vae_decoder = Wan2_2_VAE(**vae_config)
        return vae_decoder

    def load_vae_encoder(self):
        # offload config
        vae_offload = self.config.get("vae_cpu_offload", self.config.get("cpu_offload"))
        if vae_offload:
            vae_device = torch.device("cpu")
        else:
            vae_device = torch.device("cuda")
        vae_config = {
            "vae_pth": find_torch_model_path(self.config, "vae_pth", "Wan2.2_VAE.pth"),
            "device": vae_device,
            "cpu_offload": vae_offload,
            "offload_cache": self.config.get("vae_offload_cache", False),
        }
        if self.config.task not in ["i2v", "s2v"]:
            return None
        else:
            return Wan2_2_VAE(**vae_config)

    def load_vae(self):
        vae_encoder = self.load_vae_encoder()
        vae_decoder = self.load_vae_decoder()
        return vae_encoder, vae_decoder