import gc
import io
import json
import os
import time  # used by the streaming loop in run_main
import warnings
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple, Union

import numpy as np
import torch
import torch.nn.functional as F
import torchaudio as ta
import torchvision.transforms.functional as TF
from PIL import Image, ImageCms, ImageOps
from einops import rearrange
from loguru import logger
from torchvision.transforms import InterpolationMode
from torchvision.transforms.functional import resize

from lightx2v.deploy.common.va_controller import VAController
from lightx2v.models.input_encoders.hf.seko_audio.audio_adapter import AudioAdapter
from lightx2v.models.input_encoders.hf.seko_audio.audio_encoder import SekoAudioEncoderModel
from lightx2v.models.networks.wan.audio_model import WanAudioModel
from lightx2v.models.networks.wan.lora_adapter import WanLoraWrapper
from lightx2v.models.runners.wan.wan_runner import WanRunner
from lightx2v.models.schedulers.wan.audio.scheduler import EulerScheduler
from lightx2v.models.video_encoders.hf.wan.vae_2_2 import Wan2_2_VAE
from lightx2v.server.metrics import monitor_cli
from lightx2v.utils.envs import *
from lightx2v.utils.profiler import *
from lightx2v.utils.registry_factory import RUNNER_REGISTER
from lightx2v.utils.utils import find_torch_model_path, load_weights, vae_to_comfyui_image_inplace
from lightx2v_platform.base.global_var import AI_DEVICE

warnings.filterwarnings("ignore", category=UserWarning, module="torchaudio")
warnings.filterwarnings("ignore", category=UserWarning, module="torchvision.io")

def get_optimal_patched_size_with_sp(patched_h, patched_w, sp_size):
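    """Balance a power-of-two sp_size split between height and width, rounding each patched dimension down to a multiple of its assigned split factor."""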
    assert sp_size > 0 and (sp_size & (sp_size - 1)) == 0, "sp_size must be a power of 2"

    h_ratio, w_ratio = 1, 1
    while sp_size != 1:
        sp_size //= 2
        if patched_h % 2 == 0:
            patched_h //= 2
            h_ratio *= 2
        elif patched_w % 2 == 0:
            patched_w //= 2
            w_ratio *= 2
        else:
            if patched_h > patched_w:
                patched_h //= 2
                h_ratio *= 2
            else:
                patched_w //= 2
                w_ratio *= 2
    return patched_h * h_ratio, patched_w * w_ratio
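
# Illustrative example (added, not from the original source): with sp_size=4 the split is
# balanced across both axes and odd dimensions are rounded down, e.g.
#   get_optimal_patched_size_with_sp(30, 52, 4) -> (30, 52)  # 2x2 split, already divisible
#   get_optimal_patched_size_with_sp(31, 53, 4) -> (31, 52)  # width absorbs the full 4x split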


def get_crop_bbox(ori_h, ori_w, tgt_h, tgt_w):
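    """Return the center-crop box (y0, y1, x0, x1) that matches the tgt_h/tgt_w aspect ratio."""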
    tgt_ar = tgt_h / tgt_w
    ori_ar = ori_h / ori_w
    if abs(ori_ar - tgt_ar) < 0.01:
        return 0, ori_h, 0, ori_w
    if ori_ar > tgt_ar:
        crop_h = int(tgt_ar * ori_w)
        y0 = (ori_h - crop_h) // 2
        y1 = y0 + crop_h
        return y0, y1, 0, ori_w
    else:
        crop_w = int(ori_h / tgt_ar)
        x0 = (ori_w - crop_w) // 2
        x1 = x0 + crop_w
        return 0, ori_h, x0, x1
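
# Illustrative example (added, not from the original source): cropping a 1080x1920 frame to the
# 480x832 aspect ratio trims the width symmetrically:
#   get_crop_bbox(1080, 1920, 480, 832) -> (0, 1080, 24, 1896)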


def isotropic_crop_resize(frames: torch.Tensor, size: tuple):
    """
    frames: (C, H, W) or (T, C, H, W) or (N, C, H, W)
    size: (H, W)
    """
    original_shape = frames.shape

    if len(frames.shape) == 3:
        frames = frames.unsqueeze(0)
    elif len(frames.shape) == 4 and frames.shape[0] > 1:
        pass

    ori_h, ori_w = frames.shape[2:]
    h, w = size
    y0, y1, x0, x1 = get_crop_bbox(ori_h, ori_w, h, w)
    cropped_frames = frames[:, :, y0:y1, x0:x1]
    resized_frames = resize(cropped_frames, [h, w], InterpolationMode.BICUBIC, antialias=True)

    if len(original_shape) == 3:
        resized_frames = resized_frames.squeeze(0)

    return resized_frames


def fixed_shape_resize(img, target_height, target_width):
    orig_height, orig_width = img.shape[-2:]

    target_ratio = target_height / target_width
    orig_ratio = orig_height / orig_width

    if orig_ratio > target_ratio:
        crop_width = orig_width
        crop_height = int(crop_width * target_ratio)
    else:
        crop_height = orig_height
        crop_width = int(crop_height / target_ratio)

    cropped_img = TF.center_crop(img, [crop_height, crop_width])

    resized_img = TF.resize(cropped_img, [target_height, target_width], antialias=True)

    h, w = resized_img.shape[-2:]
    return resized_img, h, w


def resize_image(img, resize_mode="adaptive", bucket_shape=None, fixed_area=None, fixed_shape=None):
    assert resize_mode in ["adaptive", "keep_ratio_fixed_area", "fixed_min_area", "fixed_max_area", "fixed_shape", "fixed_min_side"]

    if resize_mode == "fixed_shape":
        assert fixed_shape is not None
        logger.info(f"[wan_audio] fixed_shape_resize fixed_height: {fixed_shape[0]}, fixed_width: {fixed_shape[1]}")
        return fixed_shape_resize(img, fixed_shape[0], fixed_shape[1])

    if bucket_shape is not None:
        """
        "adaptive_shape": {
            "0.667": [[480, 832], [544, 960], [720, 1280]],
            "1.500": [[832, 480], [960, 544], [1280, 720]],
            "1.000": [[480, 480], [576, 576], [704, 704], [960, 960]]
        }
        """
        bucket_config = {}
        for ratio, resolutions in bucket_shape.items():
            bucket_config[float(ratio)] = np.array(resolutions, dtype=np.int64)
        # logger.info(f"[wan_audio] use custom bucket_shape: {bucket_config}")
    else:
        bucket_config = {
            0.667: np.array([[480, 832], [544, 960], [720, 1280]], dtype=np.int64),
            1.500: np.array([[832, 480], [960, 544], [1280, 720]], dtype=np.int64),
            1.000: np.array([[480, 480], [576, 576], [704, 704], [960, 960]], dtype=np.int64),
        }
        # logger.info(f"[wan_audio] use default bucket_shape: {bucket_config}")

    ori_height = img.shape[-2]
    ori_width = img.shape[-1]
    ori_ratio = ori_height / ori_width

    if resize_mode == "adaptive":
        aspect_ratios = np.array(list(bucket_config.keys()))
        closest_aspect_idx = np.argmin(np.abs(aspect_ratios - ori_ratio))
        closest_ratio = aspect_ratios[closest_aspect_idx]
        if ori_ratio < 1.0:
            target_h, target_w = 480, 832
        elif ori_ratio == 1.0:
            target_h, target_w = 480, 480
        else:
            target_h, target_w = 832, 480
        for resolution in bucket_config[closest_ratio]:
            if ori_height * ori_width >= resolution[0] * resolution[1]:
                target_h, target_w = resolution
    elif resize_mode == "keep_ratio_fixed_area":
        area_in_pixels = 480 * 832
        if fixed_area == "480p":
            area_in_pixels = 480 * 832
        elif fixed_area == "720p":
            area_in_pixels = 720 * 1280
        else:
            area_in_pixels = 480 * 832
        target_h = round(np.sqrt(area_in_pixels * ori_ratio))
        target_w = round(np.sqrt(area_in_pixels / ori_ratio))
    elif resize_mode == "fixed_min_area":
        aspect_ratios = np.array(list(bucket_config.keys()))
        closest_aspect_idx = np.argmin(np.abs(aspect_ratios - ori_ratio))
        closest_ratio = aspect_ratios[closest_aspect_idx]
        target_h, target_w = bucket_config[closest_ratio][0]
    elif resize_mode == "fixed_min_side":
        min_side = 720
        if fixed_area == "720p":
            min_side = 720
        elif fixed_area == "480p":
            min_side = 480
        else:
            logger.warning(f"[wan_audio] fixed_area is not '480p' or '720p', using default 480p: {fixed_area}")
            min_side = 480
        if ori_ratio < 1.0:
            target_h = min_side
            target_w = round(target_h / ori_ratio)
        else:
            target_w = min_side
            target_h = round(target_w * ori_ratio)
    elif resize_mode == "fixed_max_area":
        aspect_ratios = np.array(list(bucket_config.keys()))
        closest_aspect_idx = np.argmin(np.abs(aspect_ratios - ori_ratio))
        closest_ratio = aspect_ratios[closest_aspect_idx]
        target_h, target_w = bucket_config[closest_ratio][-1]

    cropped_img = isotropic_crop_resize(img, (target_h, target_w))
    logger.info(f"[wan_audio] resize_image: {img.shape} -> {cropped_img.shape}, resize_mode: {resize_mode}, target_h: {target_h}, target_w: {target_w}")
    return cropped_img, target_h, target_w
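
# Illustrative example (added, not from the original source): in "keep_ratio_fixed_area" mode with
# fixed_area="720p" (area 720 * 1280 = 921600), a 1080x1920 input keeps its 0.5625 aspect ratio:
#   target_h = round(sqrt(921600 * 0.5625)) = 720
#   target_w = round(sqrt(921600 / 0.5625)) = 1280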


@dataclass
class AudioSegment:
    """Data class for audio segment information"""

    audio_array: torch.Tensor
    start_frame: int
    end_frame: int


class FramePreprocessorTorchVersion:
    """Handles frame preprocessing including noise and masking"""

    def __init__(self, noise_mean: float = -3.0, noise_std: float = 0.5, mask_rate: float = 0.1):
        self.noise_mean = noise_mean
        self.noise_std = noise_std
        self.mask_rate = mask_rate

    def add_noise(self, frames: torch.Tensor, generator: Optional[torch.Generator] = None) -> torch.Tensor:
        """Add noise to frames"""

        device = frames.device
        shape = frames.shape
        bs = 1 if len(shape) == 4 else shape[0]

        # Generate sigma values on the same device
        sigma = torch.normal(mean=self.noise_mean, std=self.noise_std, size=(bs,), device=device, generator=generator)
        sigma = torch.exp(sigma)

        for _ in range(1, len(shape)):
            sigma = sigma.unsqueeze(-1)

        # Generate noise on the same device
        noise = torch.randn(*shape, device=device, generator=generator) * sigma
        return frames + noise

    def add_mask(self, frames: torch.Tensor, generator: Optional[torch.Generator] = None) -> torch.Tensor:
        """Add mask to frames"""

        device = frames.device
        h, w = frames.shape[-2:]

        # Generate mask on the same device
        mask = torch.rand(h, w, device=device, generator=generator) > self.mask_rate
        return frames * mask

    def process_prev_frames(self, frames: torch.Tensor) -> torch.Tensor:
        """Process previous frames with noise and masking"""
        frames = self.add_noise(frames, torch.Generator(device=frames.device))
        frames = self.add_mask(frames, torch.Generator(device=frames.device))
        return frames


class AudioProcessor:
    """Handles audio loading and segmentation"""

    def __init__(self, audio_sr: int = 16000, target_fps: int = 16):
        self.audio_sr = audio_sr
        self.target_fps = target_fps
        self.audio_frame_rate = audio_sr // target_fps

    def load_audio(self, audio_path: str):
        audio_array, ori_sr = ta.load(audio_path)
        audio_array = ta.functional.resample(audio_array.mean(0), orig_freq=ori_sr, new_freq=self.audio_sr)
        return audio_array

    def load_multi_person_audio(self, audio_paths: List[str]):
        audio_arrays = []
        max_len = 0

        for audio_path in audio_paths:
            audio_array = self.load_audio(audio_path)
            audio_arrays.append(audio_array)
            max_len = max(max_len, audio_array.numel())

        num_files = len(audio_arrays)
        padded = torch.zeros(num_files, max_len, dtype=torch.float32)

        for i, arr in enumerate(audio_arrays):
            length = arr.numel()
            padded[i, :length] = arr

        return padded

    def get_audio_range(self, start_frame: int, end_frame: int) -> Tuple[int, int]:
        """Calculate audio range for given frame range"""
        return round(start_frame * self.audio_frame_rate), round(end_frame * self.audio_frame_rate)

    def segment_audio(self, audio_array: torch.Tensor, expected_frames: int, max_num_frames: int, prev_frame_length: int = 5) -> List[AudioSegment]:
        """
        Segment audio based on frame requirements
        audio_array is (N, T) tensor
        """
        segments = []
        segments_idx = self.init_segments_idx(expected_frames, max_num_frames, prev_frame_length)

        audio_start, audio_end = self.get_audio_range(0, expected_frames)
        audio_array_ori = audio_array[:, audio_start:audio_end]

        for idx, (start_idx, end_idx) in enumerate(segments_idx):
            audio_start, audio_end = self.get_audio_range(start_idx, end_idx)
            audio_array = audio_array_ori[:, audio_start:audio_end]

            if idx < len(segments_idx) - 1:
                end_idx = segments_idx[idx + 1][0]
            else:  # for the last segment
                if audio_array.shape[1] < audio_end - audio_start:
                    padding_len = audio_end - audio_start - audio_array.shape[1]
                    audio_array = F.pad(audio_array, (0, padding_len))
                    # Adjust end_idx to account for the frames added by padding
                    end_idx = end_idx - padding_len // self.audio_frame_rate

            segments.append(AudioSegment(audio_array, start_idx, end_idx))
        del audio_array, audio_array_ori
        return segments

    def init_segments_idx(self, total_frame: int, clip_frame: int = 81, overlap_frame: int = 5) -> list[tuple[int, int]]:
        """Initialize segment indices with overlap"""
        start_end_list = []
        min_frame = clip_frame
        for start in range(0, total_frame, clip_frame - overlap_frame):
            is_last = start + clip_frame >= total_frame
            end = min(start + clip_frame, total_frame)
            if end - start < min_frame:
                end = start + min_frame
            if ((end - start) - 1) % 4 != 0:
                end = start + (((end - start) - 1) // 4) * 4 + 1
            start_end_list.append((start, end))
            if is_last:
                break
        return start_end_list
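        # Illustrative trace (added, not from the original source): total_frame=160 with the
        # defaults clip_frame=81, overlap_frame=5 yields [(0, 81), (76, 157), (152, 233)];
        # the final segment overruns the audio and is padded later in segment_audio().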


def load_image(image: Union[str, Image.Image], to_rgb: bool = True) -> Image.Image:
    _image = image
    if isinstance(image, str):
        if os.path.isfile(image):
            _image = Image.open(image)
        else:
            raise ValueError(f"Incorrect path. {image} is not a valid path.")
    # orientation transpose
    _image = ImageOps.exif_transpose(_image)
    # convert color space to sRGB
    icc_profile = _image.info.get("icc_profile")
    if icc_profile:
        srgb_profile = ImageCms.createProfile("sRGB")
        input_profile = ImageCms.ImageCmsProfile(io.BytesIO(icc_profile))
        _image = ImageCms.profileToProfile(_image, input_profile, srgb_profile)
    # convert to "RGB"
    if to_rgb:
        _image = _image.convert("RGB")

    return _image


@RUNNER_REGISTER("seko_talk")
class WanAudioRunner(WanRunner):  # type:ignore
    def __init__(self, config):
        super().__init__(config)
        self.prev_frame_length = self.config.get("prev_frame_length", 5)
        self.frame_preprocessor = FramePreprocessorTorchVersion()

    def init_scheduler(self):
        """Initialize the Euler scheduler"""
        self.scheduler = EulerScheduler(self.config)

    def read_audio_input(self, audio_path):
        """Read audio input - handles both single and multi-person scenarios"""
        audio_sr = self.config.get("audio_sr", 16000)
        target_fps = self.config.get("target_fps", 16)
        self._audio_processor = AudioProcessor(audio_sr, target_fps)

        if not isinstance(audio_path, str):
            return [], 0, None, 0

        # Get audio files from person objects or legacy format
        audio_files, mask_files = self.get_audio_files_from_audio_path(audio_path)

        # Load audio based on single or multi-person mode
        if len(audio_files) == 1:
            audio_array = self._audio_processor.load_audio(audio_files[0])
            audio_array = audio_array.unsqueeze(0)  # Add batch dimension for consistency
        else:
            audio_array = self._audio_processor.load_multi_person_audio(audio_files)

        video_duration = self.config.get("video_duration", 5)
        audio_len = int(audio_array.shape[1] / audio_sr * target_fps)
        if GET_RECORDER_MODE():
            monitor_cli.lightx2v_input_audio_len.observe(audio_len)

        expected_frames = min(max(1, int(video_duration * target_fps)), audio_len)
        if expected_frames < int(video_duration * target_fps):
            logger.warning(f"Input video duration is greater than actual audio duration, using audio duration instead: audio_duration={audio_len / target_fps}, video_duration={video_duration}")

        # Segment audio
        audio_segments = self._audio_processor.segment_audio(audio_array, expected_frames, self.config.get("target_video_length", 81), self.prev_frame_length)

        # Mask latent for multi-person s2v
        if mask_files is not None:
            mask_latents = [self.process_single_mask(mask_file) for mask_file in mask_files]
            mask_latents = torch.cat(mask_latents, dim=0)
        else:
            mask_latents = None

        return audio_segments, expected_frames, mask_latents, len(audio_files)

    def get_audio_files_from_audio_path(self, audio_path):
        if os.path.isdir(audio_path):
            audio_files = []
            mask_files = []
            logger.info(f"audio_path is a directory, loading config.json from {audio_path}")
            audio_config_path = os.path.join(audio_path, "config.json")
            assert os.path.exists(audio_config_path), "config.json not found in audio_path"
            with open(audio_config_path, "r") as f:
                audio_config = json.load(f)
            for talk_object in audio_config["talk_objects"]:
                audio_files.append(os.path.join(audio_path, talk_object["audio"]))
                mask_files.append(os.path.join(audio_path, talk_object["mask"]))
        else:
            logger.info(f"audio_path is a file without mask: {audio_path}")
            audio_files = [audio_path]
            mask_files = None

        return audio_files, mask_files

    def process_single_mask(self, mask_file):
        mask_img = load_image(mask_file)
        mask_img = TF.to_tensor(mask_img).sub_(0.5).div_(0.5).unsqueeze(0).to(AI_DEVICE)

        if mask_img.shape[1] == 3:  # If it is an RGB three-channel image
            mask_img = mask_img[:, :1]  # Only take the first channel

        mask_img, h, w = resize_image(
            mask_img,
            resize_mode=self.config.get("resize_mode", "adaptive"),
            bucket_shape=self.config.get("bucket_shape", None),
            fixed_area=self.config.get("fixed_area", None),
            fixed_shape=self.config.get("fixed_shape", None),
        )

        mask_latent = torch.nn.functional.interpolate(
            mask_img,  # (1, 1, H, W)
            size=(h // 16, w // 16),
            mode="bicubic",
        )

        mask_latent = (mask_latent > 0).to(torch.int8)
        return mask_latent

    def read_image_input(self, img_path):
        if isinstance(img_path, Image.Image):
            ref_img = img_path
        else:
            ref_img = load_image(img_path)
        ref_img = TF.to_tensor(ref_img).sub_(0.5).div_(0.5).unsqueeze(0).to(AI_DEVICE)

        ref_img, h, w = resize_image(
            ref_img,
            resize_mode=self.config.get("resize_mode", "adaptive"),
            bucket_shape=self.config.get("bucket_shape", None),
            fixed_area=self.config.get("fixed_area", None),
            fixed_shape=self.config.get("fixed_shape", None),
        )
        logger.info(f"[wan_audio] resize_image target_h: {h}, target_w: {w}")
        patched_h = h // self.config["vae_stride"][1] // self.config["patch_size"][1]
        patched_w = w // self.config["vae_stride"][2] // self.config["patch_size"][2]

        patched_h, patched_w = get_optimal_patched_size_with_sp(patched_h, patched_w, 1)

        latent_h = patched_h * self.config["patch_size"][1]
        latent_w = patched_w * self.config["patch_size"][2]

        latent_shape = self.get_latent_shape_with_lat_hw(latent_h, latent_w)
        target_shape = [latent_h * self.config["vae_stride"][1], latent_w * self.config["vae_stride"][2]]

        logger.info(f"[wan_audio] target_h: {target_shape[0]}, target_w: {target_shape[1]}, latent_h: {latent_h}, latent_w: {latent_w}")

        ref_img = torch.nn.functional.interpolate(ref_img, size=(target_shape[0], target_shape[1]), mode="bicubic")
        return ref_img, latent_shape, target_shape

    @ProfilingContext4DebugL1(
        "Run Image Encoder",
        recorder_mode=GET_RECORDER_MODE(),
        metrics_func=monitor_cli.lightx2v_run_img_encode_duration,
        metrics_labels=["WanAudioRunner"],
    )
    def run_image_encoder(self, first_frame, last_frame=None):
        if self.config.get("lazy_load", False) or self.config.get("unload_modules", False):
            self.image_encoder = self.load_image_encoder()
        clip_encoder_out = self.image_encoder.visual([first_frame]).squeeze(0).to(GET_DTYPE()) if self.config.get("use_image_encoder", True) else None
        if self.config.get("lazy_load", False) or self.config.get("unload_modules", False):
            del self.image_encoder
            torch.cuda.empty_cache()
            gc.collect()
        return clip_encoder_out

    @ProfilingContext4DebugL1(
        "Run VAE Encoder",
        recorder_mode=GET_RECORDER_MODE(),
        metrics_func=monitor_cli.lightx2v_run_vae_encoder_image_duration,
        metrics_labels=["WanAudioRunner"],
    )
    def run_vae_encoder(self, img):
        if self.config.get("lazy_load", False) or self.config.get("unload_modules", False):
            self.vae_encoder = self.load_vae_encoder()

        img = rearrange(img, "1 C H W -> 1 C 1 H W")
        vae_encoder_out = self.vae_encoder.encode(img.to(GET_DTYPE()))

        if self.config.get("lazy_load", False) or self.config.get("unload_modules", False):
            del self.vae_encoder
            torch.cuda.empty_cache()
            gc.collect()
        return vae_encoder_out

    @ProfilingContext4DebugL2("Run Encoders")
    def _run_input_encoder_local_s2v(self):
        img, latent_shape, target_shape = self.read_image_input(self.input_info.image_path)
        if self.config.get("f2v_process", False):
            self.ref_img = img
        self.input_info.latent_shape = latent_shape  # Important: set latent_shape in input_info
        self.input_info.target_shape = target_shape  # Important: set target_shape in input_info
        clip_encoder_out = self.run_image_encoder(img) if self.config.get("use_image_encoder", True) else None
        vae_encode_out = self.run_vae_encoder(img)

        audio_segments, expected_frames, person_mask_latens, audio_num = self.read_audio_input(self.input_info.audio_path)
        self.input_info.audio_num = audio_num
        self.input_info.with_mask = person_mask_latens is not None
        text_encoder_output = self.run_text_encoder(self.input_info)
        torch.cuda.empty_cache()
        gc.collect()
        return {
            "text_encoder_output": text_encoder_output,
            "image_encoder_output": {
                "clip_encoder_out": clip_encoder_out,
                "vae_encoder_out": vae_encode_out,
            },
            "audio_segments": audio_segments,
            "expected_frames": expected_frames,
            "person_mask_latens": person_mask_latens,
        }

    def prepare_prev_latents(self, prev_video: Optional[torch.Tensor], prev_frame_length: int) -> Optional[Dict[str, torch.Tensor]]:
        """Prepare previous latents for conditioning"""
        dtype = GET_DTYPE()

        tgt_h, tgt_w = self.input_info.target_shape[0], self.input_info.target_shape[1]
        prev_frames = torch.zeros((1, 3, self.config["target_video_length"], tgt_h, tgt_w), device=AI_DEVICE)

        if prev_video is not None:
            # Extract and process last frames
            last_frames = prev_video[:, :, -prev_frame_length:].clone().to(AI_DEVICE)
            if self.config["model_cls"] != "wan2.2_audio" and not self.config.get("f2v_process", False):
                last_frames = self.frame_preprocessor.process_prev_frames(last_frames)
            prev_frames[:, :, :prev_frame_length] = last_frames
            prev_len = (prev_frame_length - 1) // 4 + 1
        else:
            prev_len = 0

        if self.config.get("lazy_load", False) or self.config.get("unload_modules", False):
            self.vae_encoder = self.load_vae_encoder()

        _, nframe, height, width = self.model.scheduler.latents.shape
        with ProfilingContext4DebugL1(
            "vae_encoder in init run segment",
            recorder_mode=GET_RECORDER_MODE(),
            metrics_func=monitor_cli.lightx2v_run_vae_encoder_pre_latent_duration,
            metrics_labels=["WanAudioRunner"],
        ):
            if self.config["model_cls"] == "wan2.2_audio":
                if prev_video is not None:
                    prev_latents = self.vae_encoder.encode(prev_frames.to(dtype))
                else:
                    prev_latents = None
                prev_mask = self.model.scheduler.mask
            else:
                prev_latents = self.vae_encoder.encode(prev_frames.to(dtype))

            frames_n = (nframe - 1) * 4 + 1
            prev_mask = torch.ones((1, frames_n, height, width), device=AI_DEVICE, dtype=dtype)
            prev_frame_len = max((prev_len - 1) * 4 + 1, 0)
            prev_mask[:, prev_frame_len:] = 0
            prev_mask = self._wan_mask_rearrange(prev_mask)

        if prev_latents is not None:
            if prev_latents.shape[-2:] != (height, width):
                logger.warning(f"Size mismatch: prev_latents {prev_latents.shape} vs scheduler latents (H={height}, W={width}). Config tgt_h={tgt_h}, tgt_w={tgt_w}")
                prev_latents = torch.nn.functional.interpolate(prev_latents, size=(height, width), mode="bilinear", align_corners=False)

        if self.config.get("lazy_load", False) or self.config.get("unload_modules", False):
            del self.vae_encoder
            torch.cuda.empty_cache()
            gc.collect()

        return {"prev_latents": prev_latents, "prev_mask": prev_mask, "prev_len": prev_len}

    def _wan_mask_rearrange(self, mask: torch.Tensor) -> torch.Tensor:
        """Rearrange mask for WAN model"""
        if mask.ndim == 3:
            mask = mask[None]
        assert mask.ndim == 4
        _, t, h, w = mask.shape
        assert t == ((t - 1) // 4 * 4 + 1)
        mask_first_frame = torch.repeat_interleave(mask[:, 0:1], repeats=4, dim=1)
        mask = torch.concat([mask_first_frame, mask[:, 1:]], dim=1)
        mask = mask.view(mask.shape[1] // 4, 4, h, w)
        return mask.transpose(0, 1).contiguous()
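        # Shape note (added for clarity): a (1, 4k+1, H, W) mask becomes (4, k+1, H, W) after the
        # first frame is repeated 4x and the temporal axis is folded into groups of 4.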

    def get_video_segment_num(self):
        self.video_segment_num = len(self.inputs["audio_segments"])

    def init_run(self):
        super().init_run()
        self.scheduler.set_audio_adapter(self.audio_adapter)
        if self.config.get("f2v_process", False):
            self.prev_video = self.ref_img.unsqueeze(2)
        else:
            self.prev_video = None
        if self.input_info.return_result_tensor:
            self.gen_video_final = torch.zeros((self.inputs["expected_frames"], self.input_info.target_shape[0], self.input_info.target_shape[1], 3), dtype=torch.float32, device="cpu")
            self.cut_audio_final = torch.zeros((self.inputs["expected_frames"] * self._audio_processor.audio_frame_rate), dtype=torch.float32, device="cpu")
        else:
            self.gen_video_final = None
            self.cut_audio_final = None

    @ProfilingContext4DebugL1(
        "Init run segment",
        recorder_mode=GET_RECORDER_MODE(),
        metrics_func=monitor_cli.lightx2v_run_init_run_segment_duration,
        metrics_labels=["WanAudioRunner"],
    )
    def init_run_segment(self, segment_idx, audio_array=None):
        self.segment_idx = segment_idx
        if audio_array is not None:
            end_idx = audio_array.shape[0] // self._audio_processor.audio_frame_rate - self.prev_frame_length
            audio_tensor = torch.Tensor(audio_array).float().unsqueeze(0)
            self.segment = AudioSegment(audio_tensor, 0, end_idx)
        else:
            self.segment = self.inputs["audio_segments"][segment_idx]

        self.input_info.seed = self.input_info.seed + segment_idx
        torch.manual_seed(self.input_info.seed)
        # logger.info(f"Processing segment {segment_idx + 1}/{self.video_segment_num}, seed: {self.config.seed}")

        if (self.config.get("lazy_load", False) or self.config.get("unload_modules", False)) and not hasattr(self, "audio_encoder"):
            self.audio_encoder = self.load_audio_encoder()

        features_list = []
        for i in range(self.segment.audio_array.shape[0]):
            feat = self.audio_encoder.infer(self.segment.audio_array[i])
            feat = self.audio_adapter.forward_audio_proj(feat, self.model.scheduler.latents.shape[1])
            features_list.append(feat.squeeze(0))
        audio_features = torch.stack(features_list, dim=0)

        self.inputs["audio_encoder_output"] = audio_features
        self.inputs["previmg_encoder_output"] = self.prepare_prev_latents(self.prev_video, prev_frame_length=self.prev_frame_length)

        # Reset scheduler for non-first segments
        if segment_idx > 0:
            self.model.scheduler.reset(self.input_info.seed, self.input_info.latent_shape, self.inputs["previmg_encoder_output"])

    @ProfilingContext4DebugL1(
        "End run segment",
        recorder_mode=GET_RECORDER_MODE(),
        metrics_func=monitor_cli.lightx2v_run_end_run_segment_duration,
        metrics_labels=["WanAudioRunner"],
    )
    def end_run_segment(self, segment_idx):
        self.gen_video = torch.clamp(self.gen_video, -1, 1).to(torch.float)
        useful_length = self.segment.end_frame - self.segment.start_frame
        video_seg = self.gen_video[:, :, :useful_length].cpu()
        audio_seg = self.segment.audio_array[:, : useful_length * self._audio_processor.audio_frame_rate]
        audio_seg = audio_seg.sum(dim=0)  # Multiple audio tracks, mixed into one track
        video_seg = vae_to_comfyui_image_inplace(video_seg)

        # [Warning] Need to check whether video segment interpolation works...
        if "video_frame_interpolation" in self.config and self.vfi_model is not None:
            target_fps = self.config["video_frame_interpolation"]["target_fps"]
            logger.info(f"Interpolating frames from {self.config.get('fps', 16)} to {target_fps}")
            video_seg = self.vfi_model.interpolate_frames(
                video_seg,
                source_fps=self.config.get("fps", 16),
                target_fps=target_fps,
            )

        if "video_super_resolution" in self.config and self.vsr_model is not None:
            # logger.info(f"Applying video super resolution with scale {self.config['video_super_resolution']['scale']}")
            video_seg = self.vsr_model.super_resolve_frames(
                video_seg,
                seed=self.config["video_super_resolution"]["seed"],
                scale=self.config["video_super_resolution"]["scale"],
            )

        if self.va_controller.recorder is not None:
            self.va_controller.pub_livestream(video_seg, audio_seg, self.gen_video[:, :, :useful_length])
        elif self.input_info.return_result_tensor:
            self.gen_video_final[self.segment.start_frame : self.segment.end_frame].copy_(video_seg)
            self.cut_audio_final[self.segment.start_frame * self._audio_processor.audio_frame_rate : self.segment.end_frame * self._audio_processor.audio_frame_rate].copy_(audio_seg)

        # Update prev_video for next iteration
        self.prev_video = self.gen_video

        del video_seg, audio_seg
        torch.cuda.empty_cache()

    @ProfilingContext4DebugL1(
        "End run segment stream",
        recorder_mode=GET_RECORDER_MODE(),
        metrics_func=monitor_cli.lightx2v_run_end_run_segment_duration,
        metrics_labels=["WanAudioRunner"],
    )
    def end_run_segment_stream(self, latents):
        valid_length = self.segment.end_frame - self.segment.start_frame
        frame_segments = []
        frame_idx = 0

        # frame_segment: 1*C*1*H*W, 1*C*4*H*W, 1*C*4*H*W, ...
        for origin_seg in self.run_vae_decoder_stream(latents):
            origin_seg = torch.clamp(origin_seg, -1, 1).to(torch.float)
            valid_T = min(valid_length - frame_idx, origin_seg.shape[2])

            video_seg = vae_to_comfyui_image_inplace(origin_seg[:, :, :valid_T].cpu())
            audio_start = frame_idx * self._audio_processor.audio_frame_rate
            audio_end = (frame_idx + valid_T) * self._audio_processor.audio_frame_rate
            audio_seg = self.segment.audio_array[:, audio_start:audio_end].sum(dim=0)

            if self.va_controller.recorder is not None:
                self.va_controller.pub_livestream(video_seg, audio_seg, origin_seg[:, :, :valid_T])

            frame_segments.append(origin_seg)
            frame_idx += valid_T
            del video_seg, audio_seg

        # Update prev_video for next iteration
        self.prev_video = torch.cat(frame_segments, dim=2)
        torch.cuda.empty_cache()

    def run_main(self):
        try:
            self.va_controller = None
            self.va_controller = VAController(self)
            logger.info(f"init va_recorder: {self.va_controller.recorder} and va_reader: {self.va_controller.reader}")

            # fixed audio segments inputs
            if self.va_controller.reader is None:
                return super().run_main()

            self.va_controller.start()
            self.init_run()
            if self.config.get("compile", False) and hasattr(self.model, "compile"):
                self.model.select_graph_for_compile(self.input_info)
            # stream audio input, video segment num is unlimited
            self.video_segment_num = 1000000
            segment_idx = 0
            fail_count, max_fail_count = 0, 10
            self.va_controller.before_control()

            while True:
                with ProfilingContext4DebugL1(f"stream segment get audio segment {segment_idx}"):
                    control = self.va_controller.next_control()
                    if control.action == "immediate":
                        self.prev_video = control.data
                    elif control.action == "wait":
                        time.sleep(0.01)
                        continue

                    audio_array = self.va_controller.reader.get_audio_segment()
                    if audio_array is None:
                        fail_count += 1
                        logger.warning(f"Failed to get audio chunk {fail_count} times")
                        if fail_count > max_fail_count:
                            raise Exception(f"Failed to get audio chunk {fail_count} times, stop reader")
                        continue

                with ProfilingContext4DebugL1(f"stream segment end2end {segment_idx}"):
                    try:
                        # reset pause signal
                        self.pause_signal = False
                        self.init_run_segment(segment_idx, audio_array)
                        self.check_stop()
                        latents = self.run_segment(segment_idx)
                        self.check_stop()
                        if self.config.get("use_stream_vae", False):
                            self.end_run_segment_stream(latents)
                        else:
                            self.gen_video = self.run_vae_decoder(latents)
                            self.check_stop()
                            self.end_run_segment(segment_idx)
                        segment_idx += 1
                        fail_count = 0
                    except Exception as e:
                        if "pause_signal, pause running" in str(e):
                            logger.warning(f"model infer audio pause: {e}, should continue")
                        else:
                            raise
        finally:
            if hasattr(self.model, "inputs"):
                self.end_run()
            if self.va_controller is not None:
                self.va_controller.clear()
                self.va_controller = None

    @ProfilingContext4DebugL1("Process after vae decoder")
    def process_images_after_vae_decoder(self):
        if self.input_info.return_result_tensor:
            audio_waveform = self.cut_audio_final.unsqueeze(0).unsqueeze(0)
            comfyui_audio = {"waveform": audio_waveform, "sample_rate": self._audio_processor.audio_sr}
            return {"video": self.gen_video_final, "audio": comfyui_audio}
        return {"video": None, "audio": None}

    def load_transformer(self):
        """Load transformer with LoRA support"""
        base_model = WanAudioModel(self.config["model_path"], self.config, self.init_device)
        if self.config.get("lora_configs") and self.config["lora_configs"]:
            assert not self.config.get("dit_quantized", False)
            lora_wrapper = WanLoraWrapper(base_model)
            for lora_config in self.config["lora_configs"]:
                lora_path = lora_config["path"]
                strength = lora_config.get("strength", 1.0)
                lora_name = lora_wrapper.load_lora(lora_path)
                lora_wrapper.apply_lora(lora_name, strength)
                logger.info(f"Loaded LoRA: {lora_name} with strength: {strength}")

        return base_model

    def load_audio_encoder(self):
        audio_encoder_path = self.config.get("audio_encoder_path", os.path.join(self.config["model_path"], "TencentGameMate-chinese-hubert-large"))
        audio_encoder_offload = self.config.get("audio_encoder_cpu_offload", self.config.get("cpu_offload", False))
        model = SekoAudioEncoderModel(audio_encoder_path, self.config["audio_sr"], audio_encoder_offload)
        return model

    def load_audio_adapter(self):
        audio_adapter_offload = self.config.get("audio_adapter_cpu_offload", self.config.get("cpu_offload", False))
        if audio_adapter_offload:
            device = torch.device("cpu")
        else:
            device = torch.device(AI_DEVICE)
        audio_adapter = AudioAdapter(
            attention_head_dim=self.config["dim"] // self.config["num_heads"],
            num_attention_heads=self.config["num_heads"],
            base_num_layers=self.config["num_layers"],
            interval=1,
            audio_feature_dim=1024,
            time_freq_dim=256,
            projection_transformer_layers=4,
            mlp_dims=(1024, 1024, 32 * 1024),
            quantized=self.config.get("adapter_quantized", False),
            quant_scheme=self.config.get("adapter_quant_scheme", None),
            cpu_offload=audio_adapter_offload,
        )

        audio_adapter.to(device)
        load_from_rank0 = self.config.get("load_from_rank0", False)
        weights_dict = load_weights(self.config["adapter_model_path"], cpu_offload=audio_adapter_offload, remove_key="ca", load_from_rank0=load_from_rank0)
        audio_adapter.load_state_dict(weights_dict, strict=False)
        return audio_adapter.to(dtype=GET_DTYPE())

    def load_model(self):
        super().load_model()
        with ProfilingContext4DebugL2("Load audio encoder and adapter"):
            self.audio_encoder = self.load_audio_encoder()
            self.audio_adapter = self.load_audio_adapter()

    def get_latent_shape_with_lat_hw(self, latent_h, latent_w):
        latent_shape = [
            self.config.get("num_channels_latents", 16),
            (self.config["target_video_length"] - 1) // self.config["vae_stride"][0] + 1,
            latent_h,
            latent_w,
        ]
        return latent_shape
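        # Shape note (added for clarity): e.g. with target_video_length=81 and vae_stride[0]=4
        # (typical Wan settings), the temporal latent dimension is (81 - 1) // 4 + 1 = 21.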


@RUNNER_REGISTER("wan2.2_audio")
class Wan22AudioRunner(WanAudioRunner):
    def __init__(self, config):
        super().__init__(config)

    def load_vae_decoder(self):
        # offload config
        vae_offload = self.config.get("vae_cpu_offload", self.config.get("cpu_offload"))
        if vae_offload:
            vae_device = torch.device("cpu")
        else:
            vae_device = torch.device(AI_DEVICE)
        vae_config = {
            "vae_path": find_torch_model_path(self.config, "vae_path", "Wan2.2_VAE.pth"),
            "device": vae_device,
            "cpu_offload": vae_offload,
            "offload_cache": self.config.get("vae_offload_cache", False),
        }
        vae_decoder = Wan2_2_VAE(**vae_config)
        return vae_decoder

    def load_vae_encoder(self):
        # offload config
        vae_offload = self.config.get("vae_cpu_offload", self.config.get("cpu_offload"))
        if vae_offload:
            vae_device = torch.device("cpu")
        else:
            vae_device = torch.device(AI_DEVICE)
        vae_config = {
            "vae_path": find_torch_model_path(self.config, "vae_path", "Wan2.2_VAE.pth"),
            "device": vae_device,
            "cpu_offload": vae_offload,
            "offload_cache": self.config.get("vae_offload_cache", False),
        }
        if self.config.task not in ["i2v", "s2v"]:
            return None
        else:
            return Wan2_2_VAE(**vae_config)

    def load_vae(self):
        vae_encoder = self.load_vae_encoder()
        vae_decoder = self.load_vae_decoder()
        return vae_encoder, vae_decoder