import gc
import os
import subprocess
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple

import numpy as np
import torch
import torch.distributed as dist
import torchaudio as ta
import torchvision.transforms.functional as TF
from PIL import Image
from einops import rearrange
from loguru import logger
from torchvision.transforms import InterpolationMode
from torchvision.transforms.functional import resize

from lightx2v.deploy.common.va_reader import VAReader
from lightx2v.deploy.common.va_recorder import VARecorder
from lightx2v.models.input_encoders.hf.seko_audio.audio_adapter import AudioAdapter
from lightx2v.models.input_encoders.hf.seko_audio.audio_encoder import SekoAudioEncoderModel
from lightx2v.models.networks.wan.audio_model import WanAudioModel
from lightx2v.models.networks.wan.lora_adapter import WanLoraWrapper
from lightx2v.models.runners.wan.wan_runner import WanRunner
from lightx2v.models.schedulers.wan.audio.scheduler import EulerScheduler
from lightx2v.models.video_encoders.hf.wan.vae_2_2 import Wan2_2_VAE
from lightx2v.utils.envs import *
from lightx2v.utils.profiler import *
from lightx2v.utils.registry_factory import RUNNER_REGISTER
from lightx2v.utils.utils import find_torch_model_path, load_weights, save_to_video, vae_to_comfyui_image


def get_optimal_patched_size_with_sp(patched_h, patched_w, sp_size):
    assert sp_size > 0 and (sp_size & (sp_size - 1)) == 0, "sp_size must be a power of 2"

    h_ratio, w_ratio = 1, 1
    while sp_size != 1:
        sp_size //= 2
        if patched_h % 2 == 0:
            patched_h //= 2
            h_ratio *= 2
        elif patched_w % 2 == 0:
            patched_w //= 2
            w_ratio *= 2
        else:
            if patched_h > patched_w:
                patched_h //= 2
                h_ratio *= 2
            else:
                patched_w //= 2
                w_ratio *= 2
    return patched_h * h_ratio, patched_w * w_ratio
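
# Illustrative examples (not executed): the helper rounds the patched grid so the
# dimension that gets split across sequence-parallel ranks stays divisible, e.g.
#   get_optimal_patched_size_with_sp(30, 52, 4) -> (30, 52)   # already divisible
#   get_optimal_patched_size_with_sp(15, 27, 2) -> (15, 26)   # width rounded down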


def get_crop_bbox(ori_h, ori_w, tgt_h, tgt_w):
    tgt_ar = tgt_h / tgt_w
    ori_ar = ori_h / ori_w
    if abs(ori_ar - tgt_ar) < 0.01:
        return 0, ori_h, 0, ori_w
    if ori_ar > tgt_ar:
        crop_h = int(tgt_ar * ori_w)
        y0 = (ori_h - crop_h) // 2
        y1 = y0 + crop_h
        return y0, y1, 0, ori_w
    else:
        crop_w = int(ori_h / tgt_ar)
        x0 = (ori_w - crop_w) // 2
        x1 = x0 + crop_w
        return 0, ori_h, x0, x1
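
# Illustrative example (not executed): for a 1080x1920 frame cropped towards a 480x832
# target, get_crop_bbox(1080, 1920, 480, 832) returns (0, 1080, 24, 1896), i.e. a
# centered 1080x1872 window matching the target aspect ratio before resizing.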


def isotropic_crop_resize(frames: torch.Tensor, size: tuple):
    """
    frames: (T, C, H, W)
    size: (H, W)
    """
    ori_h, ori_w = frames.shape[2:]
    h, w = size
    y0, y1, x0, x1 = get_crop_bbox(ori_h, ori_w, h, w)
    cropped_frames = frames[:, :, y0:y1, x0:x1]
    resized_frames = resize(cropped_frames, [h, w], InterpolationMode.BICUBIC, antialias=True)
    return resized_frames


def fixed_shape_resize(img, target_height, target_width):
    orig_height, orig_width = img.shape[-2:]

    target_ratio = target_height / target_width
    orig_ratio = orig_height / orig_width

    if orig_ratio > target_ratio:
        crop_width = orig_width
        crop_height = int(crop_width * target_ratio)
    else:
        crop_height = orig_height
        crop_width = int(crop_height / target_ratio)

    cropped_img = TF.center_crop(img, [crop_height, crop_width])

    resized_img = TF.resize(cropped_img, [target_height, target_width], antialias=True)

    h, w = resized_img.shape[-2:]
    return resized_img, h, w


def resize_image(img, resize_mode="adaptive", bucket_shape=None, fixed_area=None, fixed_shape=None):
    assert resize_mode in ["adaptive", "keep_ratio_fixed_area", "fixed_min_area", "fixed_max_area", "fixed_shape"]

    if resize_mode == "fixed_shape":
        assert fixed_shape is not None
        logger.info(f"[wan_audio] fixed_shape_resize fixed_height: {fixed_shape[0]}, fixed_width: {fixed_shape[1]}")
        return fixed_shape_resize(img, fixed_shape[0], fixed_shape[1])

    if bucket_shape is not None:
        """
        "adaptive_shape": {
            "0.667": [[480, 832], [544, 960], [720, 1280]],
            "1.500": [[832, 480], [960, 544], [1280, 720]],
            "1.000": [[480, 480], [576, 576], [704, 704], [960, 960]]
        }
        """
        bucket_config = {}
        for ratio, resolutions in bucket_shape.items():
            bucket_config[float(ratio)] = np.array(resolutions, dtype=np.int64)
        # logger.info(f"[wan_audio] use custom bucket_shape: {bucket_config}")
    else:
        bucket_config = {
            0.667: np.array([[480, 832], [544, 960], [720, 1280]], dtype=np.int64),
            1.500: np.array([[832, 480], [960, 544], [1280, 720]], dtype=np.int64),
            1.000: np.array([[480, 480], [576, 576], [704, 704], [960, 960]], dtype=np.int64),
        }
        # logger.info(f"[wan_audio] use default bucket_shape: {bucket_config}")

    ori_height = img.shape[-2]
    ori_width = img.shape[-1]
    ori_ratio = ori_height / ori_width

    if resize_mode == "adaptive":
        aspect_ratios = np.array(list(bucket_config.keys()))
        closest_aspect_idx = np.argmin(np.abs(aspect_ratios - ori_ratio))
        closest_ratio = aspect_ratios[closest_aspect_idx]
        if ori_ratio < 1.0:
            target_h, target_w = 480, 832
        elif ori_ratio == 1.0:
            target_h, target_w = 480, 480
        else:
            target_h, target_w = 832, 480
        # Upgrade to the largest bucket whose pixel count fits within the input image.
        for resolution in bucket_config[closest_ratio]:
            if ori_height * ori_width >= resolution[0] * resolution[1]:
                target_h, target_w = resolution
    elif resize_mode == "keep_ratio_fixed_area":
        assert fixed_area in ["480p", "720p"], f"fixed_area must be in ['480p', '720p'], but got {fixed_area}, please set fixed_area in config."
        fixed_area = 480 * 832 if fixed_area == "480p" else 720 * 1280
        target_h = round(np.sqrt(fixed_area * ori_ratio))
        target_w = round(np.sqrt(fixed_area / ori_ratio))
    elif resize_mode == "fixed_min_area":
        aspect_ratios = np.array(list(bucket_config.keys()))
        closest_aspect_idx = np.argmin(np.abs(aspect_ratios - ori_ratio))
        closest_ratio = aspect_ratios[closest_aspect_idx]
        target_h, target_w = bucket_config[closest_ratio][0]
    elif resize_mode == "fixed_max_area":
        aspect_ratios = np.array(list(bucket_config.keys()))
        closest_aspect_idx = np.argmin(np.abs(aspect_ratios - ori_ratio))
        closest_ratio = aspect_ratios[closest_aspect_idx]
        target_h, target_w = bucket_config[closest_ratio][-1]

    cropped_img = isotropic_crop_resize(img, (target_h, target_w))
    return cropped_img, target_h, target_w
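
# Illustrative example (not executed): in "adaptive" mode a 1080x1920 (H x W) input has
# aspect ratio 0.5625, so the 0.667 bucket list is chosen and the largest bucket whose
# pixel count fits the input, (720, 1280), becomes the target resolution.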


@dataclass
class AudioSegment:
    """Data class for audio segment information"""

    audio_array: np.ndarray
    start_frame: int
    end_frame: int


class FramePreprocessorTorchVersion:
    """Handles frame preprocessing including noise and masking"""

    def __init__(self, noise_mean: float = -3.0, noise_std: float = 0.5, mask_rate: float = 0.1):
        self.noise_mean = noise_mean
        self.noise_std = noise_std
        self.mask_rate = mask_rate

    def add_noise(self, frames: torch.Tensor, generator: Optional[torch.Generator] = None) -> torch.Tensor:
        """Add noise to frames"""

        device = frames.device
        shape = frames.shape
        bs = 1 if len(shape) == 4 else shape[0]

        # Generate sigma values on the same device
        sigma = torch.normal(mean=self.noise_mean, std=self.noise_std, size=(bs,), device=device, generator=generator)
        sigma = torch.exp(sigma)
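        # Illustrative: with the default noise_mean=-3.0 / noise_std=0.5, sigma is
        # log-normal with median exp(-3) ~ 0.05, i.e. a small perturbation per batch item.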

        for _ in range(1, len(shape)):
            sigma = sigma.unsqueeze(-1)

        # Generate noise on the same device
        noise = torch.randn(*shape, device=device, generator=generator) * sigma
        return frames + noise

    def add_mask(self, frames: torch.Tensor, generator: Optional[torch.Generator] = None) -> torch.Tensor:
        """Add mask to frames"""

        device = frames.device
        h, w = frames.shape[-2:]

        # Generate mask on the same device
        mask = torch.rand(h, w, device=device, generator=generator) > self.mask_rate
        return frames * mask

    def process_prev_frames(self, frames: torch.Tensor) -> torch.Tensor:
        """Process previous frames with noise and masking"""
        frames = self.add_noise(frames, torch.Generator(device=frames.device))
        frames = self.add_mask(frames, torch.Generator(device=frames.device))
        return frames


class AudioProcessor:
    """Handles audio loading and segmentation"""

    def __init__(self, audio_sr: int = 16000, target_fps: int = 16):
        self.audio_sr = audio_sr
        self.target_fps = target_fps
        self.audio_frame_rate = audio_sr // target_fps
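        # e.g. audio_sr=16000 and target_fps=16 -> 1000 audio samples per video frame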

    def load_audio(self, audio_path: str) -> np.ndarray:
        """Load and resample audio"""
        audio_array, ori_sr = ta.load(audio_path)
        audio_array = ta.functional.resample(audio_array.mean(0), orig_freq=ori_sr, new_freq=self.audio_sr)
        return audio_array.numpy()

    def get_audio_range(self, start_frame: int, end_frame: int) -> Tuple[int, int]:
        """Calculate audio range for given frame range"""
        return round(start_frame * self.audio_frame_rate), round(end_frame * self.audio_frame_rate)

    def segment_audio(self, audio_array: np.ndarray, expected_frames: int, max_num_frames: int, prev_frame_length: int = 5) -> List[AudioSegment]:
        """Segment audio based on frame requirements"""
        segments = []
        segments_idx = self.init_segments_idx(expected_frames, max_num_frames, prev_frame_length)

        audio_start, audio_end = self.get_audio_range(0, expected_frames)
        audio_array_ori = audio_array[audio_start:audio_end]

        for idx, (start_idx, end_idx) in enumerate(segments_idx):
            audio_start, audio_end = self.get_audio_range(start_idx, end_idx)
            audio_array = audio_array_ori[audio_start:audio_end]

            if idx < len(segments_idx) - 1:
                end_idx = segments_idx[idx + 1][0]
            else:
                if audio_array.shape[0] < audio_end - audio_start:
                    padding_len = audio_end - audio_start - audio_array.shape[0]
                    audio_array = np.concatenate((audio_array, np.zeros(padding_len)), axis=0)
                    end_idx = end_idx - padding_len // self.audio_frame_rate

            segments.append(AudioSegment(audio_array, start_idx, end_idx))
        del audio_array, audio_array_ori
        return segments

    def init_segments_idx(self, total_frame: int, clip_frame: int = 81, overlap_frame: int = 5) -> list[tuple[int, int]]:
        """Initialize segment indices with overlap"""
        start_end_list = []
        min_frame = clip_frame
        for start in range(0, total_frame, clip_frame - overlap_frame):
            is_last = start + clip_frame >= total_frame
            end = min(start + clip_frame, total_frame)
            if end - start < min_frame:
                end = start + min_frame
            if ((end - start) - 1) % 4 != 0:
                end = start + (((end - start) - 1) // 4) * 4 + 1
            start_end_list.append((start, end))
            if is_last:
                break
        return start_end_list
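
    # Illustrative example (not executed): init_segments_idx(160, 81, 5) returns
    # [(0, 81), (76, 157), (152, 233)]; the final segment may extend past the real
    # audio, which segment_audio handles by zero-padding and shortening end_frame.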


@RUNNER_REGISTER("seko_talk")
class WanAudioRunner(WanRunner):  # type:ignore
    def __init__(self, config):
        super().__init__(config)
        self.prev_frame_length = self.config.get("prev_frame_length", 5)
        self.frame_preprocessor = FramePreprocessorTorchVersion()

    def init_scheduler(self):
        """Initialize the Euler scheduler and attach the audio adapter"""
        scheduler = EulerScheduler(self.config)
        if self.config.get("lazy_load", False) or self.config.get("unload_modules", False):
            self.audio_adapter = self.load_audio_adapter()
            self.model.set_audio_adapter(self.audio_adapter)
        scheduler.set_audio_adapter(self.audio_adapter)
        self.model.set_scheduler(scheduler)

    def read_audio_input(self):
        """Read audio input"""
        audio_sr = self.config.get("audio_sr", 16000)
        target_fps = self.config.get("target_fps", 16)
        self._audio_processor = AudioProcessor(audio_sr, target_fps)
        if not isinstance(self.config["audio_path"], str):
            return [], 0
        audio_array = self._audio_processor.load_audio(self.config["audio_path"])

        video_duration = self.config.get("video_duration", 5)

        audio_len = int(audio_array.shape[0] / audio_sr * target_fps)
        expected_frames = min(max(1, int(video_duration * target_fps)), audio_len)

        # Segment audio
        audio_segments = self._audio_processor.segment_audio(audio_array, expected_frames, self.config.get("target_video_length", 81), self.prev_frame_length)

        return audio_segments, expected_frames

    def read_image_input(self, img_path):
        if isinstance(img_path, Image.Image):
            ref_img = img_path
        else:
            ref_img = Image.open(img_path).convert("RGB")
        ref_img = TF.to_tensor(ref_img).sub_(0.5).div_(0.5).unsqueeze(0).cuda()

        ref_img, h, w = resize_image(
            ref_img,
            resize_mode=self.config.get("resize_mode", "adaptive"),
            bucket_shape=self.config.get("bucket_shape", None),
            fixed_area=self.config.get("fixed_area", None),
            fixed_shape=self.config.get("fixed_shape", None),
        )
        logger.info(f"[wan_audio] resize_image target_h: {h}, target_w: {w}")
        patched_h = h // self.config.vae_stride[1] // self.config.patch_size[1]
        patched_w = w // self.config.vae_stride[2] // self.config.patch_size[2]

        patched_h, patched_w = get_optimal_patched_size_with_sp(patched_h, patched_w, 1)

        self.config.lat_h = patched_h * self.config.patch_size[1]
        self.config.lat_w = patched_w * self.config.patch_size[2]

        self.config.tgt_h = self.config.lat_h * self.config.vae_stride[1]
        self.config.tgt_w = self.config.lat_w * self.config.vae_stride[2]
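
        # Illustrative (not executed), assuming vae_stride=(4, 8, 8) and patch_size=(1, 2, 2):
        # a 720x1280 target gives patched_h=45, patched_w=80, so lat_h=90, lat_w=160
        # and tgt_h=720, tgt_w=1280.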

        logger.info(f"[wan_audio] tgt_h: {self.config.tgt_h}, tgt_w: {self.config.tgt_w}, lat_h: {self.config.lat_h}, lat_w: {self.config.lat_w}")

        ref_img = torch.nn.functional.interpolate(ref_img, size=(self.config.tgt_h, self.config.tgt_w), mode="bicubic")
        return ref_img

    def run_image_encoder(self, first_frame, last_frame=None):
        if self.config.get("lazy_load", False) or self.config.get("unload_modules", False):
            self.image_encoder = self.load_image_encoder()
        clip_encoder_out = self.image_encoder.visual([first_frame]).squeeze(0).to(GET_DTYPE()) if self.config.get("use_image_encoder", True) else None
        if self.config.get("lazy_load", False) or self.config.get("unload_modules", False):
            del self.image_encoder
            torch.cuda.empty_cache()
            gc.collect()
        return clip_encoder_out

    def run_vae_encoder(self, img):
        if self.config.get("lazy_load", False) or self.config.get("unload_modules", False):
            self.vae_encoder = self.load_vae_encoder()

        img = rearrange(img, "1 C H W -> 1 C 1 H W")
        vae_encoder_out = self.vae_encoder.encode(img.to(GET_DTYPE()))

        if self.config.get("lazy_load", False) or self.config.get("unload_modules", False):
            del self.vae_encoder
            torch.cuda.empty_cache()
            gc.collect()
        return vae_encoder_out

    @ProfilingContext4DebugL2("Run Encoders")
    def _run_input_encoder_local_r2v_audio(self):
        prompt = self.config["prompt_enhanced"] if self.config["use_prompt_enhancer"] else self.config["prompt"]
        img = self.read_image_input(self.config["image_path"])
        clip_encoder_out = self.run_image_encoder(img) if self.config.get("use_image_encoder", True) else None
        vae_encode_out = self.run_vae_encoder(img)
        audio_segments, expected_frames = self.read_audio_input()
        text_encoder_output = self.run_text_encoder(prompt, None)
        torch.cuda.empty_cache()
        gc.collect()
        return {
            "text_encoder_output": text_encoder_output,
            "image_encoder_output": {
                "clip_encoder_out": clip_encoder_out,
                "vae_encoder_out": vae_encode_out,
            },
            "audio_segments": audio_segments,
            "expected_frames": expected_frames,
        }

    def prepare_prev_latents(self, prev_video: Optional[torch.Tensor], prev_frame_length: int) -> Optional[Dict[str, torch.Tensor]]:
        """Prepare previous latents for conditioning"""
        device = torch.device("cuda")
        dtype = GET_DTYPE()

        tgt_h, tgt_w = self.config.tgt_h, self.config.tgt_w
        prev_frames = torch.zeros((1, 3, self.config.target_video_length, tgt_h, tgt_w), device=device)

        if prev_video is not None:
            # Extract and process last frames
            last_frames = prev_video[:, :, -prev_frame_length:].clone().to(device)
            if self.config.model_cls != "wan2.2_audio":
                last_frames = self.frame_preprocessor.process_prev_frames(last_frames)
            prev_frames[:, :, :prev_frame_length] = last_frames
            prev_len = (prev_frame_length - 1) // 4 + 1
        else:
            prev_len = 0
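
        # e.g. prev_frame_length=5 -> prev_len = 2 latent frames; the mask built below
        # keeps the first (prev_len - 1) * 4 + 1 = 5 pixel-space frames as conditioning.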

        if self.config.get("lazy_load", False) or self.config.get("unload_modules", False):
            self.vae_encoder = self.load_vae_encoder()

        _, nframe, height, width = self.model.scheduler.latents.shape
        with ProfilingContext4DebugL1("vae_encoder in init run segment"):
            if self.config.model_cls == "wan2.2_audio":
                if prev_video is not None:
                    prev_latents = self.vae_encoder.encode(prev_frames.to(dtype))
                else:
                    prev_latents = None
                prev_mask = self.model.scheduler.mask
            else:
                prev_latents = self.vae_encoder.encode(prev_frames.to(dtype))

            frames_n = (nframe - 1) * 4 + 1
            prev_mask = torch.ones((1, frames_n, height, width), device=device, dtype=dtype)
            prev_frame_len = max((prev_len - 1) * 4 + 1, 0)
            prev_mask[:, prev_frame_len:] = 0
            prev_mask = self._wan_mask_rearrange(prev_mask)

        if prev_latents is not None:
            if prev_latents.shape[-2:] != (height, width):
                logger.warning(f"Size mismatch: prev_latents {prev_latents.shape} vs scheduler latents (H={height}, W={width}). Config tgt_h={self.config.tgt_h}, tgt_w={self.config.tgt_w}")
                prev_latents = torch.nn.functional.interpolate(prev_latents, size=(height, width), mode="bilinear", align_corners=False)

        if self.config.get("lazy_load", False) or self.config.get("unload_modules", False):
            del self.vae_encoder
            torch.cuda.empty_cache()
            gc.collect()

        return {"prev_latents": prev_latents, "prev_mask": prev_mask, "prev_len": prev_len}

    def _wan_mask_rearrange(self, mask: torch.Tensor) -> torch.Tensor:
        """Rearrange mask for WAN model"""
        if mask.ndim == 3:
            mask = mask[None]
        assert mask.ndim == 4
        _, t, h, w = mask.shape
        assert t == ((t - 1) // 4 * 4 + 1)
        mask_first_frame = torch.repeat_interleave(mask[:, 0:1], repeats=4, dim=1)
        mask = torch.concat([mask_first_frame, mask[:, 1:]], dim=1)
        mask = mask.view(mask.shape[1] // 4, 4, h, w)
        return mask.transpose(0, 1)
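
    # Illustrative example (not executed): a (1, 81, H, W) mask becomes (1, 84, H, W)
    # after the first frame is repeated 4x; view + transpose then yield a (4, 21, H, W)
    # tensor matching the 4x temporally compressed latent layout.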

    def get_video_segment_num(self):
        self.video_segment_num = len(self.inputs["audio_segments"])

    def init_run(self):
        super().init_run()

        self.gen_video_list = []
        self.cut_audio_list = []
        self.prev_video = None

    @ProfilingContext4DebugL1("Init run segment")
    def init_run_segment(self, segment_idx, audio_array=None):
        self.segment_idx = segment_idx
        if audio_array is not None:
            self.segment = AudioSegment(audio_array, 0, audio_array.shape[0])
        else:
            self.segment = self.inputs["audio_segments"][segment_idx]

        self.config.seed = self.config.seed + segment_idx
        torch.manual_seed(self.config.seed)
        # logger.info(f"Processing segment {segment_idx + 1}/{self.video_segment_num}, seed: {self.config.seed}")

        if (self.config.get("lazy_load", False) or self.config.get("unload_modules", False)) and not hasattr(self, "audio_encoder"):
            self.audio_encoder = self.load_audio_encoder()

        audio_features = self.audio_encoder.infer(self.segment.audio_array)
        audio_features = self.audio_adapter.forward_audio_proj(audio_features, self.model.scheduler.latents.shape[1])

        self.inputs["audio_encoder_output"] = audio_features
        self.inputs["previmg_encoder_output"] = self.prepare_prev_latents(self.prev_video, prev_frame_length=self.prev_frame_length)

        # Reset scheduler for non-first segments
        if segment_idx > 0:
            self.model.scheduler.reset(self.inputs["previmg_encoder_output"])

    @ProfilingContext4DebugL1("End run segment")
    def end_run_segment(self):
        self.gen_video = torch.clamp(self.gen_video, -1, 1).to(torch.float)
        useful_length = self.segment.end_frame - self.segment.start_frame
        self.gen_video_list.append(self.gen_video[:, :, :useful_length].cpu())
        self.cut_audio_list.append(self.segment.audio_array[: useful_length * self._audio_processor.audio_frame_rate])

        if self.va_recorder:
            cur_video = vae_to_comfyui_image(self.gen_video_list[-1])
            self.va_recorder.pub_livestream(cur_video, self.cut_audio_list[-1])

        if self.va_reader:
            self.gen_video_list.pop()
            self.cut_audio_list.pop()

        # Update prev_video for next iteration
        self.prev_video = self.gen_video

        # Clean up GPU memory after each segment
        del self.gen_video
        torch.cuda.empty_cache()

    def get_rank_and_world_size(self):
        rank = 0
        world_size = 1
        if dist.is_initialized():
            rank = dist.get_rank()
            world_size = dist.get_world_size()
        return rank, world_size

    def init_va_recorder(self):
        output_video_path = self.config.get("save_video_path", None)
        self.va_recorder = None
        if isinstance(output_video_path, dict):
            assert output_video_path["type"] == "stream", f"unexpected save_video_path: {output_video_path}"
            rank, world_size = self.get_rank_and_world_size()
            if rank == 2 % world_size:
                record_fps = self.config.get("target_fps", 16)
                audio_sr = self.config.get("audio_sr", 16000)
                if "video_frame_interpolation" in self.config and self.vfi_model is not None:
                    record_fps = self.config["video_frame_interpolation"]["target_fps"]
                self.va_recorder = VARecorder(
                    livestream_url=output_video_path["data"],
                    fps=record_fps,
                    sample_rate=audio_sr,
                )

    def init_va_reader(self):
        audio_path = self.config.get("audio_path", None)
        self.va_reader = None
        if isinstance(audio_path, dict):
            assert audio_path["type"] == "stream", f"unexpected audio_path: {audio_path}"
            rank, world_size = self.get_rank_and_world_size()
            target_fps = self.config.get("target_fps", 16)
            max_num_frames = self.config.get("target_video_length", 81)
            audio_sr = self.config.get("audio_sr", 16000)
            prev_frames = self.config.get("prev_frame_length", 5)
            self.va_reader = VAReader(
                rank=rank,
                world_size=world_size,
                stream_url=audio_path["data"],
                sample_rate=audio_sr,
                segment_duration=max_num_frames / target_fps,
                prev_duration=prev_frames / target_fps,
                target_rank=1,
            )
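
        # e.g. with target_video_length=81 and target_fps=16 each streamed chunk covers
        # 81 / 16 ~ 5.06 s of audio, with 5 / 16 ~ 0.31 s reused from the previous chunk.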

    def run_main(self, total_steps=None):
        try:
            self.init_va_recorder()
            self.init_va_reader()
            logger.info(f"init va_recorder: {self.va_recorder} and va_reader: {self.va_reader}")

            if self.va_reader is None:
                return super().run_main(total_steps)

            rank, world_size = self.get_rank_and_world_size()
            if rank == 2 % world_size:
                assert self.va_recorder is not None, "va_recorder is required for stream audio input on the recording rank"
            self.va_reader.start()

            self.init_run()
            self.video_segment_num = "unlimited"

            fetch_timeout = self.va_reader.segment_duration + 1
            segment_idx = 0
            fail_count = 0
            max_fail_count = 10

            while True:
                with ProfilingContext4DebugL1(f"stream segment get audio segment {segment_idx}"):
                    self.check_stop()
                    audio_array = self.va_reader.get_audio_segment(timeout=fetch_timeout)
                    if audio_array is None:
                        fail_count += 1
                        logger.warning(f"Failed to get audio chunk {fail_count} times")
                        if fail_count > max_fail_count:
                            raise Exception(f"Failed to get audio chunk {fail_count} times, stop reader")
                        continue

                with ProfilingContext4DebugL1(f"stream segment end2end {segment_idx}"):
                    fail_count = 0
                    self.init_run_segment(segment_idx, audio_array)
                    latents = self.run_segment(total_steps=None)
                    self.gen_video = self.run_vae_decoder(latents)
                    self.end_run_segment()
                    segment_idx += 1

        finally:
            if hasattr(self.model, "scheduler"):
                self.end_run()
            if self.va_reader:
                self.va_reader.stop()
                self.va_reader = None
            if self.va_recorder:
                self.va_recorder.stop(wait=False)
                self.va_recorder = None

    @ProfilingContext4DebugL1("Process after vae decoder")
    def process_images_after_vae_decoder(self, save_video=True):
        # Merge results
        gen_lvideo = torch.cat(self.gen_video_list, dim=2).float()
        merge_audio = np.concatenate(self.cut_audio_list, axis=0).astype(np.float32)

        comfyui_images = vae_to_comfyui_image(gen_lvideo)

        # Apply frame interpolation if configured
        if "video_frame_interpolation" in self.config and self.vfi_model is not None:
            target_fps = self.config["video_frame_interpolation"]["target_fps"]
            logger.info(f"Interpolating frames from {self.config.get('fps', 16)} to {target_fps}")
            comfyui_images = self.vfi_model.interpolate_frames(
                comfyui_images,
                source_fps=self.config.get("fps", 16),
                target_fps=target_fps,
            )

        if save_video and isinstance(self.config["save_video_path"], str):
            if "video_frame_interpolation" in self.config and self.config["video_frame_interpolation"].get("target_fps"):
                fps = self.config["video_frame_interpolation"]["target_fps"]
            else:
                fps = self.config.get("fps", 16)

            if not dist.is_initialized() or dist.get_rank() == 0:
                logger.info(f"🎬 Start to save video 🎬")

                self._save_video_with_audio(comfyui_images, merge_audio, fps)
                logger.info(f"✅ Video saved successfully to: {self.config.save_video_path} ✅")

        # Convert audio to ComfyUI format
        audio_waveform = torch.from_numpy(merge_audio).unsqueeze(0).unsqueeze(0)
        comfyui_audio = {"waveform": audio_waveform, "sample_rate": self._audio_processor.audio_sr}

        return {"video": comfyui_images, "audio": comfyui_audio}

    def init_modules(self):
        super().init_modules()
        self.run_input_encoder = self._run_input_encoder_local_r2v_audio

    def _save_video_with_audio(self, images, audio_array, fps):
        """Save video with audio"""
        import tempfile

        with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as video_tmp:
            video_path = video_tmp.name

        with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as audio_tmp:
            audio_path = audio_tmp.name

        try:
            save_to_video(images, video_path, fps)
            ta.save(audio_path, torch.tensor(audio_array[None]), sample_rate=self._audio_processor.audio_sr)  # type: ignore

            output_path = self.config.get("save_video_path")
            parent_dir = os.path.dirname(output_path)
            if parent_dir and not os.path.exists(parent_dir):
                os.makedirs(parent_dir, exist_ok=True)

            subprocess.call(["/usr/bin/ffmpeg", "-y", "-i", video_path, "-i", audio_path, output_path])

            logger.info(f"Saved video with audio to: {output_path}")

        finally:
            # Clean up temp files
            if os.path.exists(video_path):
                os.remove(video_path)
            if os.path.exists(audio_path):
                os.remove(audio_path)

    def load_transformer(self):
        """Load transformer with LoRA support"""
        base_model = WanAudioModel(self.config.model_path, self.config, self.init_device)
        if self.config.get("lora_configs") and self.config.lora_configs:
            assert not self.config.get("dit_quantized", False) or self.config.mm_config.get("weight_auto_quant", False)
            lora_wrapper = WanLoraWrapper(base_model)
            for lora_config in self.config.lora_configs:
                lora_path = lora_config["path"]
                strength = lora_config.get("strength", 1.0)
                lora_name = lora_wrapper.load_lora(lora_path)
                lora_wrapper.apply_lora(lora_name, strength)
                logger.info(f"Loaded LoRA: {lora_name} with strength: {strength}")

        return base_model

    def load_audio_encoder(self):
        audio_encoder_path = os.path.join(self.config["model_path"], "TencentGameMate-chinese-hubert-large")
        audio_encoder_offload = self.config.get("audio_encoder_cpu_offload", self.config.get("cpu_offload", False))
        model = SekoAudioEncoderModel(audio_encoder_path, self.config["audio_sr"], audio_encoder_offload)
        return model

    def load_audio_adapter(self):
        audio_adapter_offload = self.config.get("audio_adapter_cpu_offload", self.config.get("cpu_offload", False))
        if audio_adapter_offload:
            device = torch.device("cpu")
        else:
            device = torch.device("cuda")
        audio_adapter = AudioAdapter(
            attention_head_dim=self.config["dim"] // self.config["num_heads"],
            num_attention_heads=self.config["num_heads"],
            base_num_layers=self.config["num_layers"],
            interval=1,
            audio_feature_dim=1024,
            time_freq_dim=256,
            projection_transformer_layers=4,
            mlp_dims=(1024, 1024, 32 * 1024),
            quantized=self.config.get("adapter_quantized", False),
            quant_scheme=self.config.get("adapter_quant_scheme", None),
            cpu_offload=audio_adapter_offload,
        )
        audio_adapter.to(device)
        if self.config.get("adapter_quantized", False):
            if self.config.get("adapter_quant_scheme", None) in ["fp8", "fp8-q8f"]:
                model_name = "audio_adapter_model_fp8.safetensors"
            elif self.config.get("adapter_quant_scheme", None) == "int8":
                model_name = "audio_adapter_model_int8.safetensors"
            else:
                raise ValueError(f"Unsupported quant_scheme: {self.config.get('adapter_quant_scheme', None)}")
        else:
            model_name = "audio_adapter_model.safetensors"

        weights_dict = load_weights(os.path.join(self.config["model_path"], model_name), cpu_offload=audio_adapter_offload)
        audio_adapter.load_state_dict(weights_dict, strict=False)
        return audio_adapter.to(dtype=GET_DTYPE())

    def load_model(self):
        super().load_model()
        with ProfilingContext4DebugL2("Load audio encoder and adapter"):
            self.audio_encoder = self.load_audio_encoder()
            self.audio_adapter = self.load_audio_adapter()
            self.model.set_audio_adapter(self.audio_adapter)

    def set_target_shape(self):
        """Set target shape for generation"""
        ret = {}
        num_channels_latents = 16
        if self.config.model_cls == "wan2.2_audio":
            num_channels_latents = self.config.num_channels_latents

        if self.config.task == "i2v":
            self.config.target_shape = (
                num_channels_latents,
                (self.config.target_video_length - 1) // self.config.vae_stride[0] + 1,
                self.config.lat_h,
                self.config.lat_w,
            )
            ret["lat_h"] = self.config.lat_h
            ret["lat_w"] = self.config.lat_w
        else:
            error_msg = "t2v task is not supported in WanAudioRunner"
            assert False, error_msg

        ret["target_shape"] = self.config.target_shape
        return ret


@RUNNER_REGISTER("wan2.2_audio")
class Wan22AudioRunner(WanAudioRunner):
    def __init__(self, config):
        super().__init__(config)

    def load_vae_decoder(self):
        # offload config
        vae_offload = self.config.get("vae_cpu_offload", self.config.get("cpu_offload"))
        if vae_offload:
            vae_device = torch.device("cpu")
        else:
            vae_device = torch.device("cuda")
        vae_config = {
            "vae_pth": find_torch_model_path(self.config, "vae_pth", "Wan2.2_VAE.pth"),
            "device": vae_device,
            "cpu_offload": vae_offload,
            "offload_cache": self.config.get("vae_offload_cache", False),
        }
        vae_decoder = Wan2_2_VAE(**vae_config)
        return vae_decoder

    def load_vae_encoder(self):
        # offload config
        vae_offload = self.config.get("vae_cpu_offload", self.config.get("cpu_offload"))
        if vae_offload:
            vae_device = torch.device("cpu")
        else:
            vae_device = torch.device("cuda")
        vae_config = {
            "vae_pth": find_torch_model_path(self.config, "vae_pth", "Wan2.2_VAE.pth"),
            "device": vae_device,
            "cpu_offload": vae_offload,
            "offload_cache": self.config.get("vae_offload_cache", False),
        }
        if self.config.task != "i2v":
            return None
        else:
            return Wan2_2_VAE(**vae_config)

    def load_vae(self):
        vae_encoder = self.load_vae_encoder()
        vae_decoder = self.load_vae_decoder()
        return vae_encoder, vae_decoder