import torch
from diffusers.models.embeddings import TimestepEmbedding
from .utils import rope_params, sinusoidal_embedding_1d, guidance_scale_embedding
from lightx2v.utils.envs import *


class WanPreInfer:
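    """Pre-inference stage for the Wan video diffusion transformer.

    Builds the inputs the transformer blocks consume at each denoising step:
    patchified latent tokens, timestep/text embeddings, and the 3-D RoPE
    frequency table.
    """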
    def __init__(self, config):
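        # dim must split evenly across heads, and the per-head dim must be
        # even so RoPE can rotate channel pairs.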
        assert (config["dim"] % config["num_heads"]) == 0 and (config["dim"] // config["num_heads"]) % 2 == 0
        self.config = config
        d = config["dim"] // config["num_heads"]
        self.clean_cuda_cache = config.get("clean_cuda_cache", False)
        self.task = config["task"]
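        # 3-D RoPE frequency table over the per-head dim d: one temporal band
        # of d - 4 * (d // 6) channels plus two spatial bands (height, width)
        # of 2 * (d // 6) channels each, e.g. d = 128 -> 44 + 42 + 42.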
        self.freqs = torch.cat(
            [
                rope_params(1024, d - 4 * (d // 6)),
                rope_params(1024, 2 * (d // 6)),
                rope_params(1024, 2 * (d // 6)),
            ],
            dim=1,
        ).cuda()
        self.freq_dim = config["freq_dim"]
        self.dim = config["dim"]
        self.text_len = config["text_len"]
        self.enable_dynamic_cfg = config.get("enable_dynamic_cfg", False)
        self.cfg_scale = config.get("cfg_scale", 4.0)

    def set_scheduler(self, scheduler):
        self.scheduler = scheduler

    def infer(self, weights, inputs, positive, kv_start=0, kv_end=0):
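        """Prepare the transformer inputs for one denoising step.

        `positive` selects the conditional vs. null text context for CFG;
        `kv_start`/`kv_end` bound the KV window for CausalVid-style inference.
        Returns (embed, grid_sizes, (x, embed0, seq_lens, freqs, context)).
        """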
        x = self.scheduler.latents

        if self.scheduler.flag_df:
            # In diffusion-forcing (df) mode each frame carries its own
            # timestep, so t is 2-D: (batch, frames).
            t = self.scheduler.df_timesteps[self.scheduler.step_index].unsqueeze(0)
            assert t.dim() == 2
        else:
            t = torch.stack([self.scheduler.timesteps[self.scheduler.step_index]])

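        # Classifier-free guidance: use the conditional text embedding on the
        # positive pass and the null-prompt embedding otherwise.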
        if positive:
            context = inputs["text_encoder_output"]["context"]
        else:
            context = inputs["text_encoder_output"]["context_null"]

        if self.task == "i2v":
            clip_fea = inputs["image_encoder_output"]["clip_encoder_out"]

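            # Pick the VAE-encoded conditioning latents; with
            # changing_resolution, take the entry for the current stage.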
            if self.config.get("changing_resolution", False):
                image_encoder = inputs["image_encoder_output"]["vae_encode_out"][self.scheduler.changing_resolution_index]
            else:
                image_encoder = inputs["image_encoder_output"]["vae_encode_out"]

            frame_seq_length = (image_encoder.size(2) // 2) * (image_encoder.size(3) // 2)
            # For CausalVid, take only the image_encoder frames that fall
            # inside the current KV window [kv_start, kv_end).
            if kv_end - kv_start >= frame_seq_length:
                idx_s = kv_start // frame_seq_length
                idx_e = kv_end // frame_seq_length
                image_encoder = image_encoder[:, idx_s:idx_e, :, :]
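            # Concatenate the conditioning latents onto the noisy latents
            # along dim 0 (channels).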
            y = image_encoder
            x = torch.cat([x, y], dim=0)

        # embeddings
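        # Patchify: 3-D patch embedding over (F, H, W), then flatten the token
        # grid into a sequence; grid_sizes keeps the grid shape for RoPE.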
        x = weights.patch_embedding.apply(x.unsqueeze(0))
        grid_sizes = torch.tensor(x.shape[2:], dtype=torch.long).unsqueeze(0)
        x = x.flatten(2).transpose(1, 2).contiguous()
        seq_lens = torch.tensor(x.size(1), dtype=torch.long).cuda().unsqueeze(0)

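        # Sinusoidal timestep embedding of width freq_dim.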
        embed = sinusoidal_embedding_1d(self.freq_dim, t.flatten())
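        # Dynamic CFG: embed the guidance scale and fold it into the timestep
        # embedding through a two-layer SiLU projection.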
        if self.enable_dynamic_cfg:
            s = torch.tensor([self.cfg_scale], dtype=torch.float32).to(x.device)
            cfg_embed = guidance_scale_embedding(s, embedding_dim=256, cfg_range=(1.0, 6.0), target_range=1000.0, dtype=torch.float32).type_as(x)
            cfg_embed = weights.cfg_cond_proj_1.apply(cfg_embed)
            cfg_embed = torch.nn.functional.silu(cfg_embed)
            cfg_embed = weights.cfg_cond_proj_2.apply(cfg_embed)
            embed = embed + cfg_embed
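        # Timestep MLP (linear -> SiLU -> linear); run in fp32 unless the
        # whole pipeline is BF16.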
        if GET_DTYPE() != "BF16":
            embed = weights.time_embedding_0.apply(embed.float())
        else:
            embed = weights.time_embedding_0.apply(embed)
        embed = torch.nn.functional.silu(embed)
        embed = weights.time_embedding_2.apply(embed)
        embed0 = torch.nn.functional.silu(embed)

        embed0 = weights.time_projection_1.apply(embed0).unflatten(1, (6, self.dim))

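        # Diffusion forcing: each frame has its own timestep, so broadcast the
        # per-frame embeddings across the spatial token grid before flattening.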
        if self.scheduler.flag_df:
            b, f = t.shape
            assert b == len(x)  # batch_size == 1
            embed = embed.view(b, f, 1, 1, self.dim)
            embed0 = embed0.view(b, f, 1, 1, 6, self.dim)
            embed = embed.repeat(1, 1, grid_sizes[0][1], grid_sizes[0][2], 1).flatten(1, 3)
            embed0 = embed0.repeat(1, 1, grid_sizes[0][1], grid_sizes[0][2], 1, 1).flatten(1, 3)
            embed0 = embed0.transpose(1, 2).contiguous()

        # text embeddings
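        # Zero-pad every prompt to text_len tokens before the text MLP.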
        stacked = torch.stack([torch.cat([u, u.new_zeros(self.text_len - u.size(0), u.size(1))]) for u in context])
        if GET_DTYPE() != "BF16":
            out = weights.text_embedding_0.apply(stacked.squeeze(0).float())
        else:
            out = weights.text_embedding_0.apply(stacked.squeeze(0))
        out = torch.nn.functional.gelu(out, approximate="tanh")
        context = weights.text_embedding_2.apply(out)
        if self.clean_cuda_cache:
            del out, stacked
            torch.cuda.empty_cache()

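        # i2v: project the CLIP image features and prepend them to the text
        # context tokens.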
        if self.task == "i2v":
            context_clip = weights.proj_0.apply(clip_fea)
            if self.clean_cuda_cache:
                del clip_fea
                torch.cuda.empty_cache()
            context_clip = weights.proj_1.apply(context_clip)
            context_clip = torch.nn.functional.gelu(context_clip, approximate="none")
            if self.clean_cuda_cache:
                torch.cuda.empty_cache()
            context_clip = weights.proj_3.apply(context_clip)
            context_clip = weights.proj_4.apply(context_clip)
            context = torch.concat([context_clip, context], dim=0)
            # context_clip exists only on the i2v path, so release it inside
            # this branch (the unindented original raised NameError for t2v).
            if self.clean_cuda_cache:
                del context_clip
                torch.cuda.empty_cache()
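        # Return the time embedding, the token grid shape, and the per-block
        # inputs: tokens, the 6-way time projection (embed0, likely used for
        # adaLN modulation), sequence lengths, RoPE freqs, and context.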
        return (
            embed,
            grid_sizes,
            (x.squeeze(0), embed0.squeeze(0), seq_lens, self.freqs, context),
        )