"megatron/legacy/model/bert_model.py" did not exist on "8d7f508a51585ab1f14827be00ee4afd1d5a748f"
pre_infer.py 4.12 KB
Newer Older
helloyongyang's avatar
helloyongyang committed
1
2
3
4
5
6
7
import torch
import math
from .utils import rope_params, sinusoidal_embedding_1d


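# Pre-inference stage for the Wan video diffusion model: builds the patchified
# latent token sequence, the timestep/text/image embeddings, and the RoPE
# frequency table consumed by the downstream transformer blocks.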
class WanPreInfer:
    def __init__(self, config):
        assert (config["dim"] % config["num_heads"]) == 0 and (config["dim"] // config["num_heads"]) % 2 == 0
        d = config["dim"] // config["num_heads"]

        self.task = config["task"]
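        # 3-D RoPE frequency table: the per-head dim d is split across the
        # (temporal, height, width) axes as (d - 4*(d//6), 2*(d//6), 2*(d//6)),
        # which sums back to d (e.g. d = 128 -> 44 + 42 + 42). 1024 is the
        # number of positions precomputed per axis.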
        self.freqs = torch.cat(
            [
                rope_params(1024, d - 4 * (d // 6)),
                rope_params(1024, 2 * (d // 6)),
                rope_params(1024, 2 * (d // 6)),
            ],
            dim=1,
        ).cuda()
        self.freq_dim = config["freq_dim"]
        self.dim = config["dim"]
        self.text_len = config["text_len"]

    def set_scheduler(self, scheduler):
        self.scheduler = scheduler

    def infer(self, weights, inputs, positive, kv_start=0, kv_end=0):
        x = [self.scheduler.latents]

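        # Pick the timestep(s) for the current denoising step. Under diffusion
        # forcing (flag_df) every frame carries its own timestep, so t is 2-D
        # (batch, frames); otherwise a single shared timestep is used.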
        if self.scheduler.flag_df:
            t = self.scheduler.df_timesteps[self.scheduler.step_index].unsqueeze(0)
            assert t.dim() == 2  # the df inference path uses a 2-D timestep tensor (batch, frames)
        else:
            t = torch.stack([self.scheduler.timesteps[self.scheduler.step_index]])

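        # Classifier-free guidance: the positive pass conditions on the real
        # prompt embedding, the negative pass on the null-prompt embedding.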
        if positive:
            context = inputs["text_encoder_output"]["context"]
        else:
            context = inputs["text_encoder_output"]["context_null"]
        seq_len = self.scheduler.seq_len

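        # Image-to-video conditioning: CLIP features drive the projection MLP
        # below, and the VAE-encoded reference frames are concatenated with the
        # noise latents along the channel axis.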
        if self.task == "i2v":
            clip_fea = inputs["image_encoder_output"]["clip_encoder_out"]

            image_encoder = inputs["image_encoder_output"]["vae_encode_out"]
            frame_seq_length = (image_encoder.size(2) // 2) * (image_encoder.size(3) // 2)
            if kv_end - kv_start >= frame_seq_length:  # for CausalVid, take only the matching segment of image_encoder
                idx_s = kv_start // frame_seq_length
                idx_e = kv_end // frame_seq_length
                image_encoder = image_encoder[:, idx_s:idx_e, :, :]
            y = [image_encoder]
            x = [torch.cat([u, v], dim=0) for u, v in zip(x, y)]

        # embeddings
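        # Patchify each latent into a (frames, height, width) token grid,
        # flatten it to a token sequence, and zero-pad to the common seq_len.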
        x = [weights.patch_embedding.apply(u.unsqueeze(0)) for u in x]
        grid_sizes = torch.stack([torch.tensor(u.shape[2:], dtype=torch.long) for u in x])
        x = [u.flatten(2).transpose(1, 2) for u in x]
        seq_lens = torch.tensor([u.size(1) for u in x], dtype=torch.long).cuda()
        assert seq_lens.max() <= seq_len
        x = torch.cat([torch.cat([u, u.new_zeros(1, seq_len - u.size(1), u.size(2))], dim=1) for u in x])

        embed = sinusoidal_embedding_1d(self.freq_dim, t.flatten())
        embed = weights.time_embedding_0.apply(embed)
        embed = torch.nn.functional.silu(embed)
        embed = weights.time_embedding_2.apply(embed)
        embed0 = torch.nn.functional.silu(embed)

        embed0 = weights.time_projection_1.apply(embed0).unflatten(1, (6, self.dim))

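        # Diffusion forcing: broadcast the per-frame time embeddings over the
        # spatial grid so every token of a frame sees that frame's timestep.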
        if self.scheduler.flag_df:
            b, f = t.shape
            assert b == len(x)  # batch_size == 1
            embed = embed.view(b, f, 1, 1, self.dim)
            embed0 = embed0.view(b, f, 1, 1, 6, self.dim)
            embed = embed.repeat(1, 1, grid_sizes[0][1], grid_sizes[0][2], 1).flatten(1, 3)
            embed0 = embed0.repeat(1, 1, grid_sizes[0][1], grid_sizes[0][2], 1, 1).flatten(1, 3)
            embed0 = embed0.transpose(1, 2).contiguous()

        # text embeddings
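        # Zero-pad each prompt embedding to the fixed text_len, then apply the
        # two-layer text-embedding MLP with tanh-approximated GELU in between.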
        stacked = torch.stack([torch.cat([u, u.new_zeros(self.text_len - u.size(0), u.size(1))]) for u in context])
        out = weights.text_embedding_0.apply(stacked.squeeze(0))
        out = torch.nn.functional.gelu(out, approximate="tanh")
        context = weights.text_embedding_2.apply(out)

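        # Project the CLIP image features through the image-embedding MLP
        # (proj_0 .. proj_4) and prepend the resulting tokens to the text
        # context as additional cross-attention tokens.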
        if self.task == "i2v":
            context_clip = weights.proj_0.apply(clip_fea)
            context_clip = weights.proj_1.apply(context_clip)
            context_clip = torch.nn.functional.gelu(context_clip, approximate="none")
            context_clip = weights.proj_3.apply(context_clip)
            context_clip = weights.proj_4.apply(context_clip)

            context = torch.concat([context_clip, context], dim=0)

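        # Return the time embedding, the latent grid sizes, and the per-block
        # inputs: token sequence, modulation embeddings, sequence lengths,
        # RoPE frequencies, and the cross-attention context.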
        return (
            embed,
            grid_sizes,
            (x.squeeze(0), embed0.squeeze(0), seq_lens, self.freqs, context),
        )
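

# Usage sketch (illustrative only; `cfg`, `sched`, `weights`, and `inputs` are
# assumed to be built elsewhere in the pipeline with the fields accessed above):
#
#   pre_infer = WanPreInfer(cfg)        # cfg provides dim, num_heads, task, ...
#   pre_infer.set_scheduler(sched)      # sched supplies latents and timesteps
#   embed, grid_sizes, pre_infer_out = pre_infer.infer(weights, inputs, positive=True)
#   x, embed0, seq_lens, freqs, context = pre_infer_out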