// FluxModel.h
#pragma once

#include "common.h"
#include "Tensor.h"
#include "Module.h"
#include "Linear.h"
#include "layernorm.h"
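// pybind11/functional.h supplies the automatic conversion between Python
// callables and std::function, used by FluxModel::set_residual_callback below.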
#include <pybind11/functional.h>
namespace pybind11 {
    class function;
}

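// Selects the attention kernel the transformer blocks dispatch to.
// Descriptive note (not in the original header): FlashAttention2 uses the
// FlashAttention-2 kernels, while NunchakuFP16 presumably selects Nunchaku's
// own FP16 attention implementation.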
enum class AttentionImpl {
    FlashAttention2 = 0,
    NunchakuFP16,
};

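// adaLN-Zero modulation for the single-stream blocks: a quantized linear
// projection of the conditioning embedding produces the shift/scale applied
// to the LayerNorm output, plus the attention gate returned as gate_msa.
// (Comment added here; behavior inferred from the diffusers
// AdaLayerNormZeroSingle module this mirrors.)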
class AdaLayerNormZeroSingle : public Module {
public:
    static constexpr bool USE_4BIT = true;
    using GEMM = std::conditional_t<USE_4BIT, GEMV_AWQ, GEMM_W8A8>;

    struct Output {
        Tensor x;
        Tensor gate_msa;
    };

public:
    AdaLayerNormZeroSingle(int dim, Tensor::ScalarType dtype, Device device);
    Output forward(Tensor x, Tensor emb);

public:
    const int dim;

private:
    GEMM linear;
    LayerNorm norm;
};

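// Full adaLN-Zero for the joint blocks: besides the attention shift/scale and
// gate, it also emits shift/scale/gate for the MLP branch; with pre_only set,
// only the pre-attention modulation is produced. (Comment added here; behavior
// inferred from the diffusers AdaLayerNormZero module this mirrors.)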
class AdaLayerNormZero : public Module {
public:
    static constexpr bool USE_4BIT = true;
    using GEMM = std::conditional_t<USE_4BIT, GEMV_AWQ, GEMM_W8A8>;

    struct Output {
        Tensor x;
        Tensor gate_msa;
        Tensor shift_mlp;
        Tensor scale_mlp;
        Tensor gate_mlp;
    };

public:
    AdaLayerNormZero(int dim, bool pre_only, Tensor::ScalarType dtype, Device device);
    Output forward(Tensor x, Tensor emb);

public:
    const int dim;
    const bool pre_only;

private:
    GEMM linear;
    LayerNorm norm;
};

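// Attention over a packed QKV tensor. The three-argument overload also takes
// a pooled QKV tensor and a sparsity ratio, presumably for a pool-based
// sparse-attention path with POOL_SIZE tokens per pool; setForceFP16
// presumably walks a module tree and enables FP16 attention on every
// Attention it finds (both inferred from the signatures).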
class Attention : public Module {
public:
    static constexpr int POOL_SIZE = 128;

    Attention(int num_heads, int dim_head, Device device);
    Tensor forward(Tensor qkv);
    Tensor forward(Tensor qkv, Tensor pool_qkv, float sparsityRatio);

    static void setForceFP16(Module *module, bool value);

public:
    const int num_heads;
    const int dim_head;
    bool force_fp16;

private:
    Tensor cu_seqlens_cpu;
    Tensor headmask_type;
};

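// Single-stream FLUX transformer block: adaLN-Zero modulation (norm), a fused
// QKV projection with Q/K RMSNorm feeding the attention, and a two-layer MLP,
// with the linear layers quantized per the GEMM alias (W4A4 when USE_4BIT,
// W8A8 otherwise).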
class FluxSingleTransformerBlock : public Module {
public:
    static constexpr bool USE_4BIT = true;
    using GEMM = std::conditional_t<USE_4BIT, GEMM_W4A4, GEMM_W8A8>;

    FluxSingleTransformerBlock(int dim, int num_attention_heads, int attention_head_dim, int mlp_ratio, bool use_fp4, Tensor::ScalarType dtype, Device device);
    Tensor forward(Tensor hidden_states, Tensor temb, Tensor rotary_emb);

public:
    const int dim;
    const int dim_head;
    const int num_heads;
    const int mlp_hidden_dim;

    AttentionImpl attnImpl = AttentionImpl::FlashAttention2;

private:
    AdaLayerNormZeroSingle norm;
    GEMM mlp_fc1;
    GEMM mlp_fc2;
    GEMM qkv_proj;
    RMSNorm norm_q, norm_k;
    Attention attn;
    GEMM out_proj;
};

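// Dual-stream (MM-DiT style) FLUX block: the image stream and the
// text/context stream each have their own adaLN-Zero, QKV and output
// projections, and MLP, and attend jointly; forward presumably returns the
// updated (hidden_states, encoder_hidden_states) pair.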
class JointTransformerBlock : public Module {
public:
    static constexpr bool USE_4BIT = true;
    using GEMM = std::conditional_t<USE_4BIT, GEMM_W4A4, GEMM_W8A8>;

    JointTransformerBlock(int dim, int num_attention_heads, int attention_head_dim, bool context_pre_only, bool use_fp4, Tensor::ScalarType dtype, Device device);
    std::tuple<Tensor, Tensor> forward(Tensor hidden_states, Tensor encoder_hidden_states, Tensor temb, Tensor rotary_emb, Tensor rotary_emb_context, float sparsityRatio);

public:
    const int dim;
    const int dim_head;
    const int num_heads;
    const bool context_pre_only;
    AdaLayerNormZero norm1;

    AttentionImpl attnImpl = AttentionImpl::FlashAttention2;

private:
    AdaLayerNormZero norm1_context;
    GEMM qkv_proj;
    GEMM qkv_proj_context;
    RMSNorm norm_q, norm_k;
    RMSNorm norm_added_q, norm_added_k;
    Attention attn;
    GEMM out_proj;
    GEMM out_proj_context;
    LayerNorm norm2;
    LayerNorm norm2_context;
    GEMM mlp_fc1, mlp_fc2;
    GEMM mlp_context_fc1, mlp_context_fc2;
};

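// Top-level FLUX transformer: a stack of joint dual-stream blocks followed by
// single-stream blocks. The controlnet_* arguments carry optional ControlNet
// residuals, offload presumably enables CPU offloading of blocks during
// inference, and residual_callback (settable from Python via pybind11) is a
// hook that receives a residual tensor and returns a replacement (inferred
// from its signature).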
class FluxModel : public Module {
public:
    FluxModel(bool use_fp4, bool offload, Tensor::ScalarType dtype, Device device);

    Tensor forward(
        Tensor hidden_states,
        Tensor encoder_hidden_states,
        Tensor temb,
        Tensor rotary_emb_img,
        Tensor rotary_emb_context,
        Tensor rotary_emb_single,
        Tensor controlnet_block_samples,
        Tensor controlnet_single_block_samples,
        bool skip_first_layer = false);
    std::tuple<Tensor, Tensor> forward_layer(
        size_t layer,
        Tensor hidden_states,
        Tensor encoder_hidden_states,
        Tensor temb,
        Tensor rotary_emb_img,
        Tensor rotary_emb_context,
        Tensor controlnet_block_samples,
        Tensor controlnet_single_block_samples);

    void setAttentionImpl(AttentionImpl impl);

    void set_residual_callback(std::function<Tensor(const Tensor&)> cb);

public:
    const Tensor::ScalarType dtype;

    std::vector<std::unique_ptr<JointTransformerBlock>> transformer_blocks;
    std::vector<std::unique_ptr<FluxSingleTransformerBlock>> single_transformer_blocks;

    std::function<Tensor(const Tensor&)> residual_callback;

private:
    bool offload;
};
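
// Minimal usage sketch (illustrative only, not part of the original header;
// the ScalarType constant, the Device construction, and the default-constructed
// Tensor{} placeholders for the optional ControlNet inputs are assumptions):
//
//   FluxModel model(/*use_fp4=*/false, /*offload=*/false,
//                   Tensor::BF16, Device::cuda());
//   model.setAttentionImpl(AttentionImpl::NunchakuFP16);
//   Tensor out = model.forward(hidden_states, encoder_hidden_states, temb,
//                              rotary_emb_img, rotary_emb_context,
//                              rotary_emb_single,
//                              /*controlnet_block_samples=*/Tensor{},
//                              /*controlnet_single_block_samples=*/Tensor{});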