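"""MPT transformer block with fused attention, for AWQ's fused-module path.

Wraps the block's QKV and output projections in QuantAttentionFused (with
use_alibi=True, since MPT uses ALiBi positional biases) and reuses the
model's original layer norms and feed-forward module.
"""
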
import torch.nn as nn
from awq.modules.fused.attn import QuantAttentionFused


class MPTBlock(nn.Module):
    def __init__(
        self, hidden_size, n_heads, qkv_layer, o_proj,
        mpt_mlp, norm_1, norm_2, dev, max_seq_len,
    ):
        super().__init__()
        self.n_heads = n_heads
        self.hidden_size = hidden_size
        self.norm_1 = norm_1
        # MPT uses ALiBi positional biases, hence use_alibi=True.
        self.attn = QuantAttentionFused(
            hidden_size, self.n_heads, qkv_layer, o_proj,
            dev=dev, max_seq_len=max_seq_len, use_alibi=True,
        ).to(dev)
        self.norm_2 = norm_2
        self.ffn = mpt_mlp.to(dev)

    def forward(
        self, hidden_states, past_key_value, attn_bias=None, attention_mask=None, is_causal=None
    ):
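        """One pre-norm decoder step: fused attention, then the feed-forward block.

        attn_bias and is_causal are accepted for signature compatibility with
        the unfused MPT block but are unused here; ALiBi biasing and causal
        masking happen inside QuantAttentionFused.
        """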
        # Pre-norm, then fused attention; the KV cache is always used.
        norm_out = self.norm_1(hidden_states)
        attn_output, _, past_key_value = self.attn.forward(
            hidden_states=norm_out,
            past_key_value=past_key_value,
            attention_mask=attention_mask,
            position_ids=None,
            output_attentions=False,
            use_cache=True
        )

        # Residual connections around attention and the feed-forward block.
        h = hidden_states + attn_output
        out = h + self.ffn.forward(self.norm_2(h))
        return out, None, past_key_value
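

# A minimal usage sketch. It assumes qkv_layer, o_proj, mpt_mlp, norm_1, and
# norm_2 are the corresponding modules taken from an AWQ-quantized MPT model
# (their exact types depend on the AWQ version); only the shapes and the call
# signature shown here are dictated by this block.
#
#   import torch
#
#   block = MPTBlock(
#       hidden_size=4096, n_heads=32,
#       qkv_layer=qkv_layer,            # quantized linear, out = 3 * hidden_size
#       o_proj=o_proj,                  # quantized linear, hidden -> hidden
#       mpt_mlp=mpt_mlp,                # the model's original MLP
#       norm_1=norm_1, norm_2=norm_2,   # the model's original layer norms
#       dev="cuda", max_seq_len=2048,
#   )
#   x = torch.randn(1, 16, 4096, device="cuda", dtype=torch.float16)
#   out, _, past_key_value = block(x, past_key_value=None)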