import torch
import torch.nn as nn
from typing import List
from transformers.modeling_outputs import BaseModelOutputWithPast
from awq.modules.fused.block import MPTBlock, FalconDecoderLayer, LlamaLikeBlock
from awq.utils.fused_utils import prepare_attention_mask, prepare_input_ids, prepare_cache

class LlamaLikeModel(nn.Module):
    """
    LlamaLikeModel is intended to be reused across models that have
    an architecture that closely resembles Llama, e.g. Mistral and Aquila.
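
    Illustrative wiring (a sketch only; the attribute names below follow the
    Hugging Face Llama layout, and `hf_model`/`blocks` are placeholders filled
    in by the model-specific fusing code):

        blocks = [...]  # List[LlamaLikeBlock] built from the quantized layers
        fused = LlamaLikeModel(
            vocab_size=hf_model.config.vocab_size,
            blocks=blocks,
            embedding=hf_model.model.embed_tokens,
            norm=hf_model.model.norm,
        )
        out = fused(input_ids)  # BaseModelOutputWithPast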
    """
    def __init__(self, vocab_size, blocks, embedding, norm):
        super().__init__()
        self.vocab_size = vocab_size
        self.embedding = embedding
        # Register the fused blocks as submodules (as MPTModel and FalconModel
        # below do) so they are included in .to(), .state_dict(), etc.
        self.blocks: List[LlamaLikeBlock] = nn.ModuleList(blocks)
        self.norm = norm
        self.last_forward_num_tokens = 0
    
    @torch.inference_mode()
    def forward(self, input_ids: torch.Tensor, attn_bias=None, attention_mask=None, is_causal=None, *args, **kwargs):
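        # Trim input_ids down to tokens the fused blocks have not processed yet
        # (newer transformers versions may pass the full sequence every step)
        # and update the running count of processed tokens.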
        input_ids, self.last_forward_num_tokens = prepare_input_ids(
            input_ids,
            self.last_forward_num_tokens
        )
        _bsz, seqlen = input_ids.shape

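        # Ensure each block's fused KV cache has room for `seqlen` new tokens
        # before any block runs.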
        prepare_cache(self.blocks, seqlen)

        h = self.embedding(input_ids)

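        # Build a causal mask offset by the current cache position; for
        # single-token decode steps the helper typically returns no mask.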
        mask = prepare_attention_mask(
            seqlen=seqlen,
            start_pos=self.blocks[0].attn.start_pos,
            device=input_ids.device,
            type_as=h
        )

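        # Run the fused decoder blocks; each block manages its KV cache
        # internally, so only the hidden states are threaded through the loop.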
        for layer in self.blocks:
            h, _, past_key_value = layer(h, None, attention_mask=mask, is_causal=is_causal)
        h = self.norm(h)

        return BaseModelOutputWithPast(
            last_hidden_state=h,
            past_key_values=past_key_value,
            hidden_states=(),
            attentions=(),
        )

class MPTModel(nn.Module):
    def __init__(self, vocab_size, blocks, wte, norm_f):
        super().__init__()
        self.vocab_size = vocab_size
        self.wte = wte
        self.blocks: List[MPTBlock] = nn.ModuleList(blocks)
        self.norm_f = norm_f
        self.attn_uses_sequence_id = False
        self.prefix_lm = False
        self.last_forward_num_tokens = 0

    @torch.inference_mode()
    def forward(self, input_ids, attn_bias=None, attention_mask=None, is_causal=None, *args, **kwargs):
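        # Same fused decoding flow as LlamaLikeModel.forward, but using MPT's
        # module names (wte token embedding, norm_f final norm).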
        input_ids, self.last_forward_num_tokens = prepare_input_ids(
            input_ids,
            self.last_forward_num_tokens
        )
        _bsz, seqlen = input_ids.shape

        prepare_cache(self.blocks, seqlen)

        h = self.wte(input_ids)

        mask = prepare_attention_mask(
            seqlen=seqlen,
            start_pos=self.blocks[0].attn.start_pos,
            device=input_ids.device,
            type_as=h
        )

        for layer in self.blocks:
            h, _, past_key_value = layer(h, None, attention_mask=mask, is_causal=is_causal)
        h = self.norm_f(h)

        return BaseModelOutputWithPast(
            last_hidden_state=h,
            past_key_values=past_key_value,
            hidden_states=(),
            attentions=(),
        )

class FalconModel(nn.Module):
    def __init__(self, vocab_size, blocks, word_embeddings, ln_f):
        super().__init__()
        self.vocab_size = vocab_size
        self.word_embeddings = word_embeddings
        self.blocks: List[FalconDecoderLayer] = nn.ModuleList(blocks)
        self.ln_f = ln_f
        self.attn_uses_sequence_id = False
        self.prefix_lm = False
        self.last_forward_num_tokens = 0

    @torch.inference_mode()
    def forward(self, input_ids, attn_bias=None, attention_mask=None, is_causal=None, *args, **kwargs):
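        # Same fused decoding flow as LlamaLikeModel.forward, but using Falcon's
        # module names (word_embeddings, ln_f final norm).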
        input_ids, self.last_forward_num_tokens = prepare_input_ids(
            input_ids,
            self.last_forward_num_tokens
        )
        _bsz, seqlen = input_ids.shape

        prepare_cache(self.blocks, seqlen)

        h = self.word_embeddings(input_ids)

        mask = prepare_attention_mask(
            seqlen=seqlen,
            start_pos=self.blocks[0].attn.start_pos,
            device=input_ids.device,
            type_as=h
        )

        for layer in self.blocks:
            h, _, past_key_value = layer(h, None, attention_mask=mask, is_causal=is_causal)
        h = self.ln_f(h)

        return BaseModelOutputWithPast(
            last_hidden_state=h,
            past_key_values=past_key_value,
            hidden_states=(),
            attentions=(),
        )