import torch
import torch.nn as nn
from typing import List
from transformers.modeling_outputs import BaseModelOutputWithPast
from awq.utils.fused_utils import prepare_attention_mask, prepare_input_ids
from awq.modules.fused.block import MPTBlock, FalconDecoderLayer, LlamaLikeBlock

class LlamaLikeModel(nn.Module):
    """
    LlamaLikeModel is intended to be reused across models that have
    an architecture that closely resembles Llama, e.g. Mistral and Aquila.
    """
    def __init__(self, vocab_size, blocks, embedding, norm):
        super().__init__()
        self.vocab_size = vocab_size
        self.embedding = embedding
        # Register blocks as submodules so device/dtype moves track them,
        # matching MPTModel and FalconModel below.
        self.blocks: List[LlamaLikeBlock] = nn.ModuleList(blocks)
        self.norm = norm
        self.last_forward_num_tokens = 0
    
    @torch.inference_mode()
    def forward(self, input_ids: torch.Tensor, attn_bias=None, attention_mask=None, is_causal=None, *args, **kwargs):
        # Trim input_ids to the tokens not yet seen by previous forward calls
        # (the fused blocks maintain their own KV cache via attn.start_pos).
        input_ids, self.last_forward_num_tokens = prepare_input_ids(
            input_ids,
            self.last_forward_num_tokens
        )
        
        _bsz, seqlen = input_ids.shape
        h = self.embedding(input_ids)

        # Causal mask for the new tokens, offset by the first block's current cache position.
        mask = prepare_attention_mask(
            seqlen=seqlen,
            start_pos=self.blocks[0].attn.start_pos,
            device=input_ids.device,
            type_as=h
        )

        for layer in self.blocks:
            h, _, past_key_value = layer(h, None, attention_mask=mask, is_causal=is_causal)
        h = self.norm(h)

        return BaseModelOutputWithPast(
            last_hidden_state=h,
            past_key_values=past_key_value,
            hidden_states=(),
            attentions=(),
        )
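
# Illustrative construction (a sketch, not exercised by this module): the names
# `hf_model` and `fused_blocks` are hypothetical placeholders for a Hugging Face
# Llama/Mistral-style model and the list of AWQ-fused LlamaLikeBlock instances
# built from its decoder layers.
#
#   model = LlamaLikeModel(
#       vocab_size=hf_model.config.vocab_size,
#       blocks=fused_blocks,
#       embedding=hf_model.model.embed_tokens,
#       norm=hf_model.model.norm,
#   )
#   out = model(input_ids)  # BaseModelOutputWithPast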

class MPTModel(nn.Module):
    """Fused-inference wrapper for MPT-style models; forward mirrors LlamaLikeModel."""
    def __init__(self, vocab_size, blocks, wte, norm_f):
        super().__init__()
        self.vocab_size = vocab_size
        self.wte = wte
        self.blocks: List[MPTBlock] = nn.ModuleList(blocks)
        self.norm_f = norm_f
        self.attn_uses_sequence_id = False
        self.prefix_lm = False
        self.last_forward_num_tokens = 0

    @torch.inference_mode()
    def forward(self, input_ids, attn_bias=None, attention_mask=None, is_causal=None, *args, **kwargs):
        input_ids, self.last_forward_num_tokens = prepare_input_ids(
            input_ids,
            self.last_forward_num_tokens
        )

        _bsz, seqlen = input_ids.shape
        h = self.wte(input_ids)

        mask = prepare_attention_mask(
            seqlen=seqlen,
            start_pos=self.blocks[0].attn.start_pos,
            device=input_ids.device,
            type_as=h
        )

        for layer in self.blocks:
            h, _, past_key_value = layer(h, None, attention_mask=mask, is_causal=is_causal)
        h = self.norm_f(h)

        return BaseModelOutputWithPast(
            last_hidden_state=h,
            past_key_values=past_key_value,
            hidden_states=(),
            attentions=(),
        )

class FalconModel(nn.Module):
    """Fused-inference wrapper for Falcon-style models; forward mirrors LlamaLikeModel."""
    def __init__(self, vocab_size, blocks, word_embeddings, ln_f):
        super().__init__()
        self.vocab_size = vocab_size
        self.word_embeddings = word_embeddings
        self.blocks: List[FalconDecoderLayer] = nn.ModuleList(blocks)
        self.ln_f = ln_f
        self.attn_uses_sequence_id = False
        self.prefix_lm = False
        self.last_forward_num_tokens = 0

    @torch.inference_mode()
    def forward(self, input_ids, attn_bias=None, attention_mask=None, is_causal=None, *args, **kwargs):
        input_ids, self.last_forward_num_tokens = prepare_input_ids(
            input_ids,
            self.last_forward_num_tokens
        )
        
        _bsz, seqlen = input_ids.shape
        h = self.word_embeddings(input_ids)

        mask = prepare_attention_mask(
            seqlen=seqlen,
            start_pos=self.blocks[0].attn.start_pos,
            device=input_ids.device,
            type_as=h
        )

        for layer in self.blocks:
            h, _, past_key_value = layer(h, None, attention_mask=mask, is_causal=is_causal)
        h = self.ln_f(h)

        return BaseModelOutputWithPast(
            last_hidden_state=h,
            past_key_values=past_key_value,
            hidden_states=(),
            attentions=(),
        )
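
if __name__ == "__main__":
    # Minimal smoke test (a sketch, not part of the original module). It assumes
    # prepare_input_ids passes a fresh prompt through unchanged and that
    # prepare_attention_mask returns a causal mask for seqlen > 1. The dummy block
    # below only mimics the (hidden_states, attn_weights, past_key_value) return
    # contract of the fused blocks, so no quantized weights are needed.
    class _DummyAttn:
        start_pos = 0  # stands in for the fused attention module's cache position

    class _DummyBlock(nn.Module):
        def __init__(self):
            super().__init__()
            self.attn = _DummyAttn()

        def forward(self, hidden_states, _unused, attention_mask=None, is_causal=None):
            return hidden_states, None, None

    model = LlamaLikeModel(
        vocab_size=32,
        blocks=[_DummyBlock()],
        embedding=nn.Embedding(32, 16),
        norm=nn.LayerNorm(16),
    )
    out = model(torch.randint(0, 32, (1, 8)))
    print(out.last_hidden_state.shape)  # expected: torch.Size([1, 8, 16])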