from .base import BaseAWQForCausalLM
from transformers.models.falcon.modeling_falcon import (
    FalconDecoderLayer as OldFalconDecoderLayer,
    FalconForCausalLM,
    FalconAttention,
)

class FalconAWQForCausalLM(BaseAWQForCausalLM):
    layer_type = "FalconDecoderLayer"

    @staticmethod
    def fuse_layers(model: FalconForCausalLM, quant_config: dict):
        fuser = FalconFuser(model)

        # TODO: Implement correctly fused modules for Falcon 40B and Falcon 180B
        if model.config.num_attention_heads == 71:
            fuser.fuse_transformer()

    @staticmethod
    def get_model_layers(model: FalconForCausalLM):
        return model.transformer.h
    
    @staticmethod
    def get_act_for_scaling(module: OldFalconDecoderLayer):
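        # Falcon's MLP activation (GELU) is the scalable op; the scale vector
        # spans the output features of the dense_h_to_4h up projection.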
        return dict(
            is_scalable=True,
            scale_name="mlp.act",
            scale_layer=module.mlp.act,
            scale_shape=module.mlp.dense_h_to_4h.out_features
        )
    
    @staticmethod
    def move_embed(model: FalconForCausalLM, device):
        model.transformer.word_embeddings = model.transformer.word_embeddings.to(device)
    
    @staticmethod
    def get_layers_for_scaling(module: OldFalconDecoderLayer, input_feat, module_kwargs):
        layers = []
        
        # Falcon 7B (older architecture)
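        # With parallel attention, a single input_layernorm feeds both the QKV
        # projection and the MLP up projection, so they share one scaling group.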
        if module.config.num_attention_heads == 71:
            # linear 1 + attention
            layers.append(dict(
                prev_op=module.input_layernorm,
                layers=[module.mlp.dense_h_to_4h, module.self_attention.query_key_value],
                inp=input_feat['self_attention.query_key_value'],
                module2inspect=module,
                kwargs=module_kwargs,
            ))

        # Falcon 40B (newer architecture)
        else:
            # attention (QKV projection)
            layers.append(dict(
                prev_op=module.ln_attn,
                layers=[module.self_attention.query_key_value],
                inp=input_feat['self_attention.query_key_value'],
                module2inspect=module,
                kwargs=module_kwargs,
            ))

            # linear 1 (MLP up projection)
            layers.append(dict(
                prev_op=module.ln_mlp,
                layers=[module.mlp.dense_h_to_4h],
                inp=input_feat['mlp.dense_h_to_4h'],
                module2inspect=module,
                kwargs=module_kwargs,
            ))

        return layers

from awq.modules.fused.model import FalconModel
from awq.modules.fused.block import FalconDecoderLayer

class FalconFuser:
    def __init__(self, model: FalconForCausalLM):
        self.model = model
    
    def fuse_transformer(self):
        blocks = []

        module: OldFalconDecoderLayer
        for module in self.model.transformer.h:
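            # 71 attention heads identifies Falcon-7B (single input_layernorm);
            # other sizes use the newer decoder architecture with separate ln_attn / ln_mlp.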
            if module.config.num_attention_heads == 71:
                input_layernorm = module.input_layernorm
                ln_attn = None
                ln_mlp = None
                new_decoder_arch = False
            else:
                input_layernorm = None
                ln_attn = module.ln_attn
                ln_mlp = module.ln_mlp
                new_decoder_arch = True
            
            blocks.append(FalconDecoderLayer(
                hidden_size=module.config.hidden_size,
                n_heads=module.config.num_attention_heads,
                qkv_layer=module.self_attention.query_key_value,
                o_proj=module.self_attention.dense,
                mlp=module.mlp,
                dev=next(iter(module.state_dict().values())).device,
                max_seq_len=self.model.config.max_new_tokens,
                input_layernorm=input_layernorm,
                ln_attn=ln_attn,
                ln_mlp=ln_mlp,
                new_decoder_arch=new_decoder_arch
            ))

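        # Replace the HF transformer with the fused FalconModel, reusing the
        # original word embeddings and final layernorm.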
        self.model.transformer = FalconModel(
            self.model.config.vocab_size,
            blocks,
            self.model.transformer.word_embeddings,
            self.model.transformer.ln_f,
        )