# Adapted from turboderp exllama: https://github.com/turboderp/exllamav2

from logging import getLogger

import torch
import torch.nn as nn
import math

logger = getLogger(__name__)

try:
    from exllamav2_kernels import make_q_matrix, gemm_half_q_half
except ImportError:
    logger.error("exllamav2_kernels not installed.")
    raise

# Dummy tensor to pass instead of g_idx since there is no way to pass "None" to a C++ extension
none_tensor = torch.empty((1, 1), device="meta")


def ext_gemm_half_q_half(x, q_handle, q4_width, force_cuda):
    """Matrix multiplication, returns x @ q4"""
    output_shape = x.shape[:-1] + (q4_width,)
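    # Collapse any leading batch dimensions so the kernel sees a 2-D (rows, in_features) input.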
    x = x.view(-1, x.shape[-1])
    output = torch.empty((x.shape[0], q4_width), dtype=torch.half, device=x.device)
    gemm_half_q_half(x, q_handle, output, force_cuda)
    return output.view(output_shape)


# Group map needed for irregular group sizes
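# q_groups appears to be laid out flat as [bits_0, start_qrow_0, bits_1, start_qrow_1, ...],
# and the returned map stores one (group index, rows remaining in the group) pair per
# dequantized row. A minimal worked sketch, assuming a single 4-bit group over 8 packed rows:
#
#   q_groups = torch.tensor([4, 0], dtype=torch.short)
#   make_group_map(q_groups, num_qrows=8)
#   # -> 64 pairs: [0, 64, 0, 63, ..., 0, 1]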


def make_group_map(q_groups, num_qrows):

    gr = q_groups.tolist()
    group_map = []
    num_groups = len(gr) // 2

    for i in range(num_groups):
        bits = gr[i * 2]
        if i < num_groups - 1:
            qrows = gr[i * 2 + 3] - gr[i * 2 + 1]
        else:
            qrows = num_qrows - gr[i * 2 + 1]
        rows = qrows * 32 // bits
        for j in range(rows):
            group_map += [i]
            group_map += [rows - j]

    return torch.tensor(group_map, dtype=torch.short, device=q_groups.device)


# Create Q matrix


def ext_make_q_matrix(w: dict, temp_dq, key: str = None):
    """
    Create Q matrix.

    Accepts either an EXL2-style checkpoint (keys "q_weight", "q_perm", "q_invperm",
    "q_scale", "q_scale_max", "q_groups") or a GPTQ-style checkpoint (keys "qweight",
    "qzeros", "scales" and, for act-order models, "g_idx").
    """
    # EXL2
    # won't work at the moment because the tensors are not the same.
    if "q_weight" in w:
        w["q_scale_max"] /= 256
        w["q_perm"] = w["q_perm"].short()
        w["q_invperm"] = w["q_invperm"].short()

        if "q_group_map" not in w:
            w["q_group_map"] = make_group_map(w["q_groups"], w["q_weight"].shape[0])

        return make_q_matrix(
            w["q_weight"],
            w["q_perm"],
            w["q_invperm"],
            w["q_scale"],
            w["q_scale_max"],
            w["q_groups"],
            w["q_group_map"],
            none_tensor,
            none_tensor,
            none_tensor,
            temp_dq,
        )
    # GPTQ
    elif "qweight" in w:
        if w["scales"].dtype == torch.float:
            w["scales"] = w["scales"].half()

        # GPTQ with g_idx (act_order)
        if w.get("g_idx", None) is not None and not (w["g_idx"] == 0).all().item():
            w["q_perm"] = torch.empty(
                (w["qweight"].shape[0] * 8,),
                dtype=torch.short,
                device=w["qweight"].device,
            )
            w["q_invperm"] = torch.empty_like(w["q_perm"])
            # make_q4 segfaults if g_idx is not on cpu in the act-order case.
            # In the non act-order case, None needs to be passed for g_idx.
            return make_q_matrix(
                w["qweight"],
                w["q_perm"],
                w["q_invperm"],
                none_tensor,
                none_tensor,
                none_tensor,
                none_tensor,
                w["qzeros"],
                w["scales"],
                w["g_idx"].cpu(),
                temp_dq,
            )
        # GPTQ without g_idx
        else:
            return make_q_matrix(
                w["qweight"],
                none_tensor,
                none_tensor,
                none_tensor,
                none_tensor,
                none_tensor,
                none_tensor,
                w["qzeros"],
                w["scales"],
                none_tensor,
                temp_dq,
            )

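
# Hedged sketch of the minimal GPTQ-style dict that ext_make_q_matrix accepts; the tensor
# names and shapes below are assumptions matching the packing used by QuantLinear further down:
#
#   w = {
#       "qweight": qweight,  # int32, shape [in_features * 4 // 32, out_features]
#       "qzeros": qzeros,    # int32, packed per-group zero points
#       "scales": scales,    # float16, shape [num_groups, out_features]
#       "g_idx": None,       # or an int tensor of per-row group indices (act-order)
#   }
#   q_handle = ext_make_q_matrix(w, temp_dq)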

DEVICE = None
FIXED_BYTES = 0
LAYERS = []


def set_device(device):
    global DEVICE
    DEVICE = device


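# Note: create_exllama_buffers is meant to run after set_device() and after every QuantLinear
# has been constructed, since construction is what registers each layer in LAYERS and grows
# FIXED_BYTES to the largest per-layer scratch requirement.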
def create_exllama_buffers(max_total_tokens: int):
    global FIXED_BYTES, LAYERS, DEVICE
    temp_dq = ExLlamaV2DeviceTensors(DEVICE, FIXED_BYTES)

    for layer in LAYERS:
        layer.post_init(temp_dq)


class QuantLinear(nn.Module):
    """Linear layer implementation with per-group 4-bit quantization of the weights"""

    QUANT_TYPE = "exllamav2"

    # def __init__(self, bits, group_size, infeatures, outfeatures, bias, trainable=False, **kwargs):
    def __init__(self, qweight, qzeros, scales, g_idx, bias, bits, groupsize):
        super().__init__()
        if bits != 4:
            raise ValueError(
                f"Exllamav2 kernel supports only bits=4, requested bits={bits}. Something is wrong in the model initialization."
            )
        self.q_handle = None
        self.q_tensors = None
        self.bits = bits
        self.maxq = 2**self.bits - 1
        self.infeatures = qweight.shape[0] // self.bits * 32
        self.outfeatures = qweight.shape[1]
        self.padding = -self.outfeatures % 32
        self.outfeatures = self.outfeatures + self.padding

        self.device = qweight.device
        self.qweight = qweight
        self.qzeros = qzeros
        self.scales = scales
        self.g_idx = g_idx
        self.bias = bias
        self.group_size = groupsize

        global FIXED_BYTES, LAYERS
        FIXED_BYTES = max(FIXED_BYTES, self.scratch_space_fixed())
        LAYERS.append(self)

    def post_init(self, temp_dq):
        assert self.qweight.device.type == "cuda"
        assert self.qweight.device.index is not None
        self.q_tensors = {
            "qweight": self.qweight,
            "qzeros": self.qzeros,
            "scales": self.scales,
            "g_idx": self.g_idx,
        }
        temp_dq = temp_dq.get_scratch_slice(self.temp_dq_size())

        # We NEED to keep a pointer on Python side, otherwise the garbage collector will mess with us,
        # and `Memory access fault by GPU node-2` will EAT you.
        self.temp_dq = temp_dq
        self.q_handle = ext_make_q_matrix(self.q_tensors, temp_dq)

    def forward(self, x, force_cuda=False):
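        # x is expected to be float16 on the same CUDA device as the quantized weights;
        # the kernel writes a float16 output of shape (*x.shape[:-1], outfeatures).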
        output = ext_gemm_half_q_half(x, self.q_handle, self.outfeatures, force_cuda)

        if self.bias is not None:
            output.add_(self.bias)
        return output

    def temp_dq_size(self):
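        # Enough scratch to hold the fully dequantized fp16 weight (2 bytes per element),
        # plus a small alignment pad.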
        return self.infeatures * self.outfeatures * 2 + 128

    def temp_fwd_size(self, max_input_len, max_batch_size):
        return self.outfeatures * max_input_len * max_batch_size * 4 + 128

    def scratch_space_fixed(self, max_input_len=4096, max_batch_size=16):
        return self.temp_dq_size() + self.temp_fwd_size(max_input_len, max_batch_size)


class ExLlamaV2DeviceTensors:

    device_idx: int
    scratch_bytes: int
    scratch_idx: int
    scratch: torch.Tensor = None

    def __init__(self, device, scratch_bytes):
        self.device = device
        self.scratch_bytes = scratch_bytes

    def prepare(self):
        self.scratch = torch.empty(
            (self.scratch_bytes // 2,), dtype=torch.half, device=self.device
        )

    def get_scratch_slice(self, size_bytes):

        if self.scratch is None:
            self.prepare()

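        # Round the request up to a 128-byte boundary and hand back a view in fp16 elements.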
        size_bytes = ((size_bytes + 127) // 128) * 128
        size_half = size_bytes // 2
        scratch_slice = self.scratch.narrow(0, 0, size_half)
        return scratch_slice
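

# End-to-end usage, as a hedged sketch only (the checkpoint tensors and max_total_tokens
# below are stand-ins, not values defined in this module):
#
#   layer = QuantLinear(qweight, qzeros, scales, g_idx, bias, bits=4, groupsize=128)
#   set_device(qweight.device)
#   create_exllama_buffers(max_total_tokens)  # allocates scratch and post-inits all LAYERS
#   y = layer(x)                              # x: float16 activations on the same device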