# Adapted from turboderp exllama: https://github.com/turboderp/exllamav2

from dataclasses import dataclass
from typing import Optional
import torch
import torch.nn as nn

from loguru import logger

from text_generation_server.layers.exl2 import Exl2Weight
from text_generation_server.layers.gptq import GPTQWeight
from text_generation_server.utils.log import log_master

try:
    from exllamav2_kernels import make_q_matrix, gemm_half_q_half
except ImportError:
    log_master(logger.warning, "exllamav2_kernels not installed.")
    raise

# Dummy tensor to pass instead of g_idx since there is no way to pass "None" to a C++ extension
none_tensor = torch.empty((1, 1), device="meta")


@dataclass
class _ExtraTensors:
    """Additional generated quantizer tensors."""

    q_group_map: Optional[torch.Tensor] = None
    q_invperm: Optional[torch.Tensor] = None
    q_perm: Optional[torch.Tensor] = None


def ext_gemm_half_q_half(x, q_handle, q4_width, force_cuda):
    """Matrix multiplication, returns x @ q4"""
    output_shape = x.shape[:-1] + (q4_width,)
    x = x.view(-1, x.shape[-1])
    output = torch.empty((x.shape[0], q4_width), dtype=torch.half, device=x.device)
    gemm_half_q_half(x, q_handle, output, force_cuda)
    return output.view(output_shape)
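

# Shape-handling sketch for ext_gemm_half_q_half (illustrative only; assumes a
# q_handle already built by ext_make_q_matrix and a hypothetical q4_width=4096):
#   x:      (batch=2, seq=16, in_features=4096) float16 input
#   view:   flattened to (32, 4096) before the kernel call
#   output: (32, 4096) float16, reshaped back to (2, 16, 4096) on return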


def make_group_map(q_groups: torch.Tensor, num_qrows: int):
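    """
    Build the EXL2 group map from the packed `q_groups` tensor (interpreted here
    as interleaved (bits, first quantized row) pairs, one pair per group).

    For every dequantized weight row the map stores two int16 entries: the group
    index and the number of rows remaining in that group, counting down to 1.
    """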
    gr = q_groups.tolist()
    group_map = []
    num_groups = len(gr) // 2

    for i in range(num_groups):
        bits = gr[i * 2]
        if i < num_groups - 1:
            qrows = gr[i * 2 + 3] - gr[i * 2 + 1]
        else:
            qrows = num_qrows - gr[i * 2 + 1]
        rows = qrows * 32 // bits
        for j in range(rows):
            group_map += [i]
            group_map += [rows - j]

    return torch.tensor(group_map, dtype=torch.short, device=q_groups.device)


# Create Q matrix


def ext_make_q_matrix(
    w: Exl2Weight | GPTQWeight,
    extra: _ExtraTensors,
    temp_dq,
    key: Optional[str] = None,
):
    """
    Create a Q matrix handle for the exllamav2 kernels (EXL2 or GPTQ weights).
    """
    # EXL2
    if isinstance(w, Exl2Weight):
        extra.q_group_map = make_group_map(w.q_groups, w.q_weight.shape[0])
        extra.q_perm = torch.argsort(w.q_invperm).short()

        return make_q_matrix(
            w.q_weight,
            extra.q_perm,
            w.q_invperm,
            w.q_scale,
            w.q_scale_max,
            w.q_groups,
            extra.q_group_map,
            none_tensor,
            none_tensor,
            none_tensor,
            temp_dq,
        )
    # GPTQ
    elif isinstance(w, GPTQWeight):
        if w.scales.dtype == torch.float:
            w.scales = w.scales.half()

        # GPTQ with g_idx (act_order)
        if w.g_idx is not None and not (w.g_idx == 0).all().item():
            extra.q_perm = torch.empty(
                (w.qweight.shape[0] * 8,),
                dtype=torch.short,
                device=w.qweight.device,
            )
            extra.q_invperm = torch.empty_like(extra.q_perm)
            # make_q4 segfaults if g_idx is not on cpu in the act-order case. In the non act-order case, None needs to be passed for g_idx.
            return make_q_matrix(
                w.qweight,
                extra.q_perm,
                extra.q_invperm,
                none_tensor,
                none_tensor,
                none_tensor,
                none_tensor,
                w.qzeros,
                w.scales,
                w.g_idx.cpu(),
                temp_dq,
            )
        # GPTQ without g_idx
        else:
            return make_q_matrix(
                w.qweight,
                none_tensor,
                none_tensor,
                none_tensor,
                none_tensor,
                none_tensor,
                none_tensor,
                w.qzeros,
                w.scales,
                none_tensor,
                temp_dq,
            )
    else:
        raise RuntimeError("Cannot create handle")


DEVICE = None
LAYERS = []


def set_device(device):
    global DEVICE
    DEVICE = device


def create_exllama_buffers(max_total_tokens: int):
    global LAYERS, DEVICE

    # No need to initialize scratch space if there are no layers
    # that use ExLLamav2.
    if len(LAYERS) == 0:
        return

    # Find the size of the scratch space.
    scratch_bytes = max(
        layer.scratch_space_fixed(max_input_len=max_total_tokens, max_batch_size=1)
        for layer in LAYERS
    )
    temp_dq = ExLlamaV2DeviceTensors(DEVICE, scratch_bytes)

    for layer in LAYERS:
        layer.post_init(temp_dq)
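

# Expected call order (a sketch inferred from this module; construction of the
# Exl2Weight / GPTQWeight objects happens elsewhere in the server and is elided):
#
#   layer = QuantLinear(weight, bias)          # registers itself in LAYERS
#   set_device(weight.device)                  # record the device for scratch space
#   create_exllama_buffers(max_total_tokens)   # allocate scratch, call post_init()
#   y = layer.forward(x)                       # float16 activations in and out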


class QuantLinear(nn.Module):
    """Linear layer implementation with per-group 4-bit quantization of the weights"""

    QUANT_TYPE = "exllamav2"

    def __init__(
        self,
        weight: Exl2Weight | GPTQWeight,
        bias: torch.Tensor,
    ):
        super().__init__()

        self.q_handle = None
        self.q_tensors = weight
        self.extra_tensors = _ExtraTensors()

        if isinstance(weight, Exl2Weight):
            self.infeatures = weight.q_invperm.shape[0]
            self.outfeatures = weight.q_weight.shape[1]
        elif isinstance(weight, GPTQWeight):
            if weight.bits != 4:
                raise ValueError(
                    f"Exllamav2 kernel supports only bits=4, requested bits={weight.bits}. Something is wrong in the model initialization."
                )

            self.infeatures = weight.qweight.shape[0] // weight.bits * 32
            self.outfeatures = weight.qweight.shape[1]
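            # Sanity check with hypothetical numbers: a 4-bit qweight of shape
            # (512, 4096) packs 32 // 4 = 8 rows per int32, so
            # infeatures = 512 // 4 * 32 = 4096 and outfeatures = 4096.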

        self.padding = -self.outfeatures % 32
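        # e.g. a hypothetical outfeatures of 4097 gives padding = -4097 % 32 = 31,
        # i.e. the layer is padded up to 4128 outputs (the next multiple of 32).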
        self.outfeatures = self.outfeatures + self.padding

        self.device = weight.device
        self.bias = bias

        global LAYERS
        LAYERS.append(self)

    def post_init(self, temp_dq):
        device = self.q_tensors.device
        assert device.type == "cuda"
        assert device.index is not None
        temp_dq = temp_dq.get_scratch_slice(self.temp_dq_size())

        # We NEED to keep a pointer on Python side, otherwise the garbage collector will mess with us,
        # and `Memory access fault by GPU node-2` will EAT you.
        self.temp_dq = temp_dq
        self.q_handle = ext_make_q_matrix(self.q_tensors, self.extra_tensors, temp_dq)

    def forward(self, x, force_cuda=False):
        output = ext_gemm_half_q_half(x, self.q_handle, self.outfeatures, force_cuda)

        if self.bias is not None:
            output.add_(self.bias)
        return output

    def temp_dq_size(self):
        return self.infeatures * self.outfeatures * 2 + 128

    def temp_fwd_size(self, max_input_len, max_batch_size):
        return self.outfeatures * max_input_len * max_batch_size * 4 + 128

    def scratch_space_fixed(self, max_input_len, max_batch_size):
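        # Rough sizing example with hypothetical numbers (4096x4096 layer,
        # max_input_len=4096, max_batch_size=1):
        #   temp_dq_size()  = 4096 * 4096 * 2 + 128 bytes      (~32 MiB)
        #   temp_fwd_size() = 4096 * 4096 * 1 * 4 + 128 bytes  (~64 MiB)
        # i.e. roughly 96 MiB of scratch; create_exllama_buffers allocates the
        # maximum over all registered layers.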
        return self.temp_dq_size() + self.temp_fwd_size(max_input_len, max_batch_size)


class ExLlamaV2DeviceTensors:

    device_idx: int
    scratch_bytes: int
    scratch_idx: int
    scratch: Optional[torch.Tensor] = None

    def __init__(self, device, scratch_bytes):
        self.device = device
        self.scratch_bytes = scratch_bytes

    def prepare(self):
        self.scratch = torch.empty(
            (self.scratch_bytes // 2,), dtype=torch.half, device=self.device
        )

    def get_scratch_slice(self, size_bytes):

        if self.scratch is None:
            self.prepare()

        size_bytes = ((size_bytes + 127) // 128) * 128
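        # e.g. a request of size_bytes=1000 is rounded up to 1024 bytes, which
        # becomes 512 float16 elements taken from the front of the scratch buffer.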
        size_half = size_bytes // 2
        scratch_slice = self.scratch.narrow(0, 0, size_half)
        return scratch_slice