# Adapted from turboderp exllama: https://github.com/turboderp/exllamav2

from logging import getLogger

import torch
import torch.nn as nn
import math

logger = getLogger(__name__)

try:
    from exllamav2_kernels import make_q_matrix, gemm_half_q_half
except ImportError:
    logger.error("exllamav2_kernels not installed.")
    raise

# Dummy tensor to pass instead of g_idx since there is no way to pass "None" to a C++ extension
none_tensor = torch.empty((1, 1), device="meta")

def ext_gemm_half_q_half(x, q_handle, q4_width, force_cuda):
    """Matrix multiplication, returns x @ q4.

    Flattens all leading dimensions of `x` into a single batch dimension,
    runs the half-precision quantized GEMM kernel, and restores the
    original leading shape with the last dimension replaced by `q4_width`.
    """
    out_shape = x.shape[:-1] + (q4_width,)
    x_2d = x.view(-1, x.shape[-1])
    out = torch.empty(
        (x_2d.shape[0], q4_width), dtype=torch.half, device=x_2d.device
    )
    gemm_half_q_half(x_2d, q_handle, out, force_cuda)
    return out.view(out_shape)

def ext_make_q_matrix(w: dict, temp_dq, key: str = None):
    """
    Create a Q-matrix handle from a dict of quantized tensors.

    Supports EXL2 checkpoints (keyed by "q_weight") and GPTQ checkpoints
    (keyed by "qweight"), with or without act-order (`g_idx`).
    Mutates `w` in place (rescales q_scale_max, casts perm tensors to
    int16, casts float scales to half).

    Raises:
        RuntimeError: if `w` matches neither layout. (Previously this
            case silently returned None, which would crash later inside
            the C++ extension.)
    """
    # EXL2
    # NOTE(review): an earlier comment here warned this path "won't work
    # at the moment because the tensors are not the same" — confirm
    # before relying on EXL2 checkpoints.
    if "q_weight" in w:
        w["q_scale_max"] /= 256
        w["q_perm"] = w["q_perm"].short()
        w["q_invperm"] = w["q_invperm"].short()
        return make_q_matrix(
            w["q_weight"],
            w["q_perm"],
            w["q_invperm"],
            w["q_scale"],
            w["q_scale_max"],
            w["q_groups"],
            none_tensor,
            none_tensor,
            none_tensor,
            temp_dq,
        )
    # GPTQ
    elif "qweight" in w:
        if w["scales"].dtype == torch.float:
            w["scales"] = w["scales"].half()

        # GPTQ with g_idx (act_order): a non-trivial g_idx is one that is
        # present and not all zeros.
        if w.get("g_idx", None) is not None and not (w["g_idx"] == 0).all().item():
            # The kernel fills these permutation buffers; 8 int4 values are
            # packed per int32 row of qweight, hence the *8 length.
            w["q_perm"] = torch.empty(
                (w["qweight"].shape[0] * 8,),
                dtype=torch.short,
                device=w["qweight"].device,
            )
            w["q_invperm"] = torch.empty_like(w["q_perm"])
            # make_q4 segfaults if g_idx is not on cpu in the act-order case. In the non act-order case, None needs to be passed for g_idx.
            return make_q_matrix(
                w["qweight"],
                w["q_perm"],
                w["q_invperm"],
                none_tensor,
                none_tensor,
                none_tensor,
                w["qzeros"],
                w["scales"],
                w["g_idx"].cpu(),
                temp_dq,
            )

        # GPTQ without g_idx
        else:
            return make_q_matrix(
                w["qweight"],
                none_tensor,
                none_tensor,
                none_tensor,
                none_tensor,
                none_tensor,
                w["qzeros"],
                w["scales"],
                none_tensor,
                temp_dq,
            )
    else:
        # Fixed: fall-through used to return None implicitly.
        raise RuntimeError(
            "Cannot create Q matrix: unrecognized quantized tensor layout"
        )


# Module-level state shared between QuantLinear construction and
# create_exllama_buffers():
#   DEVICE      - target torch device set via set_device()
#   FIXED_BYTES - largest scratch-buffer requirement seen so far
#                 (maxed over layers in QuantLinear.__init__)
#   LAYERS      - QuantLinear instances awaiting post_init()
DEVICE = None
FIXED_BYTES = 0
LAYERS = []


def set_device(device):
    """Record the torch device that exllama scratch buffers will live on."""
    global DEVICE
    DEVICE = device


def create_exllama_buffers():
    """Allocate the shared device scratch space and finish layer init.

    Builds one ExLlamaV2DeviceTensors sized to the largest requirement
    recorded in FIXED_BYTES, then hands it to every registered layer's
    post_init().
    """
    global FIXED_BYTES, LAYERS, DEVICE
    scratch = ExLlamaV2DeviceTensors(DEVICE, FIXED_BYTES)
    for layer in LAYERS:
        layer.post_init(scratch)


class QuantLinear(nn.Module):
    """Linear layer implementation with per-group 4-bit quantization of the weights.

    Wraps the exllamav2 CUDA kernels: tensors are registered at construction
    time, the actual kernel handle is created later in post_init() once the
    shared scratch buffer exists (see create_exllama_buffers()).
    """

    QUANT_TYPE = "exllamav2"

    def __init__(self, qweight, qzeros, scales, g_idx, bias, bits, groupsize):
        """Store the quantized tensors and register this layer for post_init.

        Args:
            qweight: packed int32 weights, shape (infeatures // 32 * bits, outfeatures).
            qzeros: packed zero points, shape (infeatures // groupsize, outfeatures // 32 * bits).
            scales: per-group scales, shape (infeatures // groupsize, outfeatures).
            g_idx: group index per input feature, shape (infeatures,).
            bias: optional bias tensor, or None.
            bits: quantization width; only 4 is supported.
            groupsize: number of input features per quantization group.

        Raises:
            ValueError: if bits != 4.
        """
        super().__init__()
        if bits != 4:
            raise ValueError(
                f"Exllamav2 kernel supports only bits=4, requested bits={bits}. Something is wrong in the model initialization."
            )
        self.q_handle = None  # set by post_init() once scratch space exists
        self.q_tensors = None
        self.bits = bits
        self.maxq = 2**self.bits - 1
        # 32-bit words each pack 32 // bits quantized values.
        self.infeatures = qweight.shape[0] // self.bits * 32
        self.outfeatures = qweight.shape[1]
        # Pad the output dimension up to the next multiple of 32.
        self.padding = -self.outfeatures % 32
        self.outfeatures = self.outfeatures + self.padding

        self.device = qweight.device
        self.qweight = qweight
        self.qzeros = qzeros
        self.scales = scales
        self.g_idx = g_idx
        # Fixed: was `bias if bias is not None else None`, a no-op conditional.
        self.bias = bias
        self.group_size = groupsize

        infeatures = self.infeatures
        outfeatures = self.outfeatures
        assert qweight.shape == (infeatures // 32 * self.bits, outfeatures)
        assert infeatures % self.group_size == 0
        assert qzeros.shape == (
            infeatures // self.group_size,
            outfeatures // 32 * self.bits,
        )
        assert scales.shape == (infeatures // self.group_size, outfeatures)
        assert g_idx.shape == (infeatures,), f"{g_idx.shape}, {infeatures}"

        # Grow the shared scratch requirement and queue this layer for
        # post_init() (called from create_exllama_buffers()).
        global FIXED_BYTES, LAYERS
        FIXED_BYTES = max(FIXED_BYTES, self.scratch_space_fixed())
        LAYERS.append(self)

    def post_init(self, temp_dq):
        """Create the kernel Q-matrix handle using the shared scratch space."""
        assert self.qweight.device.type == "cuda"
        assert self.qweight.device.index is not None
        self.q_tensors = {
            "qweight": self.qweight,
            "qzeros": self.qzeros,
            "scales": self.scales,
            "g_idx": self.g_idx,
        }
        temp_dq = temp_dq.get_scratch_slice(self.temp_dq_size())
        self.q_handle = ext_make_q_matrix(self.q_tensors, temp_dq)

    def forward(self, x, force_cuda=False):
        """Compute x @ W (dequantized) + bias via the exllamav2 kernel."""
        output = ext_gemm_half_q_half(x, self.q_handle, self.outfeatures, force_cuda)

        if self.bias is not None:
            output.add_(self.bias)
        return output

    def temp_dq_size(self):
        """Bytes needed to hold the dequantized fp16 weight matrix (+ slack)."""
        return self.infeatures * self.outfeatures * 2 + 128

    def temp_fwd_size(self, max_input_len, max_batch_size):
        """Bytes of forward-pass workspace for the given sequence/batch bounds."""
        return self.outfeatures * max_input_len * max_batch_size * 4 + 128

    def scratch_space_fixed(self, max_input_len=4096, max_batch_size=16):
        """Total scratch bytes this layer may need (dequant + forward)."""
        return self.temp_dq_size() + self.temp_fwd_size(max_input_len, max_batch_size)


class ExLlamaV2DeviceTensors:
    """Lazily-allocated fp16 scratch buffer shared by all layers on a device."""

    # Fixed: annotation was `torch.tensor` (a factory function, not a type);
    # also removed the stale `device_idx`/`scratch_idx` annotations that were
    # never assigned anywhere.
    scratch: torch.Tensor = None

    def __init__(self, device, scratch_bytes):
        """Remember the target device and total size; allocation is deferred."""
        self.device = device
        self.scratch_bytes = scratch_bytes

    def prepare(self):
        """Allocate scratch_bytes of fp16 storage (2 bytes per element)."""
        self.scratch = torch.empty(
            (self.scratch_bytes // 2,), dtype=torch.half, device=self.device
        )

    def get_scratch_slice(self, size_bytes):
        """Return a view over the first `size_bytes` of the scratch tensor.

        Allocates the tensor on first use. `size_bytes` is rounded up to a
        multiple of 128 — presumably for kernel alignment; TODO confirm.
        """
        if self.scratch is None:
            self.prepare()

        size_bytes = ((size_bytes + 127) // 128) * 128
        size_half = size_bytes // 2
        return self.scratch.narrow(0, 0, size_half)