import math
import random
import time
from itertools import product

import einops
import pytest
import torch
import numpy as np

import bitsandbytes as bnb
from bitsandbytes import functional as F
from scipy.stats import norm

torch.set_printoptions(
    precision=5, sci_mode=False, linewidth=120, edgeitems=20, threshold=10000
)
k = 20

def assert_all_approx_close(a, b, rtol=1e-3, atol=1e-3, count=0, throw=True):
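    # Counts elements of a and b that are not within (rtol, atol) of each
    # other and only raises (via assert_close) once more than `count` elements
    # disagree -- useful for kernels where a few off-by-one rounding
    # differences are expected.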
    idx = torch.isclose(a, b, rtol, atol)
    sumval = (idx == 0).sum().item()
    if sumval > count:
        if throw:
            print(f"Too many values not close: assert {sumval} < {count}")
            torch.testing.assert_close(a, b, rtol=rtol, atol=atol)

    return sumval


class FFN(torch.nn.Module):
    def __init__(self, input_features, hidden_size, bias=True):
        super().__init__()
        self.fc1 = torch.nn.Linear(input_features, hidden_size, bias=bias)
        self.fc2 = torch.nn.Linear(hidden_size, input_features, bias=bias)

        with torch.no_grad():
            torch.nn.init.xavier_uniform_(self.fc1.weight)
            torch.nn.init.xavier_uniform_(self.fc2.weight)

    def forward(self, x):
        x = torch.relu(self.fc1(x))
        x = self.fc2(x)
        return x

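# Timer accumulates GPU wall time per name via CUDA events: tick() records a
# start event, tock() records the end event, synchronizes, and adds the
# elapsed milliseconds to a running total. Minimal usage sketch:
#   t = Timer()
#   t.tick("matmul"); torch.matmul(a, b); t.tock("matmul")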
class Timer:
    def __init__(self):
        self.starts = {}
        self.ends = {}
        self.agg = {}

    def tick(self, name="default"):
        if name not in self.starts:
            self.starts[name] = torch.cuda.Event(enable_timing=True)
            self.ends[name] = torch.cuda.Event(enable_timing=True)
            self.starts[name].record()
        else:
            ms = self.tock(name, evict=True, print_ms=False)

    def tock(self, name="default", evict=True, print_ms=True):
        if name in self.ends:
            self.ends[name].record()
            torch.cuda.synchronize()
            ms = self.starts[name].elapsed_time(self.ends[name])
            if name not in self.agg:
                self.agg[name] = 0.0
            self.agg[name] += ms
            if evict:
                self.starts.pop(name)
                self.ends.pop(name)

        if print_ms and name in self.agg:
            print(f"{name} took: {self.agg[name] / 1000.0:.5f}s")

        return self.agg[name]

    def reset(self):
        self.starts = {}
        self.ends = {}
        self.agg = {}
        print("Resetting benchmark data")

def setup():
    pass

def teardown():
    pass

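# F.estimate_quantiles returns a 256-entry estimate of the input's quantiles;
# for uniform data these should match the evenly spaced percentile midpoints
# (1/512 ... 511/512) that the comparison below constructs.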
@pytest.mark.parametrize(
    "dtype", [torch.float32, torch.float16], ids=["float", "half"]
)
def test_estimate_quantiles(dtype):
    A = torch.rand(1024, 1024, device="cuda")
    A = A.to(dtype)
    code = F.estimate_quantiles(A)

    percs = torch.linspace(1 / 512, 511 / 512, 256, device=A.device)
    torch.testing.assert_close(percs, code, atol=1e-3, rtol=1e-2)

    A = torch.randn(1024, 1024, device="cuda")
    A = A.to(dtype)
    code = F.estimate_quantiles(A)

    quantiles = torch.quantile(A.float(), percs)
    diff = torch.abs(code - quantiles)
    assert (diff > 5e-02).sum().item() == 0


def test_quantile_quantization():
    for i in range(100):
        A1 = torch.randn(1024, 1024, device="cuda")
        code = F.estimate_quantiles(A1)
        C = F.quantize_no_absmax(A1, code)
        A2 = F.dequantize_no_absmax(C, code)
        diff = torch.abs(A1 - A2).mean().item()
        assert diff < 0.0075

        A1 = torch.rand(1024, 1024, device="cuda")
        code = F.estimate_quantiles(A1)
        C = F.quantize_no_absmax(A1, code)
        A2 = F.dequantize_no_absmax(C, code)
        diff = torch.abs(A1 - A2).mean().item()
        torch.testing.assert_close(A1, A2, atol=5e-3, rtol=0)
        assert diff < 0.001


def test_dynamic_quantization():
    diffs = []
    reldiffs = []
    for i in range(100):
        A1 = torch.randn(1024, 1024, device="cuda")
        C, S = F.quantize(A1)
        A2 = F.dequantize(C, S)
        diff = torch.abs(A1 - A2)
        reldiff = diff / torch.abs(A1 + 1e-8)
        diffs.append(diff.mean().item())
        reldiffs.append(reldiff.mean().item())
        assert diff.mean().item() < 0.0135
    # print(sum(diffs)/len(diffs))
    # print(sum(reldiffs)/len(reldiffs))

    for i in range(100):
        A1 = torch.rand(1024, 1024, device="cuda")
        C, S = F.quantize(A1)
        A2 = F.dequantize(C, S)
        diff = torch.abs(A1 - A2).mean().item()
        torch.testing.assert_close(A1, A2, atol=1e-2, rtol=0)
        assert diff < 0.004


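# Blockwise quantization splits the flattened tensor into blocks of
# `blocksize` values, each with its own absmax scale; nested=True additionally
# quantizes the per-block statistics themselves to reduce their memory
# overhead.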

@pytest.mark.parametrize("nested", [False, True], ids=["False", "True"])
@pytest.mark.parametrize("blocksize", [4096, 2048, 1024, 512, 256, 128, 64])
def test_dynamic_blockwise_quantization(nested, blocksize):
    #print('')
    diffs = []
    reldiffs = []
    for i in range(100):
        A1 = torch.randn(1024, 1024, device="cuda")
        C, S = F.quantize_blockwise(A1, blocksize=blocksize, nested=nested)
        A2 = F.dequantize_blockwise(C, S)
        diff = torch.abs(A1 - A2)
        reldiff = diff / torch.abs(A1 + 1e-8)
        diffs.append(diff.mean().item())
        reldiffs.append(reldiff.mean().item())
    abserr = sum(diffs)/len(diffs)
    relerr = sum(reldiffs)/len(reldiffs)
    assert abserr < 0.011
    assert relerr < 0.018
    #print('nested=', nested, 'randn', blocksize, sum(diffs)/len(diffs))
    #print('nested=', nested, 'randn', blocksize, sum(reldiffs)/len(reldiffs))

    diffs = []
    for i in range(100):
        A1 = torch.rand(1024, 1024, device="cuda")
        C, S = F.quantize_blockwise(A1, blocksize=blocksize, nested=nested)
        A2 = F.dequantize_blockwise(C, S)
        diff = torch.abs(A1 - A2)
        reldiff = diff / torch.abs(A1 + 1e-8)
        diffs.append(diff.mean().item())
        reldiffs.append(reldiff.mean().item())
        #torch.testing.assert_close(A1, A2, atol=1e-2, rtol=0)
    abserr = sum(diffs)/len(diffs)
    relerr = sum(reldiffs)/len(reldiffs)
    assert abserr < 0.0035
    assert relerr < 0.015
    #print('nested=', nested, 'rand', blocksize, sum(diffs)/len(diffs))
    #print('nested=', nested, 'rand', blocksize, sum(reldiffs)/len(reldiffs))

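# Percentile clipping keeps a rolling window of the last 100 gradient norms
# and rescales the current gradient when its norm exceeds the
# `percentile`-smallest norm in that window (gnorm_scale = clip / gnorm,
# otherwise 1.0).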
@pytest.mark.parametrize(
    "gtype", [torch.float32, torch.float16], ids=["float", "half"]
)
def test_percentile_clipping(gtype):
    gnorm_vec1 = torch.zeros(100, device="cuda")
    gnorm_vec2 = torch.zeros(100, device="cuda")
    n = 4
    step = 0
    percentile = 5
    for i in range(k):
        step += 1
        g = torch.randn(n, n, dtype=gtype, device="cuda")
        gnorm1, clip2, gnorm_scale = F.percentile_clipping(
            g, gnorm_vec2, step, percentile=percentile
        )
        assert gnorm_scale == (1.0 if gnorm1 < clip2 else clip2 / gnorm1)

        gnorm2 = torch.norm(g.float())
        if step == 1:
            gnorm_vec1[:] = gnorm2
        else:
            gnorm_vec1[step % 100] = gnorm2

        vals, idx = torch.sort(gnorm_vec1)
        clip1 = vals[percentile]

        torch.testing.assert_close(gnorm_vec1, torch.sqrt(gnorm_vec2))
        torch.testing.assert_close(clip1, clip2)
        torch.testing.assert_close(gnorm1, gnorm2)


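# Reference absmax ("linear") int8 quantization helpers: scale by the global
# absolute maximum so the largest magnitude maps to +/-127. Worked example:
# for x = [-2.0, 1.0], max1 = 2.0 and the int8 codes are [-127, 64];
# dequantizing 64 gives 64 * 2.0 / 127 = 1.008, so the error is <= max1/127.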
def quant(x):
    max1 = torch.abs(x).max()
    x = torch.round(x / max1 * 127)
    return max1, x.to(torch.int8)


def dequant(c, maxC):
    return c.float() * (maxC / 127)


def mm_dequant(maxA, maxB, C):
    return C.float() * (maxA / 127) * (maxB / 127)

def quant_multi(x, dim):
    max1 = torch.amax(torch.abs(x), dim=dim, keepdim=True)
    max1[max1 == 0] = 1.0
    x = torch.round(x / max1 * 127)
    return max1, x.to(torch.int8)

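# Chunk-wise variant of quant_multi: the absmax is taken per chunk of
# `chunk_size` rows or columns rather than per whole row/column, which limits
# the influence of a single outlier to its own chunk.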
def quant_multi_chunk(x, dim, chunk_size=32):
    if dim == 1:
        x_chunked = einops.rearrange(x, "(c a) b -> c a b", c=chunk_size)
        max1 = torch.amax(torch.abs(x_chunked), dim=dim + 1, keepdim=True)
        max1 = torch.tile(max1, (1, 1, x.shape[1]))
        max1 = max1.view(x.shape)
    elif dim == 0:
        x_chunked = einops.rearrange(x, "a (b c) -> a b c", c=chunk_size)
        max1 = torch.amax(torch.abs(x_chunked), dim=dim, keepdim=True)
        max1 = torch.tile(max1, (x.shape[0], 1, 1))
        max1 = max1.view(x.shape)
    max1[max1 == 0] = 1.0
    x = torch.round(x / max1 * 127)
    return max1, x.to(torch.int8)

def quant_minmax(A):
    minA = A.min()
    maxA = A.max()
    return minA, maxA

def mean(xx):
    return sum(xx) / float(len(xx))

# dim1 = torch.randint(1,1024*4, size=(4,)).tolist()
# dim2 = torch.randint(1,1024*4, size=(4,)).tolist()
dim1 = [1024 * 2]
dim2 = [1024 * 16]
methods = [
    (
        lambda x, dim: quant(x),
        lambda x, dim: quant(x),
        dequant,
        dequant,
        mm_dequant,
    )
]
methods.append((quant_multi, quant_multi, dequant, dequant, mm_dequant))
# methods.append((lambda x: quant_multi_chunk(x, dim=-1), lambda x: quant_multi_chunk(x, dim=0), dequant, dequant, mm_dequant))
method_names = ["linear", "vectorwise"]
batched = [False, True]
values = list(product(dim1, dim2, methods, batched))
values_names = list(product(dim1, dim2, method_names, batched))
names = [
    "dim1_{}_dim2_{}_quant_{}_batched_{}".format(*vals)
    for vals in values_names
]


@pytest.mark.parametrize(
    "dim1, dim2, quant_methods, batched", values, ids=names
)
def test_approx_igemm(dim1, dim2, quant_methods, batched):
    dim1 = dim1 - (dim1 % 32)
    dim2 = dim2 - (dim2 % 32)
    errors = []
    relerrors = []
    #print("")
    for i in range(5):
        if batched:
            A = torch.normal(0, 0.5, size=(32, dim1, dim2 // 32), device="cuda")
            B = torch.normal(0, 0.5, size=(32, dim2 // 32, dim1), device="cuda")
            maxA, Ac = quant_methods[0](A, 2)
            maxB, Bc = quant_methods[1](B, 1)
        else:
            A = torch.normal(0, 0.5, size=(dim1, dim2), device="cuda")
            B = torch.normal(0, 0.5, size=(dim2, dim1), device="cuda")
            maxA, Ac = quant_methods[0](A, 1)
            maxB, Bc = quant_methods[1](B, 0)
        torch.testing.assert_close(
            quant_methods[2](maxA, Ac), A, atol=0.025, rtol=0.05
        )
        if batched:
            out2 = torch.bmm(A, B)
            C = torch.bmm(Ac.float(), Bc.float())
        else:
            out2 = torch.mm(A, B)
            C = F.igemm(Ac, Bc)
        out = quant_methods[4](maxA, maxB, C)
        std = out2.std()
        out /= std
        out2 /= std
        err = torch.abs(out - out2)
        relerr = err / torch.abs(out2)
        errors.append(err.mean().item())
        relerrors.append(relerr.mean().item())
    #print(mean(errors))
    #print(mean(relerrors))


def test_stable_embedding():
    layer = bnb.nn.StableEmbedding(1024, 1024)
    layer.reset_parameters()


n = 2
hidden_dim = torch.randint(32, 256, size=(n,)).tolist()
batch_dim = torch.randint(16, 256, size=(n,)).tolist()
seq_dim = torch.randint(16, 256, size=(n,)).tolist()
transpose = [(False, False), (False, True), (True, False), (True, True)]
values = list(product(hidden_dim, batch_dim, transpose, seq_dim))
names = [
    "hidden_dim_{}_batch_dim_{}_transpose_{}_seq_dim_{}".format(*vals)
    for vals in values
]


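# F.igemm is an int8 matmul with int32 accumulation; dimensions are rounded
# down to multiples of 32/16 below because the kernel requires tile-aligned
# shapes.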
@pytest.mark.parametrize(
    "hidden_dim, batch_dim, transpose, seq_dim", values, ids=names
)
def test_igemm(hidden_dim, batch_dim, transpose, seq_dim):
    hidden_dim = hidden_dim - (hidden_dim % 32)
    batch_dim = batch_dim - (batch_dim % 16)
    seq_dim = seq_dim - (seq_dim % 16)
    for i in range(k):
        shapeA = (
            (batch_dim, hidden_dim)
            if not transpose[0]
            else (hidden_dim, batch_dim)
        )
        shapeB = (
            (32 * random.randint(1, 4), hidden_dim)
            if transpose[1]
            else (hidden_dim, 32 * random.randint(1, 4))
        )
        A = torch.randint(-128, 127, size=shapeA, device="cuda").to(torch.int8)
        B = torch.randint(-128, 127, size=shapeB, device="cuda").to(torch.int8)
        if not transpose[0] and not transpose[1]:
            out2 = torch.matmul(A.float(), B.float())
            out = F.igemm(A, B)
        elif not transpose[0] and transpose[1]:
            out2 = torch.matmul(A.float(), B.t().float())
            out = F.igemm(A, B.t())
        elif transpose[0] and not transpose[1]:
            out2 = torch.matmul(A.t().float(), B.float())
            out = F.igemm(A.t(), B)
        elif transpose[0] and transpose[1]:
            out2 = torch.matmul(A.t().float(), B.t().float())
            out = F.igemm(A.t(), B.t())
        torch.testing.assert_close(out.float(), out2)
    for i in range(k):
        shapeA = (batch_dim, seq_dim, hidden_dim)
        shapeB = (
            (32 * random.randint(1, 4), hidden_dim)
            if transpose[1]
            else (hidden_dim, 32 * random.randint(1, 4))
        )
        A = torch.randint(-128, 127, size=shapeA, device="cuda").to(torch.int8)
        B = torch.randint(-128, 127, size=shapeB, device="cuda").to(torch.int8)
        if not transpose[0] and not transpose[1]:
            out2 = torch.matmul(A.float(), B.float())
            out = F.igemm(A, B)
        elif not transpose[0] and transpose[1]:
            out2 = torch.matmul(A.float(), B.t().float())
            out = F.igemm(A, B.t())

        torch.testing.assert_close(out.float(), out2)


n = 3
seq_dim = torch.randint(32, 512, size=(n,)).tolist()
hidden_dim = torch.randint(32, 1024 * 4, size=(n,)).tolist()
batch_dim = torch.randint(2, 16, size=(n,)).tolist()
values = list(product(seq_dim, hidden_dim, batch_dim))
names = [
    "seq_dim{}_hidden_dim{}_batch_dim{}".format(*vals) for vals in values
]


@pytest.mark.parametrize("seq_dim, hidden_dim, batch_dim", values, ids=names)
def test_dim3_igemm(seq_dim, hidden_dim, batch_dim):
    seq_dim = seq_dim - (seq_dim % 32)
    hidden_dim = hidden_dim - (hidden_dim % 32)
    batch_dim = batch_dim - (batch_dim % 2)
    for i in range(25):
        A = torch.randint(
            -128, 127, size=(batch_dim, seq_dim, hidden_dim), device="cuda"
        ).to(torch.int8)
        B = torch.randint(
            -128, 127, size=(batch_dim, seq_dim, 1024), device="cuda"
        ).to(torch.int8)
        out2 = torch.einsum("bsi, bso->io", A.float(), B.float())
        iout = torch.empty(
            A.shape[2], B.shape[2], dtype=torch.int32, device=A.device
        )
        out = F.igemm(A, B, out=iout)

        torch.testing.assert_close(out.float(), out2)


n = 2
seq_dim = torch.randint(32, 512, size=(n,)).tolist()
hidden_dim = torch.randint(32, 1024 * 4, size=(n,)).tolist()
batch_dim = torch.randint(2, 16, size=(n,)).tolist()
transpose = [False, True]
values = list(product(seq_dim, hidden_dim, batch_dim, transpose))
names = [
    "seq_dim={}_hidden_dim={}_batch_dim={}_transpose{}".format(*vals)
    for vals in values
]


@pytest.mark.parametrize(
    "seq_dim, hidden_dim, batch_dim, transpose", values, ids=names
)
def test_minmax_igemm(seq_dim, hidden_dim, batch_dim, transpose):
    def min_max(x):
        maxA = torch.amax(x, dim=2, keepdim=True)
        minA = torch.amin(x, dim=2, keepdim=True)
        scale = (maxA - minA) / 2.0
        return (127 * (x - minA - scale) / scale).to(torch.int8), minA, scale
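    # min_max is an asymmetric scheme: the range [minA, maxA] is shifted by
    # minA + scale and mapped onto [-127, 127], so the integer matmul result
    # has to be corrected by an offset of B.sum(0) * (minA + scale) afterwards.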

    seq_dim = seq_dim - (seq_dim % 16)
    hidden_dim = hidden_dim - (hidden_dim % 16)
    batch_dim = batch_dim - (batch_dim % 2)
    errs = []
    relerrs = []
    errs2 = []
    relerrs2 = []
    for i in range(k):
        A = torch.normal(
            0.0, 0.5, size=(batch_dim, seq_dim, hidden_dim), device="cuda"
        )
        if transpose:
            B = torch.normal(0, 0.5, size=(256, hidden_dim), device="cuda")
        else:
            B = torch.normal(0, 0.5, size=(hidden_dim, 256), device="cuda")
        Ac, minA, scale = min_max(A)
        if transpose:
            maxB, Bc = quant_multi(B, dim=(1 if transpose else 0))
            out = F.igemm(Ac, Bc.t())
            out2 = torch.matmul(A, B.t())
            offset = B.t().sum(0) * (minA + scale)
            out = out.float()
            out = (out * maxB.t() * scale / (127 * 127)) + offset

            maxA, Ac = quant_multi(A, dim=2)
            out3 = F.igemm(Ac, Bc.t())
            out3 = mm_dequant(maxA, maxB.t(), out3)
        else:
            maxB, Bc = quant_multi(B, dim=0)
            offset = B.sum(0) * (minA + scale)
            out = F.igemm(Ac, Bc)
            out2 = torch.matmul(A, B)
            out = out.float()
            out = (out * maxB * scale / (127 * 127)) + offset

            maxA, Ac = quant_multi(A, dim=2)
            out3 = F.igemm(Ac, Bc)
            out3 = mm_dequant(maxA, maxB, out3)

        std = out2.std()
        out2 /= std
        out /= std
        out3 /= std

        err = torch.abs(out - out2)
        relerr = err / (torch.abs(out2) + 1e-7)
        err2 = torch.abs(out3 - out2)
        relerr2 = err2 / (torch.abs(out2) + 1e-7)

        errs.append(err.mean().item())
        relerrs.append(relerr.mean().item())
        errs2.append(err2.mean().item())
        relerrs2.append(relerr2.mean().item())
    # print(mean(errs))
    # print(mean(relerrs))
    # print(mean(errs2))
    # print(mean(relerrs2))
    assert mean(errs) < 0.015
    assert mean(relerrs) < 0.3

n = 2
dim1 = torch.randint(1, 64, size=(n,)).tolist()
dim2 = torch.randint(32, 128, size=(n,)).tolist()
dim3 = torch.randint(32, 256, size=(n,)).tolist()
dim4 = torch.randint(32, 256, size=(n,)).tolist()
transpose = [(False, False), (True, False), (False, True), (True, True)]
values = list(product(dim1, dim2, dim3, dim4, transpose))
names = [
    "dim1_{}_dim2_{}_dim3_{}_dim4_{}_transpose_{}".format(*vals)
    for vals in values
]


@pytest.mark.parametrize("dim1, dim2, dim3, dim4, transpose", values, ids=names)
def test_ibmm(dim1, dim2, dim3, dim4, transpose):
    dim2 = dim2 - (dim2 % 16)
    dim3 = dim3 - (dim3 % 16)
    dim4 = dim4 - (dim4 % 16)
    for i in range(k):
        shapeA = (dim1, dim3, dim2) if transpose[0] else (dim1, dim2, dim3)
        shapeB = (dim1, dim4, dim3) if transpose[1] else (dim1, dim3, dim4)
        A = torch.randint(-128, 127, size=shapeA, device="cuda").to(torch.int8)
        B = torch.randint(-128, 127, size=shapeB, device="cuda").to(torch.int8)

        if not transpose[0] and not transpose[1]:
            out2 = torch.bmm(A.float(), B.float())
            out = F.igemm(A, B)
        elif not transpose[0] and transpose[1]:
            out2 = torch.bmm(A.float(), B.permute([0, 2, 1]).float())
            out = F.igemm(A, B.permute([0, 2, 1]))
        elif transpose[0] and not transpose[1]:
            out2 = torch.bmm(A.permute([0, 2, 1]).float(), B.float())
            out = F.igemm(A.permute([0, 2, 1]), B)
        elif transpose[0] and transpose[1]:
            out2 = torch.bmm(
                A.permute([0, 2, 1]).float(), B.permute([0, 2, 1]).float()
            )
            out = F.igemm(A.permute([0, 2, 1]), B.permute([0, 2, 1]))
        torch.testing.assert_close(out.float(), out2.float())


n = 1
dim1 = torch.randint(1, 64, size=(n,)).tolist()
dim2 = torch.randint(32, 128, size=(n,)).tolist()
dim3 = torch.randint(32, 256, size=(n,)).tolist()
values = list(product(dim1, dim2, dim3))
names = ["dim1_{}_dim2_{}_dim3_{}".format(*vals) for vals in values]


@pytest.mark.parametrize("dim1, dim2, dim3", values, ids=names)
def test_vector_quant(dim1, dim2, dim3):
    dim2 = dim2 - (dim2 % 16)
    dim3 = dim3 - (dim3 % 16)
    for i in range(k):
        A = torch.randn(size=(dim2, dim3), device="cuda")
        qA, SA = F.vectorwise_quant(A, dim=0)
        A1 = F.vectorwise_dequant(qA, SA)
        n = A1.numel()
        assert_all_approx_close(A1, A, atol=0.01, rtol=0.1, count=int(n*0.002))


n = 2
dim1 = torch.randint(2, 256, size=(n,)).tolist()
dim2 = torch.randint(2, 256, size=(n,)).tolist()
dim3 = torch.randint(2, 256, size=(n,)).tolist()
# dim1, dim2 = (256,), (256,)
dtype = [torch.int8, torch.int32]
a_order = ["row"]
out_order = ["col", "row", "col32"]
transpose = [False]
dims = [2, 3]
values = list(product(dim1, dim2, dim3, dims, dtype, a_order, out_order, transpose))

names = ["dim1_{}_dim2_{}_dim3_{}_dims_{}_dtype_{}_orderA_{}_orderOut_{}_transpose_{}".format(*vals) for vals in values]

@pytest.mark.parametrize("dim1, dim2, dim3, dims, dtype, orderA, orderOut, transpose", values, ids=names)
def test_nvidia_transform(dim1, dim2, dim3, dims, dtype, orderA, orderOut, transpose):
    if dims == 3 and orderOut != "col32":
        return
    if dtype == torch.int32 and orderOut != "col32":
        return
    func = F.get_transform_func(dtype, orderA, orderOut, transpose)

    if dims == 2:
        A = torch.randint(-128, 127, size=(dim1, dim2), device="cuda").to(dtype)
    elif dims == 3:
        A = torch.randint(-128, 127, size=(dim1, dim2, dim3), device="cuda").to(
            dtype
        )

    out, S = F.nvidia_transform(A, to_order=orderOut)

    if orderOut == "row":
        torch.testing.assert_close(A.flatten(), out.flatten())
    elif orderOut == "col":
        torch.testing.assert_close(A.t().flatten(), out.flatten())
    elif orderOut == "col32":
        if dims == 2:
            # pad columns up to the next multiple of 32 (no extra tile if already aligned)
            n = A.shape[0] * (32 * ((A.shape[1] + 31) // 32))
        elif dims == 3:
            n = (
                A.shape[0]
                * A.shape[1]
                * (32 * ((A.shape[2] + 31) // 32))
            )
        assert out.numel() == n
    elif orderOut == "col_turing":
        # 32 col 8 row tiles
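        # i.e. the matrix is packed into 8x32 tiles and both dimensions are
        # padded up to the next tile boundary, so the element count below
        # includes the padding.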
        n = (8 * ((A.shape[0] + 7) // 8)) * (32 * ((A.shape[1] + 31) // 32))
        assert out.numel() == n
        total_coltile = (A.shape[1] // 32) + (1 if A.shape[1] % 32 != 0 else 0)
        for row in range(A.shape[0]):
            for col in range(A.shape[1]):
                i = row * A.shape[1]
                j = col

                coltile = (col // 32) + (1 if col % 32 != 0 else 0)
                rowtile = (
                    (row // 8) + (1 if row % 8 != 0 else 0)
                ) * total_coltile
                offset = 32 * 8 * (rowtile + coltile)
                col2 = col % 32
                row2 = (row % 8) * 32
                assert A.flatten()[i + j] == A[row, col]
                # assert A.flatten()[i+j] == out.flatten()[row2+col2]
                # torch.testing.assert_close(A.flatten()[i+j], A[row, col])
                # torch.testing.assert_close(A.flatten()[i+j], out.flatten()[row2+ col2+block_offset])
    if orderOut == "col32":
        out2, S = F.nvidia_transform(
            out, from_order=orderOut, to_order="row", state=S
        )
        torch.testing.assert_close(A, out2)


n = 1
dim1 = torch.randint(1, 256, size=(n,)).tolist()
dim2 = torch.randint(32, 512, size=(n,)).tolist()
dim3 = torch.randint(32, 1024, size=(n,)).tolist()
dim4 = torch.randint(32, 1024, size=(n,)).tolist()
# dim1 = [2]
# dim2 = [2]
# dim3 = [2]
# dim4 = [2]
dims = (2, 3)
ldb = [0]
# ldb = list(range(256, 1*1024, 256))
values = list(product(dim1, dim2, dim3, dim4, dims, ldb))
names = [
    "dim1_{}_dim2_{}_dim3_{}_dim4_{}_dims_{}_ldb_{}".format(*vals)
    for vals in values
]


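# F.igemmlt consumes tensors in NVIDIA's tiled layouts: A in col32 and B in
# col_turing/col_ampere format (produced by F.transform); the int32 result
# comes back in col32 and is transformed to row-major before comparison.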
@pytest.mark.parametrize("dim1, dim2, dim3, dim4, dims, ldb", values, ids=names)
def test_igemmlt_int(dim1, dim2, dim3, dim4, dims, ldb):
    for i in range(k):
        if dims == 2:
            A = torch.randint(-128, 127, size=(dim1, dim3), device="cuda").to(
                torch.int8
            )
        elif dims == 3:
            A = torch.randint(
                -128, 127, size=(dim1, dim2, dim3), device="cuda"
            ).to(torch.int8)
        B = torch.randint(-128, 127, size=(dim4, dim3), device="cuda").to(
            torch.int8
        )
        C1 = torch.matmul(A.float(), B.t().float())

        A2, SA = F.transform(A, "col32")
        B2, SB = F.transform(B, "col_turing")
        C2, SC = F.igemmlt(A2, B2, SA, SB)
        C3, S = F.nvidia_transform(C2, "row", state=SC)
        torch.testing.assert_close(C1, C3.float())

        # transpose
        B = torch.randint(-128, 127, size=(dim3, dim4), device="cuda").to(
            torch.int8
        )
        C1 = torch.matmul(A.float(), B.float())

        B2t, SBt = F.transform(B, "col_turing", transpose=True)
        C2, SC = F.igemmlt(A2, B2t, SA, SBt)
        C3, S = F.nvidia_transform(C2, "row", state=SC)
        torch.testing.assert_close(C1, C3.float())

dim1 = [32]
dim2 = [32]
dim3 = [32]
dim4 = [32]

dims = (2,)
# ldb = list(range(256, 1*1024, 256))
values = list(product(dim1, dim2, dim3, dim4, dims))
names = [
    "dim1_{}_dim2_{}_dim3_{}_dim4_{}_dims_{}".format(*vals)
    for vals in values
]


@pytest.mark.parametrize("dim1, dim2, dim3, dim4, dims", values, ids=names)
def test_igemmlt_half(dim1, dim2, dim3, dim4, dims):
    formatB = F.get_special_format_str()
    for i in range(k):
        if dims == 2:
            A = torch.normal(0, 0.5, size=(dim1, dim3), device="cuda").half()
        elif dims == 3:
            A = torch.normal(
                0, 0.5, size=(dim1, dim2, dim3), device="cuda"
            ).half()
        B = torch.randn((dim4, dim3), device="cuda").half()
        torch.nn.init.xavier_uniform_(B)
        C1 = torch.matmul(A, B.t())
        C2 = bnb.matmul(A, B.t())

        A = A.view(-1, A.shape[-1])

        CA, CAt, statsA, statsAt, coo_tensor = F.double_quant(A)
        CB, CBt, statsB, statsBt, coo_tensor = F.double_quant(B)
        C32A, SA = F.transform(CA, "col32")
        CxB, SB = F.transform(CB, to_order=formatB)
        out1_32, Sout1_32 = F.igemmlt(C32A, CxB, SA, SB)
        output = F.mm_dequant(out1_32, Sout1_32, statsAt, statsBt)

        # print('')
        # print(output.flatten()[:10])
        # print(C1.flatten()[:10])
        # print(C2.flatten()[:10])
        # torch.testing.assert_close(C1.view(-1, C1.shape[-1]), output, atol=0.025, rtol=0.05)

        # transpose
        # B = torch.randint(-128, 127, size=(dim3, dim4), device='cuda').to(torch.int8)
        # C1 = torch.matmul(A.float(), B.float())

        # B2t, SBt = F.transform2(B, 'col_turing', transpose=True)
        # C2, SC = F.igemmlt(A2, B2t, SA, SBt)
        # C3, S = F.transform(C2, 'row', state=SC)
        # torch.testing.assert_close(C1, C3.float())


batch_size = 2
seqdim = 512
# values = [(batch_size, seqdim, 4*1024, 16*1024),(batch_size, seqdim, 5120, 4*5120),(batch_size, seqdim, 12*1024, 4*12*1024)]
values = [
    (batch_size, seqdim, 4 * 1024, 3 * 4 * 1024),
    (batch_size, seqdim, 5120, 3 * 5120),
    (batch_size, seqdim, 12 * 1024, 4 * 12 * 1024),
]


# values = list(product(batch, seq, model, hidden))
names = [
    "batch_{}_seq_{}_model_{}_hidden_{}".format(*vals) for vals in values
]


@pytest.mark.parametrize("batch, seq, model, hidden", values, ids=names)
def test_bench_8bit_training(batch, seq, model, hidden):
    formatB = F.get_special_format_str()
    A = torch.randn(batch, seq, model, device="cuda").half()
    grad = torch.randn(batch, seq, model, device="cuda").half()
    w1 = torch.randint(-128, 127, size=(hidden, model), device="cuda").half()
    w2 = torch.randint(-128, 127, size=(model, hidden), device="cuda").half()
    print("")
    # torch.cuda.synchronize()
    ## warmup
    # for i in range(100):
    #    torch.matmul(A, w1.t())
    # torch.cuda.synchronize()

    dtype = torch.int8
    A = A.view(-1, A.shape[-1]).contiguous()
    grad = grad.view(-1, grad.shape[-1]).contiguous()
    torch.cuda.synchronize()
    t0 = time.time()
    for i in range(k):

        out1 = torch.matmul(A, w1.t())  # fc1
        # out2 = torch.matmul(out1, w2.t())# fc2
        # d1 = torch.matmul(grad, w2) # delta1
        # d2 = torch.matmul(d1, w1) # delta2
        # grad1 = torch.einsum('bo,bh->oh', out1, grad) # grad w2
        # grad2 = torch.einsum('bh,bo->ho', A, d2) # grad w1

    torch.cuda.synchronize()
    t16 = time.time() - t0
    print(t16)

    # torch.cuda.empty_cache()
    # Cw1, Cw1t, statsw1, statsw1t, coo_tensor = F.double_quant(w1)
    # Cw2, Cw2t, statsw2, statsw2t, coo_tensor = F.double_quant(w2)
    # CTw1, Sw1 = F.transform2(Cw1, formatB)
    # CTw2, Sw2 = F.transform2(Cw2, formatB)
    # CTw2t, Sw2t = F.transform2(Cw2t, formatB, transpose=True)
    # CTw1t, Sw1t = F.transform2(Cw1t, formatB, transpose=True)
    # CA, CAt, statsA, statsAt, coo_tensor = F.double_quant(A)
    # C32A, SA = F.transform2(CA, 'col32')
    ## fc1
    # out1_32, Sout1_32 = F.igemmlt(C32A, CTw1, SA, Sw1, dtype=dtype)
    ##out1 = F.mm_dequant(out1_32, Sout1_32, statsAt, statsw1t)

    ## fc2
    # Cout1, Cout1t, statsout1, statsout1t, coo_tensor = F.double_quant(out1)
    # C32out1, Sout1 = F.transform2(Cout1, 'col32')
    # out2_32, Sout2_32 = F.igemmlt(C32out1, CTw2, Sout1, Sw2, dtype=dtype)
    ##out2 = F.mm_dequant(out2_32, Sout2_32, statsout1t, statsw2t)

    ## delta1
    # Cgrad, Cgradt, statsgrad, statsgradt, coo_tensor = F.double_quant(grad)
    # C32grad, Sgrad = F.transform2(Cgrad, 'col32')
    ##d1_32, Sd1_32 = F.igemmlt(C32grad, CTw2t, Sgrad, Sw2t, dtype=dtype)
    ##d1 = F.mm_dequant(d1_32, Sd1_32, statsgradt, statsw2)

    ## delta2
    # Cd1, Cd1t, statsd1, statsd1t, coo_tensor = F.double_quant(d1)
    # C32d1, Sd1 = F.transform2(Cd1, 'col32')
    ##d2_32, Sd2_32 = F.igemmlt(C32d1, CTw1t, Sd1, Sw1t, dtype=dtype)
    ##d2 = F.mm_dequant(d2_32, Sd2_32, statsd1t, statsw1)

    ## grad1
    # C32out1t, Sout1t = F.transform2(Cout1t, 'col32', transpose=True)
    # CTgradt, Sgradt = F.transform2(Cgradt, formatB, transpose=True)
    ##grad1_32, Sgrad1_32 = F.igemmlt(C32out1t, CTgradt, Sout1t, Sgradt, dtype=dtype)
    ##grad1 = F.mm_dequant(grad1_32, Sgrad1_32, statsout1, statsgrad)

    ## grad2
    # C32At, SAt = F.transform2(CAt, 'col32', transpose=True)
    # CTd1t, Sd1t = F.transform2(Cd1t, formatB, transpose=True)
    ##grad2_32, Sgrad2_32 = F.igemmlt(C32At, CTd1t, SAt, Sd1t, dtype=dtype)
    ##grad2 = F.mm_dequant(grad2_32, Sgrad2_32, statsA, statsd1)

    # Cw2, Cw2t, statsw2, statsw2t, coo_tensor = F.double_quant(w2)
    # Cw1, Cw1t, statsw1, statsw1t, coo_tensor = F.double_quant(w1)
    # Cw2, Cw2t, statsw2, statsw2t, coo_tensor = F.double_quant(w2)
    # CTw1, Sw1 = F.transform2(Cw1, formatB)
    # CTw1t, Sw1t = F.transform2(Cw1t, formatB, transpose=True)
    # CTw2, Sw2 = F.transform2(Cw2, formatB)
    # CTw2t, Sw2t = F.transform2(Cw2t, formatB, transpose=True)
    # torch.cuda.synchronize()
    # t0 = time.time()
    # for i in range(k):
    #    #Cw1, Cw1t, statsw1, statsw1t, coo_tensor = F.double_quant(w1)
    #    #CTw1, Sw1 = F.transform2(Cw1, formatB)
    #    #Cw1, Cw1t, statsw1, statsw1t, coo_tensor = F.double_quant(w1)
    #    #CTw1, Sw1 = F.transform2(Cw1, formatB)

    #    #CA, CAt, statsA, statsAt, coo_tensor = F.double_quant(A, threshold=3.5)
    #    CA, CAt, statsA, statsAt, coo_tensor = F.double_quant(A)
    #    #CTw1t, Sw1t = F.transform2(Cw1t, formatB, transpose=True)
    #    #CTw2, Sw2 = F.transform2(Cw2, formatB)
    #    #CTw2t, Sw2t = F.transform2(Cw2t, formatB, transpose=True)

    #    C32A, SA = F.transform2(CA, 'col32')

    #    # fc1
    #    out1_32, Sout1_32 = F.igemmlt(C32A, CTw1, SA, Sw1, dtype=dtype)
    #    #out1dn = F.mm_dequant(out1_32, Sout1_32, statsA, statsw1)

    #    #print(coo_tensor.nnz)
    #    #out1sp = F.spmm_coo(coo_tensor, w1.t())
    #    #print(w1.t().shape)
    #    #out1 = out1dn + out1sp

    #    # fc2
    #    Cout1, Cout1t, statsout1, statsout1t, coo_tensor = F.double_quant(out1)
    #    C32out1, Sout1 = F.transform2(Cout1, 'col32')
    #    out2_32, Sout2_32 = F.igemmlt(C32out1, CTw2, Sout1, Sw2, dtype=dtype)
    #    #out2 = F.mm_dequant(out2_32, Sout2_32, statsout1, statsw2)

    #    # delta1
    #    Cgrad, Cgradt, statsgrad, statsgradt, coo_tensor = F.double_quant(grad)
    #    C32grad, Sgrad = F.transform2(Cgrad, 'col32')
    #    d1_32, Sd1_32 = F.igemmlt(C32grad, CTw2t, Sgrad, Sw2t, dtype=dtype)
    #    #d1 = F.mm_dequant(d1_32, Sd1_32, statsgrad, statsw2t)

    #    # delta2
    #    Cd1, Cd1t, statsd1, statsd1t, coo_tensor = F.double_quant(d1)
    #    C32d1, Sd1 = F.transform2(Cd1, 'col32')
    #    d2_32, Sd2_32 = F.igemmlt(C32d1, CTw1t, Sd1, Sw1t, dtype=dtype)
    #    #d2 = F.mm_dequant(d2_32, Sd2_32, statsd1, statsw1t)

    #    # grad1
    #    #C32out1t, Sout1t = F.transform2(Cout1t, 'col32', transpose=True)
    #    #CTgradt, Sgradt = F.transform2(Cgradt, formatB, transpose=True)
    #    #grad1_32, Sgrad1_32 = F.igemmlt(C32out1t, CTgradt, Sout1t, Sgradt, dtype=dtype)
    #    #grad1 = F.mm_dequant(grad1_32, Sgrad1_32, statsout1t, statsgradt)

    #    ## grad2
    #    #C32At, SAt = F.transform2(CAt, 'col32', transpose=True)
    #    #CTd1t, Sd1t = F.transform2(Cd1t, formatB, transpose=True)
    #    #grad2_32, Sgrad2_32 = F.igemmlt(C32At, CTd1t, SAt, Sd1t, dtype=dtype)
    #    #grad2 = F.mm_dequant(grad2_32, Sgrad2_32, statsAt, statsd1t)

    # torch.cuda.synchronize()
    # t8 = time.time() - t0
    # print(t8)


n = 2
dim1 = torch.randint(64, 256, size=(n,)).tolist()
dim4 = torch.randint(64, 1024, size=(n,)).tolist()
#dim1 = [2*1024]
#dim4 = [2*1024]
#dim1 = [4]
#dim4 = [4]
dims = (2,)
formatB = ["col_turing", "col_ampere"]
has_bias = [True, False]
values = list(product(dim1, dim4, dims, formatB, has_bias))
names = ["dim1_{}_dim4_{}_dims_{}_formatB_{}_has_bias_{}".format(*vals) for vals in values]


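# F.mm_dequant fuses the int32 -> float16 dequantization: it divides out the
# row and column absmax statistics (and the two factors of 127) and can add an
# optional bias in the same step.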
@pytest.mark.parametrize("dim1, dim4, dims, formatB, has_bias", values, ids=names)
def test_dequant_mm(dim1, dim4, dims, formatB, has_bias):
    inner = torch.randint(1, 128, size=(1,)).item()
    bias = None
    if has_bias: bias = torch.randn(dim4, device='cuda', dtype=torch.float16)
    formatB = F.get_special_format_str()
    for i in range(1):
        A = torch.randn(dim1, inner, device="cuda")
        B = torch.randn(dim4, inner, device="cuda")
        C1 = torch.matmul(A.half(), B.t().half())
        if has_bias: C1 += bias

        A1, maxA = F.vectorwise_quant(A, dim=1)
        B1, maxB = F.vectorwise_quant(B, dim=1)

        A2, SA = F.nvidia_transform(A1, "col32")
        B2, SB = F.nvidia_transform(B1, formatB)
        C2, SC = F.igemmlt(A2, B2, SA, SB)

        C3, S = F.nvidia_transform(C2, "row", state=SC)
        C4 = F.vectorwise_mm_dequant(C3.float(), maxA, maxB.t())
        if has_bias: C4 += bias

        # TODO: is something wrong here? If so, the problem goes deeper
        #n = C1.numel()
        #p = 0.06
        std = C1.std(0).view(1, -1)
        C1 /= std
        C4 /= std
        #assert_all_approx_close(C1, C4, atol=0.02, rtol=0.1, count=int(n*0.06))
        #assert (count / n < p), f"error in more than {p} of elements: {count}/{n}={count/n}"

        C5 = F.mm_dequant(C2, SC, maxA.flatten(), maxB.flatten(), bias=bias)
        #torch.testing.assert_close(C5, C4, atol=0.015, rtol=0.1)
        n = C5.numel()
        assert_all_approx_close(C1, C4, atol=0.015, rtol=0.1, count=int(0.01*n))


n = 2
dim1 = [1 * 1024]
dim2 = [1 * 1024]
# dim1 = torch.randint(1,4*1024, size=(n,)).tolist()
# dim2 = torch.randint(1,4*1024, size=(n,)).tolist()

dims = (2,)
# ldb = list(range(256, 1*1024, 256))
values = list(product(dim1, dim2, dims))
names = ["dim1_{}_dim2_{}_dims_{}".format(*vals) for vals in values]


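# F.get_colrow_absmax computes per-row and per-column absmax statistics in one
# pass; with threshold > 0 it excludes outliers above the threshold from the
# statistics and additionally returns cumulative block-level outlier counts
# (nnz_block_ptr).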
@pytest.mark.parametrize("dim1, dim2, dims", values, ids=names)
def test_colrow_absmax(dim1, dim2, dims):
    for i in range(k):
        threshold = 3.0
        A = torch.randn(dim1, dim2, device="cuda").half()
        A_truncated = A.clone()
        A_truncated[torch.abs(A_truncated) >= 3.0] = 0.0
        if dims == 2:
            row_stats1, _ = torch.abs(A.float()).max(1)
            col_stats1, _ = torch.abs(A.float()).max(0)
            row_stats1_trunc, _ = torch.abs(A_truncated.float()).max(1)
            col_stats1_trunc, _ = torch.abs(A_truncated.float()).max(0)
        else:
            assert False

        row_stats2, col_stats2, nnz_block_ptr2 = F.get_colrow_absmax(
            A, threshold=threshold
        )

        A_blocked = einops.rearrange(
            torch.abs(A),
            "(rows row_tiles) (cols block_size)-> rows cols row_tiles block_size",
            row_tiles=16,
            block_size=64 * 4,
        )
        nnz_rows1_counts = (torch.abs(A_blocked) >= threshold).sum(3).flatten()
        nnz_block_ptr1 = torch.zeros(
            nnz_rows1_counts.shape[0] + 1,
            dtype=nnz_rows1_counts.dtype,
            device=nnz_rows1_counts.device,
        )
        nnz_block_ptr1[1:] = nnz_rows1_counts.cumsum(0)

        torch.testing.assert_close(col_stats1_trunc, col_stats2)
        torch.testing.assert_close(row_stats1_trunc, row_stats2)
        torch.testing.assert_close(nnz_block_ptr1.int(), nnz_block_ptr2)
        row_stats2, col_stats2, nnz_block_ptr2 = F.get_colrow_absmax(
            A, threshold=0.0
        )
        torch.testing.assert_close(col_stats1, col_stats2)
        torch.testing.assert_close(row_stats1, row_stats2)
        assert nnz_block_ptr2 is None


n = 2
# dim1 = [8*1024]
# dim2 = [4*1024]
dim1 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
dim2 = torch.randint(1, 4 * 1024, size=(n,)).tolist()

values = list(product(dim1, dim2))
names = ["dim1_{}_dim2_{}".format(*vals) for vals in values]
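# F.double_quant performs row-wise and column-wise absmax int8 quantization in
# a single pass (CA/statsA row-wise, CAt/statsAt column-wise); it should agree
# with two separate vectorwise_quant calls up to one unit of rounding.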

@pytest.mark.parametrize("dim1, dim2", values, ids=names)
def test_double_quant(dim1, dim2):
    for i in range(k):
        A = torch.randn(dim1, dim2, device="cuda").half()
        out_col1, Scol = F.vectorwise_quant(A, dim=0)
        out_row1, Srow = F.vectorwise_quant(A, dim=1)

        CA, CAt, statsA, statsAt, coo_tensor = F.double_quant(A)

        # max difference is 1 due to rounding differences
        torch.testing.assert_close(CA, out_row1, atol=1, rtol=0)
        torch.testing.assert_close(CAt, out_col1, atol=1, rtol=0)

        n = CAt.numel()
        num_not_close_rows = (
            (torch.isclose(CA, out_row1, atol=1) == 0).sum().item()
        )
        num_not_close_cols = (
            (torch.isclose(CAt, out_col1, atol=1) == 0).sum().item()
        )

        # allow for 1:500 error due to rounding differences
        min_error = 1 / 500
        if num_not_close_cols > (min_error * n):
            print(
                f"Min error exceeded {num_not_close_cols} elements are different. Error: {num_not_close_cols/n:.4f}"
            )
            assert False
        if num_not_close_rows > (min_error * n):
            print(
                f"Min error exceeded {num_not_close_rows} elements are different. Error: {num_not_close_rows/n:.4f}"
            )
            assert False

        torch.testing.assert_close(Srow.flatten().float(), statsA)
        torch.testing.assert_close(Scol.flatten().float(), statsAt)


n = 4
dim1 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
dim4 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
inner = torch.randint(1, 4 * 1024, size=(n,)).tolist()

values = list(zip(dim1, dim4, inner))
names = ["dim1_{}_dim4_{}_inner_{}".format(*vals) for vals in values]


@pytest.mark.parametrize("dim1, dim4, inner", values, ids=names)
def test_integrated_igemmlt(dim1, dim4, inner):
    for i in range(k):
        A = torch.randn(dim1, inner, device="cuda").half()
        B = torch.randn(dim4, inner, device="cuda").half()

        out1 = torch.matmul(A.half(), B.t().half())

        C1a, C1b, stats1a, stats1b, coo_tensor = F.double_quant(A)
        C2a, C2b, stats2a, stats2b, coo_tensor = F.double_quant(B)
        A1, maxA = F.vectorwise_quant(A, dim=1)
        B1, maxB = F.vectorwise_quant(B, dim=1)

        torch.testing.assert_close(maxA.flatten().float(), stats1a)
        torch.testing.assert_close(maxB.flatten().float(), stats2a)
        torch.testing.assert_close(C1a, A1, rtol=0, atol=1)
        torch.testing.assert_close(C2a, B1, rtol=0, atol=1)
        A2, SA = F.nvidia_transform(C1a, "col32")
        B2, SB = F.nvidia_transform(C2a, "col_turing")
        outC32, SC = F.igemmlt(A2, B2, SA, SB)
        out2 = F.mm_dequant(outC32, SC, stats1a, stats2a)

        A2, SA = F.nvidia_transform(A1, "col32")
        B2, SB = F.nvidia_transform(B1, "col_turing")
        C2, SC = F.igemmlt(A2, B2, SA, SB)

        C3, S = F.nvidia_transform(C2, "row", state=SC)
        out3 = F.vectorwise_mm_dequant(C3.float(), maxA, maxB.t())

        err1 = torch.abs(out1 - out2).mean().item()
        err2 = torch.abs(out1 - out3).mean().item()
        assert err2 <= err1 * 1.025


n = 6
dim1 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
dim4 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
inner = torch.randint(1, 4 * 1024, size=(n,)).tolist()

values = list(zip(dim1, dim4, inner))
names = ["dim1_{}_dim4_{}_inner_{}".format(*vals) for vals in values]


@pytest.mark.parametrize("dim1, dim4, inner", values, ids=names)
@pytest.mark.skip("Row scale has some bugs for ampere")
def test_igemmlt_row_scale(dim1, dim4, inner):
    formatB = F.get_special_format_str()
    err1, err2, err3 = [], [], []
    relerr1, relerr2 = [], []
    scale = 1
    for i in range(k):
        A = torch.randn(dim1, inner, device="cuda").half()
        B = torch.randn(dim4, inner, device="cuda").half()
        torch.nn.init.xavier_uniform_(B)
        C1 = torch.matmul(A, B.t())

        out1 = torch.matmul(A.half(), B.t().half())

        C1a, C1b, stats1a, stats1b, coo_tensor = F.double_quant(A)
        CB, absmaxB = F.vectorwise_quant(B, quant_type="linear")
        A2, SA = F.nvidia_transform(C1a, "col32")
        B2, SB = F.nvidia_transform(CB, formatB)
        A1, maxA = F.vectorwise_quant(A, dim=1)

        c = 10.0 * inner * scale
        row_scale = torch.ones_like(maxA) / c
        outC32, SC = F.igemmlt(
            A2, B2, SA, SB, dtype=torch.int8, row_scale=row_scale
        )
        C3, S = F.nvidia_transform(outC32, "row", state=SC)
        maxval = torch.abs(C3).max()
        if maxval == 127:
            scale = 1.5
        else:
            scale = maxval / 120
        out3 = C3 * maxA * absmaxB * c / (127 * 127)

        C4 = torch.matmul(C1a.float(), CB.float().t())

        C2a, C2b, stats2a, stats2b, coo_tensor = F.double_quant(B)
        B2, SB = F.nvidia_transform(C2a, formatB)
        outC32, SC = F.igemmlt(A2, B2, SA, SB)
        out2 = F.mm_dequant(outC32, SC, stats1a, stats2a)

        CA, SA = F.vectorwise_quant(A, dim=1, quant_type="vector")
        CB, SB = F.vectorwise_quant(B, dim=1, quant_type="linear")

        C = torch.matmul(CA.float(), CB.t().float())
        out4 = C * SA * SB / (127 * 127)
        # out4 = torch.clip(torch.round(C*SA/c), -127, 127)*c*SB/(127*127)
        # print('='*80)
        # print(out1)
        # print(out2)
        # print(out3)

        err1.append(torch.abs(out1 - out2).mean().item())
        err2.append(torch.abs(out1 - out3).mean().item())
        err3.append(torch.abs(out1 - out4).mean().item())

        # assert_all_approx_close(C3.float(), torch.round(C4*row_scale), rtol=0, atol=0, count=10)
    print("")
    print(sum(err1) / len(err1))
    print(sum(err2) / len(err2))
    print(sum(err3) / len(err3))
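

# A minimal fp32 emulation of the int8 row-scale GEMM exercised above
# (hypothetical helper, not a bitsandbytes API; assumes a scalar scale c
# as in this test, where row_scale is the constant 1/c for every row):
def _row_scale_reference(A, B, c):
    maxA = A.float().abs().max(dim=1, keepdim=True).values   # row-wise absmax of A
    absmaxB = B.float().abs().max()                          # tensor-wise absmax of B
    CA = torch.round(A.float() / maxA * 127)                 # row-wise int8 quantization
    CB = torch.round(B.float() / absmaxB * 127)              # linear int8 quantization
    C32 = CA @ CB.t()                                        # the int32 accumulator
    C8 = torch.clamp(torch.round(C32 / c), -127, 127)        # row_scale = 1/c, clipped to int8
    return C8 * maxA * absmaxB * c / (127 * 127)             # matches the out3 dequant above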


dim1 = [1024, 2048]
inner = [12288 * 4, 4096 * 4]
dim4 = [12288, 4096]

values = list(zip(dim1, dim4, inner))
names = ["dim1_{}_dim4_{}_inner_{}".format(*vals) for vals in values]


@pytest.mark.parametrize("dim1, dim4, inner", values, ids=names)
@pytest.mark.skip("Row scale has some bugs for ampere")
def test_row_scale_bench(dim1, dim4, inner):
    formatB = F.get_special_format_str()
    err1, err2, err3 = [], [], []
    relerr1, relerr2 = [], []
    scale = 1
    A = torch.randn(dim1, inner, device="cuda").half()
    B = torch.randn(dim4, inner, device="cuda").half()
    torch.nn.init.xavier_uniform_(B)
    # warmup
    for i in range(k):
        C1 = torch.matmul(A, B.t())

    torch.cuda.synchronize()
    t0 = time.time()
    for i in range(k):
        C1 = torch.matmul(A, B.t())
    torch.cuda.synchronize()
    print("16", time.time() - t0)

    C1a, C1b, stats1a, stats1b, coo_tensor = F.double_quant(A)
    CB, absmaxB = F.vectorwise_quant(B, quant_type="linear")
    A2, SA = F.nvidia_transform(C1a, "col32")
    B2, SB = F.nvidia_transform(CB, formatB)
    A1, maxA = F.vectorwise_quant(A, dim=1)

    c = 10.0 * inner * scale
    row_scale = maxA / c
    torch.cuda.synchronize()
    t0 = time.time()
    for i in range(k):
        outC32, SC = F.igemmlt(
            A2, B2, SA, SB, dtype=torch.int8, row_scale=row_scale
        )
    torch.cuda.synchronize()
    print("row-wise", time.time() - t0)

    C2a, C2b, stats2a, stats2b, coo_tensor = F.double_quant(B)
    B2, SB = F.nvidia_transform(C2a, formatB)
    torch.cuda.synchronize()
    t0 = time.time()
    for i in range(k):
        outC32, SC = F.igemmlt(A2, B2, SA, SB)
    torch.cuda.synchronize()
    print("vector-wise", time.time() - t0)


n = 2
dim1 = torch.randint(2, 1024, size=(n,)).tolist()
dim2 = torch.randint(2, 1024, size=(n,)).tolist()
# dim1 = [8*1024]
# dim2 = [4*1024]

dim3 = [0]
dtype = [torch.int8]
a_order = ["row"]
out_order = ["col32", "col_turing", "col_ampere"]
transpose = [False, True]
dims = [2]
values = list(
    product(dim1, dim2, dim3, dims, dtype, a_order, out_order, transpose)
)
names = [
    "dim1_{}_dim2_{}_dim3_{}_dims_{}_dtype_{}_orderA_{}_orderOut_{}_{}".format(
        *vals
    )
    for vals in values
]


@pytest.mark.parametrize(
    "dim1, dim2, dim3, dims, dtype, orderA, orderOut, transpose",
    values,
    ids=names,
)
def test_transform(dim1, dim2, dim3, dims, dtype, orderA, orderOut, transpose):
    for i in range(k):
        if dims == 2:
            A = torch.randint(10, 99, size=(dim1, dim2), device="cuda").to(
                dtype
            )
        elif dims == 3:
            A = torch.randint(
                10, 99, size=(dim1, dim2, dim3), device="cuda"
            ).to(dtype)

        A.view(-1)[-1] = -1
        if transpose:
            At = A.t().contiguous()
            out1, S1 = F.nvidia_transform(At, to_order=orderOut)
        else:
            out1, S1 = F.nvidia_transform(A, to_order=orderOut)
        out2, S2 = F.transform(A, to_order=orderOut, transpose=transpose)

        assert S1[0][0] == S2[0][0]
        assert S1[0][1] == S2[0][1]
        # print(out1)
        # print(out2)

        torch.testing.assert_close(out1, out2)


n = 2
# dim1 = torch.randint(2,1024, size=(n,)).tolist()
# dim2 = torch.randint(2,1024, size=(n,)).tolist()
dim1 = [1]
dim2 = [33]

dtype = [torch.int8]
# a_order = ['col_turing', 'col_ampere']
a_order = ["col_turing"]
out_order = ["row"]
values = list(product(dim1, dim2, dtype, a_order, out_order))
names = [
    "dim1_{}_dim2_{}_dtype_{}_orderA_{}_orderOut_{}".format(*vals)
    for vals in values
]


def test_overflow():
    formatB = F.get_special_format_str()
    print(formatB)
    for i in range(2):
        a = torch.arange(5, 15).cuda().to(torch.int8).view(-1, 1)
        b = torch.arange(5, 15).cuda().to(torch.int8).view(-1, 1)

        Ca, Sa = F.nvidia_transform(a, "col32")
        Cb, Sb = F.nvidia_transform(b, formatB)

        c = F.igemmlt(Ca, Cb, Sa, Sb, dtype=torch.int8)
        c2 = torch.matmul(a.float(), b.float().t())


n = 2
dim1 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
dim2 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
# dim1 = [4]
# dim2 = [5]

values = list(product(dim1, dim2))
names = ["dim1_{}_dim2_{}".format(*vals) for vals in values]


@pytest.mark.parametrize("dim1, dim2", values, ids=names)
def test_coo_double_quant(dim1, dim2):
    threshold = 3.00
    for i in range(k):
        A = torch.randn(dim1, dim2, device="cuda").half()

        idx = torch.abs(A) >= threshold
        CA2, CAt, statsA, statsAt, coo_tensor = F.double_quant(A)
        CA, CAt, statsA, statsAt, coo_tensor = F.double_quant(
            A, threshold=threshold
        )

        if coo_tensor is not None:
            A1 = A * idx
            A2 = torch.zeros_like(A)
            A2[
                coo_tensor.rowidx.long(), coo_tensor.colidx.long()
            ] = coo_tensor.values
            torch.testing.assert_close(A1, A2)

            A1 = A * (idx == 0)
            A2 = (CA.float() * statsA.unsqueeze(1) / 127).half()
            torch.testing.assert_close(
                A * (idx == 0), A2, rtol=0.05, atol=1.5e-2
            )
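

# A minimal fp32 sketch of the dense half of F.double_quant (row-wise
# absmax int8 quantization); the helper name is ours, not a bitsandbytes
# API. Dequantization mirrors the A2 reconstruction checked above:
# A_hat = (C.float() * stats.unsqueeze(1) / 127).half()
def _row_absmax_int8(A):
    stats = A.float().abs().max(dim=1).values  # per-row absmax
    C = torch.round(A.float() / stats.unsqueeze(1) * 127).to(torch.int8)
    return C, stats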


n = 2
dim1 = torch.randint(1, 1 * 1024, size=(n,)).tolist()
dim2 = torch.randint(1, 1 * 1024, size=(n,)).tolist()
# dim1 = [7]
# dim2 = [11]
transposed_B = [False, True]
values = list(product(dim1, dim2, transposed_B))
names = ["dim1_{}_dim2_{}_transposed_B_{}".format(*vals) for vals in values]


@pytest.mark.parametrize("dim1, dim2, transposed_B", values, ids=names)
def test_spmm_coo(dim1, dim2, transposed_B):
    threshold = 1.5
    dim3 = torch.randint(32, 128, size=(1,)).item()
    # dim3 = 17
    for i in range(k):
        A = torch.randn(dim1, dim2).cuda().half()
        if transposed_B:
            B = torch.randn(dim3, dim2).cuda().half()
        else:
            B = torch.randn(dim2, dim3).cuda().half()

        idx = torch.abs(A) >= threshold
        nnz = (idx == 1).sum().item()
        rows, cols = torch.where(idx)
        values = A[idx]
        cooA = F.COOSparseTensor(
            A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values
        )
        A2 = A * idx

        if transposed_B:
            out2 = F.spmm_coo(cooA, B.t())
            out1 = torch.matmul(A2, B.t())
        else:
            out2 = F.spmm_coo(cooA, B)
            out1 = torch.matmul(A2, B)

        assert_all_approx_close(out1, out2, rtol=0.01, atol=3.0e-2, count=30)
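

# A reference for F.spmm_coo built on PyTorch's own sparse support (a
# sketch only; it casts to fp32 because sparse fp16 matmul support varies
# across PyTorch versions, and it assumes the COOSparseTensor fields used
# in the test above).
def _spmm_coo_reference(cooA, B):
    indices = torch.stack([cooA.rowidx.long(), cooA.colidx.long()])
    As = torch.sparse_coo_tensor(indices, cooA.values.float(), (cooA.rows, cooA.cols))
    return torch.sparse.mm(As, B.float())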


def test_spmm_bench():
    batch = 2
    model = 1024 * 1
    hidden = model * 4
    seq = 1024
    dim1 = batch * seq
    dim2 = model
    dim3 = hidden
    threshold = 4
    A = torch.randn(dim1, dim2, device="cuda").half()
    B = torch.randn(dim2, dim3, device="cuda").half()
    for i in range(10):
        C1 = bnb.matmul(A, B.t())

    torch.cuda.synchronize()
    t0 = time.time()
    for i in range(k):
        C1 = bnb.matmul(A, B.t())
    torch.cuda.synchronize()
    t8 = time.time() - t0

    idx = torch.abs(A) >= threshold
    nnz = (idx == 1).sum().item()
    print(nnz / idx.numel())
    rows, cols = torch.where(idx)
    values = A[idx]
    cooA = F.COOSparseTensor(
        A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values
    )

    for i in range(10):
        out2 = F.spmm_coo(cooA, B)

    torch.cuda.synchronize()
    t0 = time.time()
    for i in range(k):
        out2 = F.spmm_coo(cooA, B)
    torch.cuda.synchronize()
    tsp = time.time() - t0
    print(tsp, t8)
    print(tsp / t8)


n = 2
dim1 = torch.randint(256, 1 * 1024, size=(n,)).tolist()
dim2 = torch.randint(256, 1 * 1024, size=(n,)).tolist()
values = list(product(dim1, dim2))
names = ["dim1_{}_dim2_{}".format(*vals) for vals in values]


@pytest.mark.parametrize("dim1, dim2", values, ids=names)
def test_integrated_sparse_decomp(dim1, dim2):
    threshold = 3.0
    formatB = "col_turing"
    for i in range(k):
        A = torch.randn(dim1, dim2).cuda().half()
        w1 = torch.randn(dim1, dim2).cuda().half()
        out1 = torch.matmul(A, w1.t())

        Cw1, Cw1t, statsw1, statsw1t, coo_tensor = F.double_quant(w1)
        CTw1, Sw1 = F.transform(Cw1, formatB)

        CA, CAt, statsA, statsAt, coo_tensor = F.double_quant(A)
        C32A, SA = F.transform(CA, "col32")

        out1_32, Sout1_32 = F.igemmlt(C32A, CTw1, SA, Sw1)
        out2 = F.mm_dequant(out1_32, Sout1_32, statsA, statsw1)

        CA, CAt, statsA, statsAt, coo_tensor = F.double_quant(
            A, threshold=threshold
        )
        C32A, SA = F.transform(CA, "col32")

        out1_32, Sout1_32 = F.igemmlt(C32A, CTw1, SA, Sw1)
        out3 = F.mm_dequant(out1_32, Sout1_32, statsA, statsw1)

        assert coo_tensor is not None

        out4 = F.spmm_coo(coo_tensor, w1.t())
        out5 = out3 + out4

        err1 = torch.abs(out1 - out2).mean().item()
        err2 = torch.abs(out1 - out5).mean().item()
        assert err2 < err1
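

# The decomposition above exploits an exact split of A at the threshold t
# (a sketch; the helper name is ours): A == dense + sparse elementwise, so
# A @ w1.t() == int8_matmul(dense) + spmm_coo(sparse outliers, w1.t()),
# and the rare large-magnitude outliers never inflate the int8 row stats.
def _split_at_threshold(A, t):
    idx = A.abs() >= t
    return A * ~idx, A * idx  # (dense low-magnitude part, sparse outlier part)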


def test_matmuls():
    a = torch.randn(256, 512).half().cuda()
    b = torch.randn(256, 512).half().cuda()
    c1 = torch.matmul(a, b.t())
    c2 = bnb.matmul(a, b)
    c3 = bnb.matmul_cublas(a, b.t())

    err1 = torch.abs(c1 - c2).mean().item()
    err2 = torch.abs(c1 - c3).mean().item()
    assert err1 < 0.2
    assert err2 < 0.2
    print(err1, err2)


n = 2
# dim1 = torch.randint(1,1*1024, size=(n,)).tolist()
# dim2 = torch.randint(1,4*1024, size=(n,)).tolist()
dim1 = [1 * 2048]
dim2 = [12288]
# dim1 = [32]
# dim2 = [32]
# dtype = [torch.float16, torch.int8]
dtype = [torch.float16]
out_function = ["zeros", "ones"]
values = list(product(dim1, dim2, dtype, out_function))
names = [
    "dim1_{}_dim2_{}_dtype_{}_out_func_{}".format(*vals) for vals in values
]


@pytest.mark.parametrize("dim1, dim2, dtype, out_func", values, ids=names)
def test_spmm_coo_very_sparse(dim1, dim2, dtype, out_func):
    out_func = getattr(torch, out_func)

    threshold = 3.3
    # threshold = 2.8
    # threshold = 0.0
    A = torch.randn(dim1, dim2, device="cuda").half()
    if dtype == torch.float16:
        B = torch.randn(dim2, dim2 * 4, device="cuda").half()
        torch.nn.init.xavier_uniform_(B)
    else:
        B = torch.randn(dim2, dim2 * 4, device="cuda").half()
        torch.nn.init.xavier_uniform_(B)
        B, SB = F.vectorwise_quant(B, quant_type="linear")
        # B = torch.randint(-127, 127, size=(dim2, dim2*4), device='cuda').to(torch.int8)

    print("")
    idx = torch.abs(A) >= threshold
    nnz = (idx == 1).sum().item()
    rows, cols = torch.where(idx)
    values = A[idx]
    cooA = F.COOSparseTensor(
        A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values
    )
    A2 = A * idx
    out1 = torch.matmul(A2.half(), B.half())
    out = out_func(out1.shape, dtype=torch.float16, device=out1.device)
    out1 += out.clone()
    out2 = F.spmm_coo_very_sparse(cooA, B, out=out)
    # print(B)
    # print(out1)
    # print(out2)
    p = 200 / (2048 * 12288 * 4)
    n = out1.numel()
    count = math.ceil(p * n)
    std = out1.std()
    out1 /= std
    out2 /= std
    assert_all_approx_close(
        out1, out2.half(), rtol=0.01, atol=3.0e-2, count=count
    )
    # assert_all_approx_close(out1, out2.half(), rtol=0.05, atol=0.01, count=count)

    idx_col = torch.randint(0, A2.shape[-1], size=(15,))

    # torch.testing.assert_close(out1, out2.half(), rtol=0.05, atol=0.001)

    # Bt = torch.randn(dim2*4, dim2, device='cuda').half()
    # torch.cuda.synchronize()
    # t0 = time.time()
    # print(A2.shape, B.shape)
    # for i in range(100):
    #   #out3 = F.spmm_coo(cooA, Bt.t())
    #   #out2 = F.spmm_coo(cooA, B)
    #   #out2 = F.spmm_coo_very_sparse(cooA, B)
    #   #out1 = torch.matmul(A, Bt.t())

    # torch.cuda.synchronize()
    # print(time.time() - t0)


def test_coo2csr():
    threshold = 1
    A = torch.randn(128, 128).half().cuda()
    idx = torch.abs(A) >= threshold
    nnz = (idx == 1).sum().item()
    rows, cols = torch.where(idx)
    values = A[idx]
    cooA = F.COOSparseTensor(
        A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values
    )
    A2 = A * idx
    csrA = F.coo2csr(cooA)
    counts = csrA.rowptr[1:] - csrA.rowptr[:-1]
    assert counts.numel() == A.shape[0]

    torch.testing.assert_close(counts.long(), (A2 != 0).sum(1))
    idx = A2 != 0
    torch.testing.assert_close(A2[idx], csrA.values)
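

# A minimal sketch of the COO->CSR row-pointer construction verified above
# (hypothetical helper; assumes rowidx is sorted by row, as produced by
# torch.where on a dense mask): rowptr is the exclusive prefix sum of the
# per-row nonzero counts, so counts == rowptr[1:] - rowptr[:-1].
def _coo_to_csr_rowptr(rowidx, num_rows):
    counts = torch.bincount(rowidx.long(), minlength=num_rows)
    rowptr = torch.zeros(num_rows + 1, dtype=torch.long, device=rowidx.device)
    rowptr[1:] = counts.cumsum(0)
    return rowptr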


def test_coo2csc():
    threshold = 1
    A = torch.randn(128, 128).half().cuda()
    idx = torch.abs(A) >= threshold
    nnz = (idx == 1).sum().item()
    rows, cols = torch.where(idx)
    values = A[idx]
    cooA = F.COOSparseTensor(
        A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values
    )
    A2 = A * idx
    cscA = F.coo2csc(cooA)
    counts = cscA.colptr[1:] - cscA.colptr[:-1]
    assert counts.numel() == A.shape[1]

    torch.testing.assert_close(counts.long(), (A2 != 0).sum(0))
    # torch uses row-major -> use transpose to transfer to col-major
    idx = A2.t() != 0
    torch.testing.assert_close(A2.t()[idx], cscA.values)


n = 2
# dim1 = torch.randint(1,1*1024, size=(n,)).tolist()
# dim2 = torch.randint(1,4*1024, size=(n,)).tolist()
dim1 = [1 * 2048]
# dim2 = [12288]
dim2 = [2048]
# dim1 = [2]
# dim2 = [2]
dtype = [torch.int8]
values = list(product(dim1, dim2, dtype))
names = ["dim1_{}_dim2_{}_dtype_{}".format(*vals) for vals in values]


@pytest.mark.parametrize("dim1, dim2, dtype", values, ids=names)
def test_spmm_coo_dequant(dim1, dim2, dtype):
    threshold = 6.0
    # threshold = 2.8
    # threshold = 0.0
    A = torch.randn(dim1, dim2, device="cuda").half()
    B = torch.empty(dim2, dim2 * 4, device="cuda", dtype=torch.float16)
    torch.nn.init.xavier_uniform_(B)
    Bt = B.t().contiguous()

    CB, CBt, statsB, statsBt, coo_tensor = F.double_quant(B)

    rowidx = torch.randint(0, A.shape[-1], size=(15,))

    A[:, rowidx] = 8.0

    idx = torch.abs(A) >= threshold
    nnz = (idx == 1).sum().item()
    rows, cols = torch.where(idx)
    values = A[idx]
    cooA = F.COOSparseTensor(
        A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values
    )
    A2 = A * idx
    out2 = F.spmm_coo_very_sparse(cooA, CBt, dequant_stats=statsBt)
    out1 = torch.matmul(A2, B.half())
    out3 = F.spmm_coo_very_sparse(cooA, CBt.half())
    out3 = out3 * statsBt.half() / 127

    values, counts = torch.unique(cooA.rowidx, return_counts=True)
    offset = counts.cumsum(0).int()
    max_count, max_idx = torch.sort(counts, descending=True)
    print(torch.median(max_count.float()))

    torch.testing.assert_close(out2, out3, rtol=0.05, atol=0.001)

    p = 200 / (2048 * 12288 * 4)
    n = out1.numel()
    count = math.ceil(p * n)
    assert_all_approx_close(out1, out2, rtol=0.01, atol=3.0e-2, count=count)

    # torch.cuda.synchronize()
    # t0 = time.time()
    # for i in range(100):
    #   out2 = F.spmm_coo_very_sparse(cooA, B)
    # torch.cuda.synchronize()
    # print('fp16', time.time() - t0)

    torch.cuda.synchronize()
    t0 = time.time()
    for i in range(100):
        out2 = F.spmm_coo(cooA, B)
    torch.cuda.synchronize()
    print("cusparse fp16", time.time() - t0)

    torch.cuda.synchronize()
    t0 = time.time()
    for i in range(100):
        out2 = F.spmm_coo_very_sparse(cooA, CBt)
    torch.cuda.synchronize()
    print("int8", time.time() - t0)

    torch.cuda.synchronize()
    t0 = time.time()
    for i in range(100):
        out2 = F.spmm_coo_very_sparse(cooA, CBt, dequant_stats=statsBt)
    torch.cuda.synchronize()
    print("int8+dequant", time.time() - t0)

    torch.cuda.synchronize()
    t0 = time.time()
    for i in range(100):
        out2 = torch.matmul(A, B)
    torch.cuda.synchronize()
    print("matmul", time.time() - t0)

    torch.cuda.synchronize()
    t0 = time.time()
    for i in range(100):
        out1 = bnb.matmul(A, Bt)
        out2 = F.spmm_coo_very_sparse(cooA, CBt, dequant_stats=statsBt)
        out = out1 + out2
    torch.cuda.synchronize()
    print("sparse+ matmul", time.time() - t0)

    torch.cuda.synchronize()
    t0 = time.time()
    for i in range(100):
        out1 = bnb.matmul(A, Bt)
        torch.matmul(A[:, rowidx], Bt.t()[rowidx], out=out1)
    torch.cuda.synchronize()
    print("partial matmul", time.time() - t0)

    torch.cuda.synchronize()
    t0 = time.time()
    for i in range(100):
        out1 = bnb.matmul(A, Bt)
    torch.cuda.synchronize()
    print("partial matmul", time.time() - t0)


batch_size = 1
seqdim = 1
values = []
#values.append((batch_size, seqdim, 768, 4 * 768))
#values.append((batch_size, seqdim, 1024, 4*1024))
#values.append((batch_size, seqdim, 1536, 4*1536))
#values.append((batch_size, seqdim, 2048, 4*2048))
#values.append((batch_size, seqdim, 2560, 4*2560))
values.append((batch_size, seqdim, 4096, 4*4096))
values.append((batch_size, seqdim, 5120, 4*5120))
values.append((batch_size, seqdim, 6656, 4*6656))
values.append((batch_size, seqdim, 8192, 4*8192))
#values.append((batch_size, seqdim, 5140, 4*5140))
#values.append((batch_size, seqdim, 12288, 4*12288))
names = ["batch_{}_seq_{}_model_{}_hidden_{}".format(*vals) for vals in values]
@pytest.mark.parametrize("batch, seq, model, hidden", values, ids=names)
def test_bench_matmul(batch, seq, model, hidden):
    iters = 80
    formatB = F.get_special_format_str()

    A = torch.randn(batch, seq, model, device="cuda").half()
    B = torch.empty(hidden, model, dtype=torch.float16, device="cuda")
    torch.nn.init.xavier_uniform_(B)

    B_fp4, state = F.quantize_fp4(B)
    B_fp4_c, state_c = F.quantize_fp4(B, compress_statistics=True)

    B_nf4, state_nf4 = F.quantize_nf4(B)

    linear8bit = bnb.nn.Linear8bitLt(model, hidden, False, False).cuda().half()
    linear8bit.eval()

    outliers = torch.randint(0, model, size=(5,)).cuda()
    A[:, :, outliers] = 8.0

    linearMixedBit = (bnb.nn.Linear8bitLt(model, hidden, False, False, threshold=6.0).cuda().half())
    #linearMixedBit.eval()

    linear8bit_train = bnb.nn.Linear8bitLt(model, hidden, False).cuda().half()
    linear8bit_train_thresh = bnb.nn.Linear8bitLt(model, hidden, False, threshold=6.0).cuda().half()

    # warmup
    for i in range(iters):
        torch.matmul(A, B.t())
    torch.cuda.synchronize()
    print("")

    torch.cuda.synchronize()
    t0 = time.time()
    for i in range(iters):
        torch.matmul(A, B.t())
    torch.cuda.synchronize()
    print( f"pytorch fp16: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s" )

    torch.cuda.synchronize()
    t0 = time.time()
    for i in range(iters):
        bnb.matmul_4bit(A, B_fp4.t(), quant_state=state)
    torch.cuda.synchronize()
    print( f"bnb fp4: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s" )

    torch.cuda.synchronize()
    t0 = time.time()
    for i in range(iters):
        bnb.matmul_4bit(A, B_fp4_c.t(), quant_state=state_c)  # compressed-stats weight with its matching state
    torch.cuda.synchronize()
    print( f"bnb fp4 + compressed stats: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s" )

    torch.cuda.synchronize()
    t0 = time.time()
    for i in range(iters):
        bnb.matmul_4bit(A, B_nf4.t(), quant_state=state_nf4)
    torch.cuda.synchronize()
    print( f"bnb nf4: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s" )

    #torch.cuda.synchronize()
    #t0 = time.time()
    #for i in range(iters):
    #    bnb.matmul(A, B)
    #torch.cuda.synchronize()
    #print(f"CB -> CxB conversion (each iteration): [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s")

    #torch.cuda.synchronize()
    #t0 = time.time()
    #for i in range(iters):
    #    bnb.matmul(A, B, threshold=6.0)
    #torch.cuda.synchronize()
    #print(f"CB -> CxB conversion + threshold: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s")

    #CA, CAt, SCA, SCAt, coo_tensorA = F.double_quant(A, threshold=0.0)
    #C32A, SA = F.transform(CA, "col32")
    #CB, CBt, SCB, SCBt, coo_tensorB = F.double_quant(B)
    #CxB, SB = F.transform(CB, to_order=formatB)
    #torch.cuda.synchronize()
    #t0 = time.time()
    #for i in range(iters):
    #    out32, Sout32 = F.igemmlt(C32A, CxB, SA, SB)
    #torch.cuda.synchronize()
    #print(f"no overhead matmul-lt: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s")

    #BA, statsB = F.vectorwise_quant(B, dim=1)
    #CxB, SB = F.nvidia_transform(CB, to_order=formatB)
    #torch.cuda.synchronize()
    #t0 = time.time()
    #for i in range(iters):
    #    A2 = A.view(-1, A.shape[-1]).contiguous()
    #    CA, statsA = F.vectorwise_quant(A2, dim=1)
    #    C32A, SA = F.nvidia_transform(CA, "col32")
    #    out32, Sout32 = F.igemmlt(C32A, CxB, SA, SB)
    #    Cout, Sout = F.nvidia_transform(out32, "row", state=Sout32)
    #    F.vectorwise_mm_dequant(Cout, statsA, statsB.t())
    #torch.cuda.synchronize()
    #print(f"vector pytorch + nvidia: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s")

    #BA, statsB = F.vectorwise_quant(B, dim=1, quant_type="linear")
    #CxB, SB = F.nvidia_transform(CB, to_order=formatB)
    #torch.cuda.synchronize()
    #t0 = time.time()
    #for i in range(iters):
    #    A2 = A.view(-1, A.shape[-1]).contiguous()
    #    CA, statsA = F.vectorwise_quant(A2, dim=1, quant_type="linear")
    #    C32A, SA = F.nvidia_transform(CA, "col32")
    #    out32, Sout32 = F.igemmlt(C32A, CxB, SA, SB)
    #    Cout, Sout = F.nvidia_transform(out32, "row", state=Sout32)
    #    out = Cout * statsB * statsA * (1.0 / (127 * 127))
    #torch.cuda.synchronize()
    #print(f"linear pytorch + nvidia: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s")

    linear8bit(A)
    torch.cuda.synchronize()
    t0 = time.time()
    for i in range(iters):
        linear8bit(A)
    torch.cuda.synchronize()
    print( f"bnb linear8bitlt (eval): [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s")

    linearMixedBit(A)
    torch.cuda.synchronize()
    t0 = time.time()
    for i in range(iters):
        linearMixedBit(A)
    torch.cuda.synchronize()
    print( f"bnb linear8bitlt with threshold (eval): [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s")

    #linear8bit_train(A)
    #torch.cuda.synchronize()
    #t0 = time.time()
    #for i in range(iters):
    #    linear8bit_train(A)
    #torch.cuda.synchronize()
    #print( f"bnb linear8bitlt (training): [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s")

    #linear8bit_train_thresh(A)
    #torch.cuda.synchronize()
    #t0 = time.time()
    #for i in range(iters):
    #    linear8bit_train(A)
    #torch.cuda.synchronize()
    #print( f"bnb linear8bitlt with threshold (training): [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s")

def test_zeropoint():
    def quant_zp(x):
        dtype = x.dtype
        x = x.float()
        dyna = x.max() - x.min()
        if dyna == 0:
            dyna = 1
        qx = 254.0 / dyna
        minx = x.min()
        # zpx = torch.round(minx* qx)
        # zpx = 127 - torch.round(x.max()* qx)
        zpx = torch.round(x.min() * qx) - 127
        x = (qx * x) + zpx
        return x, qx, zpx

    batch = 2
    seq = 512
    model = 1024
    hidden = 4 * model
    A = torch.randn(batch * seq, model, device="cuda").half() * 0.1
    B = torch.randn(model, hidden, device="cuda").half() * 0.1

    C0 = torch.matmul(A, B)

    # A, SA = F.vectorwise_quant(A, quant_type='linear')
    # B, SB = F.vectorwise_quant(B, quant_type='linear')
    A = A.float()
    B = B.float()

    C1 = torch.matmul(A, B)
    C3 = bnb.matmul(A.half(), B.t().contiguous().half())

    zp = 1
    # C2 = torch.matmul(A-zp, B)
    # C2 += B.sum(0).view(1, -1)*zp
    C2 = torch.matmul(A, B - zp)
    C2 -= A.sum(1).view(-1, 1) * zp

    ca, cqa, cza = quant_zp(A)
    print(ca.min(), ca.max())
    print((ca - cza).min(), (ca - cza).max())

    zp = 1
    scale = 2.0
    C5 = torch.matmul((A * scale) - zp, B)
    C5 += B.sum(0) * zp
    C5 /= scale

    CA, qa, zpa = quant_zp(A)
    C4 = torch.matmul(CA, B)
    C4 -= B.sum(0) * zpa
    C4 /= qa

    zpb = 1
    zpa = 1
    qa = 2
    qb = 2
    C6 = torch.matmul((A * qa) + zpa, (B * qb) + zpb)
    C6 -= (qb * B.sum(0).view(1, -1) * zpa) + (qa * A.sum(1).view(-1, 1) * zpb)
    C6 -= zpa * zpb * A.shape[1]
    C6 /= qa * qb

    CA, qa, zpa = quant_zp(A)
    CB, qb, zpb = quant_zp(B)
    C7 = torch.matmul(CA, CB)
    C7 -= (qb * B.sum(0).view(1, -1) * zpa) + (qa * A.sum(1).view(-1, 1) * zpb)
    C7 -= zpa * zpb * A.shape[1]
    C7 /= qa * qb

    print("")
    # print(C0.flatten()[:10])
    print(C1.flatten()[:10])
    print(C2.flatten()[:10])
    print(C3.flatten()[:10])
    print(C5.flatten()[:10])
    print(C6.flatten()[:10])
    print(C7.flatten()[:10])
    err1 = torch.abs(C1 - C2).mean().item()
    err2 = torch.abs(C1 - C3).mean().item()
    err3 = torch.abs(C1 - C4).mean().item()
    err4 = torch.abs(C1 - C5).mean().item()
    err5 = torch.abs(C1 - C6).mean().item()
    err6 = torch.abs(C1 - C7).mean().item()
    print(err1, err2, err3, err4, err5, err6)
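

# The corrections applied to C6/C7 above follow from expanding the
# zero-point form (a sketch; qa, qb are scales, zpa, zpb zero points):
#   (qa*A + zpa) @ (qb*B + zpb)
#     = qa*qb*(A @ B) + qb*zpa*B.sum(0) + qa*zpb*A.sum(1) + zpa*zpb*K,
# with K = A.shape[1]; subtracting the three correction terms and dividing
# by qa*qb recovers A @ B exactly (up to quantization rounding).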


def test_extract_outliers():
    for i in range(k):
        shapeA = (4096, 4096 * 4)
        idx = torch.unique(torch.randint(0, shapeA[1], size=(10,)).int()).cuda()
        # idx = torch.Tensor([0]).int().cuda()
        A = torch.randint(-128, 127, size=shapeA, device="cuda").to(torch.int8)
        outliers1 = A[:, idx.long()]

        CA, SA = F.transform(A, "col_turing")

        outliers2 = F.extract_outliers(CA, SA, idx)

        assert outliers2.shape[0] == shapeA[0]
        assert outliers2.shape[1] == idx.numel()

        torch.testing.assert_close(outliers1, outliers2)

        CA, SA = F.transform(A, "col_ampere")

        outliers2 = F.extract_outliers(CA, SA, idx)

        assert outliers2.shape[0] == shapeA[0]
        assert outliers2.shape[1] == idx.numel()

        torch.testing.assert_close(outliers1, outliers2)


def test_blockwise_cpu_large():
    diffs = []
    reldiffs = []
    batch = 128
    seq = 128
2055
    for hidden in [128]:#, 14336]:
        for blocksize in [4096, 16384]:
            for i in range(2):
                A1 = torch.randn(batch, seq, hidden, device='cpu')
                t0 = time.time()
                C, S = F.quantize_blockwise(A1, blocksize=blocksize)
                A2 = F.dequantize_blockwise(C, S, blocksize=blocksize)
                print(time.time() - t0)
                diff = torch.abs(A1 - A2)
                reldiff = diff / torch.abs(A1 + 1e-8)
                diffs.append(diff.mean().item())
                reldiffs.append(reldiff.mean().item())
                assert diffs[-1] < 0.011
            # print(sum(diffs)/len(diffs))
            # print(sum(reldiffs)/len(reldiffs))
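

# A minimal sketch of the blockwise absmax scheme benchmarked above
# (reference only; F.quantize_blockwise additionally maps each scaled
# block through a 256-entry code, which this helper omits):
def _blockwise_absmax(A, blocksize):
    flat = A.reshape(-1, blocksize)            # assumes numel % blocksize == 0
    absmax = flat.abs().max(dim=1).values      # one scale per block
    return flat / absmax.unsqueeze(1), absmax  # every block now lies in [-1, 1]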


def test_fp8_quant():
    for e_bits in range(1, 7):
        p_bits = 7-e_bits
        code = F.create_fp8_map(True, e_bits, p_bits).cuda()

        abserr = []
        relerr = []
        for i in range(100):
            A1 = torch.randn(1024, 1024, device="cuda")
            C, SC = F.quantize_blockwise(A1, code=code)
            A2 = F.dequantize_blockwise(C, SC)
            diff = torch.abs(A1 - A2)
            reldiff = diff/torch.abs(A1+1e-8)
            abserr.append(diff.mean().item())
            relerr.append(reldiff.mean().item())
            #assert diff < 0.0075
        #print(sum(abserr)/len(abserr))
        #print(sum(relerr)/len(relerr))

        abserr = []
        relerr = []
        for i in range(100):
            A1 = torch.rand(1024, 1024, device="cuda")
            C, SC = F.quantize_blockwise(A1, code=code)
            A2 = F.dequantize_blockwise(C, SC)
            diff = torch.abs(A1 - A2)
            reldiff = diff/torch.abs(A1+1e-8)
            abserr.append(diff.mean().item())
            relerr.append(reldiff.mean().item())
            #assert diff < 0.0075
        #print(sum(abserr)/len(abserr))
        #print(sum(relerr)/len(relerr))

        abserr = []
        relerr = []
        for i in range(100):
            A1 = torch.randn(1024, 1024, device="cuda")
            C, SC = F.quantize_blockwise(A1)
            A2 = F.dequantize_blockwise(C, SC)
            diff = torch.abs(A1 - A2)
            reldiff = diff/torch.abs(A1+1e-8)
            abserr.append(diff.mean().item())
            relerr.append(reldiff.mean().item())
            #assert diff < 0.0075
        #print(3, sum(abserr)/len(abserr))
        #print(3, sum(relerr)/len(relerr))
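

# For intuition, the IEEE-754-style value of a code with e_bits exponent
# bits and p_bits fraction bits (a sketch using bias 2**(e_bits-1) - 1;
# the exact construction inside F.create_fp8_map may differ):
def _float_code_value(sign, exponent, fraction, e_bits, p_bits):
    bias = 2 ** (e_bits - 1) - 1
    s = -1.0 if sign else 1.0
    if exponent == 0:  # subnormal: no implicit leading 1
        return s * (fraction / 2**p_bits) * 2.0 ** (1 - bias)
    return s * (1 + fraction / 2**p_bits) * 2.0 ** (exponent - bias)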


def test_few_bit_quant():

    #print('')
    for bits in range(2, 9):
        #print('='*30, bits, '='*30)
        for method in ['linear', 'fp8', 'dynamic', 'quantile']:
            abserrs = []
            relerrs = []
            code = None
            if method == 'linear':
                code = F.create_linear_map(True, total_bits=bits).cuda()
            elif method == 'fp8':
                ebits = math.ceil(bits/2)
                pbits = bits-ebits-1
                code = F.create_fp8_map(True, ebits, pbits, bits).cuda()
            elif method == 'dynamic':
                code = F.create_dynamic_map(True, bits-0, bits).cuda()
            elif method == 'quantile':
                values = torch.randn(2048, 2048, device='cuda')
                code = F.create_quantile_map(values, bits).cuda()
            # for some data types we have no zero
            # for some data types we have one zero
            # for some data types we have two zeros
            assert torch.unique(code).numel() in [2**bits, 2**bits-1], f'bits: {bits}, method: {method}'
            #print(method, (code==0).sum())
            assert code.numel() == 256
            for i in range(10):

                values = torch.randn(1, 32, device='cuda')
                values /= values.abs().max()
                #values[values.abs() < 1e-6] += 1e-5

                q1 = []
                v1 = []
                for v in values[0]:
                    idx = torch.abs(v-code).argmin()
                    q1.append(idx.item())
                    v1.append(code[idx].item())

                q1 = torch.Tensor(q1).cuda()
                v1 = torch.Tensor(v1).cuda()

                q2, S2 = F.quantize_blockwise(values, code=code)
                v2 = F.dequantize_blockwise(q2, S2)

                idx = torch.isclose(q1.int(), q2.int())
                err2 = torch.abs(v2-values)
                abserrs.append(err2.mean().item())
                relerrs.append((err2/(1e-10+values).abs()).mean().item())
                if idx.sum():
                    # some weird cases
                    err1 = torch.abs(v1-values).mean()
                    #assert err2.mean() <= err1

                else:
                    torch.testing.assert_close(q1, q2)
            #print(method, 'abserr:', sum(abserrs)/len(abserrs), 'relerr:', sum(relerrs)/len(relerrs))
    #assert False


def test_kbit_quantile_estimation():
    for i in range(100):
        data = torch.randn(1024, 1024, device='cuda')
        for bits in range(2, 9):
            p = np.linspace(1.3e-4, 1-1.3e-4, 2**bits)
            val1 = torch.Tensor(norm.ppf(p)).cuda()
            val2 = F.estimate_quantiles(data, offset=0, num_quantiles=2**bits)
            err = torch.abs(val1-val2).mean()
            assert err < 0.038

    for i in range(100):
        data = torch.randn(1024, 1024, device='cuda')
        for bits in range(2, 4):
            total_values = 2**bits-1
            p = np.linspace(0, 1, 2*total_values+1)
            idx = np.arange(1, 2*total_values+1, 2)
            p = p[idx]
            offset = 1/(2*total_values)
            p = np.linspace(offset, 1-offset, total_values)
            val1 = torch.Tensor(norm.ppf(p)).cuda()
            val2 = F.estimate_quantiles(data, num_quantiles=2**bits-1)
            err = torch.abs(val1-val2).mean()
            assert err < 0.035
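

# The offset places the k quantile targets at the centers of k equal
# probability bins: p_i = offset + i*(1 - 2*offset)/(k - 1) with
# offset = 1/(2k); e.g. bits=2 gives k=3 and p = [1/6, 1/2, 5/6].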


def test_bench_dequantization():
    a = torch.rand(1024, 1024, device='cuda').half()
    code = F.create_fp8_map(True, 3, 0, 4).cuda()
    qa, SA = F.quantize_blockwise(a, code=code)
    print(qa.max())

    max_theoretical_mu = 1024*1024*2/1024**3/672*1000*1000
    #print(max_theoretical_mu)

    torch.cuda.synchronize()
    t0 = time.time()
    for i in range(100):
        qa, SA = F.quantize_blockwise(a)
    torch.cuda.synchronize()
    #print((time.time()-t0)/1e6)



def test_fp4_quant():
    vals = list(product([0, 1], repeat=4))

    code = {}
    for bits in vals:
        result = 0
        bias = 3
        sign, e1, e2, p1 = bits
        idx = sign*8 + e1*4 + e2*2 + p1*1
        sign = -1.0 if sign else 1.0
        exp = e1*2 + e2*1
        if exp == 0:
            # sub-normal
            if p1 == 0: result = 0
            else: result = sign*0.0625
        else:
            # normal
            exp = 2**(-exp + bias + 1)
            frac = 1.5 if p1 else 1.0
            result = sign*exp*frac
        code[idx] = result

    A1 = torch.randn(1024, 1024, device='cuda').half()
    qa, SA = F.quantize_fp4(A1, blocksize=64)
    A2 = F.dequantize_fp4(qa, SA)

    err = (A1 - A2).abs().float()
    relerr = (err/A1.abs().float()).mean()
    idx = err > 1.0
    err = err.mean()


    assert err.item() < 0.1
    assert relerr.item() < 0.28
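

# Worked example for the fp4 map built above: bits (sign, e1, e2, p1) =
# (0, 0, 1, 1) give exp = 1, so result = 2**(-1 + 3 + 1) * 1.5 = 12.0,
# the largest magnitude in the map; (0, 0, 0, 1) is subnormal -> 0.0625.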


@pytest.mark.skipif(not torch.cuda.is_available(), reason="this test requires a GPU")
@pytest.mark.parametrize("quant_type", ['fp4', 'nf4'])
def test_4bit_compressed_stats(quant_type):
    for blocksize in [128, 64]:
        errs1 = []
        errs2 = []
        for i in range(10):
            A1 = torch.randn(1024, 1024, device='cuda').half()
            q2, SA2 = F.quantize_4bit(A1, blocksize=blocksize, quant_type=quant_type)
            q3, SA3 = F.quantize_4bit(A1, blocksize=blocksize, compress_statistics=True, quant_type=quant_type)
            A2 = F.dequantize_4bit(q2, SA2, quant_type=quant_type)
            A3 = F.dequantize_4bit(q3, SA3, quant_type=quant_type)


            err = (A1 - A2).abs().float()
            relerr = (err/(A1.abs().float()+1e-15)).mean()
            err = err.mean()

            errs1.append(err.item())


            assert err.item() < 0.11
            assert relerr.item() < 0.28

            err = (A1 - A3).abs().float()
            relerr = (err/(A1.abs().float()+1e-15)).mean()
            err = err.mean()

            errs2.append(err.item())

            assert err.item() < 0.11
            assert relerr.item() < 0.28

        #print(sum(errs1)/len(errs1), blocksize, quant_type)
        #print(sum(errs2)/len(errs2), blocksize, quant_type)


@pytest.mark.skipif(not torch.cuda.is_available(), reason="this test requires a GPU")
#@pytest.mark.parametrize("quant_type", ['fp4', 'nf4'])
@pytest.mark.parametrize("quant_type", ['nf4'])
def test_bench_4bit_dequant(quant_type):
    blocksize = 256
    a = torch.rand(1024*12*4, 1024*12, device='cuda').half()
    qa, SA = F.quantize_4bit(a, blocksize=blocksize, quant_type=quant_type)

    input_size = a.numel()/2
    output_size = a.numel()*2
    num_bytes = input_size+output_size
    GB = num_bytes/1e9
    max_theoretical_s = GB/768
    #print(max_theoretical_s*1e6)
    b = torch.randn(128, 1024*12, device='cuda').half()

    iters = 100
    torch.cuda.synchronize()
    t0 = time.time()
    for i in range(iters):
        F.dequantize_4bit(qa, SA, blocksize=blocksize, quant_type=quant_type)
        #b.copy_(a)
    torch.cuda.synchronize()
    #print((time.time()-t0)/iters*1e6)

    #torch.cuda.synchronize()
    #t0 = time.time()
    #for i in range(iters):
    #    torch.matmul(b, a.t())
    #torch.cuda.synchronize()
    #print((time.time()-t0)/iters*1e6)


def test_normal_map_tree():
    code = F.create_normal_map()
    values = code[:8].tolist() + code[-8:].tolist()
    num_pivots = 1
    print(values)
    while num_pivots < 16:
        idx = list(range(16//num_pivots//2, 16, 16//num_pivots))
        print(idx)
        num_pivots *= 2
        pivots = []
        for i in idx:
            pivots.append((values[i-1]+values[i])/2)
        print(pivots)
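

# A sketch of how such pivots drive quantization against a sorted code:
# the midpoints between adjacent code values act as search boundaries and
# torch.bucketize then returns the nearest-code index (assumes the code is
# sorted ascending; the helper is ours, not the CUDA kernel's logic).
def _pivot_quantize(x, code):
    pivots = (code[:-1] + code[1:]) / 2
    return torch.bucketize(x, pivots)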


#@pytest.mark.parametrize("dtype", [torch.float32, torch.float16], ids=['fp32', 'fp16'])
@pytest.mark.parametrize("dtype", [torch.float16], ids=['fp16'])
def test_cutlass3_gemm(dtype):
    debug = True
    #for dim in [32, 64, 128, 256, 512, 1024, 2048, 4096]:
    #for dim in [4096, 5120, 6656, 8192]:
    for dim in [4096]:
    #for dim in [128+1]:
        errs = []
        relerrs = []
        max_err = 0
        max_relerr = 0
        for i in range(100):
            A = torch.randn(1, dim, dtype=dtype, device='cuda')
            B = torch.randn(4*dim, dim+0, dtype=dtype, device='cuda')/math.sqrt(dim)
            #B = torch.randn(1, dim, dtype=dtype, device='cuda')/math.sqrt(dim)

            #print('')
            #print(A)
            #print(B.t())
            #A[:, :-1] = 0
            #B[:, :-1] = 0


            C1 = torch.matmul(A, B.t())
            C2 = F.cutlass3_gemm(A, B.t())

            # tensor cores are non-deterministic
            # so we need to analyze errors around the mean
            # to test our implementation
            err = torch.abs(C1-C2)
            mag = torch.abs(C1)+1e-8
            relerr = err/mag
            max_err = max(err.max(), max_err)
            max_relerr = max(relerr.max(), max_relerr)
            err = err.mean().item()
            relerr = relerr.mean().item()

            errs.append(err)
            relerrs.append(relerr)

            #if not debug and err/torch.abs(C1).mean() > 5e-5 or err > 3.2e-5:
            #    print('')
            #    print(i, err, relerr)
            #    print(A.flatten()[-6:])
            #    print(B.flatten()[-6:])
            #    out = A.flatten()[-6:]*B.flatten()[-6:]
            #    print(out)
            #    print(out[:-1].sum())
            #    print('='*80)
            #    print(C1.flatten()[-6:])
            #    print(C2.flatten()[-6:])
            #    #assert False, 'ERROR'

            c = int(C1.numel()*0.0014*(dim/256))+1

            c = assert_all_approx_close(C1, C2, 1e-5, 0.01, count=c, throw=not debug)
            #print(c/math.sqrt(dim))
        print('')
        print(dim, sum(errs)/len(errs)/math.sqrt(dim))
        print(dim, sum(relerrs)/len(relerrs)/math.sqrt(dim))
        print(dim, (max_err.item(), max_relerr.item()))

#@pytest.mark.parametrize("dtype", [torch.float32, torch.float16], ids=['fp32', 'fp16'])
@pytest.mark.parametrize("dtype", [torch.float16], ids=['fp16'])
def test_gemm_4bit(dtype):
    #for dim in [32, 64, 128, 256, 512, 1024, 2048, 4096]:
    #for dim in [4096, 5120, 6656, 8192]:
    #for dim in [32]:
    for dim in [4096]:
        errs = []
        relerrs = []
        max_err = 0
        max_relerr = 0
        for i in range(1):
            #A = torch.rand(2, 4092, dtype=dtype, device='cuda')
            #B = torch.rand(4*4092, 4092, dtype=dtype, device='cuda')
            #A = torch.rand(1, 4096, dtype=dtype, device='cuda')
            #B = torch.rand(4*4096, 4096, dtype=dtype, device='cuda')
            A = torch.randn(1, dim+0, dtype=dtype, device='cuda')
            B = torch.randn(4*dim, dim+0, dtype=dtype, device='cuda')/math.sqrt(dim)

            #print('')
            #print(A)
            #print(B.t())
            #A[:, :-1] = 0
            #B[:, :-1] = 0

            qB, state = F.quantize_nf4(B)
            F.dequantize_nf4(qB, state)

            C3 = torch.matmul(A, B.t())
            C2 = F.cutlass3_gemm(A, qB.t(), state=state)
            C1 = bnb.matmul_4bit(A, qB.t(), state)

            print(C1)
            print(C2)

            #print(C1.shape, C2.shape)

            # tensor cores are non-deterministic
            # so we need to analyze errors around the mean
            # to test our implementation
            err = torch.abs(C1-C2)
            mag = torch.abs(C1)+1e-8
            relerr = err/mag
            max_err = max(err.max(), max_err)
            max_relerr = max(relerr.max(), max_relerr)
            err = err.mean().item()
            relerr = relerr.mean().item()
            print(err)

            errs.append(err)
            relerrs.append(relerr)

            if err/torch.abs(C1).mean() > 5e-5 or err > 3.2e-5:
                print('')
                print(i, err, relerr)
                print(A.flatten()[-6:])
                print(B.flatten()[-6:])
                out = A.flatten()[-6:]*B.flatten()[-6:]
                print(out)
                print(out[:-1].sum())
                print('='*80)
                print(C1.flatten()[-6:])
                print(C2.flatten()[-6:])
                #assert False, 'ERROR'

            c = int(C1.numel()*0.0014*(dim/256))+1

            c = assert_all_approx_close(C1, C2, 1e-5, 0.01, count=c, throw=False)
            #print(c/math.sqrt(dim))
        print('')
        print(dim, sum(errs)/len(errs)/math.sqrt(dim))
        print(dim, sum(relerrs)/len(relerrs)/math.sqrt(dim))
        print(dim, (max_err.item(), max_relerr.item()))

@pytest.mark.skip("Row scale has some bugs for ampere")
def test_managed():
    n = 32*10
    A = F.get_paged(n, n, dtype=torch.float32)
    B = F.get_paged(n, n, dtype=torch.uint8)
    B2 = F.get_paged(n, n, dtype=torch.float32)
    assert A.is_paged
    assert B.is_paged
    assert A.page_deviceid==0
    assert B.page_deviceid==0
    F.fill(A, 17.0)
    F.fill(B, 17)
    F.fill(B2, 2)
    assert (A==17).sum().item() == n*n
    assert (B==17).sum().item() == n*n
    C = A*B.float()
    assert (C==289).sum().item() == n*n
    F._mul(A, B2)
    F._mul(A, B2)
    F._mul(A, B2)
    assert (A==17*(2**3)).sum().item() == n*n
   # F.prefetch_tensor(A)
   # F.prefetch_tensor(B)


   # F.fill(B2, 17.0)
   # F._mul(A, B2)

   # F.prefetch_tensor(A, to_cpu=True)
   # F.prefetch_tensor(B, to_cpu=True)
   # F.prefetch_tensor(B2, to_cpu=True)
   # torch.cuda.synchronize()

   # assert (A==17).sum().item() == n*n

   # torch.testing.assert_close(A, torch.ones(A.shape)*289)