"git@developer.sourcefind.cn:orangecat/ollama.git" did not exist on "f42f3d9b2760f6b6dd26b738794657ca98fe9042"
example_dequant_gemm_fp4_hopper.py 11.7 KB
Newer Older
1
2
3
4
5
6
7
8
import tilelang
import tilelang.language as T
from tilelang.autotuner import *
from tvm import tir
import itertools
import torch
import argparse

def _tir_u8_to_f4_to_f16(nbit: int, val: tir.PrimExpr, pos: tir.PrimExpr, dtype: str):
    """Decode the ``pos``-th fp4 (s1e2m1) nibble of a packed uint8 into a float16.

    Args:
        nbit: Bits per quantized element; must be 4.
        val: uint8 PrimExpr holding two packed fp4 values.
        pos: Which nibble to extract (0 = low, 1 = high).
        dtype: Target dtype string; must be "float16".

    Returns:
        A float16 PrimExpr whose bit pattern encodes the dequantized value.

    Note:
        Exponent rebias: e_f16 = e_f4 + (2^4 - 2^1) = e_f4 + 14. The fp4 zero
        case (e_f4 == 0) is intentionally NOT special-cased here — see the
        commented-out Select at the bottom — so a zero nibble decodes to 0.5.
    """
    assert nbit == 4
    assert dtype == "float16"
    assert val.dtype == "uint8"
    mask = tir.const((1 << nbit) - 1, "uint16")
    # Shift the requested nibble into the low 4 bits and mask it off.
    f4 = (val >> (pos.astype("uint16") * tir.const(nbit, "uint16"))) & mask
    s = f4 >> tir.const(3, "uint16")  # sign (bit 3 of the nibble)
    e_f4 = (f4 & tir.const(6, "uint16")) >> tir.const(1, "uint16")  # 2-bit exponent
    e_f16 = e_f4 + tir.const(14, "uint16")  # rebias exponent for float16
    m_f4 = f4 & tir.const(1, "uint16")  # 1-bit mantissa
    m_f16 = m_f4
    # Assemble float16 bits: sign at bit 15, exponent at bits 14..10,
    # fp4 mantissa becomes the float16 mantissa MSB (bit 9).
    val_f16 = tir.reinterpret(
        "float16",
        ((e_f16 | (s << tir.const(5, "uint16"))) << tir.const(10, "uint16") | m_f16 << tir.const(9, "uint16")).astype("uint16"),
    )
    # return tir.Select(e_f4 == tir.const(0, "uint32"), tir.const(0, "float16"), val_f16)
    return val_f16

def torch_convert(tensor):
    """Dequantize a packed-fp4 uint8 tensor to float16 (torch reference).

    Each input byte holds two fp4 (s1e2m1) values: the low nibble maps to the
    even output column and the high nibble to the odd one, so an (N, K) uint8
    input yields an (N, 2K) float16 output.

    Note: matching the TIR decoder, the fp4 zero case is NOT special-cased,
    so a zero nibble decodes to 0.5 (exponent bias only).
    """

    def _convert(val, pos):
        # Decode nibble `pos` (0 = low, 1 = high) of every byte in `val`.
        assert val.dtype == torch.uint8
        val = val.view(torch.int8)
        mask = (1 << 4) - 1
        f4 = ((val >> (pos * 4)) & mask).to(torch.int16)
        s = f4 >> 3  # sign
        e_f4 = (f4 & 6) >> 1  # 2-bit exponent
        e_f16 = e_f4 + 14  # rebias for float16
        m_f4 = f4 & 1  # 1-bit mantissa -> float16 mantissa MSB
        # Assemble the float16 bit pattern in int16; int16 and float16 share
        # 16 bits, so reinterpret directly. (The former `& 0xFFFF` was a no-op
        # on a 16-bit tensor and 0xFFFF does not even fit in int16, which can
        # trip torch's scalar overflow check.)
        val_f16 = ((e_f16 | (s << 5)) << 10) | (m_f4 << 9)
        return val_f16.view(torch.float16)

    N = tensor.shape[0]
    K = tensor.shape[1]
    new_tensor = torch.empty(N, K * 2, dtype=torch.float16, device=tensor.device)
    # Vectorized over the whole tensor instead of a per-element Python loop:
    # even columns come from the low nibbles, odd columns from the high ones.
    new_tensor[:, 0::2] = _convert(tensor, 0)
    new_tensor[:, 1::2] = _convert(tensor, 1)
    return new_tensor

@tilelang.jit(out_idx=[1])
def test_convert(N, K, block_N, block_K, in_dtype, num_bits=4, threads=128):
    """Build a standalone dequantization kernel: unpack fp4 B into float16 C.

    Args:
        N, K: Logical output shape (C is N x K of `in_dtype`).
        block_N, block_K: Tile sizes for the unpacking loop.
        in_dtype: Output element type ("float16" for the fp4 decoder).
        num_bits: Bits per quantized element (4 -> 2 elements per byte).
        threads: Threads per block.

    Returns:
        The compiled prim_func (out_idx=[1] makes C the kernel output).
    """
    num_elems_per_byte = 8 // num_bits
    storage_dtype = "uint8"
    B_shape = (N, K // num_elems_per_byte)
    B_shared_shape = (block_N, block_K // num_elems_per_byte)
    B_dequantize_shared_shape = (block_N, block_K)

    @T.prim_func
    def main(
        B: T.Tensor(B_shape, storage_dtype),
        C: T.Tensor((N, K), in_dtype),
    ):
        with T.Kernel(T.ceildiv(N, block_N), threads=threads) as (bx):
            B_shared = T.alloc_shared(B_shared_shape, storage_dtype)
            B_local = T.alloc_fragment(B_shared_shape, storage_dtype)
            B_dequantize_local = T.alloc_fragment(B_dequantize_shared_shape, in_dtype)

            # Stream K in block_K chunks: global -> shared -> fragment,
            # decode two fp4 values per byte, then write the tile back to C.
            for k in T.Pipelined(T.ceildiv(K, block_K), num_stages=1):
                T.copy(B[bx * block_N, k * block_K // num_elems_per_byte], B_shared)
                T.copy(B_shared, B_local)
                for i, j in T.Parallel(block_N, block_K):
                    B_dequantize_local[i, j] = _tir_u8_to_f4_to_f16(
                        num_bits,
                        B_local[i, j // num_elems_per_byte],
                        j % num_elems_per_byte,
                        dtype=in_dtype,
                    )
                T.copy(B_dequantize_local, C[bx * block_N, k * block_K])

    return main

def test_fp4_fp16_convert_close():
    """Check the TIR fp4->fp16 decoder against the torch reference on CUDA."""
    N, K = 256, 256
    block_N, block_K = 64, 64
    kernel = test_convert(
        N,
        K,
        block_N,
        block_K,
        "float16",
    )

    # Random nibbles in the low 4 bits of each byte (high nibble stays 0).
    B = torch.randint(0, 16, (N, K // 2), dtype=torch.uint8, device="cuda").to(torch.uint8)
    tl_out = kernel(B)
    ref_out = torch_convert(B)
    assert torch.allclose(tl_out, ref_out, rtol=0.01, atol=0.01), (tl_out, ref_out)
    print("Pass")

def get_configs():
    """Build the autotuning search space as a list of config dicts.

    Returns:
        A list of dicts with keys block_M, block_N, block_K, num_stages,
        threads, split — the Cartesian product of the candidate values.
    """
    block_M = [64, 128]
    block_N = [64, 128]
    block_K = [128, 256]
    num_stages = [1, 2]
    threads = [128, 256]
    splits = [1]
    keys = ("block_M", "block_N", "block_K", "num_stages", "threads", "split")
    return [dict(zip(keys, values)) for values in itertools.product(block_M, block_N, block_K, num_stages, threads, splits)]

def matmul(M, N, K, in_dtype, out_dtype, accum_dtype, num_bits=4, tune=False):
    """Build an fp4-dequant GEMM computing Ct = (A @ dequant(B)^T)^T.

    A is (M, K) `in_dtype`; B is (N, K / elems_per_byte) packed uint8 fp4;
    the output Ct is (N, M) `out_dtype`, accumulated in `accum_dtype`.

    Args:
        M, N, K: GEMM dimensions.
        in_dtype / out_dtype / accum_dtype: dtype strings for A & dequantized
            B, the output, and the accumulator respectively.
        num_bits: Bits per quantized B element (4 -> 2 elements per byte).
        tune: If True, autotune over get_configs() and return the best result;
            otherwise return a factory taking explicit tiling parameters.
    """

    @tilelang.jit(out_idx=[2])
    def kernel_func(block_M, block_N, block_K, num_stages, threads, split=1):
        num_elems_per_byte = 8 // num_bits
        storage_dtype = "uint8"
        A_shape = (M, K)
        B_shape = (N, K // num_elems_per_byte)
        A_shared_shape = (block_M, block_K)
        B_shared_shape = (block_N, block_K // num_elems_per_byte)
        B_dequantize_shared_shape = (block_N, block_K)
        assert K % (block_K * split) == 0
        KK = K // split  # K-range handled by each split slice

        @T.prim_func
        def main_split(
            A: T.Tensor(A_shape, in_dtype),
            B: T.Tensor(B_shape, storage_dtype),
            Ct: T.Tensor((N, M), out_dtype),
        ):
            # Split-K variant: each bz slice accumulates a partial product
            # into SplitC; a second kernel reduces over the split axis.
            SplitC = T.alloc_buffer([split, (N + block_N - 1) // block_N * block_N, (M + block_M - 1) // block_M * block_M], out_dtype)
            with T.Kernel(T.ceildiv(N, block_N), T.ceildiv(M, block_M), split, threads=threads) as (bx, by, bz):
                A_shared = T.alloc_shared(A_shared_shape, in_dtype)
                B_shared = T.alloc_shared(B_shared_shape, storage_dtype)
                B_local = T.alloc_fragment(B_shared_shape, storage_dtype)
                B_dequantize_local = T.alloc_fragment(B_dequantize_shared_shape, in_dtype)
                B_dequantize_prev_local = T.alloc_fragment(B_dequantize_shared_shape, in_dtype)
                Ct_local = T.alloc_fragment((block_N, block_M), accum_dtype)
                Ct_shared = T.alloc_shared((block_N, block_M), out_dtype)

                # Swizzled layouts avoid shared-memory bank conflicts.
                T.annotate_layout(
                    {
                        B_shared: tilelang.layout.make_swizzled_layout(B_shared),
                        Ct_shared: tilelang.layout.make_swizzled_layout(Ct_shared),
                    }
                )

                T.clear(Ct_local)
                for k in T.Pipelined(K // (block_K * split), num_stages=num_stages):
                    T.copy(A[by * block_M, KK * bz + k * block_K], A_shared)
                    T.copy(B[bx * block_N, (KK * bz + k * block_K) // num_elems_per_byte], B_shared)
                    T.copy(B_shared, B_local)
                    # Decode two fp4 values per packed byte into float16.
                    for i, j in T.Parallel(block_N, block_K):
                        B_dequantize_local[i, j] = _tir_u8_to_f4_to_f16(
                            num_bits,
                            B_local[i, j // num_elems_per_byte],
                            j % num_elems_per_byte,
                            dtype=in_dtype,
                        )
                    T.copy(B_dequantize_local, B_dequantize_prev_local)
                    # GEMM on the dequantized tile: Ct_local += Bdq @ A^T.
                    T.gemm(B_dequantize_prev_local, A_shared, Ct_local, transpose_B=True)
                T.copy(Ct_local, SplitC[bz, bx * block_N : (bx + 1) * block_N, by * block_M : (by + 1) * block_M])

            # Reduction kernel: sum the split partial results into Ct.
            with T.Kernel(T.ceildiv(N, block_N), T.ceildiv(M, block_M)) as (bx, by):
                acc = T.alloc_fragment((block_N, block_M), out_dtype)
                T.clear(acc)
                for k in range(split):
                    for i, j in T.Parallel(block_N, block_M):
                        acc[i, j] += SplitC[k, bx * block_N + i, by * block_M + j]
                T.copy(acc, Ct[bx * block_N, by * block_M])

        @T.prim_func
        def main(
            A: T.Tensor(A_shape, in_dtype),
            B: T.Tensor(B_shape, storage_dtype),
            Ct: T.Tensor((N, M), out_dtype),
        ):
            # Single-pass variant (split == 1): one block per (N, M) tile.
            with T.Kernel(T.ceildiv(N, block_N), T.ceildiv(M, block_M), threads=threads) as (bx, by):
                A_shared = T.alloc_shared(A_shared_shape, in_dtype)
                B_shared = T.alloc_shared(B_shared_shape, storage_dtype)
                B_local = T.alloc_fragment(B_shared_shape, storage_dtype)
                B_dequantize_local = T.alloc_fragment(B_dequantize_shared_shape, in_dtype)
                B_dequantize_prev_local = T.alloc_fragment(B_dequantize_shared_shape, in_dtype)
                Ct_local = T.alloc_fragment((block_N, block_M), accum_dtype)
                Ct_shared = T.alloc_shared((block_N, block_M), out_dtype)

                # Swizzled layouts avoid shared-memory bank conflicts.
                T.annotate_layout(
                    {
                        B_shared: tilelang.layout.make_swizzled_layout(B_shared),
                        Ct_shared: tilelang.layout.make_swizzled_layout(Ct_shared),
                    }
                )

                T.clear(Ct_local)
                for k in T.Pipelined(K // block_K, num_stages=num_stages):
                    T.copy(A[by * block_M, k * block_K], A_shared)
                    T.copy(B[bx * block_N, k * block_K // num_elems_per_byte], B_shared)
                    T.copy(B_shared, B_local)
                    # Decode two fp4 values per packed byte into float16.
                    for i, j in T.Parallel(block_N, block_K):
                        B_dequantize_local[i, j] = _tir_u8_to_f4_to_f16(
                            num_bits,
                            B_local[i, j // num_elems_per_byte],
                            j % num_elems_per_byte,
                            dtype=in_dtype,
                        )
                    T.copy(B_dequantize_local, B_dequantize_prev_local)
                    # GEMM on the dequantized tile: Ct_local += Bdq @ A^T.
                    T.gemm(B_dequantize_prev_local, A_shared, Ct_local, transpose_B=True)
                T.copy(Ct_local, Ct_shared)
                T.copy(Ct_shared, Ct[bx * block_N : (bx + 1) * block_N, by * block_M : (by + 1) * block_M])

        if split == 1:
            return main
        else:
            return main_split

    if tune:

        @autotune(configs=get_configs(), warmup=10, rep=10)
        @tilelang.jit(out_idx=[2])
        def kernel(block_M=None, block_N=None, block_K=None, num_stages=None, threads=None, split=None):
            return kernel_func(block_M, block_N, block_K, num_stages, threads, split).prim_func

        return kernel()
    else:

        def kernel(block_M, block_N, block_K, num_stages, threads, split=1):
            return kernel_func(block_M, block_N, block_K, num_stages, threads, split)

        return kernel

def ref_program(A, qB):
    """Reference: dequantize qB, compute A @ B^T in float32, return the (N, M) transpose as float16.

    Args:
        A: (M, K) activation tensor.
        qB: (N, K/2) packed-fp4 uint8 weight tensor.
    """
    dtypeC = "float16"
    B = torch_convert(qB)
    C = torch.matmul(A.to(torch.float), B.T.to(torch.float))
    # getattr is the idiomatic way to resolve a dtype by name
    # (torch.__getattribute__ is an implementation detail).
    C = C.to(getattr(torch, dtypeC))
    return C.transpose(0, 1)

def main(m=256, n=256, k=256, tune=False):
    """Run (or autotune) the fp4-dequant GEMM and report latency and TFLOPs.

    Args:
        m, n, k: GEMM dimensions.
        tune: If True, search get_configs() for the best config; otherwise
            run a fixed configuration and validate against ref_program.
    """
    total_flops = 2 * m * n * k

    if not tune:
        kernel = matmul(m, n, k, "float16", "float16", "float32", num_bits=4, tune=tune)(
            block_M=128, block_N=128, block_K=128, num_stages=2, threads=256, split=1
        )
        profiler = kernel.get_profiler(tilelang.TensorSupplyType.Integer)
        profiler.assert_allclose(ref_program, rtol=0.01, atol=0.01)
        print("All checks pass.")
        latency = profiler.do_bench(ref_program, warmup=500)
        print("Ref: {:.2f} ms".format(latency))
        print("Ref: {:.2f} TFlops".format(total_flops / latency * 1e-9))
        latency = profiler.do_bench(warmup=500)
        print("Tile-lang: {:.2f} ms".format(latency))
        print("Tile-lang: {:.2f} TFlops".format(total_flops / latency * 1e-9))
    else:
        best_result = matmul(m, n, k, "float16", "float16", "float32", num_bits=4, tune=tune)
        best_latency = best_result.latency
        best_config = best_result.config
        print(f"Best latency: {best_latency}")
        print(f"Best TFlops: {total_flops / best_latency * 1e-9}")
        print(f"Best config: {best_config}")


if __name__ == "__main__":
    # CLI entry point: GEMM dimensions plus an optional autotuning flag.
    parser = argparse.ArgumentParser()
    parser.add_argument("--m", type=int, default=256, help="M")
    parser.add_argument("--n", type=int, default=256, help="N")
    parser.add_argument("--k", type=int, default=256, help="K")
    parser.add_argument("--tune", action="store_true", help="tune configs")
    args = parser.parse_args()
    M, N, K = args.m, args.n, args.k
    main(M, N, K, args.tune)