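"""Minimal tilelang GEMM example: C = A @ B with shared-memory tiling and pipelining."""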
import tilelang
import tilelang.language as T


# JIT-compile; out_idx=[-1] marks the last tensor argument (C) as the kernel output.
@tilelang.jit(out_idx=[-1])
def matmul(M, N, K, block_M, block_N, block_K, dtype="float16", accum_dtype="float"):
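    """Build a tiled GEMM prim_func computing C = A @ B (fp16 inputs, fp32 accumulation)."""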

    @T.prim_func
    def gemm(
            A: T.Tensor((M, K), dtype),
            B: T.Tensor((K, N), dtype),
            C: T.Tensor((M, N), dtype),
    ):
        # One thread block per (block_M, block_N) tile of C, 128 threads each.
        with T.Kernel(T.ceildiv(N, block_N), T.ceildiv(M, block_M), threads=128) as (bx, by):
            # Stage input tiles in shared memory; accumulate in a register fragment.
            A_shared = T.alloc_shared((block_M, block_K), dtype)
            B_shared = T.alloc_shared((block_K, block_N), dtype)
            C_local = T.alloc_fragment((block_M, block_N), accum_dtype)

            T.clear(C_local)
            # Software-pipelined reduction over K (3 stages): global-to-shared
            # copies of upcoming tiles overlap with the current tile's GEMM.
            for k in T.Pipelined(T.ceildiv(K, block_K), num_stages=3):
                T.copy(A[by * block_M, k * block_K], A_shared)
                T.copy(B[k * block_K, bx * block_N], B_shared)
                T.gemm(A_shared, B_shared, C_local)

            # Write the accumulated tile back to C in global memory.
            T.copy(C_local, C[by * block_M, bx * block_N])

    return gemm


def main():
    # 1024x1024x1024 GEMM with 128x128 output tiles and block_K = 32.
    kernel = matmul(1024, 1024, 1024, 128, 128, 32)

    import torch

    # Random fp16 inputs on the GPU.
    a = torch.randn(1024, 1024).cuda().half()
    b = torch.randn(1024, 1024).cuda().half()

    # Run the kernel; the output tensor is allocated and returned automatically.
    c = kernel(a, b)

    # PyTorch reference result.
    ref_c = a @ b

    print("c:")
    print(c)
    print("ref_c:")
    print(ref_c)

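    # Loose tolerances account for fp16 rounding differences.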
    torch.testing.assert_close(c, ref_c, rtol=1e-2, atol=1e-2)
    print("All check passed.")

    # Inspect the generated CUDA source.
    print("CUDA Source:")
    print(kernel.get_kernel_source())

    # Benchmark kernel latency via the CUPTI profiling backend.
    profiler = kernel.get_profiler()
    latency = profiler.do_bench(backend="cupti")
    # Alternatively, use the default timing backend:
    # latency = profiler.do_bench()
    print(f"tilelang Latency: {latency}ms")


if __name__ == "__main__":
    main()