# test_tilelang_language_clear.py
import tilelang
import tilelang.language as T


# Add the @tilelang.jit decorator if you want matmul to return a torch-callable kernel.
# @tilelang.jit
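# Hypothetical sketch of the decorated form (out_idx and the returned callable
# are assumptions based on tilelang's jit API; verify against your installed version):
#
#   @tilelang.jit(out_idx=[2])
#   def matmul(M, N, K, block_M, block_N, block_K, ...): ...
#
#   kernel = matmul(1024, 1024, 1024, 128, 128, 32)  # callable on torch tensors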
def matmul(M, N, K, block_M, block_N, block_K, dtype=T.float16, accum_dtype=T.float32):
    @T.prim_func
    def main(
        A: T.Tensor((M, K), dtype),
        B: T.Tensor((N, K), dtype),
        C: T.Tensor((M, N), dtype),
    ):
        # Initialize the kernel context
        with T.Kernel(T.ceildiv(N, block_N), T.ceildiv(M, block_M), threads=128) as (bx, by):
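            # Grid mapping (from the launch above): bx indexes tiles along N,
            # by indexes tiles along M; each 128-thread block computes one
            # block_M x block_N tile of C.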
            A_shared = T.alloc_shared((block_M, block_K), dtype)
            B_shared = T.alloc_shared((block_N, block_K), dtype)
            C_local = T.alloc_fragment((block_M, block_N), accum_dtype)

            # Zero the accumulator fragment before the K-loop
            T.clear(C_local)
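            # A rough desugaring of T.clear as an explicit parallel zero-fill
            # (illustrative sketch only; the actual lowering may differ):
            #   for i, j in T.Parallel(block_M, block_N):
            #       C_local[i, j] = 0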

            for ko in T.Pipelined(T.ceildiv(K, block_K), num_stages=0):
                # Copy a tile of A into shared memory.
                # T.copy is syntactic sugar for a parallelized copy.
                T.copy(A[by * block_M, ko * block_K], A_shared)
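                # Roughly equivalent explicit form (illustrative sketch only;
                # the actual lowering may differ):
                #   for i, k in T.Parallel(block_M, block_K):
                #       A_shared[i, k] = A[by * block_M + i, ko * block_K + k]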

                # Immediately zero the A tile again; this is what the test
                # exercises, so every GEMM below accumulates zeros.
                T.clear(A_shared)

                # Demonstrate a parallelized copy from global to shared memory for B
                T.copy(B[bx * block_N, ko * block_K], B_shared)

                # Perform a tile-level GEMM on the shared buffers
                # Currently this dispatches to CuTe on NVIDIA GPUs and HIP on AMD GPUs
                T.gemm(A_shared, B_shared, C_local, transpose_B=True)
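                # Effectively C_local += A_shared @ B_shared.T, accumulated in
                # accum_dtype; transpose_B=True because B is laid out as (N, K).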

            # Copy result back to global memory
            T.copy(C_local, C[by * block_M, bx * block_N])

    return main


def run_matmul(M, N, K, block_M, block_N, block_K, dtype=T.float16, accum_dtype=T.float32):
    program = matmul(M, N, K, block_M, block_N, block_K, dtype, accum_dtype)
    # out_idx=[2] marks C as the output the compiled kernel allocates and returns;
    # tl.disable_tma_lower keeps T.copy from being lowered to TMA instructions
    kernel = tilelang.compile(program, out_idx=[2], target="cuda", pass_configs={"tl.disable_tma_lower": True})
    import torch
    from tilelang.utils import map_torch_type

    a = torch.randn((M, K), dtype=map_torch_type(dtype)).cuda()
    b = torch.randn((N, K), dtype=map_torch_type(dtype)).cuda()
    c = kernel(a, b)
    # A_shared is zeroed before every GEMM, so the output must be all zeros
    assert torch.allclose(c, torch.zeros_like(c))
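
    # For contrast, without the T.clear(A_shared) inside the kernel this would
    # be a plain GEMM; a hypothetical reference check could look like:
    #   ref_c = (a.float() @ b.float().T).to(c.dtype)
    #   torch.testing.assert_close(c, ref_c, rtol=1e-2, atol=1e-2)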


def test_matmul():
    run_matmul(1024, 1024, 1024, 128, 128, 32)


if __name__ == "__main__":
    test_matmul()