"git@developer.sourcefind.cn:cnjsdfcy/simbricks.git" did not exist on "1118220ed577610d533b92c0cc35495463a6ad91"
import torch
import tilelang
import tilelang.language as T


def ref_program(x, y):
    return x + y


@tilelang.jit(out_idx=[-1])
def elementwise_add(M, N, block_M, block_N, in_dtype, out_dtype, threads):
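    """Build a tiled elementwise-add kernel.

    Each (block_M, block_N) tile is staged through shared memory so the
    global-memory copies can lower to TMA transfers. out_idx=[-1] asks the
    JIT wrapper to allocate and return the last tensor argument (C) as the
    kernel's output.
    """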

    @T.prim_func
    def elem_add(
            A: T.Tensor((M, N), in_dtype),
            B: T.Tensor((M, N), in_dtype),
            C: T.Tensor((M, N), out_dtype),
    ):
        with T.Kernel(T.ceildiv(N, block_N), T.ceildiv(M, block_M), threads=threads) as (bx, by):
            A_shared = T.alloc_shared((block_M, block_N), in_dtype)
            B_shared = T.alloc_shared((block_M, block_N), in_dtype)
            C_local = T.alloc_fragment((block_M, block_N), out_dtype)
            C_shared = T.alloc_shared((block_M, block_N), out_dtype)

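            # Load the input tiles into shared memory; these are the copies
            # that should lower to TMA loads (checked in run_elementwise_add).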
            T.copy(A[by * block_M, bx * block_N], A_shared)
            T.copy(B[by * block_M, bx * block_N], B_shared)
            for (local_y, local_x) in T.Parallel(block_M, block_N):
                C_local[local_y, local_x] = A_shared[local_y, local_x] + B_shared[local_y, local_x]
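            # Stage the result through shared memory so the store back to
            # global memory can also be issued as a bulk copy.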
            T.copy(C_local, C_shared)
            T.copy(C_shared, C[by * block_M, bx * block_N])

    return elem_add


def run_elementwise_add(M, N):
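    """Compile and run the kernel for an (M, N) problem, checking both
    numerical correctness and the TMA lowering in the generated source."""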
    a = torch.randn(M, N, dtype=torch.float32, device="cuda")
    b = torch.randn(M, N, dtype=torch.float32, device="cuda")

    # Default config
    block_M, block_N = 128, 128
    config = {"block_M": block_M, "block_N": block_N, "threads": 128}
    kernel = elementwise_add(M, N, **config, in_dtype="float32", out_dtype="float32")

    out = kernel(a, b)
    torch.testing.assert_close(out, ref_program(a, b), rtol=1e-2, atol=1e-2)

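    # Inspect the generated source: when block_N == N, a tile spans whole rows
    # and is contiguous in global memory, so the copy can use the 1D bulk
    # tma_load without a CUtensorMap descriptor; otherwise the tiled,
    # tensor-map-based TMA path is required.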
    code = kernel.get_kernel_source()
    if block_N == N:
        assert "tma_load" in code and "CUtensorMap" not in code
    else:
        assert "tma_load" in code and "CUtensorMap" in code


def main():
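    # block_N is fixed at 128 in run_elementwise_add: the first two shapes
    # take the 1D TMA path (block_N == N), the last the CUtensorMap path.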
    run_elementwise_add(128, 128)
    run_elementwise_add(256, 128)
    run_elementwise_add(256, 256)


if __name__ == "__main__":
    main()