"...composable_kernel_onnxruntime.git" did not exist on "e1a67b693efe3b58d3088101d8a8138f05d3c7f5"
example_elementwise_add.py 2.82 KB
Newer Older
1
2
3
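"""example_elementwise_add.py

Element-wise tensor addition written in TileLang, with an optional
autotuning pass over tile sizes and thread counts.
"""
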
import argparse
import itertools
import torch

import tilelang
import tilelang.language as T
from tilelang.autotuner import AutoTuner

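# Disable TileLang's kernel cache so each run compiles fresh.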
tilelang.disable_cache()


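# PyTorch reference used to validate the TileLang kernel's output.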
def ref_program(x, y):
    return x + y


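# JIT-compiled kernel factory. out_idx=[-1] marks the last tensor argument
# (C) as the output, so the compiled kernel allocates and returns it.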
@tilelang.jit(out_idx=[-1])
def elementwise_add(M, N, block_M, block_N, in_dtype, out_dtype, threads):

    @T.prim_func
    def elem_add(
            A: T.Tensor((M, N), in_dtype),
            B: T.Tensor((M, N), in_dtype),
            C: T.Tensor((M, N), out_dtype),
    ):
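        # 2-D launch grid: bx tiles the N (column) axis, by tiles the M (row) axis.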
        with T.Kernel(T.ceildiv(N, block_N), T.ceildiv(M, block_M), threads=threads) as (bx, by):
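            # Shared-memory staging buffers and a per-thread register fragment.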
            A_shared = T.alloc_shared((block_M, block_N), in_dtype)
            B_shared = T.alloc_shared((block_M, block_N), in_dtype)
            C_local = T.alloc_fragment((block_M, block_N), out_dtype)
            C_shared = T.alloc_shared((block_M, block_N), out_dtype)

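            # Cooperatively load one (block_M, block_N) tile of A and B.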
            T.copy(A[by * block_M, bx * block_N], A_shared)
            T.copy(B[by * block_M, bx * block_N], B_shared)
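            # Element-wise add; T.Parallel distributes iterations over the block's threads.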
            for (local_y, local_x) in T.Parallel(block_M, block_N):
                C_local[local_y, local_x] = A_shared[local_y, local_x] + B_shared[local_y, local_x]
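            # Write the tile back to global memory via shared memory.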
            T.copy(C_local, C_shared)
            T.copy(C_shared, C[by * block_M, bx * block_N])

    return elem_add


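# Autotuning search space: 3 x 3 x 3 = 27 tile/thread configurations.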
def get_configs(M, N):
    block_M = [64, 128, 256]
    block_N = [64, 128, 256]
    threads = [64, 128, 256]
    configs = list(itertools.product(block_M, block_N, threads))
    return [{"block_M": bm, "block_N": bn, "threads": th} for bm, bn, th in configs]


def get_best_config(M, N):

    def kernel(block_M=None, block_N=None, threads=None):
        return elementwise_add(M, N, block_M, block_N, "float32", "float32", threads)

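    # Compile each candidate for CUDA, validate against ref_program, and profile it.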
    autotuner = AutoTuner.from_kernel(
        kernel=kernel, configs=get_configs(M, N)).set_compile_args(
            out_idx=[-1],
            target="cuda",
        ).set_profile_args(
            supply_type=tilelang.TensorSupplyType.Auto,
            ref_prog=ref_program,
            skip_check=False,
        )
    return autotuner.run(warmup=3, rep=20)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--m", type=int, default=1024)
    parser.add_argument("--n", type=int, default=1024)
    parser.add_argument("--use_autotune", action="store_true", default=False)
    args, _ = parser.parse_known_args()
    M, N = args.m, args.n

    a = torch.randn(M, N, dtype=torch.float32, device="cuda")
    b = torch.randn(M, N, dtype=torch.float32, device="cuda")

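    # Either search for the fastest config or build the kernel with defaults.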
    if args.use_autotune:
        result = get_best_config(M, N)
        kernel = result.kernel
    else:
        # Default config
        config = {"block_M": 128, "block_N": 128, "threads": 128}
        kernel = elementwise_add(M, N, **config, in_dtype="float32", out_dtype="float32")
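    # Run once and check numerics against the eager reference.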
    out = kernel(a, b)
    torch.testing.assert_close(out, ref_program(a, b), rtol=1e-2, atol=1e-2)


if __name__ == "__main__":
    main()