"benchmark/README.md" did not exist on "fe5cd1fcc6d5ddf0e39a41a33223cf3377548c7f"
mul.py 4.84 KB
Newer Older
Graylatzhou's avatar
Graylatzhou committed
1
2
import torch
import ctypes
3
from ctypes import c_uint64
Graylatzhou's avatar
Graylatzhou committed
4
from libinfiniop import (
5
6
    LIBINFINIOP,
    TestTensor,
Graylatzhou's avatar
Graylatzhou committed
7
8
9
10
11
12
13
    get_test_devices,
    check_error,
    test_operator,
    get_args,
    debug,
    get_tolerance,
    profile_operation,
14
15
16
17
18
    TestWorkspace,
    InfiniDtype,
    InfiniDtypeNames,
    InfiniDeviceNames,
    infiniopOperatorDescriptor_t,
Graylatzhou's avatar
Graylatzhou committed
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
)
from enum import Enum, auto

# ==============================================================================
#  Configuration (Internal Use Only)
# ==============================================================================
# These are not meant to be imported from other modules
# Each entry is (shape, a_stride, b_stride, c_stride).
# A stride of None presumably requests TestTensor's default (contiguous)
# layout — TODO confirm against TestTensor's constructor.
_TEST_CASES_ = [
    # shape, a_stride, b_stride, c_stride
    ((13, 4), None, None, None),
    ((13, 4), (10, 1), (10, 1), (10, 1)),  # padded rows: stride 10 > row width 4
    ((13, 4), (0, 1), None, None),  # zero stride in `a` (broadcast-style input)
    ((13, 4, 4), None, None, None),
    ((13, 4, 4), (20, 4, 1), (20, 4, 1), (20, 4, 1)),  # padded outer dimension
    ((13, 4, 4), (4, 0, 1), (0, 4, 1), None),  # zero strides on different axes of a and b
    ((16, 5632), None, None, None),
    ((16, 5632), (13312, 1), (13312, 1), (13312, 1)),  # large padded rows
    ((4, 4, 5632), None, None, None),
    ((4, 4, 5632), (45056, 5632, 1), (45056, 5632, 1), (45056, 5632, 1)),
]


# Output-aliasing modes for a test case: compute into a fresh tensor, or
# write the result back into input `a` or input `b`.
# Functional Enum form; member values are 1, 2, 3 — identical to auto().
Inplace = Enum("Inplace", ["OUT_OF_PLACE", "INPLACE_A", "INPLACE_B"])


# Every aliasing mode is applied to every base case in _TEST_CASES_.
_INPLACE = [
    Inplace.OUT_OF_PLACE,
    Inplace.INPLACE_A,
    Inplace.INPLACE_B,
]

# Full test matrix: each base case tuple extended with one aliasing mode.
_TEST_CASES = [
    (*base_case, mode)
    for base_case in _TEST_CASES_
    for mode in _INPLACE
]

# Data types used for testing
_TENSOR_DTYPES = [InfiniDtype.F16, InfiniDtype.F32, InfiniDtype.BF16]

# Tolerance map for different data types: atol/rtol passed to torch.allclose
# when comparing the library result against the PyTorch reference.
_TOLERANCE_MAP = {
    InfiniDtype.F16: {"atol": 1e-3, "rtol": 1e-3},
    InfiniDtype.F32: {"atol": 1e-7, "rtol": 1e-7},
    InfiniDtype.BF16: {"atol": 1e-3, "rtol": 1e-3},
}

# Runtime options; all four are overridden from CLI arguments in __main__.
DEBUG = False  # when True, print per-element diffs via debug() on comparison
PROFILE = False  # when True, time PyTorch vs. library via profile_operation()
NUM_PRERUN = 10  # forwarded to profile_operation (presumably warm-up runs)
NUM_ITERATIONS = 1000  # forwarded to profile_operation (presumably timed runs)


def mul(c, a, b):
    """Reference implementation: elementwise c = a * b via PyTorch.

    Writes the product of tensors *a* and *b* into the preallocated
    tensor *c* (torch.mul with out=), so c's layout is preserved.
    """
    torch.mul(a, b, out=c)


def test(
    handle,
    device,
    shape,
    a_stride=None,
    b_stride=None,
    c_stride=None,
    inplace=Inplace.OUT_OF_PLACE,
    dtype=InfiniDtype.F16,
    sync=None,
):
    """Run one Mul test case: library kernel vs. the torch.mul reference.

    Args:
        handle: infiniop handle, forwarded to infiniopCreateMulDescriptor.
        device: device identifier; used to allocate tensors and indexed
            into InfiniDeviceNames for logging.
        shape: common shape of inputs a, b and output c.
        a_stride, b_stride, c_stride: optional explicit strides
            (None = TestTensor's default layout).
        inplace: whether c aliases a, aliases b, or is a separate tensor.
        dtype: element type for all three tensors.
        sync: optional callable invoked after the reference computation
            (presumably a device/stream synchronization — see caller).

    Cases whose c_stride contradicts the required aliasing, or whose
    output tensor is a broadcast view, are skipped by returning early.
    Raises AssertionError if the library result deviates from the
    reference beyond the dtype's tolerance.
    """
    a = TestTensor(shape, a_stride, dtype, device)
    b = TestTensor(shape, b_stride, dtype, device)

    # Resolve output aliasing. A c_stride that differs from the stride of
    # the tensor c must alias is contradictory — skip such cases.
    if inplace == Inplace.INPLACE_A:
        if c_stride is not None and c_stride != a_stride:
            return
        c = a
    elif inplace == Inplace.INPLACE_B:
        if c_stride is not None and c_stride != b_stride:
            return
        c = b
    else:
        c = TestTensor(shape, c_stride, dtype, device)

    # A broadcast output is not element-wise writable; skip it.
    if c.is_broadcast():
        return

    print(
        f"Testing Mul on {InfiniDeviceNames[device]} with shape:{shape} a_stride:{a_stride} b_stride:{b_stride} c_stride:{c_stride} "
        f"dtype:{InfiniDtypeNames[dtype]} inplace:{inplace}"
    )

    # Compute the PyTorch reference result into c's torch-side tensor.
    mul(c.torch_tensor(), a.torch_tensor(), b.torch_tensor())

    if sync is not None:
        sync()

    descriptor = infiniopOperatorDescriptor_t()
    check_error(
        LIBINFINIOP.infiniopCreateMulDescriptor(
            handle,
            ctypes.byref(descriptor),
            c.descriptor,
            a.descriptor,
            b.descriptor,
        )
    )

    # Invalidate the shape and strides in the descriptor to prevent them
    # from being directly used by the kernel
    for tensor in [a, b, c]:
        tensor.destroy_desc()

    # Query the kernel's workspace requirement and allocate it.
    workspace_size = c_uint64(0)
    check_error(
        LIBINFINIOP.infiniopGetMulWorkspaceSize(
            descriptor, ctypes.byref(workspace_size)
        )
    )
    workspace = TestWorkspace(workspace_size.value, c.device)

    def lib_mul():
        # Invoke the library's Mul kernel: c = a * b.
        check_error(
            LIBINFINIOP.infiniopMul(
                descriptor,
                workspace.data(),
                workspace_size.value,
                c.data(),
                a.data(),
                b.data(),
                None,
            )
        )

    lib_mul()

    # Compare the library output against the reference within the
    # dtype-specific tolerance.
    atol, rtol = get_tolerance(_TOLERANCE_MAP, dtype)
    if DEBUG:
        debug(c.actual_tensor(), c.torch_tensor(), atol=atol, rtol=rtol)
    assert torch.allclose(c.actual_tensor(), c.torch_tensor(), atol=atol, rtol=rtol)

    # Profiling workflow
    if PROFILE:
        # fmt: off
        profile_operation("PyTorch", lambda: mul(c.torch_tensor(), a.torch_tensor(), b.torch_tensor()), device, NUM_PRERUN, NUM_ITERATIONS)
        profile_operation("    lib", lambda: lib_mul(), device, NUM_PRERUN, NUM_ITERATIONS)
        # fmt: on
    check_error(LIBINFINIOP.infiniopDestroyMulDescriptor(descriptor))


if __name__ == "__main__":
    args = get_args()

    # Configure testing options
    DEBUG = args.debug
    PROFILE = args.profile
    NUM_PRERUN = args.num_prerun
    NUM_ITERATIONS = args.num_iterations

    for device in get_test_devices(args):
179
        test_operator(device, test, _TEST_CASES, _TENSOR_DTYPES)
Graylatzhou's avatar
Graylatzhou committed
180

Graylatzhou's avatar
Graylatzhou committed
181
    print("\033[92mTest passed!\033[0m")