swiglu.py

import torch
import ctypes
from ctypes import c_uint64
from libinfiniop import (
    LIBINFINIOP,
    TestTensor,
    get_test_devices,
    check_error,
    test_operator,
    get_args,
    debug,
    get_tolerance,
    profile_operation,
    TestWorkspace,
    InfiniDtype,
    InfiniDtypeNames,
    InfiniDeviceNames,
    infiniopOperatorDescriptor_t,
)
from enum import Enum, auto

# ==============================================================================
#  Configuration (Internal Use Only)
# ==============================================================================
# These are not meant to be imported from other modules
_TEST_CASES_ = [
    # shape, a_stride, b_stride, c_stride
    ((13, 4), None, None, None),
    ((13, 4), (10, 1), (10, 1), (10, 1)),
    ((13, 4), (0, 1), None, None),
    ((13, 4, 4), None, None, None),
    ((13, 4, 4), (20, 4, 1), (20, 4, 1), (20, 4, 1)),
    ((13, 4, 4), (4, 0, 1), (0, 4, 1), None),
    ((16, 5632), None, None, None),
    ((16, 5632), (13312, 1), (13312, 1), (13312, 1)),
    ((4, 4, 5632), None, None, None),
    ((4, 4, 5632), (45056, 5632, 1), (45056, 5632, 1), (45056, 5632, 1)),
]
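# Note: a 0 in a stride tuple (e.g. (0, 1) or (4, 0, 1) above) marks a
# broadcast dimension, i.e. the same physical data is read for every index
# along it (assuming TestTensor follows torch.as_strided stride semantics).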


class Inplace(Enum):
    OUT_OF_PLACE = auto()
    INPLACE_A = auto()
    INPLACE_B = auto()


# Inplace options applied for each test case in _TEST_CASES_
_INPLACE = [
    Inplace.OUT_OF_PLACE,
    Inplace.INPLACE_A,
    Inplace.INPLACE_B,
]

# Form the test cases by appending each element of _INPLACE to each tuple in _TEST_CASES_
_TEST_CASES = [
    test_case + (inplace_item,)
    for test_case in _TEST_CASES_
    for inplace_item in _INPLACE
]
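# Each generated case is a tuple such as
#   ((13, 4), None, None, None, Inplace.OUT_OF_PLACE)
# whose fields line up with test()'s
# (shape, a_stride, b_stride, c_stride, inplace) parameters.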

# Data types used for testing
_TENSOR_DTYPES = [InfiniDtype.F16, InfiniDtype.BF16, InfiniDtype.F32]

# Tolerance map for different data types
_TOLERANCE_MAP = {
    InfiniDtype.F16: {"atol": 1e-3, "rtol": 1e-3},
    InfiniDtype.BF16: {"atol": 5e-3, "rtol": 5e-3},
    InfiniDtype.F32: {"atol": 2e-7, "rtol": 1e-7},
}

# Runtime options; the defaults below are overridden by the command-line
# flags parsed in __main__
DEBUG = False
PROFILE = False
NUM_PRERUN = 10
NUM_ITERATIONS = 1000


def swiglu(a, b):
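    # Reference: SwiGLU(a, b) = a * SiLU(b) = a * b * sigmoid(b), where
    # sigmoid(b) = 1 / (1 + exp(-b)). exp(-b) is evaluated in float32 and
    # cast back to b.dtype before the division, avoiding overflow and
    # precision loss in reduced-precision inputs.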
    return a * b / (1 + torch.exp(-b.float()).to(b.dtype))
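
# A roughly equivalent reference via torch.nn.functional, shown only as a
# sketch (swiglu_alt is a hypothetical helper, not used by this test, and is
# not guaranteed bit-identical to the form above in reduced precision, since
# the rounding points differ):
#
#   import torch.nn.functional as F
#
#   def swiglu_alt(a, b):
#       return (a * F.silu(b.float())).to(b.dtype)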


def test(
    handle,
    device,
    shape,
    a_stride=None,
    b_stride=None,
    c_stride=None,
    inplace=Inplace.OUT_OF_PLACE,
    dtype=InfiniDtype.F16,
    sync=None,
):
    a = TestTensor(shape, a_stride, dtype, device)
    b = TestTensor(shape, b_stride, dtype, device)
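    # For the in-place variants, alias the output c onto the matching input;
    # skip the case if an explicit c_stride conflicts with that input's stride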
    if inplace == Inplace.INPLACE_A:
        if c_stride is not None and c_stride != a_stride:
            return
        c = a
    elif inplace == Inplace.INPLACE_B:
        if c_stride is not None and c_stride != b_stride:
            return
        c = b
    else:
        c = TestTensor(shape, c_stride, dtype, device)

    # A broadcast (stride-0) output cannot be written unambiguously; skip it
    if c.is_broadcast():
        return

    print(
        f"Testing SwiGLU on {InfiniDeviceNames[device]} with shape:{shape} a_stride:{a_stride} b_stride:{b_stride} c_stride:{c_stride} "
        f"dtype:{InfiniDtypeNames[dtype]} inplace:{inplace}"
    )

    ans = swiglu(a.torch_tensor(), b.torch_tensor())

    if sync is not None:
        sync()

    descriptor = infiniopOperatorDescriptor_t()
    check_error(
        LIBINFINIOP.infiniopCreateSwiGLUDescriptor(
            handle,
            ctypes.byref(descriptor),
            c.descriptor,
            a.descriptor,
            b.descriptor,
        )
    )

    # Invalidate the shape and strides in the descriptor to prevent them from being directly used by the kernel
    for tensor in [a, b, c]:
        tensor.destroy_desc()

    # Query the workspace size the kernel requires, then allocate it on c's device
    workspace_size = c_uint64(0)
    check_error(
        LIBINFINIOP.infiniopGetSwiGLUWorkspaceSize(
            descriptor, ctypes.byref(workspace_size)
        )
    )
    workspace = TestWorkspace(workspace_size.value, c.device)

    def lib_swiglu():
        check_error(
            LIBINFINIOP.infiniopSwiGLU(
                descriptor,
                workspace.data(),
                workspace_size.value,
                c.data(),
                a.data(),
                b.data(),
                None,
            )
        )

    lib_swiglu()

    atol, rtol = get_tolerance(_TOLERANCE_MAP, dtype)
    if DEBUG:
        debug(c.actual_tensor(), ans, atol=atol, rtol=rtol)
    assert torch.allclose(c.actual_tensor(), ans, atol=atol, rtol=rtol)

    # Profiling workflow
    if PROFILE:
        # fmt: off
        profile_operation("PyTorch", lambda: swiglu(a.torch_tensor(), b.torch_tensor()), device, NUM_PRERUN, NUM_ITERATIONS)
        profile_operation("    lib", lambda: lib_swiglu(), device, NUM_PRERUN, NUM_ITERATIONS)
        # fmt: on
    check_error(LIBINFINIOP.infiniopDestroySwiGLUDescriptor(descriptor))


if __name__ == "__main__":
    args = get_args()

    # Configure testing options
    DEBUG = args.debug
    PROFILE = args.profile
    NUM_PRERUN = args.num_prerun
    NUM_ITERATIONS = args.num_iterations

    for device in get_test_devices(args):
        # Run test() for every (test case, dtype) combination on this device
        test_operator(device, test, _TEST_CASES, _TENSOR_DTYPES)

    print("\033[92mTest passed!\033[0m")