import torch
import ctypes
from ctypes import POINTER, Structure, c_int32, c_size_t, c_uint64, c_void_p, c_float
from libinfiniop import (
    infiniopHandle_t,
    infiniopTensorDescriptor_t,
    open_lib,
    to_tensor,
    get_test_devices,
    check_error,
    rearrange_if_needed,
    create_workspace,
    test_operator,
    get_args,
    debug,
    get_tolerance,
    profile_operation,
)

# ==============================================================================
#  Configuration (Internal Use Only)
# ==============================================================================
# These are not meant to be imported from other modules
_TEST_CASES = [
    # shape, a_stride, b_stride, c_stride
    ((13, 4), None, None, None),
    ((13, 4), (10, 1), (10, 1), (10, 1)),
    ((13, 4, 4), None, None, None),
    ((13, 4, 4), (20, 4, 1), (20, 4, 1), (20, 4, 1)),
    ((16, 5632), None, None, None),
    ((16, 5632), (13312, 1), (13312, 1), (13312, 1)),
    ((4, 4, 5632), None, None, None),
    ((4, 4, 5632), (45056, 5632, 1), (45056, 5632, 1), (45056, 5632, 1)),
]
# Data types used for testing
_TENSOR_DTYPES = [torch.float16, torch.float32]

# Tolerance map for different data types
_TOLERANCE_MAP = {
    torch.float16: {"atol": 0, "rtol": 1e-2},
    torch.float32: {"atol": 0, "rtol": 1e-3},
}

DEBUG = False
PROFILE = False
NUM_PRERUN = 10
NUM_ITERATIONS = 1000

class SwiGLUDescriptor(Structure):
    # Opaque handle for the library-side SwiGLU descriptor; only the device id is mirrored here.
    _fields_ = [("device", c_int32)]


infiniopSwiGLUDescriptor_t = POINTER(SwiGLUDescriptor)


def swiglu(a, b):
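    # Reference SwiGLU: a * SiLU(b) = a * b * sigmoid(b); the exponential is
    # evaluated in float32 and cast back to b's dtype.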
    return a * b / (1 + torch.exp(-b.float()).to(b.dtype))


def test_out_of_place(
    lib,
    handle,
    torch_device,
    shape,
    a_stride=None,
    b_stride=None,
    c_stride=None,
    dtype=torch.float16,
    sync=None,
):
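    # Out-of-place: a, b, and c are three separate buffers; the result is written to c.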
    print(
        f"Testing SwiGLU on {torch_device} with shape:{shape} a_stride:{a_stride} b_stride:{b_stride} c_stride:{c_stride} dtype:{dtype}"
    )
    a = torch.rand(shape, dtype=dtype).to(torch_device)
    b = torch.rand(shape, dtype=dtype).to(torch_device)
    c = torch.rand(shape, dtype=dtype).to(torch_device)

    ans = swiglu(a, b)

    a, b, c = [
        rearrange_if_needed(tensor, stride)
        for tensor, stride in zip([a, b, c], [a_stride, b_stride, c_stride])
    ]
    a_tensor, b_tensor, c_tensor = [to_tensor(tensor, lib) for tensor in [a, b, c]]


    if sync is not None:
        sync()

    descriptor = infiniopSwiGLUDescriptor_t()
    check_error(
        lib.infiniopCreateSwiGLUDescriptor(
            handle,
            ctypes.byref(descriptor),
            c_tensor.descriptor,
            a_tensor.descriptor,
            b_tensor.descriptor,
        )
    )

    # Invalidate the shape and strides in the descriptor to prevent them from being directly used by the kernel
    for tensor in [a_tensor, b_tensor, c_tensor]:
        tensor.descriptor.contents.invalidate()

    def lib_swiglu():
        check_error(
            lib.infiniopSwiGLU(
                descriptor, c_tensor.data, a_tensor.data, b_tensor.data, None
            )
        )

    lib_swiglu()

    atol, rtol = get_tolerance(_TOLERANCE_MAP, dtype)
    if DEBUG:
        debug(c, ans, atol=atol, rtol=rtol)
    assert torch.allclose(c, ans, atol=atol, rtol=rtol)
    print("out-of-place Test passed!")

    # Profiling workflow
    if PROFILE:
        # fmt: off
        profile_operation("PyTorch", lambda: swiglu(a, b), torch_device, NUM_PRERUN, NUM_ITERATIONS)
        profile_operation("    lib", lambda: lib_swiglu(), torch_device, NUM_PRERUN, NUM_ITERATIONS)
        # fmt: on
    check_error(lib.infiniopDestroySwiGLUDescriptor(descriptor))


def test_in_place1(
    lib,
    handle,
    torch_device,
    shape,
    a_stride=None,
    b_stride=None,
    dtype=torch.float16,
    sync=None,
):
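    # In-place variant 1: the output descriptor aliases input a, so the result overwrites a.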
    a = torch.rand(shape, dtype=dtype).to(torch_device)
    b = torch.rand(shape, dtype=dtype).to(torch_device)

    ans = swiglu(a, b)

    if sync is not None:
        sync()

    a, b = [
        rearrange_if_needed(tensor, stride)
        for tensor, stride in zip([a, b], [a_stride, b_stride])
    ]
    a_tensor, b_tensor = [to_tensor(tensor, lib) for tensor in [a, b]]

    descriptor = infiniopSwiGLUDescriptor_t()
    check_error(
        lib.infiniopCreateSwiGLUDescriptor(
            handle,
            ctypes.byref(descriptor),
            a_tensor.descriptor,
            a_tensor.descriptor,
            b_tensor.descriptor,
        )
    )

    # Invalidate the shape and strides in the descriptor to prevent them from being directly used by the kernel
    for tensor in [a_tensor, b_tensor]:
        tensor.descriptor.contents.invalidate()

    def lib_swiglu():
        check_error(
            lib.infiniopSwiGLU(
                descriptor, a_tensor.data, a_tensor.data, b_tensor.data, None
            )
        )

    lib_swiglu()

    atol, rtol = get_tolerance(_TOLERANCE_MAP, dtype)
    if DEBUG:
        debug(a, ans, atol=atol, rtol=rtol)
    assert torch.allclose(a, ans, atol=atol, rtol=rtol)
    print("in-place1 Test passed!")
    # Profiling workflow
    if PROFILE:
        # fmt: off
        profile_operation("PyTorch", lambda: swiglu(a, b), torch_device, NUM_PRERUN, NUM_ITERATIONS)
        profile_operation("    lib", lambda: lib_swiglu(), torch_device, NUM_PRERUN, NUM_ITERATIONS)
        # fmt: on
    check_error(lib.infiniopDestroySwiGLUDescriptor(descriptor))


def test_in_place2(
    lib,
    handle,
    torch_device,
    shape,
    a_stride=None,
    b_stride=None,
    dtype=torch.float16,
    sync=None,
):
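    # In-place variant 2: the output descriptor aliases input b, so the result overwrites b.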
    a = torch.rand(shape, dtype=dtype).to(torch_device)
    b = torch.rand(shape, dtype=dtype).to(torch_device)

    ans = swiglu(a, b)

    if sync is not None:
        sync()

    a, b = [
        rearrange_if_needed(tensor, stride)
        for tensor, stride in zip([a, b], [a_stride, b_stride])
    ]
    a_tensor, b_tensor = [to_tensor(tensor, lib) for tensor in [a, b]]

    descriptor = infiniopSwiGLUDescriptor_t()
    check_error(
        lib.infiniopCreateSwiGLUDescriptor(
            handle,
            ctypes.byref(descriptor),
            b_tensor.descriptor,
            a_tensor.descriptor,
            b_tensor.descriptor,
        )
    )

    # Invalidate the shape and strides in the descriptor to prevent them from being directly used by the kernel
    for tensor in [a_tensor, b_tensor]:
        tensor.descriptor.contents.invalidate()

    def lib_swiglu():
        check_error(
            lib.infiniopSwiGLU(
                descriptor, b_tensor.data, a_tensor.data, b_tensor.data, None
            )
        )

    lib_swiglu()

    atol, rtol = get_tolerance(_TOLERANCE_MAP, dtype)
    if DEBUG:
        debug(b, ans, atol=atol, rtol=rtol)
    assert torch.allclose(b, ans, atol=atol, rtol=rtol)
    print("in-place2 Test passed!")
    # Profiling workflow
    if PROFILE:
        # fmt: off
        profile_operation("PyTorch", lambda: swiglu(a, b), torch_device, NUM_PRERUN, NUM_ITERATIONS)
        profile_operation("    lib", lambda: lib_swiglu(), torch_device, NUM_PRERUN, NUM_ITERATIONS)
        # fmt: on
    check_error(lib.infiniopDestroySwiGLUDescriptor(descriptor))


def test(lib, handle, torch_device, shape, a_stride, b_stride, c_stride, dtype, sync=None):
    test_out_of_place(
        lib, handle, torch_device, shape, a_stride, b_stride, c_stride, dtype, sync
    )
    test_in_place1(lib, handle, torch_device, shape, a_stride, b_stride, dtype, sync)
    test_in_place2(lib, handle, torch_device, shape, a_stride, b_stride, dtype, sync)


if __name__ == "__main__":
    args = get_args()
    lib = open_lib()

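    # Register argument and return types for the C entry points so ctypes
    # marshals handles, descriptors, and data pointers correctly.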
    lib.infiniopCreateSwiGLUDescriptor.restype = c_int32
    lib.infiniopCreateSwiGLUDescriptor.argtypes = [
        infiniopHandle_t,
        POINTER(infiniopSwiGLUDescriptor_t),
        infiniopTensorDescriptor_t,
        infiniopTensorDescriptor_t,
        infiniopTensorDescriptor_t,
    ]

    lib.infiniopSwiGLU.restype = c_int32
    lib.infiniopSwiGLU.argtypes = [
        infiniopSwiGLUDescriptor_t,
        c_void_p,
        c_void_p,
        c_void_p,
        c_void_p,
    ]

    lib.infiniopDestroySwiGLUDescriptor.restype = c_int32
    lib.infiniopDestroySwiGLUDescriptor.argtypes = [
        infiniopSwiGLUDescriptor_t,
    ]

    # Configure testing options
    DEBUG = args.debug
    PROFILE = args.profile
    NUM_PRERUN = args.num_prerun
    NUM_ITERATIONS = args.num_iterations

    # Run every test case on each requested device, across all configured dtypes.
    for device in get_test_devices(args):
        test_operator(lib, device, test, _TEST_CASES, _TENSOR_DTYPES)

    print("\033[92mTest passed!\033[0m")