import ctypes
from ctypes import POINTER, Structure, c_int32, c_size_t, c_uint64, c_void_p, c_float
from enum import Enum, auto

import torch

from libinfiniop import (
    infiniopHandle_t,
    infiniopTensorDescriptor_t,
    open_lib,
    to_tensor,
    get_test_devices,
    check_error,
    rearrange_if_needed,
    create_workspace,
    test_operator,
    get_args,
    debug,
    get_tolerance,
    profile_operation,
)

xgqdut2016's avatar
xgqdut2016 committed
21
22
23
24
# ==============================================================================
#  Configuration (Internal Use Only)
# ==============================================================================
# These are not meant to be imported from other modules
25
26
27
28
29
30
31
_TEST_CASES_ = [
    # shape, x_stride, y_stride
    ((3, 3), None, None),
    ((32, 512), None, None),
    ((32, 512), (1024, 1), (1024, 1)),
    ((32, 5, 5), None, None),
    ((32, 20, 512), None, None),
32
    ((32, 20, 512), (20480, 512, 1), None),
33
    ((28, 15, 15), None, None),
xgqdut2016's avatar
xgqdut2016 committed
34
35
]

xgqdut2016's avatar
xgqdut2016 committed
36
# Data types used for testing
37
_TENSOR_DTYPES = [torch.float16, torch.bfloat16, torch.float32]
xgqdut2016's avatar
xgqdut2016 committed
38
39
40

# Tolerance map for different data types
_TOLERANCE_MAP = {
41
    torch.float16: {"atol": 1e-3, "rtol": 1e-2},
42
43
    torch.bfloat16: {"atol": 5e-3, "rtol": 5e-2},
    torch.float32: {"atol": 1e-5, "rtol": 1e-5},
xgqdut2016's avatar
xgqdut2016 committed
44
45
}

class Inplace(Enum):
    """Whether the library op writes its output into the input buffer."""

    OUT_OF_PLACE = auto()
    INPLACE_X = auto()


_INPLACE = [
    Inplace.INPLACE_X,
    Inplace.OUT_OF_PLACE,
]

# Cross product: every base case is run once per in-place mode.
_TEST_CASES = [
    case + (mode,) for case in _TEST_CASES_ for mode in _INPLACE
]

# Runtime options; the defaults below are overwritten from CLI args in __main__.
DEBUG = False
PROFILE = False
NUM_PRERUN = 10
NUM_ITERATIONS = 1000
class CausalSoftmaxDescriptor(Structure):
    """ctypes mirror of the library's opaque causal-softmax descriptor.

    Only the leading ``device`` field is declared; the script never reads
    past it — the struct is handled strictly through pointers.
    """

    _fields_ = [("device", c_int32)]


# Handle type passed to every infiniopCausalSoftmax* entry point.
infiniopCausalSoftmaxDescriptor_t = POINTER(CausalSoftmaxDescriptor)


def causal_softmax(x):
    type = x.dtype
    mask = torch.tril(torch.ones_like(x), diagonal=-1).flip(dims=[-2, -1])
79
80
    masked = torch.where(mask == 1, -torch.inf, x.to(torch.float32))
    return torch.nn.functional.softmax(masked, dim=-1, dtype=type)
PanZezhongQY's avatar
PanZezhongQY committed
81
82


def test(
    lib,
    handle,
    torch_device,
    shape,
    x_stride=None,
    y_stride=None,
    inplace=Inplace.OUT_OF_PLACE,
    dtype=torch.float16,
    sync=None,
):
    """Run one causal-softmax case through the library and compare the
    result against the PyTorch reference implementation."""
    print(
        f"Testing CausalSoftmax on {torch_device} with shape:{shape} x_stride:{x_stride} y_stride:{y_stride} dtype:{dtype} inplace:{inplace}"
    )

    x = torch.rand(shape, dtype=dtype).to(torch_device)
    # Plant the dtype's max value in the masked region so the test fails
    # loudly if the kernel does not actually apply the causal mask.
    causal_mask = torch.tril(torch.ones_like(x), diagonal=-1).flip(dims=[-2, -1])
    x = torch.where(causal_mask == 1, torch.full_like(x, torch.finfo(x.dtype).max), x)
    ans = causal_softmax(x)

    x = rearrange_if_needed(x, x_stride)
    x_tensor = to_tensor(x, lib)

    # In-place mode aliases y onto x; otherwise allocate a fresh output.
    if inplace == Inplace.INPLACE_X:
        y = x
        y_tensor = x_tensor
    else:
        y = torch.zeros(shape, dtype=dtype).to(torch_device)
        y = rearrange_if_needed(y, y_stride)
        y_tensor = to_tensor(y, lib)

    if sync is not None:
        sync()

    descriptor = infiniopCausalSoftmaxDescriptor_t()
    check_error(
        lib.infiniopCreateCausalSoftmaxDescriptor(
            handle, ctypes.byref(descriptor), y_tensor.descriptor, x_tensor.descriptor
        )
    )

    # Invalidate the shape and strides in the descriptor to prevent them from being directly used by the kernel
    x_tensor.destroyDesc(lib)

    # Query and allocate whatever scratch space the kernel needs.
    workspace_size = c_uint64(0)
    check_error(
        lib.infiniopGetCausalSoftmaxWorkspaceSize(
            descriptor, ctypes.byref(workspace_size)
        )
    )
    workspace = create_workspace(workspace_size.value, x.device)

    def lib_causal_softmax():
        check_error(
            lib.infiniopCausalSoftmax(
                descriptor,
                workspace.data_ptr() if workspace is not None else None,
                workspace_size.value,
                y_tensor.data,
                x_tensor.data,
                None,
            )
        )

    lib_causal_softmax()

    if sync is not None:
        sync()

    atol, rtol = get_tolerance(_TOLERANCE_MAP, dtype)
    if DEBUG:
        debug(y, ans, atol=atol, rtol=rtol)
    assert torch.allclose(y, ans, atol=atol, rtol=rtol)

    # Profiling workflow
    if PROFILE:
        # fmt: off
        profile_operation("PyTorch", lambda: causal_softmax(x), torch_device, NUM_PRERUN, NUM_ITERATIONS)
        profile_operation("    lib", lambda: lib_causal_softmax(), torch_device, NUM_PRERUN, NUM_ITERATIONS)
        # fmt: on

    check_error(lib.infiniopDestroyCausalSoftmaxDescriptor(descriptor))

if __name__ == "__main__":
    args = get_args()
    lib = open_lib()

    # Declare ctypes signatures for every entry point used above.
    # Fix: the create call passes BOTH the y and x tensor descriptors, so
    # argtypes needs two infiniopTensorDescriptor_t entries (the original
    # listed only one; ctypes tolerated the extra arg via default conversion).
    lib.infiniopCreateCausalSoftmaxDescriptor.restype = c_int32
    lib.infiniopCreateCausalSoftmaxDescriptor.argtypes = [
        infiniopHandle_t,
        POINTER(infiniopCausalSoftmaxDescriptor_t),
        infiniopTensorDescriptor_t,
        infiniopTensorDescriptor_t,
    ]

    lib.infiniopGetCausalSoftmaxWorkspaceSize.restype = c_int32
    lib.infiniopGetCausalSoftmaxWorkspaceSize.argtypes = [
        infiniopCausalSoftmaxDescriptor_t,
        POINTER(c_uint64),
    ]

    # Fix: the call site passes a trailing stream argument (None), so a
    # sixth c_void_p entry is declared for it.
    lib.infiniopCausalSoftmax.restype = c_int32
    lib.infiniopCausalSoftmax.argtypes = [
        infiniopCausalSoftmaxDescriptor_t,
        c_void_p,
        c_uint64,
        c_void_p,
        c_void_p,
        c_void_p,
    ]

    lib.infiniopDestroyCausalSoftmaxDescriptor.restype = c_int32
    lib.infiniopDestroyCausalSoftmaxDescriptor.argtypes = [
        infiniopCausalSoftmaxDescriptor_t,
    ]

    # Configure testing options
    DEBUG = args.debug
    PROFILE = args.profile
    NUM_PRERUN = args.num_prerun
    NUM_ITERATIONS = args.num_iterations

    for device in get_test_devices(args):
        test_operator(lib, device, test, _TEST_CASES, _TENSOR_DTYPES)

    print("\033[92mTest passed!\033[0m")