# causal_softmax.py — test for the InfiniOp causal-softmax operator
import torch
import ctypes
from ctypes import POINTER, Structure, c_int32, c_size_t, c_uint64, c_void_p, c_float
from libinfiniop import (
    infiniopHandle_t,
    infiniopTensorDescriptor_t,
    open_lib,
    to_tensor,
    get_test_devices,
    check_error,
    rearrange_if_needed,
    create_workspace,
    test_operator,
    get_args,
    debug,
    get_tolerance,
    profile_operation,
)
from enum import Enum, auto

# ==============================================================================
#  Configuration (Internal Use Only)
# ==============================================================================
# These are not meant to be imported from other modules
_TEST_CASES_ = [
    # shape, x_stride, y_stride
    # None stride means the tensor is contiguous in its natural layout;
    # explicit strides exercise non-contiguous input/output buffers.
    ((3, 3), None, None),
    ((32, 512), None, None),
    ((32, 512), (1024, 1), (1024, 1)),
    ((32, 5, 5), None, None),
    ((32, 20, 512), None, None),
    ((32, 20, 512), (20480, 512, 1), None),
    ((28, 15, 15), None, None),
]

# Data types used for testing
_TENSOR_DTYPES = [torch.float16, torch.float32]

# Tolerance map for different data types.
# NOTE: _TENSOR_DTYPES includes torch.float32, so the map must carry an
# explicit float32 entry as well; relying on an implicit fallback inside
# get_tolerance() would silently loosen (or break) the float32 check.
_TOLERANCE_MAP = {
    torch.float16: {"atol": 1e-3, "rtol": 1e-2},
    torch.float32: {"atol": 1e-5, "rtol": 1e-5},
}


class Inplace(Enum):
    """Whether the operator writes its result into a fresh output tensor
    or back over the input tensor x."""

    OUT_OF_PLACE = auto()
    INPLACE_X = auto()


# In-place modes to combine with every base test case.
_INPLACE = [
    Inplace.INPLACE_X,
    Inplace.OUT_OF_PLACE,
]

# Cross product of every base case with every in-place mode.
_TEST_CASES = [
    test_case + (inplace_item,)
    for test_case in _TEST_CASES_
    for inplace_item in _INPLACE
]

# Runtime options; defaults below are overridden from CLI args in __main__.
DEBUG = False
PROFILE = False
NUM_PRERUN = 10
NUM_ITERATIONS = 1000
class CausalSoftmaxDescriptor(Structure):
    """ctypes mirror of the C-side causal-softmax operator descriptor.

    Only the leading `device` field is declared here; the C library owns the
    rest of the (opaque) structure.
    """

    _fields_ = [("device", c_int32)]


# Pointer type handed to / received from the infiniop C API.
infiniopCausalSoftmaxDescriptor_t = POINTER(CausalSoftmaxDescriptor)


def causal_softmax(x):
    type = x.dtype
    mask = torch.tril(torch.ones_like(x), diagonal=-1).flip(dims=[-2, -1])
77
78
    masked = torch.where(mask == 1, -torch.inf, x.to(torch.float32))
    return torch.nn.functional.softmax(masked, dim=-1, dtype=type)


def test(
    lib,
    handle,
    torch_device,
    shape,
    x_stride=None,
    y_stride=None,
    inplace=Inplace.OUT_OF_PLACE,
    dtype=torch.float16,
    sync=None
):
    """Run one causal-softmax case through the infiniop library and compare
    the kernel's output against the PyTorch reference implementation."""
    print(
        f"Testing CausalSoftmax on {torch_device} with shape:{shape} x_stride:{x_stride} y_stride:{y_stride} dtype:{dtype} inplace:{inplace}"
    )

    # Random input; positions the causal mask must hide are filled with the
    # dtype's max value so an unmasked kernel produces a visibly wrong result.
    x = torch.rand(shape, dtype=dtype).to(torch_device)
    hidden = torch.tril(torch.ones_like(x), diagonal=-1).flip(dims=[-2, -1])
    x = torch.where(hidden == 1, torch.full_like(x, torch.finfo(x.dtype).max), x)
    reference = causal_softmax(x)

    x = rearrange_if_needed(x, x_stride)
    x_tensor = to_tensor(x, lib)

    if inplace == Inplace.INPLACE_X:
        # The output aliases the input buffer.
        y, y_tensor = x, x_tensor
    else:
        y = rearrange_if_needed(torch.zeros(shape, dtype=dtype).to(torch_device), y_stride)
        y_tensor = to_tensor(y, lib)

    if sync is not None:
        sync()

    descriptor = infiniopCausalSoftmaxDescriptor_t()
    check_error(
        lib.infiniopCreateCausalSoftmaxDescriptor(
            handle, ctypes.byref(descriptor), y_tensor.descriptor, x_tensor.descriptor
        )
    )

    # Invalidate the shape and strides in the descriptor to prevent them from
    # being directly used by the kernel.
    # NOTE(review): only x's descriptor is destroyed here; y's is left alive —
    # confirm against the other operator tests whether that is intentional.
    x_tensor.destroyDesc(lib)

    ws_size = c_uint64(0)
    check_error(
        lib.infiniopGetCausalSoftmaxWorkspaceSize(descriptor, ctypes.byref(ws_size))
    )
    workspace = create_workspace(ws_size.value, x.device)

    def run_lib():
        # Invoke the library kernel: y = causal_softmax(x).
        check_error(
            lib.infiniopCausalSoftmax(
                descriptor,
                workspace.data_ptr() if workspace is not None else None,
                ws_size.value,
                y_tensor.data,
                x_tensor.data,
                None,
            )
        )

    run_lib()

    if sync is not None:
        sync()

    atol, rtol = get_tolerance(_TOLERANCE_MAP, dtype)
    if DEBUG:
        debug(y, reference, atol=atol, rtol=rtol)
    assert torch.allclose(y, reference, atol=atol, rtol=rtol)

    # Profiling workflow
    if PROFILE:
        # fmt: off
        profile_operation("PyTorch", lambda: causal_softmax(x), torch_device, NUM_PRERUN, NUM_ITERATIONS)
        profile_operation("    lib", lambda: run_lib(), torch_device, NUM_PRERUN, NUM_ITERATIONS)
        # fmt: on

    check_error(lib.infiniopDestroyCausalSoftmaxDescriptor(descriptor))


if __name__ == "__main__":
    args = get_args()
    lib = open_lib()

    # Declare the C signatures of the causal-softmax entry points so ctypes
    # can type-check and marshal every argument.
    lib.infiniopCreateCausalSoftmaxDescriptor.restype = c_int32
    lib.infiniopCreateCausalSoftmaxDescriptor.argtypes = [
        infiniopHandle_t,
        POINTER(infiniopCausalSoftmaxDescriptor_t),
        infiniopTensorDescriptor_t,  # y (output) descriptor
        # Fix: the call site in test() passes both y and x descriptors, but
        # the x descriptor entry was missing from argtypes.
        infiniopTensorDescriptor_t,  # x (input) descriptor
    ]

    lib.infiniopGetCausalSoftmaxWorkspaceSize.restype = c_int32
    lib.infiniopGetCausalSoftmaxWorkspaceSize.argtypes = [
        infiniopCausalSoftmaxDescriptor_t,
        POINTER(c_uint64),  # out: required workspace size in bytes
    ]

    lib.infiniopCausalSoftmax.restype = c_int32
    lib.infiniopCausalSoftmax.argtypes = [
        infiniopCausalSoftmaxDescriptor_t,
        c_void_p,  # workspace pointer
        c_uint64,  # workspace size in bytes
        c_void_p,  # y data
        c_void_p,  # x data
        # Fix: test() passes a sixth (stream) argument, previously undeclared.
        c_void_p,  # stream
    ]

    lib.infiniopDestroyCausalSoftmaxDescriptor.restype = c_int32
    lib.infiniopDestroyCausalSoftmaxDescriptor.argtypes = [
        infiniopCausalSoftmaxDescriptor_t,
    ]

    # Configure testing options
    DEBUG = args.debug
    PROFILE = args.profile
    NUM_PRERUN = args.num_prerun
    NUM_ITERATIONS = args.num_iterations

    for device in get_test_devices(args):
        test_operator(lib, device, test, _TEST_CASES, _TENSOR_DTYPES)

    print("\033[92mTest passed!\033[0m")