causal_softmax.py 4.24 KB
Newer Older
xgqdut2016's avatar
xgqdut2016 committed
1
import torch
PanZezhongQY's avatar
PanZezhongQY committed
2
import ctypes
3
from ctypes import c_uint64
xgqdut2016's avatar
xgqdut2016 committed
4
from libinfiniop import (
5
6
    LIBINFINIOP,
    TestTensor,
xgqdut2016's avatar
xgqdut2016 committed
7
    get_test_devices,
PanZezhongQY's avatar
PanZezhongQY committed
8
    check_error,
xgqdut2016's avatar
xgqdut2016 committed
9
10
11
12
13
    test_operator,
    get_args,
    debug,
    get_tolerance,
    profile_operation,
14
15
16
17
18
    TestWorkspace,
    InfiniDtype,
    InfiniDtypeNames,
    InfiniDeviceNames,
    infiniopOperatorDescriptor_t,
PanZezhongQY's avatar
PanZezhongQY committed
19
)
20
from enum import Enum, auto
PanZezhongQY's avatar
PanZezhongQY committed
21

xgqdut2016's avatar
xgqdut2016 committed
22
23
24
25
# ==============================================================================
#  Configuration (Internal Use Only)
# ==============================================================================
# These are not meant to be imported from other modules
_TEST_CASES_ = [
    # shape, x_stride, y_stride
    # (strides are element strides; None means contiguous layout)
    ((3, 3), None, None),
    ((32, 512), None, None),
    ((32, 512), (1024, 1), (1024, 1)),
    ((32, 5, 5), None, None),
    ((32, 20, 512), None, None),
    ((32, 20, 512), (20480, 512, 1), None),
    ((28, 15, 15), None, None),
]

xgqdut2016's avatar
xgqdut2016 committed
37
# Data types used for testing
38
_TENSOR_DTYPES = [InfiniDtype.F16, InfiniDtype.BF16, InfiniDtype.F32]
xgqdut2016's avatar
xgqdut2016 committed
39
40
41

# Tolerance map for different data types
# (looser tolerances for the low-precision float formats)
_TOLERANCE_MAP = {
    InfiniDtype.F16: {"atol": 1e-3, "rtol": 1e-2},
    InfiniDtype.BF16: {"atol": 5e-3, "rtol": 5e-2},
    InfiniDtype.F32: {"atol": 3e-5, "rtol": 1e-5},
}

47
48
49
50
51
52
53
54

class Inplace(Enum):
    """Whether the operator writes its result into the input buffer (x)
    or into a separately allocated output tensor."""

    # Explicit values match what auto() assigned in declaration order.
    OUT_OF_PLACE = 1
    INPLACE_X = 2


# In-place variants exercised for every base test case
_INPLACE = [
    Inplace.INPLACE_X,
    Inplace.OUT_OF_PLACE,
]

# Cross product: each (shape, x_stride, y_stride) case is run once per
# in-place variant, appended as the last tuple element.
_TEST_CASES = [
    test_case + (inplace_item,)
    for test_case in _TEST_CASES_
    for inplace_item in _INPLACE
]

xgqdut2016's avatar
xgqdut2016 committed
64
65
66
67
# Runtime options; defaults here are overridden from CLI args in __main__.
DEBUG = False
PROFILE = False
NUM_PRERUN = 10
NUM_ITERATIONS = 1000
PanZezhongQY's avatar
PanZezhongQY committed
68

xgqdut2016's avatar
xgqdut2016 committed
69

PanZezhongQY's avatar
PanZezhongQY committed
70
71
72
def causal_softmax(x):
    type = x.dtype
    mask = torch.tril(torch.ones_like(x), diagonal=-1).flip(dims=[-2, -1])
73
74
    masked = torch.where(mask == 1, -torch.inf, x.to(torch.float32))
    return torch.nn.functional.softmax(masked, dim=-1, dtype=type)
PanZezhongQY's avatar
PanZezhongQY committed
75
76


77
78
def test(
    handle,
    device,
    shape,
    x_stride=None,
    y_stride=None,
    inplace=Inplace.OUT_OF_PLACE,
    dtype=InfiniDtype.F16,
    sync=None,
):
    """Run one CausalSoftmax case against the library implementation.

    Builds the input tensor, computes the PyTorch reference with
    ``causal_softmax()``, runs the library operator through its descriptor
    API, and asserts both results agree within the dtype's tolerance.

    Args:
        handle: library handle used to create the operator descriptor.
        device: device id the tensors and operator run on.
        shape: tensor shape shared by input and output.
        x_stride: optional element strides for x (None = contiguous).
        y_stride: optional element strides for y (None = contiguous).
        inplace: whether the op writes into x or into a separate y tensor.
        dtype: InfiniDtype of the tensors under test.
        sync: optional callable that synchronizes the device.
    """
    print(
        f"Testing CausalSoftmax on {InfiniDeviceNames[device]} with shape:{shape} x_stride:{x_stride} y_stride:{y_stride} dtype:{InfiniDtypeNames[dtype]} inplace:{inplace}"
    )

    x = TestTensor(shape, x_stride, dtype, device)
    ans = causal_softmax(x.torch_tensor())

    if inplace == Inplace.INPLACE_X:
        y = x
    else:
        # BUGFIX: this previously passed x_stride, silently ignoring the
        # y_stride test parameter for out-of-place cases.
        y = TestTensor(shape, y_stride, dtype, device)

    if sync is not None:
        sync()

    descriptor = infiniopOperatorDescriptor_t()
    check_error(
        LIBINFINIOP.infiniopCreateCausalSoftmaxDescriptor(
            handle, ctypes.byref(descriptor), y.descriptor, x.descriptor
        )
    )

    # Invalidate the shape and strides in the descriptor to prevent them from being directly used by the kernel
    x.destroy_desc()
    y.destroy_desc()

    workspace_size = c_uint64(0)
    check_error(
        LIBINFINIOP.infiniopGetCausalSoftmaxWorkspaceSize(
            descriptor, ctypes.byref(workspace_size)
        )
    )
    workspace = TestWorkspace(workspace_size.value, x.device)

    def lib_causal_softmax():
        # Execute the library operator: y = causal_softmax(x)
        check_error(
            LIBINFINIOP.infiniopCausalSoftmax(
                descriptor,
                workspace.data(),
                workspace_size.value,
                y.data(),
                x.data(),
                None,
            )
        )

    lib_causal_softmax()

    if sync is not None:
        sync()

    atol, rtol = get_tolerance(_TOLERANCE_MAP, dtype)
    if DEBUG:
        debug(y.actual_tensor(), ans, atol=atol, rtol=rtol)
    assert torch.allclose(y.actual_tensor(), ans, atol=atol, rtol=rtol)

    # Profiling workflow
    if PROFILE:
        # fmt: off
        profile_operation("PyTorch", lambda: causal_softmax(x.torch_tensor()), device, NUM_PRERUN, NUM_ITERATIONS)
        profile_operation("    lib", lambda: lib_causal_softmax(), device, NUM_PRERUN, NUM_ITERATIONS)
        # fmt: on

    check_error(LIBINFINIOP.infiniopDestroyCausalSoftmaxDescriptor(descriptor))
151

PanZezhongQY's avatar
PanZezhongQY committed
152
153
154

if __name__ == "__main__":
    args = get_args()

    # Configure testing options (override module-level defaults from CLI flags)
    DEBUG = args.debug
    PROFILE = args.profile
    NUM_PRERUN = args.num_prerun
    NUM_ITERATIONS = args.num_iterations

    # Run every (shape, stride, inplace) case on every requested device/dtype.
    for device in get_test_devices(args):
        test_operator(device, test, _TEST_CASES, _TENSOR_DTYPES)

    print("\033[92mTest passed!\033[0m")