import torch
import ctypes
from ctypes import c_uint64
from libinfiniop import (
    LIBINFINIOP,
    TestTensor,
    get_test_devices,
    check_error,
    test_operator,
    get_args,
    debug,
    get_tolerance,
    profile_operation,
    TestWorkspace,
    InfiniDtype,
    InfiniDtypeNames,
    InfiniDeviceNames,
    infiniopOperatorDescriptor_t,
)
from enum import Enum, auto

# ==============================================================================
#  Configuration (Internal Use Only)
# ==============================================================================
# These are not meant to be imported from other modules
_TEST_CASES_ = [
    # shape, x_stride, y_stride
    ((3, 3), None, None),
    ((32, 512), None, None),
    ((32, 512), (1024, 1), (1024, 1)),
    ((32, 5, 5), None, None),
    ((32, 20, 512), None, None),
    ((32, 20, 512), (20480, 512, 1), None),
    ((28, 15, 15), None, None),
    # Large sequence lengths around a power of two to exercise kernel tiling.
    ((28, 1024, 1024), None, None),
    ((28, 1025, 1025), None, None),
    ((28, 1031, 1031), None, None),
]

# Data types used for testing
_TENSOR_DTYPES = [InfiniDtype.F16, InfiniDtype.BF16, InfiniDtype.F32]

# Tolerance map for different data types
_TOLERANCE_MAP = {
    InfiniDtype.F16: {"atol": 1e-3, "rtol": 1e-2},
    InfiniDtype.BF16: {"atol": 5e-3, "rtol": 5e-2},
    InfiniDtype.F32: {"atol": 3e-5, "rtol": 1e-5},
}
class Inplace(Enum):
    """Whether the operator writes its output into the input tensor."""

    OUT_OF_PLACE = 1
    INPLACE_X = 2


# Every shape/stride case is run both in-place and out-of-place.
_INPLACE = [
    Inplace.INPLACE_X,
    Inplace.OUT_OF_PLACE,
]

_TEST_CASES = [
    test_case + (inplace_item,)
    for test_case in _TEST_CASES_
    for inplace_item in _INPLACE
]

# Runtime options; overridden from CLI arguments in __main__.
DEBUG = False
PROFILE = False
NUM_PRERUN = 10
NUM_ITERATIONS = 1000
def causal_softmax(x):
    type = x.dtype
75
76
77
78
79
80
81
82
83
84
85

    # Issue: torch_musa's implementation of `torch.tril` has a known bug for certain shapes (e.g., (32, 5, 5)).
    # Workaround: Generate the lower triangular mask on the CPU and then transfer it to the MUSA device.
    if x.device.type == "musa":
        mask = (
            torch.tril(torch.ones_like(x).to("cpu"), diagonal=-1)
            .flip(dims=[-2, -1])
            .to("musa")
        )
    else:
        mask = torch.tril(torch.ones_like(x), diagonal=-1).flip(dims=[-2, -1])
86
87
    masked = torch.where(mask == 1, -torch.inf, x.to(torch.float32))
    return torch.nn.functional.softmax(masked, dim=-1, dtype=type)
PanZezhongQY's avatar
PanZezhongQY committed
88
89


def test(
    handle,
    device,
    shape,
    x_stride=None,
    y_stride=None,
    inplace=Inplace.OUT_OF_PLACE,
    dtype=InfiniDtype.F16,
    sync=None,
):
    """Run one CausalSoftmax case against the library implementation.

    Builds input/output tensors, creates the operator descriptor, runs the
    library kernel, and compares the result with the PyTorch reference from
    `causal_softmax` within the dtype's tolerance.
    """
    print(
        f"Testing CausalSoftmax on {InfiniDeviceNames[device]} with shape:{shape} x_stride:{x_stride} y_stride:{y_stride} dtype:{InfiniDtypeNames[dtype]} inplace:{inplace}"
    )

    x = TestTensor(shape, x_stride, dtype, device)
    ans = causal_softmax(x.torch_tensor())

    if inplace == Inplace.INPLACE_X:
        y = x
    else:
        # BUG FIX: the output tensor was previously built with x_stride,
        # silently ignoring the y_stride argument (so cases like
        # ((32, 20, 512), (20480, 512, 1), None) never tested a contiguous y).
        y = TestTensor(shape, y_stride, dtype, device)

    if sync is not None:
        sync()

    descriptor = infiniopOperatorDescriptor_t()
    check_error(
        LIBINFINIOP.infiniopCreateCausalSoftmaxDescriptor(
            handle, ctypes.byref(descriptor), y.descriptor, x.descriptor
        )
    )

    # Invalidate the shape and strides in the descriptor to prevent them from being directly used by the kernel
    x.destroy_desc()
    y.destroy_desc()

    workspace_size = c_uint64(0)
    check_error(
        LIBINFINIOP.infiniopGetCausalSoftmaxWorkspaceSize(
            descriptor, ctypes.byref(workspace_size)
        )
    )
    workspace = TestWorkspace(workspace_size.value, x.device)

    def lib_causal_softmax():
        # Thin wrapper so the same call can be reused by the profiler below.
        check_error(
            LIBINFINIOP.infiniopCausalSoftmax(
                descriptor,
                workspace.data(),
                workspace_size.value,
                y.data(),
                x.data(),
                None,
            )
        )

    lib_causal_softmax()

    if sync is not None:
        sync()

    atol, rtol = get_tolerance(_TOLERANCE_MAP, dtype)
    if DEBUG:
        debug(y.actual_tensor(), ans, atol=atol, rtol=rtol)
    assert torch.allclose(y.actual_tensor(), ans, atol=atol, rtol=rtol)

    # Profiling workflow
    if PROFILE:
        # fmt: off
        profile_operation("PyTorch", lambda: causal_softmax(x.torch_tensor()), device, NUM_PRERUN, NUM_ITERATIONS)
        profile_operation("    lib", lambda: lib_causal_softmax(), device, NUM_PRERUN, NUM_ITERATIONS)
        # fmt: on

    check_error(LIBINFINIOP.infiniopDestroyCausalSoftmaxDescriptor(descriptor))

if __name__ == "__main__":
    args = get_args()

    # Configure testing options from the command line
    DEBUG = args.debug
    PROFILE = args.profile
    NUM_PRERUN = args.num_prerun
    NUM_ITERATIONS = args.num_iterations

    for device in get_test_devices(args):
        test_operator(device, test, _TEST_CASES, _TENSOR_DTYPES)

    print("\033[92mTest passed!\033[0m")