import ctypes
from ctypes import c_uint64
from enum import Enum, auto

import torch

from libinfiniop import (
    LIBINFINIOP,
    TestTensor,
    get_test_devices,
    check_error,
    test_operator,
    get_args,
    debug,
    get_tolerance,
    profile_operation,
    TestWorkspace,
    InfiniDtype,
    InfiniDtypeNames,
    InfiniDeviceNames,
    infiniopOperatorDescriptor_t,
)

# ==============================================================================
#  Configuration (Internal Use Only)
# ==============================================================================
# These are not meant to be imported from other modules
_TEST_CASES_ = [
    # (shape, x_stride, y_stride) — None means contiguous layout
    ((3, 3), None, None),
    ((32, 512), None, None),
    ((32, 512), (1024, 1), (1024, 1)),
    ((32, 5, 5), None, None),
    ((32, 20, 512), None, None),
    ((32, 20, 512), (20480, 512, 1), None),
    ((28, 15, 15), None, None),
]
# Data types used for testing
_TENSOR_DTYPES = [InfiniDtype.F16, InfiniDtype.BF16, InfiniDtype.F32]

# Per-dtype numerical tolerances, looked up via get_tolerance() when the
# library result is compared against the PyTorch reference.
_TOLERANCE_MAP = {
    InfiniDtype.F16: {"atol": 1e-3, "rtol": 1e-2},
    InfiniDtype.BF16: {"atol": 5e-3, "rtol": 5e-2},
    InfiniDtype.F32: {"atol": 3e-5, "rtol": 1e-5},
}
class Inplace(Enum):
    """Whether the operator writes its result into the input tensor."""

    OUT_OF_PLACE = 1  # y is a separate output tensor
    INPLACE_X = 2  # y aliases x


# Inplace modes exercised for every base test case.
_INPLACE = [
    Inplace.INPLACE_X,
    Inplace.OUT_OF_PLACE,
]

# Full test matrix: every base case crossed with every inplace mode.
_TEST_CASES = [
    (*base_case, mode)
    for base_case in _TEST_CASES_
    for mode in _INPLACE
]
# Default runtime options; overridden from CLI args in the __main__ block.
DEBUG = False  # when True, debug(...) prints a comparison before the assert
PROFILE = False  # when True, run the profiling workflow
NUM_PRERUN = 10  # passed to profile_operation (presumably warmup runs)
NUM_ITERATIONS = 1000  # passed to profile_operation (timed runs)
def causal_softmax(x):
    """Reference causal softmax over the last dimension of ``x``.

    Positions above the flipped lower triangle (the "future" entries when
    rows are aligned to the last columns; for a square matrix this is the
    standard causal mask) are set to ``-inf`` before the softmax.  The
    softmax is computed in float32 and the result is cast back to the
    input's dtype.

    Args:
        x: input tensor with at least 2 dimensions; the mask is built over
           the trailing two dimensions.

    Returns:
        Tensor of the same shape and dtype as ``x``, rows summing to 1.
    """
    dtype = x.dtype  # remember the caller's dtype for the final cast
    # Issue: torch_musa's implementation of `torch.tril` has a known bug for
    # certain shapes (e.g., (32, 5, 5)).
    # Workaround: generate the lower triangular mask on the CPU and then
    # transfer it to the MUSA device.
    if x.device.type == "musa":
        mask = (
            torch.tril(torch.ones_like(x).to("cpu"), diagonal=-1)
            .flip(dims=[-2, -1])
            .to("musa")
        )
    else:
        mask = torch.tril(torch.ones_like(x), diagonal=-1).flip(dims=[-2, -1])
    masked = torch.where(mask == 1, -torch.inf, x.to(torch.float32))
    return torch.nn.functional.softmax(masked, dim=-1, dtype=dtype)
def test(
    handle,
    device,
    shape,
    x_stride=None,
    y_stride=None,
    inplace=Inplace.OUT_OF_PLACE,
    dtype=InfiniDtype.F16,
    sync=None,
):
    """Run one CausalSoftmax case against the library and the PyTorch reference.

    Args:
        handle: infiniop library handle.
        device: device enum value under test.
        shape: tensor shape of the test case.
        x_stride: strides for the input tensor; None means contiguous.
        y_stride: strides for the output tensor; None means contiguous.
            Ignored when ``inplace`` is INPLACE_X, since y aliases x.
        inplace: whether the library writes its result back into x.
        dtype: element dtype (InfiniDtype enum value).
        sync: optional device synchronization callback.
    """
    print(
        f"Testing CausalSoftmax on {InfiniDeviceNames[device]} with shape:{shape} x_stride:{x_stride} y_stride:{y_stride} dtype:{InfiniDtypeNames[dtype]} inplace:{inplace}"
    )

    x = TestTensor(shape, x_stride, dtype, device)
    ans = causal_softmax(x.torch_tensor())

    if inplace == Inplace.INPLACE_X:
        y = x
    else:
        # BUGFIX: y was previously constructed with x_stride, silently
        # ignoring the y_stride that test cases specify (see _TEST_CASES_).
        y = TestTensor(shape, y_stride, dtype, device)

    if sync is not None:
        sync()

    descriptor = infiniopOperatorDescriptor_t()
    check_error(
        LIBINFINIOP.infiniopCreateCausalSoftmaxDescriptor(
            handle, ctypes.byref(descriptor), y.descriptor, x.descriptor
        )
    )

    # Invalidate the shape and strides in the descriptor to prevent them from
    # being directly used by the kernel
    x.destroy_desc()
    y.destroy_desc()

    workspace_size = c_uint64(0)
    check_error(
        LIBINFINIOP.infiniopGetCausalSoftmaxWorkspaceSize(
            descriptor, ctypes.byref(workspace_size)
        )
    )
    workspace = TestWorkspace(workspace_size.value, x.device)

    def lib_causal_softmax():
        # Invoke the library kernel; last argument is the (unused) stream.
        check_error(
            LIBINFINIOP.infiniopCausalSoftmax(
                descriptor,
                workspace.data(),
                workspace_size.value,
                y.data(),
                x.data(),
                None,
            )
        )

    lib_causal_softmax()

    if sync is not None:
        sync()

    atol, rtol = get_tolerance(_TOLERANCE_MAP, dtype)
    if DEBUG:
        debug(y.actual_tensor(), ans, atol=atol, rtol=rtol)
    assert torch.allclose(y.actual_tensor(), ans, atol=atol, rtol=rtol)

    # Profiling workflow
    if PROFILE:
        # fmt: off
        profile_operation("PyTorch", lambda: causal_softmax(x.torch_tensor()), device, NUM_PRERUN, NUM_ITERATIONS)
        profile_operation("    lib", lambda: lib_causal_softmax(), device, NUM_PRERUN, NUM_ITERATIONS)
        # fmt: on

    check_error(LIBINFINIOP.infiniopDestroyCausalSoftmaxDescriptor(descriptor))
if __name__ == "__main__":
    args = get_args()

    # Configure testing options from the parsed command-line arguments.
    DEBUG = args.debug
    PROFILE = args.profile
    NUM_PRERUN = args.num_prerun
    NUM_ITERATIONS = args.num_iterations

    # Run the full test matrix on every requested device.
    for device in get_test_devices(args):
        test_operator(device, test, _TEST_CASES, _TENSOR_DTYPES)

    print("\033[92mTest passed!\033[0m")