#!/usr/bin/env python3

import torch
import ctypes
from ctypes import POINTER, Structure, c_int32, c_size_t, c_uint64, c_void_p, c_float
from libinfiniop import (
    infiniopHandle_t,
    infiniopTensorDescriptor_t,
    open_lib,
    to_tensor,
    get_test_devices,
    check_error,
    rearrange_if_needed,
    create_workspace,
    test_operator,
    get_args,
    debug,
    get_tolerance,
    profile_operation,
)
from enum import Enum, auto

# ==============================================================================
#  Configuration (Internal Use Only)
# ==============================================================================
# These are not meant to be imported from other modules
_TEST_CASES_ = [
    # shape, x_stride, y_stride, min_val, max_val
    # 基本形状测试
    ((10,), None, None, -1.0, 1.0),
    ((5, 10), None, None, -1.0, 1.0),
    ((2, 3, 4), None, None, -1.0, 1.0),
    # 不同的min_val和max_val
    ((10,), None, None, 0.0, 2.0),
    ((5, 10), None, None, 0.0, 2.0),
    ((2, 3, 4), None, None, 0.0, 2.0),
    ((10,), None, None, -2.0, 0.0),
    ((5, 10), None, None, -2.0, 0.0),
    ((2, 3, 4), None, None, -2.0, 0.0),
    # 奇怪形状测试
PanZezhong's avatar
PanZezhong committed
41
42
    ((7, 13), None, None, -1.0, 1.0),  # 质数维度
    ((3, 5, 7), None, None, -1.0, 1.0),  # 三维质数
goldenfox2025's avatar
goldenfox2025 committed
43
    # 非标准形状测试
PanZezhong's avatar
PanZezhong committed
44
45
46
    ((1, 1), None, None, -1.0, 1.0),  # 最小形状
    ((100, 100), None, None, -1.0, 1.0),  # 大形状
    ((16, 16, 16), None, None, -1.0, 1.0),  # 大三维
goldenfox2025's avatar
goldenfox2025 committed
47
48
    # 极端值测试
    ((10,), None, None, -1000.0, 1000.0),  # 大范围
PanZezhong's avatar
PanZezhong committed
49
50
    ((10,), None, None, -0.001, 0.001),  # 小范围
    ((10,), None, None, 0.0, 0.0),  # min=max
goldenfox2025's avatar
goldenfox2025 committed
51
52
]

goldenfox2025's avatar
goldenfox2025 committed
53
54

_TENSOR_DTYPES = [torch.float16, torch.float32]
goldenfox2025's avatar
goldenfox2025 committed
55
56
57


_TOLERANCE_MAP = {
goldenfox2025's avatar
goldenfox2025 committed
58
    torch.float16: {"atol": 1e-3, "rtol": 1e-3},
goldenfox2025's avatar
goldenfox2025 committed
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
    torch.float32: {"atol": 1e-7, "rtol": 1e-6},
}


class Inplace(Enum):
    """Whether the library op writes its result into the input tensor x."""

    OUT_OF_PLACE = auto()
    INPLACE_X = auto()


# Each base case is exercised both in-place and out-of-place.
_INPLACE = [
    Inplace.INPLACE_X,
    Inplace.OUT_OF_PLACE,
]

# Full test matrix: every base case tuple gains a trailing Inplace mode.
_TEST_CASES = [
    case + (mode,)
    for case in _TEST_CASES_
    for mode in _INPLACE
]

# Runtime options; overwritten from CLI arguments in __main__.
DEBUG = False
PROFILE = False
NUM_PRERUN = 10
NUM_ITERATIONS = 1000


class ClipDescriptor(Structure):
    """ctypes mirror of the C-side Clip operator descriptor."""

    # Field order must match the C struct layout exactly.
    _fields_ = [("device_type", c_int32), ("device_id", c_int32)]


# Pointer type passed across the C API boundary.
infiniopClipDescriptor_t = POINTER(ClipDescriptor)


def clip(x, min_val, max_val):
    """Reference implementation: element-wise clamp of x to [min_val, max_val]."""
    return x.clamp(min_val, max_val)


def test(
    lib,
    handle,
    torch_device,
    shape,
    x_stride=None,
    y_stride=None,
    min_val=-1.0,
    max_val=1.0,
    inplace=Inplace.OUT_OF_PLACE,
    dtype=torch.float32,
    sync=None,
):
    """Run one Clip case against the library and compare with torch.clamp.

    Args:
        lib: ctypes handle to the loaded operator library.
        handle: infiniop device handle.
        torch_device: torch device the tensors are placed on.
        shape: tensor shape for this case.
        x_stride: optional strides for x; applied via rearrange_if_needed.
        y_stride: optional strides for y (out-of-place only).
        min_val: lower clamp bound.
        max_val: upper clamp bound.
        inplace: Inplace.INPLACE_X writes the result into x's buffer.
        dtype: tensor dtype under test.
        sync: optional device-synchronization callable.

    Raises:
        AssertionError: if the library output differs from the reference
            beyond the per-dtype tolerance.
    """
    print(
        f"Testing Clip on {torch_device} with shape:{shape} x_stride:{x_stride} y_stride:{y_stride} "
        f"min_val:{min_val} max_val:{max_val} dtype:{dtype} inplace:{inplace}"
    )
    # NOTE(review): torch.rand samples from [0, 1), so cases with min_val < 0
    # never exercise the lower clamp bound — consider widening the input range.
    x = torch.rand(shape, dtype=dtype).to(torch_device)
    # Compute the reference before any rearrange or in-place run can touch x.
    ans = clip(x, min_val, max_val)
    x = rearrange_if_needed(x, x_stride)
    x_tensor = to_tensor(x, lib)
    if inplace == Inplace.INPLACE_X:
        y = x
        y_tensor = x_tensor
    else:
        y = torch.zeros(shape, dtype=dtype).to(torch_device)
        y = rearrange_if_needed(y, y_stride)
        y_tensor = to_tensor(y, lib)

    if sync is not None:
        sync()

    descriptor = infiniopClipDescriptor_t()
    # min/max are single-element tensors broadcast to `shape` via zero strides.
    min_ = torch.tensor([min_val], dtype=dtype).to(torch_device)
    max_ = torch.tensor([max_val], dtype=dtype).to(torch_device)
    min_tensor = to_tensor(
        min_, lib, force_shape=shape, force_strides=[0 for _ in shape]
    )
    max_tensor = to_tensor(
        max_, lib, force_shape=shape, force_strides=[0 for _ in shape]
    )
    check_error(
        lib.infiniopCreateClipDescriptor(
            handle,
            ctypes.byref(descriptor),
            y_tensor.descriptor,
            x_tensor.descriptor,
            min_tensor.descriptor,
            max_tensor.descriptor,
        )
    )

    workspace_size = c_uint64(0)
    check_error(
        lib.infiniopGetClipWorkspaceSize(descriptor, ctypes.byref(workspace_size))
    )
    workspace = create_workspace(workspace_size.value, x.device)

    def lib_clip():
        # Single library invocation; also reused by the profiling loop below.
        check_error(
            lib.infiniopClip(
                descriptor,
                workspace.data_ptr() if workspace is not None else None,
                workspace_size.value,
                y_tensor.data,
                x_tensor.data,
                min_tensor.data,
                max_tensor.data,
                None,  # stream
            )
        )

    lib_clip()

    # Now we can destroy the tensor descriptors
    x_tensor.destroyDesc(lib)
    if inplace != Inplace.INPLACE_X:
        y_tensor.destroyDesc(lib)
    # BUGFIX: min/max descriptors were previously leaked — they were created
    # via to_tensor above but never destroyed.
    min_tensor.destroyDesc(lib)
    max_tensor.destroyDesc(lib)

    atol, rtol = get_tolerance(_TOLERANCE_MAP, dtype)
    if DEBUG or not torch.allclose(y, ans, atol=atol, rtol=rtol):
        print("\nExpected:")
        print(ans)
        print("\nActual:")
        print(y)
        print("\nDifference:")
        print(torch.abs(y - ans))
        print("\nMax difference:", torch.max(torch.abs(y - ans)).item())
        debug(y, ans, atol=atol, rtol=rtol)
    assert torch.allclose(y, ans, atol=atol, rtol=rtol)

    # Profiling workflow
    if PROFILE:
        # fmt: off
        profile_operation("PyTorch", lambda: clip(x, min_val, max_val), torch_device, NUM_PRERUN, NUM_ITERATIONS)
        profile_operation("    lib", lambda: lib_clip(), torch_device, NUM_PRERUN, NUM_ITERATIONS)
        # fmt: on

    check_error(lib.infiniopDestroyClipDescriptor(descriptor))


if __name__ == "__main__":
    args = get_args()
    lib = open_lib()

    # Declare the ctypes signatures of the four Clip entry points so argument
    # marshalling is checked instead of defaulting to int.
    lib.infiniopCreateClipDescriptor.restype = c_int32
    lib.infiniopCreateClipDescriptor.argtypes = [
        infiniopHandle_t,
        POINTER(infiniopClipDescriptor_t),
        infiniopTensorDescriptor_t,  # y
        infiniopTensorDescriptor_t,  # x
        infiniopTensorDescriptor_t,  # min
        infiniopTensorDescriptor_t,  # max
    ]

    lib.infiniopGetClipWorkspaceSize.restype = c_int32
    lib.infiniopGetClipWorkspaceSize.argtypes = [
        infiniopClipDescriptor_t,
        POINTER(c_uint64),
    ]

    lib.infiniopClip.restype = c_int32
    lib.infiniopClip.argtypes = [
        infiniopClipDescriptor_t,
        c_void_p,  # workspace
        c_uint64,  # workspace size
        c_void_p,  # y data
        c_void_p,  # x data
        c_void_p,  # min data
        c_void_p,  # max data
        c_void_p,  # stream
    ]

    lib.infiniopDestroyClipDescriptor.restype = c_int32
    lib.infiniopDestroyClipDescriptor.argtypes = [
        infiniopClipDescriptor_t,
    ]

    # Configure testing options
    DEBUG = args.debug
    PROFILE = args.profile
    NUM_PRERUN = args.num_prerun
    NUM_ITERATIONS = args.num_iterations

    for device in get_test_devices(args):
        test_operator(lib, device, test, _TEST_CASES, _TENSOR_DTYPES)

    print("\033[92mTest passed!\033[0m")