random_sample.py 10.5 KB
Newer Older
xgqdut2016's avatar
xgqdut2016 committed
1
import torch
PanZezhongQY's avatar
PanZezhongQY committed
2
import ctypes
3
from ctypes import c_uint64
xgqdut2016's avatar
xgqdut2016 committed
4
from libinfiniop import (
5
6
    LIBINFINIOP,
    TestTensor,
xgqdut2016's avatar
xgqdut2016 committed
7
    get_test_devices,
PanZezhongQY's avatar
PanZezhongQY committed
8
    check_error,
xgqdut2016's avatar
xgqdut2016 committed
9
10
    test_operator,
    get_args,
xgqdut2016's avatar
xgqdut2016 committed
11
    debug_all,
xgqdut2016's avatar
xgqdut2016 committed
12
13
    get_tolerance,
    profile_operation,
14
15
16
17
18
    TestWorkspace,
    InfiniDtype,
    InfiniDtypeNames,
    InfiniDeviceNames,
    infiniopOperatorDescriptor_t,
PanZezhongQY's avatar
PanZezhongQY committed
19
20
)

xgqdut2016's avatar
xgqdut2016 committed
21
22
23
24
25
26
# ==============================================================================
#  Configuration (Internal Use Only)
# ==============================================================================
# These are not meant to be imported from other modules.
_TEST_CASES = [
    # (voc, random_val, topp, topk, temperature)
    (512, 0.8, 0.8, 3, 0.5),
    (4096, 0.05, 0.9, 5, 1.0),
    (16384, 0.15, 0.85, 10, 2.0),
    (512, 0.08, 0, 3, 0.5),
    (4096, 0.5, 0.9, 1, 1.0),
    (16384, 0.15, 0, 1, 2.0),
    # NOTE(review): duplicate of the case above — presumably an intentional
    # repeat run; confirm before removing.
    (16384, 0.15, 0, 1, 2.0),
    (32000, 0.08, 0.8, 50, 1.0),
    (32000, 0.08, 1.0, 25, 1.0),
    # (119696, 0.01, 1.0, 100, 1.0),
]

# Batch test cases: (batch_size, voc, [(random_val, topp, topk, temperature), ...])
_BATCH_TEST_CASES = [
    (4, 512, [(0.8, 0.8, 3, 0.5), (0.05, 0.9, 5, 1.0), (0.15, 0.85, 10, 2.0), (0.08, 0, 3, 0.5)]),
    (8, 4096, [(0.5, 0.9, 1, 1.0), (0.15, 0, 1, 2.0), (0.08, 0.8, 50, 1.0), (0.08, 1.0, 25, 1.0), (0.8, 0.8, 3, 0.5), (0.05, 0.9, 5, 1.0), (0.15, 0.85, 10, 2.0), (0.08, 0, 3, 0.5)]),
    (2, 16384, [(0.15, 0.85, 10, 2.0), (0.5, 0.9, 1, 1.0)]),
]

# Data types used for testing.
_TENSOR_DTYPES = [InfiniDtype.F16, InfiniDtype.BF16]

# Sampling yields an index, so the comparison is exact (zero tolerance).
_TOLERANCE_MAP = {
    InfiniDtype.F16: {"atol": 0, "rtol": 0},
    InfiniDtype.BF16: {"atol": 0, "rtol": 0},
}

# Runtime switches; overwritten from CLI arguments in __main__.
DEBUG = False
PROFILE = False
NUM_PRERUN = 10
NUM_ITERATIONS = 1000
PanZezhongQY's avatar
PanZezhongQY committed
60
61


62
def random_sample(data, random_val, topp, topk, voc, temperature):
63
    if topp > 0 and topk > 1:
64
        sorted_vals, sorted_indices = torch.sort(data, descending=True)
65

66
        scaled_vals = (sorted_vals - sorted_vals[0]) / temperature
thatPepe's avatar
thatPepe committed
67
68
69
70
71
72
73
74
        try:
            probs = torch.softmax(scaled_vals, dim=0)
        except RuntimeError as e:
            if "not implemented for 'Half'" in str(e):
                scaled_vals = scaled_vals.to(torch.float32)
                probs = torch.softmax(scaled_vals, dim=0)
            else:
                raise
75
        cum_probs = torch.cumsum(probs, dim=0)
76

77
78
        k_index = min(topk, voc) - 1
        threshold = min(cum_probs[k_index], topp) * random_val
79

80
81
82
83
84
        try:
            idx = torch.searchsorted(cum_probs, threshold)
        except Exception:
            # Fallback for manual search if torch.searchsorted is not supported
            indices = (cum_probs >= threshold).nonzero(as_tuple=True)[0]
85
86
87
88
89
            idx = (
                indices[0]
                if indices.numel() > 0
                else torch.tensor(len(cum_probs) - 1, device=cum_probs.device)
            )
90
        return sorted_indices[idx]
91

92
    return torch.argmax(data)
zhangyue's avatar
zhangyue committed
93
94


def test(
    handle,
    device,
    voc,
    random_val,
    topp,
    topk,
    temperature,
    dtype=InfiniDtype.F16,
    sync=None,
):
    """Run one RandomSample case through the library operator and check it
    against the PyTorch reference implementation.

    Args:
        handle: Library handle used to create the operator descriptor.
        device: Device enum value the test runs on.
        voc: Vocabulary size (logits length).
        random_val, topp, topk, temperature: Sampling parameters forwarded
            to both the reference and the library kernel.
        dtype: Logits dtype under test.
        sync: Optional device synchronization callback.
    """
    print(
        f"Testing RandomSample on {InfiniDeviceNames[device]} with voc:{voc} random_val:{random_val} topp:{topp} topk:{topk} temperature:{temperature} dtype:{InfiniDtypeNames[dtype]}"
    )

    # Shuffled, pairwise-distinct logits so the sampled index is unambiguous.
    shuffle = torch.randperm(voc)
    logits = TestTensor.from_torch(
        torch.arange(voc)[shuffle].float() * 0.0001, dtype, device
    )

    # Reference answer. (May be slow on device; moving data to CPU first
    # can speed the reference computation up.)
    ans = random_sample(
        logits.torch_tensor(), random_val, topp, topk, voc, temperature
    ).to(torch.int32)

    # Scalar (0-dim) output index produced by the library kernel.
    indices = TestTensor([], None, InfiniDtype.I32, device, mode="zeros")

    if sync is not None:
        sync()

    descriptor = infiniopOperatorDescriptor_t()
    check_error(
        LIBINFINIOP.infiniopCreateRandomSampleDescriptor(
            handle,
            ctypes.byref(descriptor),
            indices.descriptor,
            logits.descriptor,
        )
    )

    # Invalidate the shape and strides in the descriptor to prevent them
    # from being directly used by the kernel.
    for t in (logits, indices):
        t.destroy_desc()

    workspace_size = c_uint64(0)
    check_error(
        LIBINFINIOP.infiniopGetRandomSampleWorkspaceSize(
            descriptor, ctypes.byref(workspace_size)
        )
    )
    workspace = TestWorkspace(workspace_size.value, device)

    def lib_random_sample():
        # Invoke the library kernel; final None is the stream argument.
        check_error(
            LIBINFINIOP.infiniopRandomSample(
                descriptor,
                workspace.data(),
                workspace_size.value,
                indices.data(),
                logits.data(),
                random_val,
                topp,
                topk,
                temperature,
                None,
            )
        )

    lib_random_sample()

    if sync is not None:
        sync()

    atol, rtol = get_tolerance(_TOLERANCE_MAP, dtype)
    if DEBUG:
        debug_all(
            (indices.actual_tensor(), logits.actual_tensor()[indices.actual_tensor()]),
            (ans, logits.torch_tensor()[ans]),
            "or",
            atol=atol,
            rtol=rtol,
        )
    # Accept either the exact reference index, or a different index whose
    # logit value matches (ties are legitimate alternative answers).
    assert (
        indices.actual_tensor() == ans
        or logits.actual_tensor()[indices.actual_tensor()] == logits.torch_tensor()[ans]
    )

    # Profiling workflow
    if PROFILE:
        # fmt: off
        profile_operation("PyTorch", lambda: random_sample(
            logits.torch_tensor(), random_val, topp, topk, voc, temperature
        ), device, NUM_PRERUN, NUM_ITERATIONS)
        profile_operation("    lib", lambda: lib_random_sample(), device, NUM_PRERUN, NUM_ITERATIONS)
        # fmt: on

    check_error(LIBINFINIOP.infiniopDestroyRandomSampleDescriptor(descriptor))
PanZezhongQY's avatar
PanZezhongQY committed
192

193

194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
def test_batch(
    handle,
    device,
    batch_size,
    voc,
    params_list,
    dtype=InfiniDtype.F16,
    sync=None,
):
    """Run one batched RandomSample case through the library and compare each
    row against the PyTorch reference sampler.

    Args:
        handle: Library handle used to create the operator descriptor.
        device: Device enum value the test runs on.
        batch_size: Number of independent sampling problems in the batch.
        voc: Vocabulary size (row length) shared by all batch entries.
        params_list: One (random_val, topp, topk, temperature) tuple per row.
        dtype: Logits dtype under test.
        sync: Optional device synchronization callback.
    """
    print(
        f"Testing RandomSampleBatch on {InfiniDeviceNames[device]} with batch_size:{batch_size} voc:{voc} dtype:{InfiniDtypeNames[dtype]}"
    )

    assert len(params_list) == batch_size

    # Shuffled, pairwise-distinct logits per row so each sampled index is
    # unambiguous within its row.
    logits_list = []
    for i in range(batch_size):
        _perm = torch.randperm(voc)
        logits_list.append(torch.arange(voc)[_perm].float() * 0.0001)
    logits_batch = torch.stack(logits_list)
    logits = TestTensor.from_torch(logits_batch, dtype, device)

    # Reference answers, computed row by row with the scalar reference sampler.
    ans_list = []
    for i in range(batch_size):
        random_val, topp, topk, temperature = params_list[i]
        ans = random_sample(
            logits.torch_tensor()[i], random_val, topp, topk, voc, temperature
        ).to(torch.int32)
        ans_list.append(ans)
    ans_batch = torch.stack(ans_list)

    # One output index per batch row.
    indices = TestTensor([batch_size], None, InfiniDtype.I32, device, mode="zeros")

    if sync is not None:
        sync()

    descriptor = infiniopOperatorDescriptor_t()
    try:
        check_error(
            LIBINFINIOP.infiniopCreateRandomSampleBatchDescriptor(
                handle,
                ctypes.byref(descriptor),
                indices.descriptor,
                logits.descriptor,
            )
        )
    except Exception as e:
        # Best-effort: the batch API may not exist yet, so skip rather than fail.
        print(f"\033[93mNote: Batch descriptor creation not implemented yet: {e}\033[0m")
        print(f"  This is expected - batch interface implementation is pending")
        return

    # Invalidate the shape and strides in the descriptor to prevent them from being directly used by the kernel
    for tensor in [logits, indices]:
        tensor.destroy_desc()

    workspace_size = c_uint64(0)
    check_error(
        LIBINFINIOP.infiniopGetRandomSampleWorkspaceSize(
            descriptor, ctypes.byref(workspace_size)
        )
    )
    workspace = TestWorkspace(workspace_size.value, device)

    # Per-row sampling parameters marshalled into C arrays for the batch call.
    random_val_array = (ctypes.c_float * batch_size)(*[p[0] for p in params_list])
    topp_array = (ctypes.c_float * batch_size)(*[p[1] for p in params_list])
    topk_array = (ctypes.c_int * batch_size)(*[p[2] for p in params_list])
    temperature_array = (ctypes.c_float * batch_size)(*[p[3] for p in params_list])

    def lib_random_sample_batch():
        # Invoke the batched library kernel; final None is the stream argument.
        check_error(
            LIBINFINIOP.infiniopRandomSampleBatch(
                descriptor,
                workspace.data(),
                workspace_size.value,
                indices.data(),
                logits.data(),
                random_val_array,
                topp_array,
                topk_array,
                temperature_array,
                batch_size,
                None,
            )
        )

    lib_random_sample_batch()

    if sync is not None:
        sync()

    atol, rtol = get_tolerance(_TOLERANCE_MAP, dtype)
    if DEBUG:
        debug_all(
            (indices.actual_tensor(), logits.actual_tensor()[torch.arange(batch_size), indices.actual_tensor()]),
            (ans_batch, logits.torch_tensor()[torch.arange(batch_size), ans_batch]),
            "or",
            atol=atol,
            rtol=rtol,
        )

    # Accept either the exact reference index per row, or a different index
    # whose logit value matches (ties are legitimate alternative answers).
    actual_indices = indices.actual_tensor()
    for i in range(batch_size):
        assert (
            actual_indices[i] == ans_batch[i]
            or logits.actual_tensor()[i, actual_indices[i]] == logits.torch_tensor()[i, ans_batch[i]]
        )

    # Profiling workflow
    if PROFILE:
        # fmt: off
        def pytorch_batch():
            # Row-by-row PyTorch baseline for comparison against the batch kernel.
            results = []
            for i in range(batch_size):
                random_val, topp, topk, temperature = params_list[i]
                results.append(random_sample(
                    logits.torch_tensor()[i], random_val, topp, topk, voc, temperature
                ))
            return torch.stack(results)
        profile_operation("PyTorch", lambda: pytorch_batch(), device, NUM_PRERUN, NUM_ITERATIONS)
        profile_operation("    lib", lambda: lib_random_sample_batch(), device, NUM_PRERUN, NUM_ITERATIONS)
        # fmt: on

    # NOTE(review): the batch descriptor is released via the non-batch destroy
    # call — confirm the C API reuses infiniopDestroyRandomSampleDescriptor for
    # descriptors created by infiniopCreateRandomSampleBatchDescriptor.
    check_error(LIBINFINIOP.infiniopDestroyRandomSampleDescriptor(descriptor))


if __name__ == "__main__":
    args = get_args()

    # Propagate CLI flags into the module-level runtime switches.
    DEBUG = args.debug
    PROFILE = args.profile
    NUM_PRERUN = args.num_prerun
    NUM_ITERATIONS = args.num_iterations

    for dev in get_test_devices(args):
        # Single-sample cases first.
        test_operator(dev, test, _TEST_CASES, _TENSOR_DTYPES)

        # Batched cases are best-effort while the batch API is pending.
        print(f"\n\033[93mRunning batch tests on {InfiniDeviceNames[dev]}...\033[0m")
        try:
            test_operator(dev, test_batch, _BATCH_TEST_CASES, _TENSOR_DTYPES)
        except Exception as e:
            print(f"\033[91mBatch test failed (not implemented yet): {e}\033[0m")
            print("  This is expected - batch interface implementation is pending")

    print("\033[92mTest passed!\033[0m")