from typing import Sequence
import torch
import ctypes
import numpy as np
from .datatypes import *
from .devices import *
from .liboperators import infiniopTensorDescriptor_t, LIBINFINIOP, infiniopHandle_t


def check_error(status):
    if status != 0:
        raise Exception(f"Error code {status}")


class CTensor:
    def __init__(self, dt: InfiniDtype, shape, strides):
        self.descriptor = infiniopTensorDescriptor_t()
        self.dt = dt
        self.ndim = len(shape)
        if strides is None:
            strides = [1 for _ in shape]
            for i in range(self.ndim - 2, -1, -1):
                strides[i] = strides[i + 1] * shape[i + 1]

        assert self.ndim == len(strides)
        self.c_shape = (ctypes.c_size_t * self.ndim)(*shape)
        self.c_strides = (ctypes.c_ssize_t * self.ndim)(*strides)

        LIBINFINIOP.infiniopCreateTensorDescriptor(
            ctypes.byref(self.descriptor),
            self.ndim,
            self.c_shape,
            self.c_strides,
            self.dt,
        )

    def destroy_desc(self):
        if self.descriptor is not None:
            LIBINFINIOP.infiniopDestroyTensorDescriptor(self.descriptor)
            self.descriptor = None
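
# Example (a minimal sketch): a descriptor for a contiguous 2x3 tensor. Passing
# strides=None lets __init__ derive the row-major strides [3, 1].
#
#   desc = CTensor(InfiniDtype.F32, (2, 3), None)
#   ...  # pass desc.descriptor to the InfiniOp C API
#   desc.destroy_desc()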


class TestTensor(CTensor):
    def __init__(
        self,
        shape,
        strides,
        dt: InfiniDtype,
        device: InfiniDeviceEnum,
        mode="random",
        scale=None,
        bias=None,
        set_tensor=None,
        randint_low=None,
        randint_high=None,
    ):
        self.dt = dt
        self.device = device
        self.shape = shape
        self.strides = strides
        torch_shape = []
        torch_strides = [] if strides is not None else None
        for i in range(len(shape)):
            if strides is not None and strides[i] == 0:
                torch_shape.append(1)
                torch_strides.append(1)
            elif strides is not None and strides[i] != 0:
                torch_shape.append(shape[i])
                torch_strides.append(strides[i])
            else:
                torch_shape.append(shape[i])
        if mode == "random":
            # For integer types, use randint instead of rand
            if dt in [
                InfiniDtype.I8,
                InfiniDtype.I16,
                InfiniDtype.I32,
                InfiniDtype.I64,
                InfiniDtype.U8,
                InfiniDtype.U16,
                InfiniDtype.U32,
                InfiniDtype.U64,
                InfiniDtype.BYTE,
                InfiniDtype.BOOL,
            ]:
                randint_low = -2000000000 if randint_low is None else randint_low
                randint_high = 2000000000 if randint_high is None else randint_high
                self._torch_tensor = torch.randint(
                    randint_low,
                    randint_high,
                    torch_shape,
                    dtype=to_torch_dtype(dt),
                    device=torch_device_map[device],
                )
            else:
                self._torch_tensor = torch.rand(
                    torch_shape,
                    dtype=to_torch_dtype(dt),
                    device=torch_device_map[device],
                )
        elif mode == "zeros":
            self._torch_tensor = torch.zeros(
                torch_shape, dtype=to_torch_dtype(dt), device=torch_device_map[device]
            )
        elif mode == "ones":
            self._torch_tensor = torch.ones(
                torch_shape, dtype=to_torch_dtype(dt), device=torch_device_map[device]
            )
        elif mode == "randint":
            randint_low = -2000000000 if randint_low is None else randint_low
            randint_high = 2000000000 if randint_high is None else randint_high
            self._torch_tensor = torch.randint(
                randint_low,
                randint_high,
                torch_shape,
                dtype=to_torch_dtype(dt),
                device=torch_device_map[device],
            )
        elif mode == "float8_e4m3fn":
            self._torch_tensor = torch.rand(
                shape, dtype=torch.float32, device=torch_device_map[device]
            ).to(dtype=torch.float8_e4m3fn)
        elif mode == "manual":
            assert set_tensor is not None
            assert torch_shape == list(set_tensor.shape)
            assert torch_strides == list(set_tensor.stride())
            self._torch_tensor = set_tensor.to(to_torch_dtype(dt)).to(
                torch_device_map[device]
            )
        elif mode == "binary":
            assert set_tensor is not None
            assert torch_shape == list(set_tensor.shape)
            self._torch_tensor = set_tensor.to(to_torch_dtype(dt)).to(
                torch_device_map[device]
            )
        else:
            raise ValueError("Unsupported mode")

        if scale is not None:
            self._torch_tensor *= scale
        if bias is not None:
            self._torch_tensor += bias

        if strides is not None and mode != "binary":
            self._data_tensor = rearrange_tensor(self._torch_tensor, torch_strides)
        else:
            self._data_tensor = self._torch_tensor.clone()

        super().__init__(self.dt, shape, strides)

    def torch_tensor(self):
        return self._torch_tensor

    def actual_tensor(self):
        return self._data_tensor

    def data(self):
        return self._data_tensor.data_ptr()

    def is_broadcast(self):
        return self.strides is not None and 0 in self.strides

    @staticmethod
    def from_binary(
        binary_file, shape, strides, dt: InfiniDtype, device: InfiniDeviceEnum
    ):
        data = np.fromfile(binary_file, dtype=to_numpy_dtype(dt))
        base = torch.from_numpy(data)
        torch_tensor = torch.as_strided(base, size=shape, stride=strides).to(
            torch_device_map[device]
        )
        return TestTensor(
            shape, strides, dt, device, mode="binary", set_tensor=torch_tensor
        )

    @staticmethod
    def from_torch(torch_tensor, dt: InfiniDtype, device: InfiniDeviceEnum):
        shape_ = list(torch_tensor.shape)
        strides_ = list(torch_tensor.stride())
        return TestTensor(
            shape_, strides_, dt, device, mode="manual", set_tensor=torch_tensor
        )

    def update_torch_tensor(self, new_tensor: torch.Tensor):
        self._torch_tensor = new_tensor
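
# Example (a minimal sketch): a zero stride marks a broadcast dimension, so the
# torch reference holds shape (1, 3) values while the descriptor advertises (2, 3).
#
#   x = TestTensor((2, 3), (0, 1), InfiniDtype.F32, InfiniDeviceEnum.CPU)
#   x.is_broadcast()  # True
#   x.torch_tensor()  # reference values for the torch-side computation
#   x.data()          # raw data pointer handed to the InfiniOp C API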


def to_torch_dtype(dt: InfiniDtype, compatability_mode=False):
    if dt == InfiniDtype.BOOL:
        return torch.bool
    elif dt == InfiniDtype.BYTE:
        return torch.uint8
    elif dt == InfiniDtype.I8:
        return torch.int8
    elif dt == InfiniDtype.I16:
        return torch.int16
    elif dt == InfiniDtype.I32:
        return torch.int32
    elif dt == InfiniDtype.I64:
        return torch.int64
    elif dt == InfiniDtype.U8:
        return torch.uint8
    elif dt == InfiniDtype.F16:
        return torch.float16
    elif dt == InfiniDtype.BF16:
        return torch.bfloat16
    elif dt == InfiniDtype.F32:
        return torch.float32
    elif dt == InfiniDtype.F64:
        return torch.float64
    # TODO: The following types may not be supported by older
    # versions of PyTorch. Use compatibility mode to convert them.
    elif dt == InfiniDtype.U16:
        return torch.int16 if compatability_mode else torch.uint16
    elif dt == InfiniDtype.U32:
        return torch.int32 if compatability_mode else torch.uint32
    elif dt == InfiniDtype.U64:
        return torch.int64 if compatability_mode else torch.uint64
    elif dt == InfiniDtype.F8:
        return torch.float8_e4m3fn
    else:
        raise ValueError("Unsupported data type")


def to_numpy_dtype(dt: InfiniDtype, compatability_mode=False):
    if dt == InfiniDtype.I8:
        return np.int8
    elif dt == InfiniDtype.I16:
        return np.int16
    elif dt == InfiniDtype.I32:
        return np.int32
    elif dt == InfiniDtype.I64:
        return np.int64
    elif dt == InfiniDtype.U8:
        return np.uint8
    elif dt == InfiniDtype.U16:
        return np.uint16 if not compatability_mode else np.int16
    elif dt == InfiniDtype.U32:
        return np.uint32 if not compatability_mode else np.int32
    elif dt == InfiniDtype.U64:
        return np.uint64 if not compatability_mode else np.int64
    elif dt == InfiniDtype.F16:
        return np.float16
    elif dt == InfiniDtype.BF16:
        # Some numpy builds expose np.dtype("bfloat16") (e.g. when an extension
        # such as ml_dtypes is installed), but many environments lack it, so
        # compatibility mode falls back to float32.
        return np.dtype("bfloat16") if not compatability_mode else np.float32
    elif dt == InfiniDtype.F32:
        return np.float32
    elif dt == InfiniDtype.F64:
        return np.float64
    else:
        raise ValueError("Unsupported data type")


class TestWorkspace:
    def __init__(self, size, device):
        if size != 0:
            self.tensor = TestTensor((size,), None, InfiniDtype.U8, device, mode="ones")
        else:
            self.tensor = None
        self._size = size

    def data(self):
        if self.tensor is not None:
            return self.tensor.data()
        else:
            return None

    def size(self):
        return ctypes.c_uint64(self._size)
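
# Example (illustrative): allocate the scratch space an operator reports it needs
# (assume `workspace_size` is a ctypes integer queried from a hypothetical
# per-operator GetWorkspaceSize call) and pass pointer plus size to the op.
#
#   workspace = TestWorkspace(workspace_size.value, device)
#   op(..., workspace.data(), workspace.size(), ...)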


def create_handle():
    handle = infiniopHandle_t()
    check_error(LIBINFINIOP.infiniopCreateHandle(ctypes.byref(handle)))
    return handle


def destroy_handle(handle):
    check_error(LIBINFINIOP.infiniopDestroyHandle(handle))


def rearrange_tensor(tensor, new_strides):
    """
    Given a PyTorch tensor and a list of new strides, return a new PyTorch tensor with the given strides.
    """

    shape = tensor.shape

    new_size = [0] * len(shape)
    left = 0
    right = 0
    for i in range(len(shape)):
        if new_strides[i] >= 0:
            new_size[i] = (shape[i] - 1) * new_strides[i] + 1
            right += new_strides[i] * (shape[i] - 1)
        else:  # TODO: Support negative strides in the future
            # new_size[i] = (shape[i] - 1) * (-new_strides[i]) + 1
            # left += new_strides[i] * (shape[i] - 1)
            raise ValueError("Negative strides are not supported yet")

    # Create a new tensor with zeros
    new_tensor = torch.zeros(
        (right - left + 1,), dtype=tensor.dtype, device=tensor.device
    )

    # Generate indices for original tensor based on original strides
    indices = [torch.arange(s) for s in shape]
    mesh = torch.meshgrid(*indices, indexing="ij")

    # Flatten indices for linear indexing
    linear_indices = [m.flatten() for m in mesh]

    # Calculate new positions based on new strides
    new_positions = sum(
        linear_indices[i] * new_strides[i] for i in range(len(shape))
    ).to(tensor.device)
    offset = -left
    new_positions += offset

    # Copy the original data to the new tensor
    if tensor.dtype in [
        torch.bool,
        torch.uint8,
        torch.int8,
        torch.int16,
        torch.int32,
        torch.int64,
        torch.float16,
        torch.bfloat16,
        torch.float32,
        torch.float64,
    ]:
        new_tensor.view(-1).index_add_(0, new_positions, tensor.view(-1))
    elif tensor.dtype in [torch.uint16, torch.uint32, torch.uint64]:
        new_tensor_int64 = new_tensor.to(dtype=torch.int64)
        tensor_int64 = tensor.to(dtype=torch.int64)
        new_tensor_int64.view(-1).index_add_(0, new_positions, tensor_int64.view(-1))
        new_tensor = new_tensor_int64.to(dtype=tensor.dtype)
    elif tensor.dtype in [torch.float8_e4m3fn]:
        new_tensor_float64 = new_tensor.to(dtype=torch.float64)
        tensor_float64 = tensor.to(dtype=torch.float64)
        new_tensor_float64.view(-1).index_add_(
            0, new_positions, tensor_float64.view(-1)
        )
        new_tensor = new_tensor_float64.to(dtype=tensor.dtype)
    else:
        raise ValueError("Unsupported data type")

    new_tensor.set_(new_tensor.untyped_storage(), offset, shape, tuple(new_strides))

    return new_tensor
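
# Example (a quick check of the function above): materialize a 2x3 tensor with
# column-major strides; the values are preserved while the layout changes.
#
#   t = torch.arange(6, dtype=torch.float32).reshape(2, 3)
#   u = rearrange_tensor(t, [1, 2])
#   assert u.stride() == (1, 2) and torch.equal(u, t)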


def get_args():
    import argparse

    parser = argparse.ArgumentParser(description="Test Operator")
    parser.add_argument(
        "--profile",
        action="store_true",
        help="Whether profile tests",
    )
    parser.add_argument(
        "--num_prerun",
        type=lambda x: max(0, int(x)),
        default=10,
        help="Set the number of pre-runs before profiling. Default is 10. Must be a non-negative integer.",
    )
    parser.add_argument(
        "--num_iterations",
        type=lambda x: max(0, int(x)),
        default=1000,
        help="Set the number of iterations for profiling. Default is 1000. Must be a non-negative integer.",
    )
    parser.add_argument(
        "--debug",
        action="store_true",
        help="Whether to turn on debug mode. If turned on, it will display detailed information about the tensors and discrepancies.",
    )
    parser.add_argument(
        "--cpu",
        action="store_true",
        help="Run CPU test",
    )
    parser.add_argument(
        "--nvidia",
        action="store_true",
        help="Run NVIDIA GPU test",
    )
    parser.add_argument(
        "--iluvatar",
        action="store_true",
        help="Run Iluvatar GPU test",
    )
    parser.add_argument(
        "--qy",
        action="store_true",
        help="Run Qy GPU test",
    )
    parser.add_argument(
        "--cambricon",
        action="store_true",
        help="Run Cambricon MLU test",
    )
    parser.add_argument(
        "--ascend",
        action="store_true",
        help="Run ASCEND NPU test",
    )
    parser.add_argument(
        "--metax",
        action="store_true",
        help="Run METAX GPU test",
    )
    parser.add_argument(
        "--moore",
        action="store_true",
        help="Run MTHREADS GPU test",
    )
    parser.add_argument(
        "--kunlun",
        action="store_true",
        help="Run KUNLUN XPU test",
    )
    parser.add_argument(
        "--hygon",
        action="store_true",
        help="Run HYGON DCU test",
    )

    return parser.parse_args()


def synchronize_device(torch_device):
    if torch_device == "cuda":
        torch.cuda.synchronize()
    elif torch_device == "npu":
        torch.npu.synchronize()
    elif torch_device == "mlu":
        torch.mlu.synchronize()
    elif torch_device == "musa":
        torch.musa.synchronize()


def debug(actual, desired, atol=0, rtol=1e-2, equal_nan=False, verbose=True):
    """
    Debugging function to compare two tensors (actual and desired) and print discrepancies.
    Arguments:
    ----------
    - actual : The tensor containing the actual computed values.
    - desired : The tensor containing the expected values that `actual` should be compared to.
    - atol : optional (default=0)
        The absolute tolerance for the comparison.
    - rtol : optional (default=1e-2)
        The relative tolerance for the comparison.
    - equal_nan : bool, optional (default=False)
        If True, `NaN` values in `actual` and `desired` will be considered equal.
    - verbose : bool, optional (default=True)
        If True, the function will print detailed information about any discrepancies between the tensors.
    """

    # If either tensor is BF16, convert both to FP32 before comparing
    if actual.dtype == torch.bfloat16 or desired.dtype == torch.bfloat16:
        actual = actual.to(torch.float32)
        desired = desired.to(torch.float32)

    print_discrepancy(actual, desired, atol, rtol, equal_nan, verbose)
    np.testing.assert_allclose(
        actual.cpu(), desired.cpu(), rtol, atol, equal_nan, verbose=True
    )
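
# Example (illustrative; `DEBUG`, `y`, and `y_ref` are assumed to be defined by
# the calling test script):
#
#   if DEBUG:
#       debug(y.actual_tensor(), y_ref, atol=atol, rtol=rtol)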


def filter_tensor_dtypes_by_device(device, tensor_dtypes):
    if device in (
        InfiniDeviceEnum.CPU,
        InfiniDeviceEnum.NVIDIA,
        InfiniDeviceEnum.METAX,
        InfiniDeviceEnum.ASCEND,
        InfiniDeviceEnum.ILUVATAR,
        InfiniDeviceEnum.CAMBRICON,
    ):
        return tensor_dtypes
    else:
        # Filter out torch.bfloat16
        return [dt for dt in tensor_dtypes if dt != torch.bfloat16]


def debug_all(
    actual_vals: Sequence,
    desired_vals: Sequence,
    condition: str,
    atol=0,
    rtol=1e-2,
    equal_nan=False,
    verbose=True,
):
    """
    Debugging function to compare two sequences of values (actual and desired) pair
    by pair; the per-pair results are combined by the given logical condition, and
    any discrepancies are printed.
    Arguments:
    ----------
    - actual_vals (Sequence): A sequence (e.g., list or tuple) of actual computed values.
    - desired_vals (Sequence): A sequence (e.g., list or tuple) of desired (expected) values to compare against.
    - condition (str): A string specifying the condition for passing the test. It must be either:
        - 'or': Test passes if any pair of actual and desired values satisfies the tolerance criteria.
        - 'and': Test passes if all pairs of actual and desired values satisfy the tolerance criteria.
    - atol (float, optional): Absolute tolerance. Default is 0.
    - rtol (float, optional): Relative tolerance. Default is 1e-2.
    - equal_nan (bool, optional): If True, NaN values in both actual and desired are considered equal. Default is False.
    - verbose (bool, optional): If True, detailed output is printed for each comparison. Default is True.
    Raises:
    ----------
    - AssertionError: If the condition is not satisfied based on the provided `condition`, `atol`, and `rtol`.
    - ValueError: If the length of `actual_vals` and `desired_vals` do not match.
    - AssertionError: If the specified `condition` is not 'or' or 'and'.
    """
    assert len(actual_vals) == len(desired_vals), "Invalid Length"
    assert condition in {
        "or",
        "and",
    }, "Invalid condition: should be either 'or' or 'and'"

    passed = False if condition == "or" else True

    for index, (actual, desired) in enumerate(zip(actual_vals, desired_vals)):
        if actual.dtype == torch.bfloat16 or desired.dtype == torch.bfloat16:
            actual = actual.to(torch.float32)
            desired = desired.to(torch.float32)
        print(f" \033[36mCondition #{index + 1}:\033[0m {actual} == {desired}")
        indices = print_discrepancy(actual, desired, atol, rtol, equal_nan, verbose)
        if condition == "or":
            if not passed and len(indices) == 0:
                passed = True
        elif condition == "and":
            if passed and len(indices) != 0:
                passed = False
                print(
                    f"\033[31mThe condition has not been satisfied: Condition #{index + 1}\033[0m"
                )
            np.testing.assert_allclose(
                actual.cpu(),
                desired.cpu(),
                rtol,
                atol,
                equal_nan,
                verbose=True,
                strict=True,
            )
    assert passed, "\033[31mThe condition has not been satisfied\033[0m"
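
# Example (illustrative): accept a kernel whose output may legitimately match
# either of two references, e.g. two valid reduction orders.
#
#   debug_all([out, out], [ref_a, ref_b], "or", atol=1e-3, rtol=1e-2)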


def print_discrepancy(
    actual, expected, atol=0, rtol=1e-3, equal_nan=True, verbose=True
):
    if actual.shape != expected.shape:
        raise ValueError("Tensors must have the same shape to compare.")

    import sys

    is_terminal = sys.stdout.isatty()

    actual = actual.to("cpu")
    expected = expected.to("cpu")

    actual_isnan = torch.isnan(actual)
    expected_isnan = torch.isnan(expected)

    # Calculate the difference mask based on atol and rtol
    nan_mismatch = (
        actual_isnan ^ expected_isnan if equal_nan else actual_isnan | expected_isnan
    )

    diff_mask = nan_mismatch | (
        torch.abs(actual.to(dtype=torch.float64) - expected.to(dtype=torch.float64))
        > (atol + rtol * torch.abs(expected.to(dtype=torch.float64)))
    )
    diff_indices = torch.nonzero(diff_mask, as_tuple=False)
    delta = actual.to(dtype=torch.float64) - expected.to(dtype=torch.float64)

    # Display format: widths for columns
    col_width = [18, 20, 20, 20]
    decimal_places = [0, 12, 12, 12]
    total_width = sum(col_width) + sum(decimal_places)

    def add_color(text, color_code):
        if is_terminal:
            return f"\033[{color_code}m{text}\033[0m"
        else:
            return text

    if verbose:
        for idx in diff_indices:
            index_tuple = tuple(idx.tolist())
            actual_str = f"{actual[index_tuple]:<{col_width[1]}.{decimal_places[1]}f}"
            expected_str = (
                f"{expected[index_tuple]:<{col_width[2]}.{decimal_places[2]}f}"
            )
            delta_str = f"{delta[index_tuple]:<{col_width[3]}.{decimal_places[3]}f}"
            print(
                f" > Index: {str(index_tuple):<{col_width[0]}}"
                f"actual: {add_color(actual_str, 31)}"
                f"expect: {add_color(expected_str, 32)}"
                f"delta: {add_color(delta_str, 33)}"
            )

        print(add_color(" INFO:", 35))
        print(f"  - Actual dtype: {actual.dtype}")
        print(f"  - Desired dtype: {expected.dtype}")
        print(f"  - Atol: {atol}")
        print(f"  - Rtol: {rtol}")
        print(
            f"  - Mismatched elements: {len(diff_indices)} / {actual.numel()} ({len(diff_indices) / actual.numel() * 100}%)"
        )
        print(
            f"  - Min(actual) : {torch.min(actual):<{col_width[1]}} | Max(actual) : {torch.max(actual):<{col_width[2]}}"
        )
        print(
            f"  - Min(desired): {torch.min(expected):<{col_width[1]}} | Max(desired): {torch.max(expected):<{col_width[2]}}"
        )
        print(
            f"  - Min(delta)  : {torch.min(delta):<{col_width[1]}} | Max(delta)  : {torch.max(delta):<{col_width[2]}}"
        )
        print("-" * total_width + "\n")

    return diff_indices


def get_tolerance(tolerance_map, tensor_dtype, default_atol=0, default_rtol=1e-3):
    """
    Returns the atol and rtol for a given tensor data type in the tolerance_map.
    If the given data type is not found, it returns the provided default tolerance values.
    """
    return tolerance_map.get(
        tensor_dtype, {"atol": default_atol, "rtol": default_rtol}
    ).values()
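
# Example (illustrative; the map itself is defined by each operator test):
#
#   _TOLERANCE_MAP = {InfiniDtype.F16: {"atol": 0, "rtol": 1e-2}}
#   atol, rtol = get_tolerance(_TOLERANCE_MAP, InfiniDtype.F16)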


def timed_op(func, num_iterations, device):
    """Function for timing operations with synchronization."""
    import time

    synchronize_device(device)
    start = time.time()
    for _ in range(num_iterations):
        func()
    synchronize_device(device)
    return (time.time() - start) / num_iterations


def profile_operation(desc, func, torch_device, NUM_PRERUN, NUM_ITERATIONS):
    """
    Unified profiling workflow used to measure the execution time of a given function.
    It first performs a number of warmup runs, then performs timed execution and
    prints the average execution time.

    Arguments:
    ----------
    - desc (str): Description of the operation, used for output display.
    - func (callable): The operation function to be profiled.
    - torch_device (str): The device on which the operation runs, provided for timed execution.
    - NUM_PRERUN (int): The number of warmup runs.
    - NUM_ITERATIONS (int): The number of timed execution iterations, used to calculate the average execution time.
    """
    # Warmup runs
    for _ in range(NUM_PRERUN):
        func()

    # Timed execution
    elapsed = timed_op(lambda: func(), NUM_ITERATIONS, torch_device)
    print(f" {desc} time: {elapsed * 1000:6f} ms")


def test_operator(device, test_func, test_cases, tensor_dtypes):
    """
    Testing a specified operator on the given device with the given test function, test cases, and tensor data types.

    Arguments:
    ----------
    - device (InfiniDeviceEnum): The device on which the operator should be tested. See device.py.
    - test_func (function): The test function to be executed for each test case.
    - test_cases (list of tuples): A list of test cases, where each test case is a tuple of parameters
        to be passed to `test_func`.
    - tensor_dtypes (list): A list of tensor data types (e.g., `torch.float32`) to test.
    """
    LIBINFINIOP.infinirtSetDevice(device, ctypes.c_int(0))
    handle = create_handle()
    tensor_dtypes = filter_tensor_dtypes_by_device(device, tensor_dtypes)
    try:
        for test_case in test_cases:
            for tensor_dtype in tensor_dtypes:
                test_func(
                    handle,
                    device,
                    *test_case,
                    tensor_dtype,
                    get_sync_func(device),
                )
    finally:
        destroy_handle(handle)
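
# Typical driver (illustrative; `test` and the case/dtype lists are defined by
# the individual operator test script):
#
#   if __name__ == "__main__":
#       args = get_args()
#       for device in get_test_devices(args):
#           test_operator(device, test, _TEST_CASES, _TENSOR_DTYPES)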


def get_test_devices(args):
    """
    Determine the devices to test from the given parsed argument Namespace.

    Argument:
    - args: the parsed Namespace object.

    Return:
    - devices_to_test: the devices that will be tested. Default is CPU.
    """
    devices_to_test = []

    if args.cpu:
        devices_to_test.append(InfiniDeviceEnum.CPU)
    if args.nvidia:
        devices_to_test.append(InfiniDeviceEnum.NVIDIA)
    if args.iluvatar:
        devices_to_test.append(InfiniDeviceEnum.ILUVATAR)
    if args.qy:
        devices_to_test.append(InfiniDeviceEnum.QY)
    if args.cambricon:
        import torch_mlu

        devices_to_test.append(InfiniDeviceEnum.CAMBRICON)
    if args.ascend:
        import torch_npu

        torch.npu.set_device(0)  # Ascend NPU needs explicit device initialization
        devices_to_test.append(InfiniDeviceEnum.ASCEND)
    if args.metax:
        devices_to_test.append(InfiniDeviceEnum.METAX)
    if args.moore:
        import torch_musa

        devices_to_test.append(InfiniDeviceEnum.MOORE)
    if args.kunlun:
        import torch_xmlir

        devices_to_test.append(InfiniDeviceEnum.KUNLUN)
    if args.hygon:
        devices_to_test.append(InfiniDeviceEnum.HYGON)
    if not devices_to_test:
        devices_to_test = [InfiniDeviceEnum.CPU]

    return devices_to_test


def get_sync_func(device):
    if device == InfiniDeviceEnum.CPU or device == InfiniDeviceEnum.CAMBRICON:
        sync = None
    else:
        sync = getattr(torch, torch_device_map[device]).synchronize

    return sync