import ctypes
from ctypes import POINTER, c_void_p, c_int32, c_uint64, Structure, byref

import sys
import os

sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..")))
from libinfiniop import (
    infiniopHandle_t,
    infiniopTensorDescriptor_t,
    open_lib,
    to_tensor,
    get_test_devices,
    check_error,
    rearrange_if_needed,
    create_workspace,
    test_operator,
    get_args,
    debug,
    profile_operation,
    InfiniDtype,
)
import torch

24
25
26
27
28
DEBUG = False
PROFILE = False
NUM_PRERUN = 10
NUM_ITERATIONS = 1000

PanZezhongQY's avatar
PanZezhongQY committed
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46

class RoPEDescriptor(Structure):
    _fields_ = [("device", c_int32)]


infiniopRoPEDescriptor_t = POINTER(RoPEDescriptor)


def reshape_for_broadcast(freqs_cis: torch.Tensor, x: torch.Tensor):
    ndim = x.ndim
    assert 0 <= 1 < ndim
    assert freqs_cis.shape == (x.shape[0], x.shape[-1])
    shape = [d if i == 0 or i == ndim - 1 else 1 for i, d in enumerate(x.shape)]
    return freqs_cis.view(*shape)


def rotary_embedding(t, pos, theta, torch_device):
    dh = t.shape[2]
47
48
49
50
51
52
53
54
55
56
57
58
59
60
    assert dh % 2 == 0, "Embedding dimension must be even."
    t_even = t[..., 0::2]  # [seq_len, n_head, dh // 2]
    t_odd = t[..., 1::2]  # [seq_len, n_head, dh // 2]
    freqs = (1.0 / (theta ** (torch.arange(0, dh, 2).float() / dh))).to(torch_device)
    freqs = torch.outer(pos, freqs)  # [seq_len, dh // 2]
    cos = torch.cos(freqs).unsqueeze(1)  # [seq_len, 1, dh // 2]
    sin = torch.sin(freqs).unsqueeze(1)  # [seq_len, 1, dh // 2]

    t_out_even = t_even * cos - t_odd * sin
    t_out_odd = t_even * sin + t_odd * cos

    t_out = torch.empty_like(t)
    t_out[..., 0::2] = t_out_even
    t_out[..., 1::2] = t_out_odd
61

PanZezhongQY's avatar
PanZezhongQY committed
62
63
    return t_out

64

PanZezhongQY's avatar
PanZezhongQY committed
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
def sin_cos_table(max_seq_len, dim, torch_device, theta):
    pos = torch.arange(
        0, max_seq_len, dtype=torch.float32, device=torch.device(torch_device)
    )
    freqs = (1.0 / (theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim))).to(
        torch_device
    )
    # (a0, a1, a2) -> (a0, a0, a1, a1, a2, a2)
    freqs = torch.repeat_interleave(freqs, repeats=2)
    angles = torch.outer(pos, freqs)
    return torch.sin(angles), torch.cos(angles)


def test(lib, handle, torch_device, shape, strides=None, dtype=torch.float16):
    print(
        f"Testing Rotary Positional Embedding on {torch_device} with shape:{shape} strides:{strides} and dtype:{dtype}"
    )

    t = torch.rand(shape, dtype=dtype)
84
85
    t = rearrange_if_needed(t, strides).to(torch_device)
    posTmp = torch.arange(0, t.shape[0]).to(torch_device)
86
    pos = torch.zeros(2 * posTmp.shape[0], dtype=torch.int32)
PanZezhongQY's avatar
PanZezhongQY committed
87
88
89
    for i in range(posTmp.shape[0]):
        pos[2 * i] = posTmp[i]
        pos[2 * i + 1] = 0
90
    pos = pos.to(torch_device)
PanZezhongQY's avatar
PanZezhongQY committed
91
    theta = 1e4
92
93

    ans = rotary_embedding(t, posTmp, theta, torch_device)
PanZezhongQY's avatar
PanZezhongQY committed
94
95
96
97
98
99

    descriptor = infiniopRoPEDescriptor_t()
    # 2x table length for test
    sin_table, cos_table = sin_cos_table(t.shape[0] * 2, t.shape[2], t.device, theta)
    t_tensor = to_tensor(t, lib)
    pos_tensor = to_tensor(pos[: t.shape[0]], lib)
100
    pos_tensor.descriptor.contents.dtype = InfiniDtype.U64
PanZezhongQY's avatar
PanZezhongQY committed
101
102
103
104
    sin_table_tensor = to_tensor(sin_table, lib)
    cos_table_tensor = to_tensor(cos_table, lib)

    if torch_device == "npu":
105
        torch.npu.synchronize()
PanZezhongQY's avatar
PanZezhongQY committed
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128

    check_error(
        lib.infiniopCreateRoPEDescriptor(
            handle,
            byref(descriptor),
            t_tensor.descriptor,
            pos_tensor.descriptor,
            sin_table_tensor.descriptor,
            cos_table_tensor.descriptor,
        )
    )

    # Invalidate the shape and strides in the descriptor to prevent them from being directly used by the kernel
    t_tensor.descriptor.contents.invalidate()
    pos_tensor.descriptor.contents.invalidate()
    sin_table_tensor.descriptor.contents.invalidate()
    cos_table_tensor.descriptor.contents.invalidate()

    workspace_size = c_uint64(0)
    check_error(
        lib.infiniopGetRoPEWorkspaceSize(descriptor, ctypes.byref(workspace_size))
    )
    workspace = create_workspace(workspace_size.value, t.device)
129
130
131
132
133
134
135
136
137
138
139
140
141

    def lib_rope():
        check_error(
            lib.infiniopRoPE(
                descriptor,
                workspace.data_ptr() if workspace is not None else None,
                workspace_size.value,
                t_tensor.data,
                pos_tensor.data,
                sin_table_tensor.data,
                cos_table_tensor.data,
                None,
            )
PanZezhongQY's avatar
PanZezhongQY committed
142
143
        )

144
145
146
    lib_rope()
    if DEBUG:
        debug(t, ans, atol=1e-4, rtol=1e-2)
PanZezhongQY's avatar
PanZezhongQY committed
147
    assert torch.allclose(t, ans, atol=1e-4, rtol=1e-2)
148
149
150
151
152
153
154
155
156
157
158
    if PROFILE:
        profile_operation(
            "PyTorch",
            lambda: rotary_embedding(t, posTmp, theta, torch_device),
            torch_device,
            NUM_PRERUN,
            NUM_ITERATIONS,
        )
        profile_operation(
            "    lib", lambda: lib_rope(), torch_device, NUM_PRERUN, NUM_ITERATIONS
        )
PanZezhongQY's avatar
PanZezhongQY committed
159

160
    check_error(lib.infiniopDestroyRoPEDescriptor(descriptor))
PanZezhongQY's avatar
PanZezhongQY committed
161

162

PanZezhongQY's avatar
PanZezhongQY committed
163
164
if __name__ == "__main__":
    test_cases = [
165
166
167
        # (t_shape, t_strides)
        ((1, 32, 128), None),
        ((1, 32, 64), None),
PanZezhongQY's avatar
PanZezhongQY committed
168
169
        # 昇腾暂不满足这个用例,最后一维度 <=32 会有问题,可能与其核心
        # 接口 GatherMask 的内部实现相关,目前 48 64 128 都可以支持
170
171
172
        ((4, 1, 32), None),
        ((1, 32, 128), None),
        ((3, 32, 128), (8000, 200, 1)),
PanZezhongQY's avatar
PanZezhongQY committed
173
    ]
174
    test_dtypes = [torch.float16]
PanZezhongQY's avatar
PanZezhongQY committed
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
    args = get_args()
    lib = open_lib()
    lib.infiniopCreateRoPEDescriptor.restype = c_int32
    lib.infiniopCreateRoPEDescriptor.argtypes = [
        infiniopHandle_t,
        POINTER(infiniopRoPEDescriptor_t),
        infiniopTensorDescriptor_t,
        infiniopTensorDescriptor_t,
        infiniopTensorDescriptor_t,
        infiniopTensorDescriptor_t,
    ]
    lib.infiniopGetRoPEWorkspaceSize.restype = c_int32
    lib.infiniopGetRoPEWorkspaceSize.argtypes = [
        infiniopRoPEDescriptor_t,
        POINTER(c_uint64),
    ]
    lib.infiniopRoPE.restype = c_int32
    lib.infiniopRoPE.argtypes = [
        infiniopRoPEDescriptor_t,
        c_void_p,
        c_uint64,
        c_void_p,
        c_void_p,
        c_void_p,
        c_void_p,
        c_void_p,
    ]
    lib.infiniopDestroyRoPEDescriptor.restype = c_int32
    lib.infiniopDestroyRoPEDescriptor.argtypes = [
        infiniopRoPEDescriptor_t,
    ]
206
207
208
209
210
211
212
213
214
    # Configure testing options
    DEBUG = args.debug
    PROFILE = args.profile
    NUM_PRERUN = args.num_prerun
    NUM_ITERATIONS = args.num_iterations

    # Execute tests
    for device in get_test_devices(args):
        test_operator(lib, device, test, test_cases, test_dtypes)
PanZezhongQY's avatar
PanZezhongQY committed
215
    print("\033[92mTest passed!\033[0m")