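"""Thin Python wrappers around the custom allreduce ops registered by sgl_kernel.

The ROCm (HIP) build exposes IPC-handle based custom and quick allreduce entry
points; the CUDA build exposes a pointer-based custom allreduce plus mscclpp
bindings. The matching set of wrappers is selected at import time.
"""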
from typing import List, Optional, Tuple

import torch

if torch.version.hip is not None:
    # ROCM custom allreduce
    def init_custom_ar(
        meta: torch.Tensor,
        rank_data: torch.Tensor,
        handles: List[str],
        offsets: List[int],
        rank: int,
        full_nvlink: bool,
    ) -> int:
        return torch.ops.sgl_kernel.init_custom_ar.default(
            meta, rank_data, handles, offsets, rank, full_nvlink
        )

    def all_reduce_reg(fa: int, inp: torch.Tensor, out: torch.Tensor) -> None:
        torch.ops.sgl_kernel.all_reduce_reg.default(fa, inp, out)

    def all_reduce_unreg(
        fa: int, inp: torch.Tensor, reg_buffer: torch.Tensor, out: torch.Tensor
    ) -> None:
        torch.ops.sgl_kernel.all_reduce_unreg.default(fa, inp, reg_buffer, out)

    def dispose(fa: int) -> None:
        torch.ops.sgl_kernel.dispose.default(fa)

    def meta_size() -> int:
        return torch.ops.sgl_kernel.meta_size.default()

    def register_buffer(
        fa: int, t: torch.Tensor, handles: List[str], offsets: List[int]
    ) -> None:
        return torch.ops.sgl_kernel.register_buffer.default(fa, t, handles, offsets)

    def get_graph_buffer_ipc_meta(fa: int) -> Tuple[torch.Tensor, List[int]]:
        return torch.ops.sgl_kernel.get_graph_buffer_ipc_meta.default(fa)

    def register_graph_buffers(
        fa: int, handles: List[str], offsets: List[List[int]]
    ) -> None:
        torch.ops.sgl_kernel.register_graph_buffers.default(fa, handles, offsets)

    def allocate_meta_buffer(size: int) -> torch.Tensor:
        return torch.ops.sgl_kernel.allocate_meta_buffer.default(size)

    def get_meta_buffer_ipc_handle(inp: torch.Tensor) -> torch.Tensor:
        return torch.ops.sgl_kernel.get_meta_buffer_ipc_handle.default(inp)
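
    # Rough lifecycle sketch for the ROCm custom allreduce wrappers above
    # (IPC handle/offset exchange across ranks is caller-specific and elided;
    # names and sizes are illustrative):
    #   meta = allocate_meta_buffer(meta_size() + buffer_bytes)
    #   fa = init_custom_ar(meta, rank_data, handles, offsets, rank, full_nvlink)
    #   register_buffer(fa, buf, handles, offsets)
    #   all_reduce_reg(fa, inp, out)                 # inp is a registered buffer
    #   all_reduce_unreg(fa, inp, reg_buffer, out)   # inp staged via reg_buffer
    #   dispose(fa)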

    # ROCM quick allreduce
    def init_custom_qr(
        rank: int, world_size: int, qr_max_size: Optional[int] = None
    ) -> int:
        return torch.ops.sgl_kernel.init_custom_qr.default(
            world_size, rank, qr_max_size
        )

    def qr_get_handle(fa: int) -> torch.Tensor:
        return torch.ops.sgl_kernel.qr_get_handle.default(fa)

    def qr_open_handles(fa: int, handles: List[torch.Tensor]) -> None:
        torch.ops.sgl_kernel.qr_open_handles.default(fa, handles)

    def qr_all_reduce(
        fa: int,
        profile: int,
        inp: torch.Tensor,
        out: torch.Tensor,
        cast_bf162half: bool,
    ) -> None:
        torch.ops.sgl_kernel.qr_all_reduce.default(
            fa, profile, inp, out, cast_bf162half
        )

    def qr_destroy(fa: int) -> None:
        torch.ops.sgl_kernel.qr_destroy.default(fa)

    def qr_max_size() -> int:
        return torch.ops.sgl_kernel.qr_max_size.default()
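
    # Rough lifecycle sketch for the quick allreduce wrappers above (handle
    # exchange across ranks is caller-specific and elided):
    #   fa = init_custom_qr(rank, world_size)
    #   handle = qr_get_handle(fa)         # share with peer ranks out of band
    #   qr_open_handles(fa, peer_handles)
    #   qr_all_reduce(fa, profile, inp, out, cast_bf162half)
    #   qr_destroy(fa)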

    # mscclpp
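    # The mscclpp path is not wired up in the ROCm build; these stubs keep the
    # module surface consistent across builds and raise NotImplementedError if
    # called.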
    def mscclpp_generate_unique_id() -> bytes:
        raise NotImplementedError()

    def mscclpp_init_context(
        unique_id: bytes,
        rank: int,
        world_size: int,
        scratch: torch.Tensor,
        put_buffer: torch.Tensor,
        nranks_per_node: int,
        rank_to_node: List[int],
        rank_to_ib: List[int],
        context_selection: int,
    ) -> int:
        raise NotImplementedError()

    def mscclpp_allreduce(
        context: int, inp: torch.Tensor, out: torch.Tensor, nthreads: int, nblocks: int
    ) -> None:
        raise NotImplementedError()

else:

    def init_custom_ar(
        ipc_tensors: List[int], rank_data: torch.Tensor, rank: int, full_nvlink: bool
    ) -> int:
        return torch.ops.sgl_kernel.init_custom_ar.default(
            ipc_tensors, rank_data, rank, full_nvlink
        )

    def dispose(fa: int) -> None:
        torch.ops.sgl_kernel.dispose.default(fa)

    def all_reduce(
        fa: int,
        inp: torch.Tensor,
        out: torch.Tensor,
        reg_buffer: int,
        reg_buffer_sz_bytes: int,
    ) -> None:
        torch.ops.sgl_kernel.all_reduce.default(
            fa, inp, out, reg_buffer, reg_buffer_sz_bytes
        )

    def get_graph_buffer_ipc_meta(fa: int) -> Tuple[List[int], List[int]]:
        return torch.ops.sgl_kernel.get_graph_buffer_ipc_meta.default(fa)

    def register_buffer(fa: int, fake_ipc_ptrs: List[int]) -> None:
        return torch.ops.sgl_kernel.register_buffer.default(fa, fake_ipc_ptrs)

    def register_graph_buffers(
        fa: int, handles: List[List[int]], offsets: List[List[int]]
    ) -> None:
        torch.ops.sgl_kernel.register_graph_buffers.default(fa, handles, offsets)

    def meta_size() -> int:
        return torch.ops.sgl_kernel.meta_size.default()
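
    # Rough lifecycle sketch for the CUDA custom allreduce wrappers above
    # (IPC pointer exchange across ranks is caller-specific and elided):
    #   fa = init_custom_ar(ipc_tensors, rank_data, rank, full_nvlink)
    #   register_buffer(fa, fake_ipc_ptrs)
    #   all_reduce(fa, inp, out, reg_buffer, reg_buffer_sz_bytes)
    #   dispose(fa)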

    def mscclpp_generate_unique_id() -> torch.Tensor:
        return torch.ops.sgl_kernel.mscclpp_generate_unique_id.default()

    def mscclpp_init_context(
        unique_id: torch.Tensor,
        rank: int,
        world_size: int,
        scratch: torch.Tensor,
        put_buffer: torch.Tensor,
        nranks_per_node: int,
        rank_to_node: List[int],
        rank_to_ib: List[int],
        context_selection: int,
    ) -> int:
        return torch.ops.sgl_kernel.mscclpp_init_context.default(
            unique_id,
            rank,
            world_size,
            scratch,
            put_buffer,
            nranks_per_node,
            rank_to_node,
            rank_to_ib,
            context_selection,
        )

    def mscclpp_allreduce(
        context: int, inp: torch.Tensor, out: torch.Tensor, nthreads: int, nblocks: int
    ) -> None:
        torch.ops.sgl_kernel.mscclpp_allreduce.default(
            context, inp, out, nthreads, nblocks
        )
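
    # Rough mscclpp call sequence (unique-id distribution across ranks is
    # caller-specific and elided):
    #   uid = mscclpp_generate_unique_id()   # on one rank, then broadcast
    #   ctx = mscclpp_init_context(uid, rank, world_size, scratch, put_buffer,
    #                              nranks_per_node, rank_to_node, rank_to_ib,
    #                              context_selection)
    #   mscclpp_allreduce(ctx, inp, out, nthreads, nblocks)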