allreduce.py
from typing import List, Tuple

import torch

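# Thin Python wrappers over the custom allreduce ops registered by sgl_kernel.
# The ROCm and CUDA builds register kernels with different signatures, so the
# matching branch below is selected once at import time.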
if torch.version.hip is not None:
    # ROCm custom allreduce
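    # Builds the custom allreduce context from the shared meta buffer, the
    # peers' IPC handles, and their offsets; the returned int is an opaque
    # handle that the other functions take as `fa`.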
    def init_custom_ar(
        meta: torch.Tensor,
        rank_data: torch.Tensor,
        handles: List[str],
        offsets: List[int],
        rank: int,
        full_nvlink: bool,
    ) -> int:
        return torch.ops.sgl_kernel.init_custom_ar.default(
            meta, rank_data, handles, offsets, rank, full_nvlink
        )

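    # Allreduce on an input buffer previously registered via register_buffer.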
    def all_reduce_reg(fa: int, inp: torch.Tensor, out: torch.Tensor) -> None:
        torch.ops.sgl_kernel.all_reduce_reg.default(fa, inp, out)

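    # Allreduce on an unregistered input, staged through the pre-registered
    # `reg_buffer`.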
    def all_reduce_unreg(
        fa: int, inp: torch.Tensor, reg_buffer: torch.Tensor, out: torch.Tensor
    ) -> None:
        torch.ops.sgl_kernel.all_reduce_unreg.default(fa, inp, reg_buffer, out)

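    # Frees the context created by init_custom_ar.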
    def dispose(fa: int) -> None:
        torch.ops.sgl_kernel.dispose.default(fa)

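    # Number of bytes required for the `meta` buffer of init_custom_ar.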
    def meta_size() -> int:
        return torch.ops.sgl_kernel.meta_size.default()

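    # Registers a tensor (together with the peers' IPC handles and offsets)
    # so it can later be reduced with all_reduce_reg.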
    def register_buffer(
        fa: int, t: torch.Tensor, handles: List[str], offsets: List[int]
    ) -> None:
        return torch.ops.sgl_kernel.register_buffer.default(fa, t, handles, offsets)

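    # IPC metadata of the buffers recorded during graph capture, to be
    # exchanged with peer ranks.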
    def get_graph_buffer_ipc_meta(fa: int) -> Tuple[torch.Tensor, List[int]]:
        return torch.ops.sgl_kernel.get_graph_buffer_ipc_meta.default(fa)

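    # Registers the graph-capture buffers gathered from all peers.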
    def register_graph_buffers(
        fa: int, handles: List[str], offsets: List[List[int]]
    ) -> None:
        torch.ops.sgl_kernel.register_graph_buffers.default(fa, handles, offsets)

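    # Allocates a buffer suitable for use as the `meta` argument above.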
    def allocate_meta_buffer(size: int) -> torch.Tensor:
        return torch.ops.sgl_kernel.allocate_meta_buffer.default(size)

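    # IPC handle of a buffer from allocate_meta_buffer, for sharing with peers.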
    def get_meta_buffer_ipc_handle(inp: torch.Tensor) -> torch.Tensor:
        return torch.ops.sgl_kernel.get_meta_buffer_ipc_handle.default(inp)

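    # The MSCCL++ path is not available in the ROCm build; these stubs only
    # keep the module surface identical across both branches.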
    def mscclpp_generate_unique_id() -> bytes:
        raise NotImplementedError()

    def mscclpp_init_context(
        unique_id: bytes,
        rank: int,
        world_size: int,
        scratch: torch.Tensor,
        put_buffer: torch.Tensor,
        nranks_per_node: int,
        rank_to_node: List[int],
        rank_to_ib: List[int],
        context_selection: int,
    ) -> int:
        raise NotImplementedError()

    def mscclpp_allreduce(
        context: int, inp: torch.Tensor, out: torch.Tensor, nthreads: int, nblocks: int
    ) -> None:
        raise NotImplementedError()

else:
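    # CUDA custom allreduce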
    def init_custom_ar(
        ipc_tensors: List[int], rank_data: torch.Tensor, rank: int, full_nvlink: bool
    ) -> int:
        return torch.ops.sgl_kernel.init_custom_ar.default(
            ipc_tensors, rank_data, rank, full_nvlink
        )

    def dispose(fa: int) -> None:
        torch.ops.sgl_kernel.dispose.default(fa)

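    # Out-of-place allreduce; judging by the signature, inputs that are not
    # registered with the context are staged through the registered scratch
    # buffer at device pointer `reg_buffer` (`reg_buffer_sz_bytes` bytes).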
    def all_reduce(
        fa: int,
        inp: torch.Tensor,
        out: torch.Tensor,
        reg_buffer: int,
        reg_buffer_sz_bytes: int,
    ) -> None:
        torch.ops.sgl_kernel.all_reduce.default(
            fa, inp, out, reg_buffer, reg_buffer_sz_bytes
        )

    def get_graph_buffer_ipc_meta(fa: int) -> Tuple[List[int], List[int]]:
        return torch.ops.sgl_kernel.get_graph_buffer_ipc_meta.default(fa)

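    # Registers peer buffers with the context via their exchanged IPC pointers
    # (passed as plain ints).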
    def register_buffer(fa: int, fake_ipc_ptrs: List[int]) -> None:
        return torch.ops.sgl_kernel.register_buffer.default(fa, fake_ipc_ptrs)

    def register_graph_buffers(
        fa: int, handles: List[List[int]], offsets: List[List[int]]
    ) -> None:
        torch.ops.sgl_kernel.register_graph_buffers.default(fa, handles, offsets)

    def meta_size() -> int:
        return torch.ops.sgl_kernel.meta_size.default()

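    # MSCCL++-backed allreduce: one rank generates a unique id, every rank
    # builds a context from it, and the collective is launched with an
    # explicit thread/block configuration.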
    def mscclpp_generate_unique_id() -> torch.Tensor:
        return torch.ops.sgl_kernel.mscclpp_generate_unique_id.default()

    def mscclpp_init_context(
        unique_id: torch.Tensor,
        rank: int,
        world_size: int,
        scratch: torch.Tensor,
        put_buffer: torch.Tensor,
        nranks_per_node: int,
        rank_to_node: List[int],
        rank_to_ib: List[int],
        context_selection: int,
    ) -> int:
        return torch.ops.sgl_kernel.mscclpp_init_context.default(
            unique_id,
            rank,
            world_size,
            scratch,
            put_buffer,
            nranks_per_node,
            rank_to_node,
            rank_to_ib,
            context_selection,
        )

    def mscclpp_allreduce(
        context: int, inp: torch.Tensor, out: torch.Tensor, nthreads: int, nblocks: int
    ) -> None:
        torch.ops.sgl_kernel.mscclpp_allreduce.default(
            context, inp, out, nthreads, nblocks
        )
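
# Rough lifecycle sketch for the CUDA branch (hypothetical values; the real
# orchestration, including the inter-rank exchange of IPC handles, lives in
# the caller):
#
#   fa = init_custom_ar(ipc_tensors, rank_data, rank, full_nvlink)
#   register_buffer(fa, fake_ipc_ptrs)
#   all_reduce(fa, inp, out, reg_buffer, reg_buffer_sz_bytes)
#   dispose(fa)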