from typing import List, Tuple

import torch

if torch.version.hip is not None:
    # ROCm (HIP) build: custom allreduce ops with explicit IPC handle/offset
    # buffer registration.
    def init_custom_ar(
        meta: torch.Tensor,
        rank_data: torch.Tensor,
        handles: List[str],
        offsets: List[int],
        rank: int,
        full_nvlink: bool,
    ) -> int:
        # Returns an opaque context handle consumed by the other ops below.
        return torch.ops.sgl_kernel.init_custom_ar.default(
            meta, rank_data, handles, offsets, rank, full_nvlink
        )

    def all_reduce_reg(fa: int, inp: torch.Tensor, out: torch.Tensor) -> None:
        # Allreduce over an input that lives in a registered buffer.
        torch.ops.sgl_kernel.all_reduce_reg.default(fa, inp, out)

    def all_reduce_unreg(
        fa: int, inp: torch.Tensor, reg_buffer: torch.Tensor, out: torch.Tensor
    ) -> None:
        # Allreduce for an unregistered input, staged through a registered buffer.
        torch.ops.sgl_kernel.all_reduce_unreg.default(fa, inp, reg_buffer, out)

    def dispose(fa: int) -> None:
        # Destroys the context created by init_custom_ar.
        torch.ops.sgl_kernel.dispose.default(fa)

    def meta_size() -> int:
        # Size in bytes of the allreduce metadata region.
        return torch.ops.sgl_kernel.meta_size.default()

    def register_buffer(
        fa: int, t: torch.Tensor, handles: List[str], offsets: List[int]
    ) -> None:
        # Registers a tensor (shared across ranks via IPC handles) with the context.
        torch.ops.sgl_kernel.register_buffer.default(fa, t, handles, offsets)

    def get_graph_buffer_ipc_meta(fa: int) -> Tuple[torch.Tensor, List[int]]:
        # IPC metadata for buffers recorded during graph capture.
        return torch.ops.sgl_kernel.get_graph_buffer_ipc_meta.default(fa)

    def register_graph_buffers(
        fa: int, handles: List[str], offsets: List[List[int]]
    ) -> None:
        # Registers the graph-capture buffers gathered from all ranks.
        torch.ops.sgl_kernel.register_graph_buffers.default(fa, handles, offsets)

    def allocate_meta_buffer(size: int) -> torch.Tensor:
        # Allocates a device buffer suitable for use as `meta` in init_custom_ar.
        return torch.ops.sgl_kernel.allocate_meta_buffer.default(size)

    def get_meta_buffer_ipc_handle(inp: torch.Tensor) -> torch.Tensor:
        # Returns the IPC handle (as a byte tensor) for a meta buffer.
        return torch.ops.sgl_kernel.get_meta_buffer_ipc_handle.default(inp)
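
    # Hypothetical usage sketch (not part of the original file): a plausible
    # call order for the ROCm path, assuming IPC handles and offsets have been
    # exchanged across ranks out of band (e.g. via torch.distributed). The
    # variable names and the extra `buffer_bytes` term are illustrative
    # assumptions, not documented requirements of sgl_kernel.
    #
    #     meta = allocate_meta_buffer(meta_size() + buffer_bytes)
    #     handle = get_meta_buffer_ipc_handle(meta)
    #     # ... all-gather handles/offsets from every rank ...
    #     fa = init_custom_ar(meta, rank_data, handles, offsets, rank, full_nvlink)
    #     register_buffer(fa, t, handles, offsets)
    #     all_reduce_reg(fa, inp, out)
    #     dispose(fa)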

else:
    # CUDA build: TRTLLM custom allreduce.
    def init_custom_reduce(
        rank_id, num_devices, rank_data, buffers, tmp_buffers, barrier_in, barrier_out
    ):
        # Returns an opaque context handle consumed by the ops below.
        return torch.ops.sgl_kernel.init_custom_ar.default(
            rank_id,
            num_devices,
            rank_data,
            buffers,
            tmp_buffers,
            barrier_in,
            barrier_out,
        )

    def custom_dispose(fa):
        # Destroys the context created by init_custom_reduce.
        torch.ops.sgl_kernel.dispose.default(fa)

    def custom_reduce(fa, inp, out):
        # Allreduces `inp` across ranks and writes the result into `out`.
        torch.ops.sgl_kernel.all_reduce.default(fa, inp, out)

    def get_graph_buffer_ipc_meta(fa):
        # IPC metadata for buffers recorded during graph capture.
        return torch.ops.sgl_kernel.get_graph_buffer_ipc_meta.default(fa)

    def register_graph_buffers(fa, handles, offsets):
        # Registers the graph-capture buffers gathered from all ranks.
        torch.ops.sgl_kernel.register_graph_buffers.default(fa, handles, offsets)
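
    # Hypothetical usage sketch (not part of the original file): the expected
    # call order on the CUDA path, assuming the caller has already created the
    # IPC-shared data/barrier buffers on every rank. All names here are
    # illustrative assumptions rather than a documented sgl_kernel API contract.
    #
    #     fa = init_custom_reduce(rank_id, num_devices, rank_data,
    #                             buffers, tmp_buffers, barrier_in, barrier_out)
    #     custom_reduce(fa, inp, out)  # `out` receives the reduced `inp`
    #     custom_dispose(fa)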