# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.

3
from typing import Any, List, Union, Optional, Sequence
from torch import Tensor
import datetime
from . import rpc as rpc
from . import distributed_c10d as distributed_c10d
class Backend:
    """Names of the collective-communication backends this stub declares.

    Attribute values are the lowercase backend strings accepted by
    ``init_process_group`` (e.g. ``"gloo"``).
    """

    # Annotation-only declarations: the concrete string values live in the
    # real implementation module.
    GLOO: str
    MPI: str
    NCCL: str

class ProcessGroup:
    """Handle to one group of processes participating in collectives."""

    def size(self) -> int:
        """Total number of ranks in this group."""
        ...

    def rank(self) -> int:
        """Rank of the calling process within this group."""
        ...

class Work:
    """Handle to an in-flight collective operation (e.g. one started with
    ``async_op=True``)."""

    def wait(self) -> None:
        """Block until the underlying operation has completed."""
        ...

class ReduceOp:
    """Reduction operators usable with the reduce-style collectives below
    (``reduce``, ``all_reduce``, ``reduce_scatter``)."""

    # Annotation-only members; the concrete singletons come from the C
    # extension in the real implementation.
    SUM: ReduceOp
    PRODUCT: ReduceOp
    MIN: ReduceOp
    MAX: ReduceOp
    BAND: ReduceOp
    BOR: ReduceOp
    BXOR: ReduceOp

# NOTE(review): `group` is a process-group handle everywhere else in this
# stub, so the bare `Any` annotations here are tightened to match.
def get_rank(group: Optional["ProcessGroup"] = None) -> int:
    """Return the rank of the calling process in ``group`` (default group if None)."""
    ...

def get_world_size(group: Optional["ProcessGroup"] = None) -> int:
    """Return the number of processes in ``group`` (default group if None)."""
    ...

def get_backend(group: Optional["ProcessGroup"] = None) -> str:
    """Return the backend of ``group`` as a lowercase string (e.g. ``"gloo"``)."""
    ...

def broadcast(tensor: Tensor, src: Any, group: Optional[Any] = None, async_op: Any = False): ...
def gather(
    tensor: Tensor,
    gather_list: Optional[List[Tensor]],
    dst: Any,
    group: Optional[ProcessGroup] = None,
    async_op: Optional[bool] = False,
): ...
def reduce(
    tensor: Tensor,
    dst: int,
    op: ReduceOp = ReduceOp.SUM,  # was Optional[Any]; the default is always a ReduceOp
    group: Optional[ProcessGroup] = None,
    async_op: bool = False,
):
    """Reduce ``tensor`` across ``group`` with ``op``; only rank ``dst``
    receives the final result."""
    ...
def broadcast_object_list(object_list: List[Any], src: int = 0, group: Optional["ProcessGroup"] = None):
    """Broadcast the picklable objects in ``object_list`` from rank ``src``
    to all ranks (default ``src=0`` per the documented torch signature)."""
    ...

def is_available() -> bool:
    """Whether this torch build includes the distributed package."""
    ...

def is_initialized() -> bool:
    """Whether the default process group has been initialized."""
    ...

def is_nccl_available() -> bool:
    """Whether the NCCL backend is available in this build."""
    ...

def init_process_group(
    backend: Union[str, "Backend"],
    init_method: Optional[str] = None,
    timeout: datetime.timedelta = datetime.timedelta(0, 1800),  # 30 min, the documented default
    rank: Optional[int] = None,
    world_size: Optional[int] = None,
) -> None:
    """Initialize the default (world) process group.

    Reformatted from a single 200+ character line; ``-> None`` added since
    the call is documented to return nothing.
    """
    ...

# Scrape artifacts that were interleaved inside this signature have been
# removed; the parameters themselves are unchanged.
def new_group(
    ranks: Optional[Sequence[int]] = None,  # None => all ranks
    timeout: Optional[datetime.timedelta] = datetime.timedelta(0, 1800),
    backend: Optional[Union[str, "Backend"]] = None,
) -> "ProcessGroup":
    """Create a new process group containing ``ranks``; returns its handle
    (per the documented ``torch.distributed.new_group`` return value)."""
    ...

def all_to_all(
    output: List[Tensor],
    input: List[Tensor],
    group: Optional["ProcessGroup"] = None,  # was `group:Optional` (missing space)
    async_op: bool = False,
):
    """Scatter the list ``input`` to all ranks and gather the received
    tensors into ``output``."""
    ...

# NOTE(review): upstream names these parameters ``output_split_sizes`` /
# ``input_split_sizes``; the singular names are kept so keyword callers of
# this stub keep type-checking — confirm before renaming.
def all_to_all_single(
    output: Tensor,
    input: Tensor,
    output_split_size: Optional[List[int]] = None,
    input_split_size: Optional[List[int]] = None,
    group: Optional["ProcessGroup"] = None,
    async_op: bool = False,
):
    """Single-tensor all-to-all: splits ``input`` across ranks and writes the
    gathered pieces into ``output``."""
    ...

def all_reduce(tensor: Tensor, op: ReduceOp = ReduceOp.SUM, group:Optional[ProcessGroup] = None, async_op: bool = False): ...
def all_gather(tensor_list: List[Tensor], tensor: Tensor, group: Optional["ProcessGroup"] = None, async_op: bool = False):
    """Gather ``tensor`` from every rank in ``group`` into ``tensor_list``.
    (Fixed ``group:Optional`` spacing.)"""
    ...
def reduce_scatter(tensor: Tensor, input_list: List[Tensor], op: ReduceOp = ReduceOp.SUM, group: Optional["ProcessGroup"] = None, async_op: bool = False):
    """Reduce ``input_list`` across ranks with ``op``, then scatter: this
    rank's share of the result lands in ``tensor``.  (Fixed spacing.)"""
    ...

# These two functions takes flatten tensors directly, avoiding internal buffer allocations overheads.
def _all_gather_base(input_tensor: Tensor, output_tensor: Tensor, group:Optional[ProcessGroup] = None): ...
def _reduce_scatter_base(output_tensor: Tensor, input_tensor: Tensor, group:Optional[ProcessGroup] = None): ...
Tom Birch's avatar
Tom Birch committed
67

68
def destroy_process_group() -> None:
    """Tear down the default process group."""
    ...

def send(tensor: Tensor, dst: int, group: Optional["ProcessGroup"] = None, tag: Optional[int] = None) -> None:
    """Synchronously send ``tensor`` to rank ``dst`` (optionally matched by ``tag``)."""
    ...

# Fixed: isend is the asynchronous variant and returns a request handle to
# wait() on, not None (see torch.distributed.isend documentation).
def isend(tensor: Tensor, dst: int, group: Optional["ProcessGroup"] = None, tag: Optional[int] = None) -> "Work":
    """Asynchronously send ``tensor`` to rank ``dst``; returns a ``Work`` handle."""
    ...

def recv(
    tensor: Tensor, src: Optional[int] = None, group: Optional[ProcessGroup] = None, tag: Optional[int] = None
) -> int: ...
def irecv(
    tensor: Tensor, src: Optional[int] = None, group: Optional[ProcessGroup] = None, tag: Optional[int] = None
) -> int: ...
def _broadcast_coalesced(process_group: ProcessGroup, tensors: List[Tensor], buffer_size: int) -> None: ...
Tom Birch's avatar
Tom Birch committed
78

Mandeep Singh Baines's avatar
Mandeep Singh Baines committed
79
80
class group(object):
    """Namespace for the predefined process groups; ``WORLD`` is the group
    containing every rank."""

    WORLD: Any

class RRef: ...