# Copyright (c) OpenMMLab. All rights reserved.
"""Helpers for parallel and distributed inference."""

import functools
import os

import torch
from torch.distributed import broadcast, broadcast_object_list, is_initialized


def get_local_rank():
    """Get local rank of current process.

    Assume environment variable ``LOCAL_RANK`` is properly set by some launcher.
    See: https://pytorch.org/docs/stable/elastic/run.html#environment-variables
    """  # noqa: E501

    return int(os.getenv('LOCAL_RANK', '0'))


def get_rank():
    """Get rank of current process.

    Assume environment variable ``RANK`` is properly set by some launcher.
    See: https://pytorch.org/docs/stable/elastic/run.html#environment-variables
    """  # noqa: E501

    return int(os.getenv('RANK', '0'))


def get_world_size():
    """Get rank of current process.

    Assume environment variable ``WORLD_SIZE`` is properly set by some launcher.
    See: https://pytorch.org/docs/stable/elastic/run.html#environment-variables
    """  # noqa: E501

    return int(os.getenv('WORLD_SIZE', '1'))

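# Usage sketch for the getters above (the script name is hypothetical): a
# torchrun-style launch such as ``torchrun --nproc_per_node=4 train.py``
# exports ``RANK``, ``LOCAL_RANK`` and ``WORLD_SIZE`` for every worker, so the
# helpers return per-process values. Under a plain ``python`` launch they fall
# back to rank 0, local rank 0 and world size 1.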

def master_only(func):
    """Decorator to run a function only on the master process."""

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if is_initialized():
            if get_rank() != 0:
                return None
        return func(*args, **kwargs)

    return wrapper

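# Usage sketch of ``master_only`` (the decorated function below is
# hypothetical, not part of this module):
#
#     @master_only
#     def save_checkpoint(state_dict, path):
#         torch.save(state_dict, path)
#
# With an initialized process group, only rank 0 writes the file and all other
# ranks get ``None`` back; without one, the call simply runs locally.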

def master_only_and_broadcast_general(func):
    """Decorator to run a function only on the master process and broadcast the
    result to all processes."""

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if is_initialized():
            if get_rank() == 0:
                result = [func(*args, **kwargs)]
            else:
                result = [None]
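            # ``broadcast_object_list`` pickles the object on rank 0 and
            # overwrites ``result[0]`` in place on every other rank.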
            broadcast_object_list(result, src=0)
            result = result[0]
        else:
            result = func(*args, **kwargs)
        return result

    return wrapper

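# Usage sketch of ``master_only_and_broadcast_general`` (the decorated function
# is hypothetical). The return value only needs to be picklable, so this also
# covers CPU tensors and plain Python objects:
#
#     @master_only_and_broadcast_general
#     def load_class_names(path):
#         with open(path) as f:
#             return [line.strip() for line in f]
#
# Rank 0 reads the file and every rank returns the same list, delivered via
# ``broadcast_object_list``.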

def master_only_and_broadcast_tensor(func):
    """Decorator to run a function only on the master process and broadcast the
    result to all processes.

    Note: Require CUDA tensor.
    Note: Not really work because we don't know the shape aforehand,
          for cpu tensors, use master_only_and_broadcast_general
    """

    @functools.wraps(func)
    def wrapper(*args, size, dtype, **kwargs):
        if is_initialized():
            if get_rank() == 0:
                result = func(*args, **kwargs)
            else:
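                # Non-master ranks allocate an empty receive buffer with the
                # expected shape and dtype on their local CUDA device.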
                result = torch.empty(size=size,
                                     dtype=dtype,
                                     device=get_local_rank())
            broadcast(result, src=0)
            # print(f'rank {get_rank()} received {result}')
        else:
            result = func(*args, **kwargs)
        return result

    return wrapper
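

# Usage sketch of ``master_only_and_broadcast_tensor`` (the decorated function
# and its shape are hypothetical). Callers must supply ``size`` and ``dtype``
# keyword arguments so non-master ranks can allocate a matching CUDA receive
# buffer:
#
#     @master_only_and_broadcast_tensor
#     def compute_anchors():
#         return torch.rand(100, 4, device=get_local_rank())
#
#     anchors = compute_anchors(size=(100, 4), dtype=torch.float32)
#
# Rank 0 runs the function and broadcasts the tensor; the other ranks receive
# it into the buffer allocated by the wrapper.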