# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmengine.registry import MODELS
from torch import Tensor
from torch import distributed as dist
from torch import nn as nn
from torch.autograd.function import Function

class AllReduce(Function):
    """Differentiable sum over all distributed workers.

    The forward pass returns the element-wise sum of ``input`` across
    every process in the default group; the backward pass all-reduces
    the incoming gradient so each worker receives the total gradient.
    """

    @staticmethod
    def forward(ctx, input: Tensor) -> Tensor:
        world_size = dist.get_world_size()
        # Collect one copy of ``input`` from every rank. ``all_gather``
        # is used here because in-place ``all_reduce`` operations are
        # unreliable for this use case.
        gathered = [torch.zeros_like(input) for _ in range(world_size)]
        dist.all_gather(gathered, input, async_op=False)
        # Fold the per-rank copies into a single summed tensor.
        return torch.stack(gathered, dim=0).sum(dim=0)

    @staticmethod
    def backward(ctx, grad_output: Tensor) -> Tensor:
        # Sum gradients from all workers so every rank sees the total.
        dist.all_reduce(grad_output, async_op=False)
        return grad_output


@MODELS.register_module('naiveSyncBN1d')
class NaiveSyncBatchNorm1d(nn.BatchNorm1d):
    """Synchronized Batch Normalization for 2D/3D tensors.

    Note:
        This implementation is adapted from detectron2
        (https://github.com/facebookresearch/detectron2/).

        ``torch.nn.SyncBatchNorm`` has known bugs: it produces a
        significantly worse AP (and sometimes NaNs) when the batch size
        on each worker differs, e.g. when scale augmentation is used.
        In 3D detection, different workers see point clouds of
        different shapes, which causes the same instability.

        Use this implementation until ``nn.SyncBatchNorm`` is fixed.
        It is slower than ``nn.SyncBatchNorm``.
    """

    def __init__(self, *args: list, **kwargs: dict) -> None:
        super(NaiveSyncBatchNorm1d, self).__init__(*args, **kwargs)

    def forward(self, input: Tensor) -> Tensor:
        """Normalize ``input`` with statistics synchronized over workers.

        Args:
            input (Tensor): Has shape (N, C) or (N, C, L), where N is
                the batch size, C is the number of features or channels
                and L is the sequence length.

        Returns:
            Tensor: Has shape (N, C) or (N, C, L), same shape as input.
        """
        assert input.dtype == torch.float32, \
            f'input should be in float32 type, got {input.dtype}'
        using_dist = dist.is_available() and dist.is_initialized()
        # With a single worker (or at inference time) the plain
        # BatchNorm1d behavior is already what we want.
        if not using_dist or dist.get_world_size() == 1 \
                or not self.training:
            return super().forward(input)

        assert input.shape[0] > 0, 'SyncBN does not support empty inputs'
        need_unsqueeze = input.dim() == 2
        if need_unsqueeze:
            # View (N, C) as (N, C, 1) so both layouts share one path.
            input = input.unsqueeze(2)

        num_channels = input.shape[1]
        # Local per-channel first and second moments.
        mean = input.mean(dim=[0, 2])
        meansqr = (input * input).mean(dim=[0, 2])

        # Sum both moments across workers with a single collective call,
        # then turn the sums into averages.
        combined = torch.cat([mean, meansqr], dim=0)
        combined = AllReduce.apply(combined) * (1.0 / dist.get_world_size())
        mean, meansqr = torch.split(combined, num_channels)

        var = meansqr - mean * mean
        # Exponential moving average of the synchronized statistics.
        self.running_mean += self.momentum * (
            mean.detach() - self.running_mean)
        self.running_var += self.momentum * (var.detach() - self.running_var)

        invstd = torch.rsqrt(var + self.eps)
        scale = self.weight * invstd
        shift = self.bias - mean * scale
        scale = scale.reshape(1, -1, 1)
        shift = shift.reshape(1, -1, 1)

        output = input * scale + shift
        return output.squeeze(2) if need_unsqueeze else output


@MODELS.register_module('naiveSyncBN2d')
class NaiveSyncBatchNorm2d(nn.BatchNorm2d):
    """Synchronized Batch Normalization for 4D tensors.

    Note:
        This implementation is adapted from detectron2
        (https://github.com/facebookresearch/detectron2/).

        ``torch.nn.SyncBatchNorm`` has known bugs: it produces a
        significantly worse AP (and sometimes NaNs) when the batch size
        on each worker differs, e.g. when scale augmentation is used.
        The same phenomenon occurs when the multi-modality feature
        fusion modules of multi-modality detectors use SyncBN.

        Use this implementation until ``nn.SyncBatchNorm`` is fixed.
        It is slower than ``nn.SyncBatchNorm``.
    """

    def __init__(self, *args: list, **kwargs: dict) -> None:
        super(NaiveSyncBatchNorm2d, self).__init__(*args, **kwargs)

    def forward(self, input: Tensor) -> Tensor:
        """Normalize ``input`` with statistics synchronized over workers.

        Args:
            input (Tensor): Feature map of shape (N, C, H, W).

        Returns:
            Tensor: Has shape (N, C, H, W), same shape as input.
        """
        assert input.dtype == torch.float32, \
            f'input should be in float32 type, got {input.dtype}'
        using_dist = dist.is_available() and dist.is_initialized()
        # With a single worker (or at inference time) the plain
        # BatchNorm2d behavior is already what we want.
        if not using_dist or dist.get_world_size() == 1 \
                or not self.training:
            return super().forward(input)

        assert input.shape[0] > 0, 'SyncBN does not support empty inputs'
        num_channels = input.shape[1]
        # Local per-channel first and second moments.
        mean = input.mean(dim=[0, 2, 3])
        meansqr = (input * input).mean(dim=[0, 2, 3])

        # Sum both moments across workers with a single collective call,
        # then turn the sums into averages.
        combined = torch.cat([mean, meansqr], dim=0)
        combined = AllReduce.apply(combined) * (1.0 / dist.get_world_size())
        mean, meansqr = torch.split(combined, num_channels)

        var = meansqr - mean * mean
        # Exponential moving average of the synchronized statistics.
        self.running_mean += self.momentum * (
            mean.detach() - self.running_mean)
        self.running_var += self.momentum * (var.detach() - self.running_var)

        invstd = torch.rsqrt(var + self.eps)
        scale = self.weight * invstd
        shift = self.bias - mean * scale
        scale = scale.reshape(1, -1, 1, 1)
        shift = shift.reshape(1, -1, 1, 1)
        return input * scale + shift