##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Hang Zhang
## ECE Department, Rutgers University
## Email: zhang.hang@rutgers.edu
## Copyright (c) 2017
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

"""Encoding Custermized NN Module"""
import torch
from torch.nn import Module, Sequential, Conv2d, ReLU, AdaptiveAvgPool2d, \
    NLLLoss, BCELoss, CrossEntropyLoss, AvgPool2d, MaxPool2d, Parameter
from torch.nn import functional as F
from torch.autograd import Variable
from .syncbn import BatchNorm2d

torch_ver = torch.__version__[:3]

__all__ = ['GramMatrix', 'SegmentationLosses', 'View', 'Sum', 'Mean',
           'Normalize']

class GramMatrix(Module):
    r""" Gram Matrix for a 4D convolutional featuremaps as a mini-batch

    .. math::
        \mathcal{G} = \sum_{h=1}^{H_i}\sum_{w=1}^{W_i} \mathcal{F}_{h,w}\mathcal{F}_{h,w}^T
    """
    def forward(self, y):
        b, ch, h, w = y.size()
        # flatten the spatial dimensions: (B, C, H*W)
        features = y.view(b, ch, w * h)
        features_t = features.transpose(1, 2)
        # batched outer product, normalized by the number of elements
        gram = features.bmm(features_t) / (ch * h * w)
        return gram
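
# A minimal usage sketch (shapes are illustrative): a (B, C, H, W) feature map
# yields a (B, C, C) Gram matrix, e.g.
#   gram = GramMatrix()(torch.randn(2, 64, 32, 32))  # -> torch.Size([2, 64, 64])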

def softmax_crossentropy(input, target, weight, size_average, ignore_index, reduce=True):
    return F.nll_loss(F.log_softmax(input, 1), target, weight,
                      size_average, ignore_index, reduce)
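# Note: F.nll_loss(F.log_softmax(input, 1), ...) is the standard decomposition of
# F.cross_entropy; the helper keeps the softmax dimension (dim=1) explicit.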

class SegmentationLosses(CrossEntropyLoss):
    """2D Cross Entropy Loss with Auxilary Loss"""
    def __init__(self, se_loss=False, se_weight=0.1, nclass=-1,
                 aux=False, aux_weight=0.2, weight=None,
                 size_average=True, ignore_index=-1):
        super(SegmentationLosses, self).__init__(weight, size_average, ignore_index)
        self.se_loss = se_loss
        self.aux = aux
        self.nclass = nclass
        self.se_weight = se_weight
        self.aux_weight = aux_weight
        self.bceloss = BCELoss(weight, size_average)

    def forward(self, *inputs):
        # plain cross-entropy: inputs are (pred, target)
        if not self.se_loss and not self.aux:
            return super(SegmentationLosses, self).forward(*inputs)
        elif not self.se_loss:
            # auxiliary loss only: inputs are (main_pred, aux_pred, target)
            pred1, pred2, target = tuple(inputs)
            loss1 = super(SegmentationLosses, self).forward(pred1, target)
            loss2 = super(SegmentationLosses, self).forward(pred2, target)
            return loss1 + self.aux_weight * loss2
        elif not self.aux:
            # SE loss only: inputs are (main_pred, se_pred, target)
            pred, se_pred, target = tuple(inputs)
            se_target = self._get_batch_label_vector(target, nclass=self.nclass).type_as(pred)
            loss1 = super(SegmentationLosses, self).forward(pred, target)
            loss2 = self.bceloss(F.sigmoid(se_pred), se_target)
            return loss1 + self.se_weight * loss2
        else:
            # both losses: inputs are (main_pred, se_pred, aux_pred, target)
            pred1, se_pred, pred2, target = tuple(inputs)
            se_target = self._get_batch_label_vector(target, nclass=self.nclass).type_as(pred1)
            loss1 = super(SegmentationLosses, self).forward(pred1, target)
            loss2 = super(SegmentationLosses, self).forward(pred2, target)
            loss3 = self.bceloss(F.sigmoid(se_pred), se_target)
            return loss1 + self.aux_weight * loss2 + self.se_weight * loss3

    @staticmethod
    def _get_batch_label_vector(target, nclass):
        # target is a 3D Variable of shape BxHxW; output is a 2D BxnClass
        # multi-hot vector marking which classes appear in each image
        batch = target.size(0)
        tvect = Variable(torch.zeros(batch, nclass))
        for i in range(batch):
            hist = torch.histc(target[i].cpu().data.float(),
                               bins=nclass, min=0,
                               max=nclass - 1)
            vect = hist > 0
            tvect[i] = vect
        return tvect
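
# A minimal usage sketch (nclass and input shapes are illustrative assumptions):
#   criterion = SegmentationLosses(se_loss=True, aux=True, nclass=21)
#   loss = criterion(pred1, se_pred, pred2, target)  # argument order matches forward()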


class View(Module):
    """Reshape the input to a given size without copying the underlying data
    (a thin wrapper around :meth:`torch.Tensor.view`); supports SelfParallel mode.
    """
    def __init__(self, *args):
        super(View, self).__init__()
        if len(args) == 1 and isinstance(args[0], torch.Size):
            self.size = args[0]
        else:
            self.size = torch.Size(args)

    def forward(self, input):
        return input.view(self.size)
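
# Usage sketch (shapes are illustrative): Sequential(AvgPool2d(4), View(-1, 16))
# turns a (B, 16, 4, 4) input into (B, 16).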


class Sum(Module):
    """Sum over a given dimension, wrapped as a Module (usable inside Sequential)."""
    def __init__(self, dim, keep_dim=False):
        super(Sum, self).__init__()
        self.dim = dim
        self.keep_dim = keep_dim

    def forward(self, input):
        return input.sum(self.dim, self.keep_dim)
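
# Usage sketch (values are illustrative): Sum(dim=1)(torch.ones(2, 3)) returns a
# shape-(2,) tensor of threes.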


class Mean(Module):
    """Mean over a given dimension, wrapped as a Module (usable inside Sequential)."""
    def __init__(self, dim, keep_dim=False):
        super(Mean, self).__init__()
        self.dim = dim
        self.keep_dim = keep_dim

    def forward(self, input):
        return input.mean(self.dim, self.keep_dim)
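
# Usage sketch (shapes are illustrative): Mean(dim=2) averages a (B, C, N) tensor
# over its last axis, e.g. as a global pooling step after a View.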


class Normalize(Module):
    r"""Performs :math:`L_p` normalization of inputs over specified dimension.

    Does:

    .. math::
        v = \frac{v}{\max(\lVert v \rVert_p, \epsilon)}

    for each subtensor v over dimension dim of input. Each subtensor is
    flattened into a vector, i.e. :math:`\lVert v \rVert_p` is not a matrix
    norm.

    With default arguments normalizes over the second dimension with Euclidean
    norm.

    Args:
        p (float): the exponent value in the norm formulation. Default: 2
        dim (int): the dimension to reduce. Default: 1
    """
    def __init__(self, p=2, dim=1):
        super(Normalize, self).__init__()
        self.p = p
        self.dim = dim

    def forward(self, x):
        return F.normalize(x, self.p, self.dim, eps=1e-10)
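
# Usage sketch (shapes are illustrative): Normalize(p=2, dim=1) rescales each row
# of a (B, D) tensor to unit L2 norm, e.g. Normalize()(torch.randn(8, 128)).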