# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
import paddle.nn as nn
import paddle.nn.functional as F

from paddle.nn import L1Loss
from paddle.nn import MSELoss as L2Loss
from paddle.nn import SmoothL1Loss


class CELoss(nn.Layer):
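    """
    Cross entropy loss with optional label smoothing.

    When `epsilon` lies in (0, 1), labels are smoothed and the loss is the
    sum of `-label * log_softmax(x)` over the class axis; otherwise
    `F.cross_entropy` is used, treating the label as a soft label when it
    already spans the class dimension.
    """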
    def __init__(self, epsilon=None):
        super().__init__()
        # fall back to plain cross entropy when epsilon is outside (0, 1)
        if epsilon is not None and (epsilon <= 0 or epsilon >= 1):
            epsilon = None
        self.epsilon = epsilon

    def _labelsmoothing(self, target, class_num):
        if target.shape[-1] != class_num:
            one_hot_target = F.one_hot(target, class_num)
        else:
            one_hot_target = target
        soft_target = F.label_smooth(one_hot_target, epsilon=self.epsilon)
        soft_target = paddle.reshape(soft_target, shape=[-1, class_num])
        return soft_target

    def forward(self, x, label):
        if self.epsilon is not None:
            class_num = x.shape[-1]
            label = self._labelsmoothing(label, class_num)
            x = -F.log_softmax(x, axis=-1)
            loss = paddle.sum(x * label, axis=-1)
        else:
            if label.shape[-1] == x.shape[-1]:
                label = F.softmax(label, axis=-1)
                soft_label = True
            else:
                soft_label = False
            loss = F.cross_entropy(x, label=label, soft_label=soft_label)
        return loss


class KLJSLoss(object):
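    """
    KL / JS style divergence between two probability maps.

    mode "kl": KL(p2 || p1), smoothed by small epsilons.
    mode "js": the two KL directions averaged (a symmetric variant).
    """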
    def __init__(self, mode='kl'):
        assert mode in ['kl', 'js', 'KL', 'JS'
                        ], "mode can only be one of ['kl', 'js', 'KL', 'JS']"
        self.mode = mode

    def __call__(self, p1, p2, reduction="mean"):

        # KL(p2 || p1), with small epsilons added for numerical stability
        loss = paddle.multiply(p2, paddle.log((p2 + 1e-5) / (p1 + 1e-5) + 1e-5))

        # "js" mode: symmetrize by averaging the two KL directions
        if self.mode.lower() == "js":
            loss += paddle.multiply(
                p1, paddle.log((p1 + 1e-5) / (p2 + 1e-5) + 1e-5))
            loss *= 0.5
        # reduce over axes 1 and 2 (inputs are expected to be at least 3-D)
        if reduction == "mean":
            loss = paddle.mean(loss, axis=[1, 2])
        elif reduction == "none" or reduction is None:
            return loss
        else:
            loss = paddle.sum(loss, axis=[1, 2])

        return loss


class DMLLoss(nn.Layer):
    """
    DMLLoss (deep mutual learning loss).

    Measures the divergence between the outputs of two branches: with
    `use_log=True` the symmetric KL divergence of the outputs is used
    (recognition distillation); otherwise the JS-style KLJSLoss is used
    (detection distillation).
    """

    def __init__(self, act=None, use_log=False):
        super().__init__()
        if act is not None:
            assert act in ["softmax", "sigmoid"]
        if act == "softmax":
            self.act = nn.Softmax(axis=-1)
        elif act == "sigmoid":
            self.act = nn.Sigmoid()
        else:
            self.act = None

        self.use_log = use_log
        self.jskl_loss = KLJSLoss(mode="js")

    def _kldiv(self, x, target):
        eps = 1.0e-10
        loss = target * (paddle.log(target + eps) - x)
        # batch mean loss
        loss = paddle.sum(loss) / loss.shape[0]
        return loss

    def forward(self, out1, out2):
        if self.act is not None:
            out1 = self.act(out1) + 1e-10
            out2 = self.act(out2) + 1e-10
        if self.use_log:
            # for recognition distillation, log is needed for feature map
            log_out1 = paddle.log(out1)
            log_out2 = paddle.log(out2)
            loss = (
                self._kldiv(log_out1, out2) + self._kldiv(log_out2, out1)) / 2.0
        else:
            # for detection distillation, log is not needed
            loss = self.jskl_loss(out1, out2)
        return loss


class DistanceLoss(nn.Layer):
    """
    DistanceLoss: element-wise distance between two tensors.
        mode: one of "l1", "l2" (MSE) or "smooth_l1"
    """

    def __init__(self, mode="l2", **kargs):
        super().__init__()
        assert mode in ["l1", "l2", "smooth_l1"]
        if mode == "l1":
            self.loss_func = nn.L1Loss(**kargs)
        elif mode == "l2":
            self.loss_func = nn.MSELoss(**kargs)
        elif mode == "smooth_l1":
            self.loss_func = nn.SmoothL1Loss(**kargs)

    def forward(self, x, y):
        return self.loss_func(x, y)


class LossFromOutput(nn.Layer):
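    """
    Returns a loss that the model itself already computed: picks it from the
    prediction dict by `key` and optionally reduces it with 'mean' or 'sum'.
    """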
    def __init__(self, key='loss', reduction='none'):
        super().__init__()
        self.key = key
        self.reduction = reduction

    def forward(self, predicts, batch):
        loss = predicts[self.key]
        if self.reduction == 'mean':
            loss = paddle.mean(loss)
        elif self.reduction == 'sum':
            loss = paddle.sum(loss)
        return {'loss': loss}
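

if __name__ == "__main__":
    # Minimal usage sketch; the shapes, class count, and epsilon below are
    # illustrative only.
    logits = paddle.randn([4, 10])
    labels = paddle.randint(0, 10, shape=[4])

    # Label-smoothed cross entropy, returned per sample.
    print(CELoss(epsilon=0.1)(logits, labels))

    # Mutual-distillation loss between two sets of logits.
    print(DMLLoss(act="softmax", use_log=True)(paddle.randn([4, 10]),
                                               paddle.randn([4, 10])))

    # Plain L2 distance between two feature tensors.
    print(DistanceLoss(mode="l2")(paddle.rand([4, 32]), paddle.rand([4, 32])))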