# TODO merge naive and weighted loss.
import numpy as np
import torch
import torch.nn.functional as F

from ..bbox import bbox_overlaps
from ...ops import sigmoid_focal_loss


def weighted_nll_loss(pred, label, weight, avg_factor=None):
    if avg_factor is None:
        avg_factor = max(torch.sum(weight > 0).float().item(), 1.)
    raw = F.nll_loss(pred, label, reduction='none')
    return torch.sum(raw * weight)[None] / avg_factor
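# Usage sketch (illustrative shapes, not from the original file). Note that
# F.nll_loss expects log-probabilities rather than raw logits:
#   log_prob = F.log_softmax(torch.randn(4, 3), dim=1)  # (N, C) log-probs
#   label = torch.tensor([0, 2, 1, 0])                  # (N,) class indices
#   weight = torch.ones(4)                              # per-sample weights
#   loss = weighted_nll_loss(log_prob, label, weight)   # tensor of shape (1,)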


def weighted_cross_entropy(pred, label, weight, avg_factor=None, reduce=True):
    if avg_factor is None:
        avg_factor = max(torch.sum(weight > 0).float().item(), 1.)
    raw = F.cross_entropy(pred, label, reduction='none')
    if reduce:
        return torch.sum(raw * weight)[None] / avg_factor
    else:
        return raw * weight / avg_factor
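# Usage sketch (illustrative shapes). Unlike weighted_nll_loss above, this
# takes raw logits, since F.cross_entropy applies log_softmax internally:
#   pred = torch.randn(4, 3)                     # (N, C) raw logits
#   label = torch.tensor([0, 2, 1, 0])           # (N,) class indices
#   weight = torch.ones(4)                       # per-sample weights
#   loss = weighted_cross_entropy(pred, label, weight)         # shape (1,)
#   per_sample = weighted_cross_entropy(
#       pred, label, weight, reduce=False)       # unreduced, shape (N,)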


def weighted_binary_cross_entropy(pred, label, weight, avg_factor=None):
    if pred.dim() != label.dim():
        label, weight = _expand_binary_labels(label, weight, pred.size(-1))
    if avg_factor is None:
        avg_factor = max(torch.sum(weight > 0).float().item(), 1.)
    return F.binary_cross_entropy_with_logits(
        pred, label.float(), weight.float(),
        reduction='sum')[None] / avg_factor
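# Usage sketch (illustrative). When `label` has one dimension fewer than
# `pred`, it is treated as 1-based class indices (0 = background) and is
# expanded to binary form by _expand_binary_labels further below:
#   pred = torch.randn(6, 4)                     # (N, C) logits
#   label = torch.tensor([0, 1, 4, 2, 0, 3])     # 0 = bg, 1..C = fg classes
#   weight = torch.ones(6)                       # per-sample weights
#   loss = weighted_binary_cross_entropy(pred, label, weight)  # shape (1,)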


def py_sigmoid_focal_loss(pred,
                          target,
                          weight,
                          gamma=2.0,
                          alpha=0.25,
                          reduction='mean'):
    pred_sigmoid = pred.sigmoid()
    target = target.type_as(pred)
    pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)
    weight = (alpha * target + (1 - alpha) * (1 - target)) * weight
    weight = weight * pt.pow(gamma)
    loss = F.binary_cross_entropy_with_logits(
        pred, target, reduction='none') * weight
    reduction_enum = F._Reduction.get_enum(reduction)
    # none: 0, mean:1, sum: 2
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()
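# Usage sketch (illustrative shapes). This pure-PyTorch focal loss expects
# binary targets of the same shape as the logits:
#   pred = torch.randn(8, 80)                      # (N, C) logits
#   target = (torch.rand(8, 80) > 0.95).float()    # (N, C) binary targets
#   weight = torch.ones(8, 1)                      # broadcast over classes
#   loss = py_sigmoid_focal_loss(pred, target, weight)  # scalar ('mean')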


def weighted_sigmoid_focal_loss(pred,
                                target,
                                weight,
                                gamma=2.0,
                                alpha=0.25,
                                avg_factor=None,
                                num_classes=80):
    if avg_factor is None:
        avg_factor = torch.sum(weight > 0).float().item() / num_classes + 1e-6
    return torch.sum(
        sigmoid_focal_loss(pred, target, gamma, alpha, 'none') *
        weight.view(-1, 1))[None] / avg_factor
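# Note: unlike py_sigmoid_focal_loss above, this wraps the compiled CUDA op
# imported from ...ops. The default avg_factor divides the number of positive
# weight entries by num_classes, and weight.view(-1, 1) broadcasts the
# weights across the per-class loss map; the expected target format is
# dictated by the compiled op.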


def mask_cross_entropy(pred, target, label):
    num_rois = pred.size()[0]
    inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
    pred_slice = pred[inds, label].squeeze(1)
    return F.binary_cross_entropy_with_logits(
        pred_slice, target, reduction='mean')[None]
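# Usage sketch (illustrative shapes for an FCN-style mask head). `pred` holds
# one mask logit map per class; `label` picks the positive class per RoI:
#   pred = torch.randn(3, 81, 28, 28)                  # (N, C, H, W) logits
#   target = torch.randint(0, 2, (3, 28, 28)).float()  # (N, H, W) binary
#   label = torch.tensor([1, 5, 80])                   # (N,) class per RoI
#   loss = mask_cross_entropy(pred, target, label)     # tensor of shape (1,)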


def smooth_l1_loss(pred, target, beta=1.0, reduction='mean'):
    assert beta > 0
    assert pred.size() == target.size() and target.numel() > 0
    diff = torch.abs(pred - target)
    loss = torch.where(diff < beta, 0.5 * diff * diff / beta,
                       diff - 0.5 * beta)
    reduction_enum = F._Reduction.get_enum(reduction)
    # none: 0, mean:1, sum: 2
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.sum() / pred.numel()
    elif reduction_enum == 2:
        return loss.sum()
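# Usage sketch (illustrative). Elementwise smooth L1: quadratic where
# |pred - target| < beta, linear beyond it:
#   pred = torch.randn(8, 4)
#   target = torch.randn(8, 4)
#   loss = smooth_l1_loss(pred, target)                   # scalar, mean
#   raw = smooth_l1_loss(pred, target, reduction='none')  # (8, 4)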


def weighted_smoothl1(pred, target, weight, beta=1.0, avg_factor=None):
    if avg_factor is None:
        avg_factor = torch.sum(weight > 0).float().item() / 4 + 1e-6
    loss = smooth_l1_loss(pred, target, beta, reduction='none')
    return torch.sum(loss * weight)[None] / avg_factor
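# Usage sketch (illustrative). The default avg_factor divides the number of
# positive weight entries by 4, i.e. it counts boxes, not coordinates:
#   pred = torch.randn(8, 4)           # (N, 4) predicted bbox deltas
#   target = torch.randn(8, 4)         # (N, 4) regression targets
#   weight = torch.ones(8, 4)          # zero rows disable anchors
#   loss = weighted_smoothl1(pred, target, weight)   # tensor of shape (1,)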


def balanced_l1_loss(pred,
                     target,
                     beta=1.0,
                     alpha=0.5,
                     gamma=1.5,
                     reduction='none'):
    assert beta > 0
    assert pred.size() == target.size() and target.numel() > 0

    diff = torch.abs(pred - target)
    b = np.e**(gamma / alpha) - 1
    loss = torch.where(
        diff < beta, alpha / b *
        (b * diff + 1) * torch.log(b * diff / beta + 1) - alpha * diff,
        gamma * diff + gamma / b - alpha * beta)

    reduction_enum = F._Reduction.get_enum(reduction)
    # none: 0, mean: 1, sum: 2
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.sum() / pred.numel()
    elif reduction_enum == 2:
        return loss.sum()


def weighted_balanced_l1_loss(pred,
                              target,
                              weight,
                              beta=1.0,
                              alpha=0.5,
                              gamma=1.5,
                              avg_factor=None):
    if avg_factor is None:
        avg_factor = torch.sum(weight > 0).float().item() / 4 + 1e-6
    loss = balanced_l1_loss(pred, target, beta, alpha, gamma, reduction='none')
    return torch.sum(loss * weight)[None] / avg_factor
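# Usage sketch (illustrative). A drop-in replacement for weighted_smoothl1
# using the balanced L1 term of Libra R-CNN (arXiv:1904.02701):
#   pred = torch.randn(8, 4)
#   target = torch.randn(8, 4)
#   weight = torch.ones(8, 4)
#   loss = weighted_balanced_l1_loss(pred, target, weight)   # shape (1,)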


def bounded_iou_loss(pred, target, beta=0.2, eps=1e-3, reduction='mean'):
    """Improving Object Localization with Fitness NMS and Bounded IoU Loss,
    https://arxiv.org/abs/1711.00164.

    Args:
        pred (tensor): Predicted bboxes.
        target (tensor): Target bboxes.
        beta (float): beta parameter of the smooth L1 term.
        eps (float): eps to avoid NaN.
        reduction (str): Reduction type.
    """
    pred_ctrx = (pred[:, 0] + pred[:, 2]) * 0.5
    pred_ctry = (pred[:, 1] + pred[:, 3]) * 0.5
    pred_w = pred[:, 2] - pred[:, 0] + 1
    pred_h = pred[:, 3] - pred[:, 1] + 1
    with torch.no_grad():
        target_ctrx = (target[:, 0] + target[:, 2]) * 0.5
        target_ctry = (target[:, 1] + target[:, 3]) * 0.5
        target_w = target[:, 2] - target[:, 0] + 1
        target_h = target[:, 3] - target[:, 1] + 1

    dx = target_ctrx - pred_ctrx
    dy = target_ctry - pred_ctry

    loss_dx = 1 - torch.max(
        (target_w - 2 * dx.abs()) /
        (target_w + 2 * dx.abs() + eps), torch.zeros_like(dx))
    loss_dy = 1 - torch.max(
        (target_h - 2 * dy.abs()) /
        (target_h + 2 * dy.abs() + eps), torch.zeros_like(dy))
    loss_dw = 1 - torch.min(target_w / (pred_w + eps), pred_w /
                            (target_w + eps))
    loss_dh = 1 - torch.min(target_h / (pred_h + eps), pred_h /
                            (target_h + eps))
    loss_comb = torch.stack([loss_dx, loss_dy, loss_dw, loss_dh],
                            dim=-1).view(loss_dx.size(0), -1)

    loss = torch.where(loss_comb < beta, 0.5 * loss_comb * loss_comb / beta,
                       loss_comb - 0.5 * beta)
    reduction_enum = F._Reduction.get_enum(reduction)
    # none: 0, mean:1, sum: 2
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.sum() / pred.numel()
    elif reduction_enum == 2:
        return loss.sum()
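# Usage sketch (illustrative). Boxes are (x1, y1, x2, y2) with the +1
# width/height convention used above:
#   pred = torch.tensor([[0., 0., 10., 10.], [5., 5., 20., 20.]])
#   target = torch.tensor([[1., 1., 10., 10.], [5., 5., 18., 22.]])
#   loss = bounded_iou_loss(pred, target)   # scalar, mean reduction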


def weighted_iou_loss(pred,
                      target,
                      weight,
                      style='naive',
                      beta=0.2,
                      eps=1e-3,
                      avg_factor=None):
    if style not in ['bounded', 'naive']:
        raise ValueError(
            'Only "bounded" and "naive" IoU loss styles are supported.')
    inds = torch.nonzero(weight[:, 0] > 0)
    if avg_factor is None:
        avg_factor = inds.numel() + 1e-6

    if inds.numel() > 0:
        inds = inds.squeeze(1)
    else:
        return (pred * weight).sum()[None] / avg_factor

    if style == 'bounded':
        loss = bounded_iou_loss(
            pred[inds], target[inds], beta=beta, eps=eps, reduction='sum')
    else:
        loss = iou_loss(pred[inds], target[inds], eps=eps, reduction='sum')
    loss = loss[None] / avg_factor
    return loss
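# Usage sketch (illustrative). `weight` has one row per box; only boxes whose
# first weight entry is positive contribute to the loss:
#   pred = torch.tensor([[0., 0., 10., 10.], [5., 5., 20., 20.]])
#   target = torch.tensor([[1., 1., 10., 10.], [5., 5., 18., 22.]])
#   weight = torch.tensor([[1., 1., 1., 1.], [0., 0., 0., 0.]])
#   loss = weighted_iou_loss(pred, target, weight, style='bounded')  # (1,)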


def accuracy(pred, target, topk=1):
    if isinstance(topk, int):
        topk = (topk, )
        return_single = True
    else:
        return_single = False

    maxk = max(topk)
    _, pred_label = pred.topk(maxk, 1, True, True)
    pred_label = pred_label.t()
    correct = pred_label.eq(target.view(1, -1).expand_as(pred_label))

    res = []
    for k in topk:
        correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / pred.size(0)))
    return res[0] if return_single else res
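# Usage sketch (illustrative). Returns top-k accuracy as a percentage; an int
# `topk` yields a single tensor, a tuple yields a list:
#   pred = torch.randn(16, 10)             # (N, C) class scores
#   target = torch.randint(0, 10, (16,))   # (N,) ground-truth labels
#   top1 = accuracy(pred, target)                     # tensor([..])
#   top1_top5 = accuracy(pred, target, topk=(1, 5))   # [tensor, tensor]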


def _expand_binary_labels(labels, label_weights, label_channels):
    bin_labels = labels.new_full((labels.size(0), label_channels), 0)
    inds = torch.nonzero(labels >= 1).squeeze()
    if inds.numel() > 0:
        bin_labels[inds, labels[inds] - 1] = 1
    bin_label_weights = label_weights.view(-1, 1).expand(
        label_weights.size(0), label_channels)
    return bin_labels, bin_label_weights
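# Example of the 1-based label convention (illustrative): label 0 maps to an
# all-zero row (background); label k >= 1 sets column k - 1:
#   labels = torch.tensor([0, 1, 3])
#   bin_labels, _ = _expand_binary_labels(labels, torch.ones(3), 4)
#   # bin_labels -> [[0, 0, 0, 0], [1, 0, 0, 0], [0, 0, 1, 0]]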


def iou_loss(pred_bboxes, target_bboxes, eps=1e-6, reduction='mean'):
    ious = bbox_overlaps(pred_bboxes, target_bboxes, is_aligned=True) + eps
    loss = -ious.log()

    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()
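# Usage sketch (illustrative). -log(IoU) over aligned box pairs; the boxes
# should overlap, since eps only guards against taking the log of zero:
#   pred = torch.tensor([[0., 0., 10., 10.]])
#   target = torch.tensor([[1., 1., 11., 11.]])
#   loss = iou_loss(pred, target)   # scalar, mean reduction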