# Copyright (c) 2023, Tri Dao.

import torch
import torch.nn as nn

from flash_attn.ops.triton.cross_entropy import cross_entropy_loss


class CrossEntropyLoss(nn.Module):
    def __init__(
        self,
        ignore_index=-100,
        reduction="mean",
        label_smoothing=0.0,
        logit_scale=1.0,
        lse_square_scale=0.0,
        inplace_backward=False,
        process_group=None,
        return_z_loss=False,
    ):
        """
        Arguments:
            ignore_index: int. If labels == ignore_index, the loss is set to 0.0.
            label_smoothing: float
            lse_square_scale: float. If > 0, we add lse_square_scale * lse(logits) ^ 2 to the loss.
                This is also referred to as "z-loss".
            inplace_backward: bool. If True, we do the backward pass in-place by modifying the logits.
                This saves memory.
            process_group: if not None, we're doing Tensor Parallel: each process is responsible for
                one part of the vocab. The loss will be aggregated across processes.
            return_z_loss: bool. If True, we return the component of the loss contributed by
                the lse_square_scale value. This value is only for logging and does not support
                backprop.
        """
        super().__init__()
        if reduction not in ["mean", "none", "sum"]:
            raise NotImplementedError("Only supports reduction = 'mean', 'none', or 'sum'")
        self.ignore_index = ignore_index
        self.reduction = reduction
        self.label_smoothing = label_smoothing
        self.logit_scale = logit_scale
        self.lse_square_scale = lse_square_scale
        self.inplace_backward = inplace_backward
        self.process_group = process_group
        self.return_z_loss = return_z_loss

    def forward(self, input, target):
        """
        Arguments:
            input: (batch, vocab_size)
            target: (batch,)
        Returns:
            losses: (batch,) if reduction is 'none', else (1,), dtype float
            z_loss: (batch,) if reduction is 'none', else (1,), dtype float (if self.return_z_loss)
        """
        assert input.is_cuda and target.is_cuda, "Only supports CUDA tensors"
        loss, z_loss = cross_entropy_loss(
            input,
            target,
            label_smoothing=self.label_smoothing,
            logit_scale=self.logit_scale,
            lse_square_scale=self.lse_square_scale,
            ignored_index=self.ignore_index,
            inplace_backward=self.inplace_backward,
            process_group=self.process_group,
        )
        if self.reduction == "mean":
            loss = loss.sum() / (target != self.ignore_index).sum()
        elif self.reduction == "sum":
            loss = loss.sum()

        if not self.return_z_loss:
            return loss

        if self.reduction == "mean":
            z_loss = z_loss.sum() / (target != self.ignore_index).sum()
        elif self.reduction == "sum":
            z_loss = z_loss.sum()

        return loss, z_loss
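

# Minimal usage sketch (an illustration, not part of the original module).
# Assumes a CUDA device is available, since the Triton kernel requires CUDA
# tensors; the batch and vocab sizes below are arbitrary.
if __name__ == "__main__":
    batch, vocab_size = 4, 32000
    logits = torch.randn(batch, vocab_size, device="cuda", requires_grad=True)
    labels = torch.randint(0, vocab_size, (batch,), device="cuda")

    # Standard cross-entropy, averaged over non-ignored labels.
    loss_fn = CrossEntropyLoss(reduction="mean")
    loss = loss_fn(logits, labels)
    loss.backward()

    # With z-loss regularization; the z-loss component is returned for logging
    # only and does not support backprop (see the docstring above).
    loss_fn_z = CrossEntropyLoss(lse_square_scale=1e-4, return_z_loss=True)
    loss, z_loss = loss_fn_z(logits.detach().requires_grad_(), labels)
    print(loss.item(), z_loss.item())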