# Copyright (c) DP Technology.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import math

import torch
import torch.nn.functional as F
from unicore import metrics
from unicore.losses import UnicoreLoss, register_loss


@register_loss("cross_entropy")
class CrossEntropyLoss(UnicoreLoss):
    def __init__(self, task):
        super().__init__(task)

    def forward(self, model, sample, reduce=True):
        """Compute the loss for the given sample.

        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        net_output = model(**sample["net_input"])
        loss = self.compute_loss(model, net_output, sample, reduce=reduce)
        sample_size = sample["target"].size(0)
        logging_output = {
            "loss": loss.data,
            "bsz": sample["target"].size(0),
            "sample_size": sample_size,
        }
        return loss, sample_size, logging_output

    def compute_loss(self, model, net_output, sample, reduce=True):
        """Compute negative log-likelihood from raw logits and integer targets."""
        lprobs = F.log_softmax(net_output.float(), dim=-1)
        lprobs = lprobs.view(-1, lprobs.size(-1))
        target = sample["target"].view(-1)
        loss = F.nll_loss(
            lprobs,
            target,
            reduction="sum" if reduce else "none",
        )
        return loss

    @staticmethod
    def reduce_metrics(logging_outputs, split="valid") -> None:
        """Aggregate logging outputs from data parallel training."""
        loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
        sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)

        # we divide by log(2) to convert the loss from base e to base 2
        metrics.log_scalar(
            "loss", loss_sum / sample_size / math.log(2), sample_size, round=3
        )

    @staticmethod
    def logging_outputs_can_be_summed(is_train) -> bool:
        """
        Whether the logging outputs returned by `forward` can be
        summed across workers prior to calling `reduce_metrics`.
        Setting this to True will improve distributed training speed.
        """
        return True
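

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the library API): it only illustrates the
# math that `compute_loss` performs, using dummy tensors. In normal use this
# loss is constructed by the training framework from a registered task and a
# model, which is omitted here; the shapes below (batch of 4, 10 classes) are
# arbitrary assumptions for demonstration.
if __name__ == "__main__":
    logits = torch.randn(4, 10)          # raw, unnormalized model outputs
    target = torch.randint(0, 10, (4,))  # integer class indices

    # Same computation as CrossEntropyLoss.compute_loss with reduce=True:
    # log-softmax over the class dimension, then summed negative log-likelihood.
    lprobs = F.log_softmax(logits.float(), dim=-1)
    loss = F.nll_loss(
        lprobs.view(-1, lprobs.size(-1)),
        target.view(-1),
        reduction="sum",
    )
    print(f"summed cross-entropy loss: {loss.item():.4f}")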