# test_cross_entropy.py
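"""Distributed test of apex's vocab-parallel cross entropy.

Compares the loss and the input gradient of
``tensor_parallel.cross_entropy.vocab_parallel_cross_entropy`` against a plain
``torch.nn.functional.cross_entropy`` reference for every tensor model parallel
world size that divides the launched world size.
"""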
import logging
from typing import Tuple

import torch
import torch.nn.functional as F
from torch.testing._internal import common_utils

logging.getLogger("torch").setLevel(logging.WARNING)

from apex.transformer import parallel_state
from apex.transformer import tensor_parallel
from apex.transformer.tensor_parallel import cross_entropy
from apex.transformer.testing.commons import set_random_seed, IdentityLayer
from apex.transformer.testing.distributed_test_base import DistributedTestBase

logging.getLogger("apex").setLevel(logging.WARNING)


def torch_cross_entropy(
    batch_size: int, seq_length: int, vocab_size: int, logits_scale: float, seed: int,
) -> Tuple[torch.Tensor, torch.Tensor]:
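    """Compute the reference loss and input gradient with plain
    ``F.cross_entropy`` on a single device."""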
    set_random_seed(seed)
    identity = IdentityLayer(
        (batch_size, seq_length, vocab_size), scale=logits_scale
    ).cuda()
    logits = identity()
    target = torch.cuda.LongTensor(size=(batch_size, seq_length)).random_(0, vocab_size)
    loss = (
        F.cross_entropy(
            logits.view(-1, logits.size()[-1]), target.view(-1), reduction="none"
        )
        .view_as(target)
        .mean()
    )
    loss.backward()
    return loss, identity.weight.grad


def tensor_sharded_cross_entropy(
    batch_size: int, seq_length: int, vocab_size: int, logits_scale: float, seed: int,
) -> Tuple[torch.Tensor, torch.Tensor]:
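    """Compute the loss and input gradient with vocab-parallel cross entropy
    on logits sharded across the tensor model parallel group."""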
    set_random_seed(seed)
    identity = IdentityLayer(
        (batch_size, seq_length, vocab_size), scale=logits_scale
    ).cuda()
    logits = identity()
    logits_parallel = tensor_parallel.scatter_to_tensor_model_parallel_region(logits)
    target = torch.cuda.LongTensor(size=(batch_size, seq_length)).random_(0, vocab_size)
    logits_parallel_ = logits_parallel.clone().detach()
    loss = cross_entropy.vocab_parallel_cross_entropy(logits_parallel, target).mean()
    loss.backward()
    # vocab_parallel_cross_entropy must not mutate the logits it receives in-place.
    assert torch.equal(logits_parallel_, logits_parallel)
    return loss, identity.weight.grad


class VocabParallelCrossEntropy(DistributedTestBase):
    def test_cross_entropy(self):
        batch_size, sequence_length, vocab_size_per_partition = 13, 17, 11
        logits_scale = 1000.0
        seed = 1234
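        # Test every tensor model parallel size that evenly divides the launched world size.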
        for tensor_model_parallel_world_size in range(1, self.world_size + 1):
            if self.world_size % tensor_model_parallel_world_size:
                continue
            with self.subTest(
                tensor_model_parallel_world_size=tensor_model_parallel_world_size
            ):
                parallel_state.initialize_model_parallel(
                    tensor_model_parallel_size_=tensor_model_parallel_world_size,
                )
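                # The global vocab size scales with the number of partitions so that
                # each rank holds vocab_size_per_partition entries of the vocabulary.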
                vocab_size = vocab_size_per_partition * tensor_model_parallel_world_size
                loss_torch, grad_torch = torch_cross_entropy(
                    batch_size, sequence_length, vocab_size, logits_scale, seed
                )
                (
                    loss_tensor_parallel,
                    grad_tensor_parallel,
                ) = tensor_sharded_cross_entropy(
                    batch_size, sequence_length, vocab_size, logits_scale, seed
                )

                torch.testing.assert_close(loss_torch, loss_tensor_parallel)
                torch.testing.assert_close(grad_torch, grad_tensor_parallel)

                parallel_state.destroy_model_parallel()


if __name__ == "__main__":
    common_utils.run_tests()