test_mlp.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.

import pytest
import torch

from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_local_spec
from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed
from megatron.core.transformer.mlp import MLP
from megatron.core.transformer.transformer_config import TransformerConfig

from tests.unit_tests.test_utilities import Utils


class TestParallelMLP:

    def setup_method(self, method):
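        # Initialize model-parallel state with tensor-parallel size 1 and pipeline-parallel size 1.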
        Utils.initialize_model_parallel(1, 1)
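        # Seed the model-parallel RNG tracker so weight initialization is deterministic.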
        model_parallel_cuda_manual_seed(123)
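        # Tiny transformer config; the MLP submodules come from the local (non-Transformer-Engine) GPT layer spec.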
        transformer_config = TransformerConfig(
            num_layers=2, hidden_size=12, num_attention_heads=4, use_cpu_initialization=True
        )
        self.mlp = MLP(transformer_config, get_gpt_layer_local_spec().submodules.mlp.submodules)

    def teardown_method(self, method):
        Utils.destroy_model_parallel()

    def test_constructor(self):
        assert isinstance(self.mlp, MLP)

        num_weights = sum(p.numel() for p in self.mlp.parameters())
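        # With hidden_size=12 and the default ffn_hidden_size of 4 * hidden_size = 48:
        #   fc1: 12*48 + 48 = 624, fc2: 48*12 + 12 = 588, total = 1212.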
        assert num_weights == 1212

    """
    def test_cpu_forward(self, mlp):
        # [sequence length, micro batch size, hidden size]
        hidden_states = torch.ones((32, 2, mlp.config.hidden_size))
        output, output_bias = mlp(hidden_states)
        assert output.shape[0] == 32
        assert output.shape[1] == 2
        assert output.shape[2] == mlp.config.hidden_size
        assert output_bias.shape[0] == mlp.config.hidden_size
        assert output.dtype == torch.float32
    """

    @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
    def test_gpu_forward(self):
        mlp = self.mlp
        mlp.cuda()
        # [sequence length, batch size, hidden size]
        hidden_states = torch.ones((32, 2, mlp.config.hidden_size))
        hidden_states = hidden_states.cuda()
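        # The MLP returns the bias separately so the caller can fuse the bias add downstream (e.g. with dropout).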
        output, output_bias = mlp(hidden_states)
        assert output.shape[0] == 32
        assert output.shape[1] == 2
        assert output.shape[2] == mlp.config.hidden_size
        assert output_bias.shape[0] == mlp.config.hidden_size
        assert output.dtype == torch.float32
        assert output.device.type == 'cuda'
        assert output_bias.device.type == 'cuda'