"backends/v2/Cargo.toml" did not exist on "4c19593a903dffe3b7d7e4edb479445aff0f001f"
# test_gpt_model.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
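# Unit tests for the Megatron-Core GPTModel: constructor, set_input_tensor, and a
# forward pass with post-processing, all on a deliberately tiny model configuration.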

import os

import pytest
import torch

from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_with_transformer_engine_spec
from megatron.core.models.gpt.gpt_model import GPTModel
from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed
from megatron.core.transformer.transformer_config import TransformerConfig
from tests.unit_tests.test_utilities import Utils


class TestGPTModel:

    def setup_method(self, method):
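        # Clear any Transformer Engine attention-backend overrides so each test
        # starts from the library's default backend selection.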
        os.environ.pop('NVTE_FUSED_ATTN', None)
        os.environ.pop('NVTE_FLASH_ATTN', None)
        os.environ.pop('NVTE_UNFUSED_ATTN', None)
        Utils.initialize_model_parallel(1, 1)
        model_parallel_cuda_manual_seed(123)
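        # Build a deliberately tiny GPT model (2 layers, hidden size 12, 4 heads,
        # CPU-initialized weights) so the checks below stay cheap and deterministic.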
        transformer_config = TransformerConfig(
            num_layers=2, hidden_size=12, num_attention_heads=4, use_cpu_initialization=True
        )
        self.gpt_model = GPTModel(
            config=transformer_config,
            transformer_layer_spec=get_gpt_layer_with_transformer_engine_spec(),
            vocab_size=100,
            max_sequence_length=4,
        )

    def teardown_method(self, method):
        Utils.destroy_model_parallel()

    @pytest.mark.internal
    def test_constructor(self):
        assert isinstance(self.gpt_model, GPTModel)

        assert self.gpt_model.max_sequence_length == 4

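        # Total parameter count expected for the tiny config defined in setup_method.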
        num_weights = sum([p.numel() for p in self.gpt_model.parameters()])
        assert num_weights == 6240

    @pytest.mark.internal
    def test_set_input_tensor(self):
        config: TransformerConfig = self.gpt_model.config
        sequence_length = self.gpt_model.max_sequence_length
        micro_batch_size = 2

        # [sequence length, batch size, hidden size]
        input_tensor = torch.ones((sequence_length, micro_batch_size, config.hidden_size))

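        # set_input_tensor is the hook pipeline parallelism uses to feed activations
        # from the previous stage; here we only check it lands on the decoder intact.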
        self.gpt_model.set_input_tensor(input_tensor)

        assert self.gpt_model.decoder.input_tensor.shape[0] == sequence_length
        assert self.gpt_model.decoder.input_tensor.shape[1] == micro_batch_size
        assert self.gpt_model.decoder.input_tensor.shape[2] == config.hidden_size

    @pytest.mark.internal
    def test_post_process_forward(self):
        config: TransformerConfig = self.gpt_model.config
        sequence_length = self.gpt_model.max_sequence_length
        micro_batch_size = 2

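        # Run the forward pass on GPU; the Transformer Engine layer spec used in
        # setup_method expects CUDA tensors.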
        self.gpt_model.cuda()

        data = list(range(sequence_length))
        input_ids = torch.tensor(data, dtype=torch.int64).repeat((micro_batch_size, 1)).cuda()
        position_ids = torch.tensor(data, dtype=torch.int64).repeat((micro_batch_size, 1)).cuda()
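        # All-True boolean attention mask of shape [batch, 1, seq, seq].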
        attention_mask = torch.ones(
            (micro_batch_size, 1, sequence_length, sequence_length), dtype=bool
        ).cuda()

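        # With post-processing enabled, forward returns per-token logits over the
        # vocabulary rather than the final hidden states.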
        logits = self.gpt_model.forward(
            input_ids=input_ids, position_ids=position_ids, attention_mask=attention_mask
        )

        assert logits.shape[0] == micro_batch_size
        assert logits.shape[1] == sequence_length
        assert logits.shape[2] == self.gpt_model.vocab_size