# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.

import pytest
import torch

from megatron.core.models.gpt.gpt_model import GPTModel
from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed
from megatron.core.transformer.transformer_config import TransformerConfig
from tests.unit_tests.test_utilities import Utils


class TestGPTModel:

    def setup_method(self, method):
        # Single-device setup: 1-way tensor parallelism, 1-way pipeline parallelism.
        Utils.initialize_model_parallel(1, 1)
        model_parallel_cuda_manual_seed(123)
        # A deliberately tiny config so construction is fast and the parameter
        # count asserted below stays easy to verify by hand.
        transformer_config = TransformerConfig(
            num_layers=2, hidden_size=12, num_attention_heads=4, use_cpu_initialization=True
        )
        self.gpt_model = GPTModel(
            config=transformer_config, vocab_size=100, max_sequence_length=4
        )

    def teardown_method(self, method):
        Utils.destroy_model_parallel()

    def test_constructor(self):
        assert isinstance(self.gpt_model, GPTModel)
        assert self.gpt_model.max_sequence_length == 4

        num_weights = sum(p.numel() for p in self.gpt_model.parameters())
        assert num_weights == 6240

    def test_set_input_tensor(self):
        config: TransformerConfig = self.gpt_model.config
        sequence_length = self.gpt_model.max_sequence_length
        micro_batch_size = 2

        # [sequence length, batch size, hidden size]
        input_tensor = torch.ones((sequence_length, micro_batch_size, config.hidden_size))

        self.gpt_model.set_input_tensor(input_tensor)

        assert self.gpt_model.decoder.input_tensor.shape[0] == sequence_length
        assert self.gpt_model.decoder.input_tensor.shape[1] == micro_batch_size
        assert self.gpt_model.decoder.input_tensor.shape[2] == config.hidden_size

    def test_post_process_forward(self):
        config: TransformerConfig = self.gpt_model.config
        sequence_length = self.gpt_model.max_sequence_length
        micro_batch_size = 2

        self.gpt_model.cuda()

        data = list(range(sequence_length))
        input_ids = torch.tensor(data, dtype=torch.int64).repeat((micro_batch_size, 1)).cuda()
        position_ids = torch.tensor(data, dtype=torch.int64).repeat((micro_batch_size, 1)).cuda()
        attention_mask = torch.ones(
            (1, 1, sequence_length, sequence_length), dtype=torch.bool
        ).cuda()

        logits = self.gpt_model.forward(
            input_ids=input_ids, position_ids=position_ids, attention_mask=attention_mask
        )

        # With post-processing enabled, the model returns logits of shape
        # [batch size, sequence length, vocab size].
        assert logits.shape[0] == micro_batch_size
        assert logits.shape[1] == sequence_length
        assert logits.shape[2] == self.gpt_model.vocab_size

    def test_no_post_process_forward(self):
        pass

    def test_no_preprocess_forward(self):
        pass

    def test_state_dict_for_save_checkpoint(self):
        pass

    def test_load_state_dict(self):
        pass
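

# --- Hedged sketch, not part of the original suite ---
# The four methods above are stubs. As one illustration of how the two
# state-dict stubs could eventually be exercised, the helper below round-trips
# a model through the standard torch.nn.Module state-dict API. The helper name
# is hypothetical, and no Megatron-specific checkpoint helpers (e.g. a
# dedicated save-checkpoint state dict) are assumed here.
def _sketch_state_dict_roundtrip(model: GPTModel) -> None:
    # Capture all parameters and buffers with the standard torch API.
    state = model.state_dict()
    # Reloading the same state should report no missing or unexpected keys
    # (load_state_dict returns an _IncompatibleKeys named tuple).
    incompatible = model.load_state_dict(state)
    assert not incompatible.missing_keys
    assert not incompatible.unexpected_keys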