# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.

"""Multiple choice model."""

import torch

from megatron.training import get_args, print_rank_last
from megatron.legacy.model.enums import AttnMaskType
from megatron.legacy.model.bert_model import bert_extended_attention_mask, bert_position_ids
from megatron.legacy.model.language_model import get_language_model
from megatron.legacy.model.utils import get_linear_layer
from .module import MegatronModule


class MultipleChoice(MegatronModule):
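    """BERT-style multiple-choice model: run the encoder over every
    (example, choice) pair, pool, and score each choice with a
    single-logit head."""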
    def __init__(self,
                 config,
                 num_tokentypes=2,
                 pre_process=True,
                 post_process=True):
        super(MultipleChoice, self).__init__(share_embeddings_and_output_weights=False)
        args = get_args()
        self.pre_process = pre_process
        self.post_process = post_process
        self.language_model, self._language_model_key = get_language_model(
            config=config,
            num_tokentypes=num_tokentypes,
            add_pooler=True,
            encoder_attn_mask_type=AttnMaskType.padding,
            pre_process=self.pre_process,
            post_process=self.post_process)
        # Multi-choice head.
        if self.post_process:
            self.multichoice_dropout = torch.nn.Dropout(args.hidden_dropout)
            self.multichoice_head = get_linear_layer(args.hidden_size, 1,
                                                     config.init_method)
            self._multichoice_head_key = 'multichoice_head'
    def set_input_tensor(self, input_tensor):
        """See megatron.legacy.model.transformer.set_input_tensor()"""
        self.language_model.set_input_tensor(input_tensor)

    def forward(self, model_input, attention_mask, tokentype_ids=None):
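        """Score each choice.

        All three inputs are expected as [batch, choices, sequence] tensors.
        Returns [batch, choices] logits when post_process is True, otherwise
        the raw language-model output.
        """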

        # [batch, choices, sequence] --> [batch * choices, sequence] -->
        #    transformer --> [batch, choices] --> softmax

        # Ensure the shape is [batch-size, choices, sequence]
        assert len(attention_mask.shape) == 3
        num_choices = attention_mask.shape[1]
        # Reshape and treat choice dimension the same as batch.
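        # (e.g. an illustrative [8, 4, 512] mask becomes [32, 512])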
        attention_mask = attention_mask.view(-1, attention_mask.size(-1))
        extended_attention_mask = bert_extended_attention_mask(attention_mask)

        input_ids = model_input
        # Reshape input_ids and tokentype_ids the same way as attention_mask.
        assert len(input_ids.shape) == 3
        assert len(tokentype_ids.shape) == 3
        input_ids = input_ids.view(-1, input_ids.size(-1))
        tokentype_ids = tokentype_ids.view(-1, tokentype_ids.size(-1))
        position_ids = bert_position_ids(input_ids)

        lm_output = self.language_model(
            input_ids,
            position_ids,
            extended_attention_mask,
            tokentype_ids=tokentype_ids
        )
        if self.post_process:
            _, pooled_output = lm_output
            multichoice_output = self.multichoice_dropout(pooled_output)
            multichoice_logits = self.multichoice_head(multichoice_output)

            # Reshape back to separate choices.
            multichoice_logits = multichoice_logits.view(-1, num_choices)

            return multichoice_logits
        return lm_output
    def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False):
        """For easy load when model is combined with other heads,
        add an extra key."""

        state_dict_ = {}
        state_dict_[self._language_model_key] \
            = self.language_model.state_dict_for_save_checkpoint(prefix=prefix,
                                                                 keep_vars=keep_vars)
        if self.post_process:
            state_dict_[self._multichoice_head_key] \
                = self.multichoice_head.state_dict(prefix=prefix, keep_vars=keep_vars)
        return state_dict_

    def load_state_dict(self, state_dict, strict=True):
        """Customized load."""

        self.language_model.load_state_dict(
            state_dict[self._language_model_key], strict=strict)
        if self.post_process:
            if self._multichoice_head_key in state_dict:
                self.multichoice_head.load_state_dict(
                    state_dict[self._multichoice_head_key], strict=strict)
            else:
                print_rank_last('***WARNING*** could not find {} in the checkpoint, '
                                'initializing to random'.format(
                                    self._multichoice_head_key))
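

# Minimal usage sketch (not part of the class above; assumes Megatron has
# already been initialized so that get_args() works, and that `config` is a
# transformer config exposing init_method). Names and sizes are illustrative:
#
#     model = MultipleChoice(config, num_tokentypes=2)
#     batch, choices, seq = 8, 4, 512
#     input_ids = torch.randint(0, 30000, (batch, choices, seq))
#     attention_mask = torch.ones(batch, choices, seq, dtype=torch.long)
#     tokentype_ids = torch.zeros(batch, choices, seq, dtype=torch.long)
#     logits = model(input_ids, attention_mask, tokentype_ids)  # [batch, choices]
#
# Checkpoint state round-trips through the extra keys added above:
#
#     state = model.state_dict_for_save_checkpoint()
#     model.load_state_dict(state)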