# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.

"""Vision Transformer(VIT) model."""

import math
import einops
import torch
import apex
import torch.nn.functional as F
from megatron import get_args
from megatron.model.transformer import ParallelTransformer
from megatron.model.utils import (
    get_linear_layer,
    init_method_normal,
    scaled_init_method_normal,
)
from megatron.model.module import MegatronModule

CLASS_TOKEN_LENGTH = 8  # learnable class tokens prepended to the patch sequence

class VitMlpHead(MegatronModule):
    """Pooler layer.

    Pool hidden states of a specific token (for example start of the
    sequence) and add a linear transformation followed by a tanh.

    Arguments:
        hidden_size: hidden size of the incoming token representations.
        num_classes: number of output classes; the output layer's bias is
            initialized to -10.
    """

    def __init__(self, hidden_size, num_classes):
        super(VitMlpHead, self).__init__()
        self.dense_in = torch.nn.Linear(hidden_size, hidden_size)
        self.relu = torch.nn.ReLU()  # unused; forward() applies tanh instead
        self.dense_out = torch.nn.Linear(hidden_size, num_classes)
        torch.nn.init.constant_(self.dense_out.bias, -10)

    def forward(self, hidden_states):
        # hidden_states: [b, 1, h] -- the already-pooled token representation.
        dense_in_result = self.dense_in(hidden_states)
        tanh_result = torch.tanh(dense_in_result)
        dense_out_result = self.dense_out(tanh_result)
        return dense_out_result
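
# A minimal usage sketch (not part of the original file; sizes illustrative):
#
#     head = VitMlpHead(hidden_size=768, num_classes=1000)
#     pooled = torch.randn(4, 1, 768)   # [b, 1, h], e.g. a pooled class token
#     logits = head(pooled)             # -> [4, 1, 1000]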


def isPerfectSquare(x):
    if x >= 0:
        sr = int(math.sqrt(x))
        return sr * sr == x
    return False
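
# For example, isPerfectSquare(196) is True (a 14x14 patch grid), while
# isPerfectSquare(204) is False -- this is how the hook below detects the
# CLASS_TOKEN_LENGTH class tokens prepended to a checkpoint's embeddings.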


def twod_interpolate_position_embeddings_hook(
    state_dict,
    prefix,
    local_metadata,
    strict,
    missing_keys,
    unexpected_keys,
    error_msgs,
):
    """load_state_dict pre-hook: bilinearly resize a checkpoint's 2D position
    embeddings when its patch grid does not match the current model's (e.g.
    when fine-tuning at a different image resolution)."""
    args = get_args()
    num_patches_per_dim_h = args.img_h // args.patch_dim
    num_patches_per_dim_w = args.img_w // args.patch_dim
    num_patches = num_patches_per_dim_h * num_patches_per_dim_w
    hidden_size = args.hidden_size

    key = prefix + "weight"

    # The assert documents the expectation; the `if` additionally lets the
    # hook no-op when asserts are compiled out (python -O).
    assert key in state_dict
    if key in state_dict:
        input_param = state_dict[key]

        input_seq_len = input_param.shape[0]
        assert isPerfectSquare(input_seq_len) or \
            isPerfectSquare(input_seq_len - CLASS_TOKEN_LENGTH)
        input_has_class_token = not isPerfectSquare(input_seq_len)
        num_tok_input = (input_seq_len - CLASS_TOKEN_LENGTH
                         if input_has_class_token else input_seq_len)
        num_tok_output = num_patches
        output_has_class_token = args.class_token_present

        # update input_param and load it to state_dict[key]
        if input_has_class_token:
            input_param_tok = input_param[:CLASS_TOKEN_LENGTH, :]
            input_param_grid = input_param[CLASS_TOKEN_LENGTH:, :]
        else:
            input_param_tok = torch.zeros(CLASS_TOKEN_LENGTH, hidden_size)
            input_param_grid = input_param

        assert input_param.shape[1] == hidden_size

        if num_tok_input != num_tok_output:

            gs_input = int(math.sqrt(num_tok_input))
            gs_new = (num_patches_per_dim_h, num_patches_per_dim_w)

            # [s, h] -> [1, h, gs, gs] so F.interpolate can resize the grid;
            # cast to float since bilinear interpolation may not be
            # implemented for half precision on all backends (e.g. CPU).
            input_param_grid = input_param_grid.transpose(0, 1).contiguous()
            input_param_grid = input_param_grid.reshape(
                (1, -1, gs_input, gs_input)
            )
            input_param_grid = input_param_grid.float()
            scale_factor = (gs_new[0] / gs_input, gs_new[1] / gs_input)

            input_param_grid = F.interpolate(
                input_param_grid, scale_factor=scale_factor, mode="bilinear"
            )

            input_param_grid = input_param_grid.half()
            input_param_grid = input_param_grid.reshape((-1, num_tok_output))
            input_param_grid = input_param_grid.transpose(0, 1).contiguous()

            assert input_param_grid.shape[1] == hidden_size

        input_param = input_param_grid
        assert (
            input_param.shape[0] == num_tok_output
            and input_param.shape[1] == hidden_size
        )

        if output_has_class_token:
            input_param = torch.cat((input_param_tok, input_param), dim=0)

        state_dict[key] = input_param
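
# The hook above runs inside load_state_dict(). Illustrative effect (numbers
# not from the original file): a checkpoint trained at 224x224 with
# patch_dim=16 stores 8 + 14*14 = 204 position embeddings; loading it into a
# model built for 256x256 bilinearly resizes the 14x14 grid to 16x16, giving
# 8 + 16*16 = 264 embeddings.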


class VitBackbone(MegatronModule):
    """Vision Transformer Model."""

    def __init__(self,
                 pre_process=True,
                 post_process=True,
                 class_token=True,
                 single_token_output=False,
                 post_layer_norm=True,
                 drop_path_rate=0.0):
        super(VitBackbone, self).__init__(share_word_embeddings=False)
        args = get_args()

        self.fp16_lm_cross_entropy = args.fp16_lm_cross_entropy
        if args.init_method_xavier_uniform:
            self.init_method = torch.nn.init.xavier_uniform_
            self.scaled_init_method = torch.nn.init.xavier_uniform_
        else:
            self.init_method = init_method_normal(args.init_method_std)
            self.scaled_init_method = scaled_init_method_normal(
                args.init_method_std, args.num_layers
            )

        self.pre_process = pre_process
        self.post_process = post_process
        self.class_token = class_token
        self.post_layer_norm = post_layer_norm
        self.hidden_size = args.hidden_size
        self.patch_dim = args.patch_dim
        self.img_h = args.img_h
        self.img_w = args.img_w
        self.micro_batch_size = args.micro_batch_size
        self.single_token_output = single_token_output
        self.drop_path_rate = drop_path_rate

        assert self.img_h % self.patch_dim == 0
        assert self.img_w % self.patch_dim == 0
        self.num_patches_per_dim_h = self.img_h // self.patch_dim
        self.num_patches_per_dim_w = self.img_w // self.patch_dim
        self.num_patches = self.num_patches_per_dim_h * self.num_patches_per_dim_w
        self.seq_length = self.num_patches + (CLASS_TOKEN_LENGTH if self.class_token else 0)
        self.flatten_dim = self.patch_dim * self.patch_dim * args.num_channels
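
        # Illustrative numbers (not from the original file): a 224x224 image
        # with patch_dim=16 gives a 14x14 = 196-patch grid; with
        # class_token=True, seq_length = 196 + 8 = 204, and for RGB input
        # flatten_dim = 16 * 16 * 3 = 768.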
        self.input_tensor = None
        self.position_ids = None

        if self.pre_process:
            # cls_token
            if self.class_token:
                self.cls_token = torch.nn.Parameter(
                    torch.randn(1, CLASS_TOKEN_LENGTH, self.hidden_size)
                )
                torch.nn.init.zeros_(self.cls_token)
            self.position_ids = torch.arange(self.seq_length).expand(1, -1).cuda()

            # Linear encoder
            self.linear_encoder = torch.nn.Linear(
                self.flatten_dim, self.hidden_size
            )

            # embedding
            self.position_embeddings = torch.nn.Embedding(
                self.seq_length, self.hidden_size
            )
            init_method_normal(args.init_method_std)(
                self.position_embeddings.weight
            )

            args.class_token_present = self.class_token
            # Resize checkpoint position embeddings at load time if the
            # patch grid changed (see the hook above).
            self.position_embeddings._register_load_state_dict_pre_hook(
                twod_interpolate_position_embeddings_hook
            )

            self.embedding_dropout = torch.nn.Dropout(args.hidden_dropout)

        # Transformer
        self.transformer = ParallelTransformer(
            self.init_method,
            self.scaled_init_method,
            pre_process=self.pre_process,
            post_process=self.post_process,
            post_layer_norm=self.post_layer_norm,
            drop_path_rate=self.drop_path_rate
        )

    def set_input_tensor(self, input_tensor):
        """See megatron.model.transformer.set_input_tensor()"""
        self.transformer.set_input_tensor(input_tensor)

    def forward(self, input):

        if self.pre_process:
            rearranged_input = einops.rearrange(
                input,
                "b c (h p1) (w p2) -> b (h w) (p1 p2 c)",
                p1=self.patch_dim,
                p2=self.patch_dim,
            )
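            # Illustrative shapes (assuming 224x224 RGB input, patch_dim=16):
            # [b, 3, 224, 224] -> [b, 14*14, 16*16*3] = [b, 196, 768].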

            # The backbone assumes fp16 activations (the checkpoint hook
            # above likewise casts interpolated embeddings with .half()).
            assert rearranged_input.dtype == torch.half
            encoder_output = self.linear_encoder(rearranged_input)

            concatenated_tokens = encoder_output
            if self.class_token:
                cls_tokens = self.cls_token.expand(encoder_output.shape[0], -1, -1)
                concatenated_tokens = torch.cat((cls_tokens, encoder_output), dim=1)

            token_embeddings = concatenated_tokens + \
                    self.position_embeddings(self.position_ids[:, :concatenated_tokens.shape[1]])
            hidden_states = self.embedding_dropout(token_embeddings)
        else:
            hidden_states = input

        hidden_states = self.transformer(hidden_states, None)

        if self.single_token_output:
            # Return only the first (class) token's representation.
            hidden_states = hidden_states[:, 0, :]

        return hidden_states
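

# A minimal usage sketch (not part of the original file). VitBackbone reads
# its configuration from megatron's global args (img_h, img_w, patch_dim,
# hidden_size, ...), so megatron must be initialized first; shapes assume
# 224x224 RGB input in fp16 on GPU:
#
#     model = VitBackbone(pre_process=True, post_process=True).cuda()
#     images = torch.randn(4, 3, 224, 224, dtype=torch.half, device="cuda")
#     hidden = model(images)   # [4, seq_length, hidden_size]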