import argparse
import numpy as np
import torch
import torch.nn.functional as F
import pytest
import json
import random
import time
import copy
import math
from torch import nn
from torch.utils import checkpoint
from modelingpreln import BertEncoder as BertEncoderPreln
from modeling import BertEncoder as BertEncoderPostln
from modeling import BertLayerNorm, BertConfig
from deepspeed import DeepSpeedTransformerLayer, DeepSpeedTransformerConfig
import deepspeed

import sys

#if not deepspeed.ops.__installed_ops__['transformer']:
#    pytest.skip("transformer kernels are not installed", allow_module_level=True)


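# Elementwise comparison of corresponding outputs (first batch element of each
# returned tensor) within an absolute tolerance.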
def check_equal(first, second, atol=1e-2, verbose=False):
    if verbose:
        print()
    for i, (x, y) in enumerate(zip(first, second)):
        x = x[0].cpu().detach().numpy()
        y = y[0].cpu().detach().numpy()
        if verbose:
            print("x = {}".format(x.flatten()))
            print("y = {}".format(y.flatten()))
            print('-' * 80)
        np.testing.assert_allclose(x, y, err_msg="Index: {}".format(i), atol=atol)


def zero_grad(variables):
    for variable in variables:
        variable.grad.zero_()


device = torch.device("cuda")
kwargs_fp32 = {'dtype': torch.float, 'device': device, 'requires_grad': True}
kwargs_fp16 = {'dtype': torch.half, 'device': device, 'requires_grad': True}


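# Mirrors the reference BertEncoder interface, but builds the stack out of
# DeepSpeedTransformerLayer modules so outputs can be compared one-to-one.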
class DSEncoder(nn.Module):
    def __init__(self, config, weights, biases):
        super(DSEncoder, self).__init__()
        self.FinalLayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.layer = nn.ModuleList([
            copy.deepcopy(DeepSpeedTransformerLayer(i,
                                                    config,
                                                    weights,
                                                    biases))
            for i in range(config.num_hidden_layers)
        ])
        self.grads = []
        self.pre_or_post = config.pre_layer_norm

    def forward(self,
                hidden_states,
                attention_mask,
                output_all_encoded_layers=True,
                checkpoint_activations=False):
        all_encoder_layers = []

        def custom(start, end):
            def custom_forward(*inputs):
                layers = self.layer[start:end]
                x_ = inputs[0]
                for layer in layers:
                    x_ = layer(x_, inputs[1])
                return x_

            return custom_forward

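        # Checkpoint activations in chunks of roughly sqrt(num_layers) layers,
        # trading recomputation for reduced activation memory.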
        if checkpoint_activations:
            l = 0
            num_layers = len(self.layer)
            chunk_length = math.ceil(math.sqrt(num_layers))
            while l < num_layers:
                hidden_states = checkpoint.checkpoint(custom(l,
                                                             l + chunk_length),
                                                      hidden_states,
                                                      attention_mask * 1)
                l += chunk_length
            # decoder layers
        else:
            for i, layer_module in enumerate(self.layer):
                hidden_states = layer_module(hidden_states, attention_mask)
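                # Record each layer's output gradient so tests can inspect it
                # through get_grads().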
                hidden_states.register_hook(
                    lambda x,
                    i=i,
                    self=self: self.grads.append([x,
                                                  "hidden_state"]))

                if output_all_encoded_layers:
                    all_encoder_layers.append(hidden_states)

        if not output_all_encoded_layers or checkpoint_activations:
            if (self.pre_or_post):
                hidden_states = self.FinalLayerNorm(hidden_states)
            all_encoder_layers.append(hidden_states)
        return all_encoder_layers

    def get_grads(self):
        return self.grads


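# Construct the reference (pre- or post-LayerNorm) BertEncoder and the
# DeepSpeed encoder from the same randomly initialized weights and biases.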
def create_models(ds_config):
    bert_config = BertConfig(vocab_size_or_config_json_file=119547,
                             hidden_size=ds_config.hidden_size,
                             num_hidden_layers=ds_config.num_hidden_layers,
                             num_attention_heads=ds_config.heads,
                             batch_size=ds_config.batch_size,
                             intermediate_size=ds_config.intermediate_size,
                             hidden_act="gelu",
                             hidden_dropout_prob=ds_config.hidden_dropout_ratio,
                             attention_probs_dropout_prob=ds_config.attn_dropout_ratio,
                             max_position_embeddings=512,
                             type_vocab_size=2,
                             initializer_range=ds_config.initializer_range,
                             fp16=ds_config.fp16)

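    # Parameters shared by both encoders. Judging by the shapes below,
    # weights[0:4] are the attention Q/K/V/output projections, weights[4] the
    # attention layer-norm gamma, weights[5:7] the feed-forward projections,
    # and weights[7] the output layer-norm gamma; biases follow the same order.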
    weights = []
    biases = []

    for i in range(4):
        weights.append(
            nn.Parameter(torch.Tensor(ds_config.hidden_size,
                                      ds_config.hidden_size)))
        weights[i].data.normal_(mean=0.0, std=ds_config.initializer_range)

    weights.append(nn.Parameter(torch.Tensor(ds_config.hidden_size)))
    weights[4].data.fill_(1.0)
    weights.append(
        nn.Parameter(torch.Tensor(ds_config.intermediate_size,
                                  ds_config.hidden_size)))
    weights[5].data.normal_(mean=0.0, std=ds_config.initializer_range)
    weights.append(
        nn.Parameter(torch.Tensor(ds_config.hidden_size,
                                  ds_config.intermediate_size)))
    weights[6].data.normal_(mean=0.0, std=ds_config.initializer_range)
    weights.append(nn.Parameter(torch.Tensor(ds_config.hidden_size)))
    weights[7].data.fill_(1.0)

    biases.append(nn.Parameter(torch.Tensor(ds_config.hidden_size)))
    biases[0].data.zero_()
    for i in range(4):
        biases.append(nn.Parameter(torch.Tensor(ds_config.hidden_size)))
        biases[i + 1].data.zero_()
    biases.append(nn.Parameter(torch.Tensor(ds_config.intermediate_size)))
    biases[5].data.zero_()
    biases.append(nn.Parameter(torch.Tensor(ds_config.hidden_size)))
    biases[6].data.zero_()
    biases.append(nn.Parameter(torch.Tensor(ds_config.hidden_size)))
    biases[7].data.zero_()

    if (ds_config.pre_layer_norm):
        bert_encoder = BertEncoderPreln(bert_config, weights, biases)
    else:
        bert_encoder = BertEncoderPostln(bert_config, weights, biases)
    ds_encoder = DSEncoder(ds_config, weights, biases)

    if ds_config.fp16:
        bert_encoder.half()
        ds_encoder.half()

    bert_encoder.cuda()
    ds_encoder.cuda()

    return bert_encoder, ds_encoder


def set_seed(seed):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)


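# Build both encoders, run a forward pass on identical random inputs, and
# check that the outputs agree within the given tolerance.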
def run_forward(ds_config, seq_len, atol=1e-2, verbose=False, test_bsz=None):
    set_seed(123)
    bert_encoder, ds_encoder = create_models(ds_config)

    bsz = ds_config.batch_size if test_bsz is None else test_bsz

    # prepare test data
    kwargs = kwargs_fp16 if ds_config.fp16 else kwargs_fp32
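    # Random hidden states plus a random additive attention mask shaped for
    # broadcasting over heads and query positions.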
    hidden_states = torch.randn(bsz, seq_len, ds_config.hidden_size, **kwargs)
    input_mask = torch.randn(bsz, 1, 1, seq_len, **kwargs)

    # run baseline
    base_results = bert_encoder(hidden_states,
                                input_mask,
                                output_all_encoded_layers=False,
                                checkpoint_activations=False)

    # run ds
    ds_results = ds_encoder(hidden_states,
                            input_mask,
                            output_all_encoded_layers=False,
                            checkpoint_activations=False)

    # check forward outputs
    check_equal(base_results, ds_results, atol=atol, verbose=verbose)


# FP16 test cases can only run on devices that support FP16.
@pytest.mark.parametrize('batch_size, hidden_size, seq_len, heads, num_layers, is_preln, use_fp16',
                         [
                             (8,256,53,4,3,True,False),
                             (8,256,52,4,3,True,True),
                             (3,1024,51,16,3,True,False),
                             (3,1024,54,16,3,True,True),
                             (8,1024,381,16,3,True,False),
                             (8,1024,384,16,3,True,True),
                             (8,1024,119,16,3,True,False),
                             (8,1024,120,16,3,True,True),
                             (8,1024,509,16,3,True,False),
                             (8,1024,512,16,3,True,True),
                             (64,1024,56,16,3,False,False),
                             (64,1024,53,16,3,False,True),
                             (64,1024,24,16,3,False,False),
                             (64,1024,21,16,3,False,True),
                             (8,1024,384,16,3,False,False),
                             (8,1024,384,16,3,False,True),
                             (8,1024,512,16,3,False,False),
                             (8,1024,511,16,3,False,True),
                             (8,1536,128,24,3,False,False),
                             (8,1536,128,24,3,False,True),
                             (8,2048,128,32,3,False,False),
                             (8,2048,128,32,3,False,True),
                             (8,2560,128,40,3,False,False),
                             (8,2560,128,40,3,False,True),
                             (8,128,128,2,3,True,False),
                             (8,128,128,2,3,True,True),
                             (8,4096,128,64,3,True,True),
                             (8,8192,128,64,3,False,True),
                         ]) # yapf: disable
def test_forward(batch_size,
                 hidden_size,
                 seq_len,
                 heads,
                 num_layers,
                 is_preln,
                 use_fp16):
    # Only run FP16 test cases on devices with compute capability 7 or higher.
    major, _ = torch.cuda.get_device_capability()
    if major < 7 and use_fp16 is True:
        return

    ds_config = DeepSpeedTransformerConfig()
    ds_config.layer_id = None
    ds_config.batch_size = batch_size
    ds_config.hidden_size = hidden_size
    ds_config.intermediate_size = 4 * hidden_size
    ds_config.heads = heads
    ds_config.attn_dropout_ratio = 0.0
    ds_config.hidden_dropout_ratio = 0.0
    ds_config.num_hidden_layers = num_layers
    ds_config.pre_layer_norm = is_preln
    ds_config.initializer_range = 0.02
    ds_config.fp16 = use_fp16

    run_forward(ds_config, seq_len, atol=2e-2)


@pytest.mark.parametrize('batch_size, small_bsz, hidden_size, seq_len, heads, num_layers, is_preln, use_fp16',
                         [
                             (8,3,1024,512,16,3,True,False),
                             (8,7,1024,512,16,3,True,True),
                             (8,3,1024,512,16,3,False,False),
                             (8,7,1024,512,16,3,False,True),
                         ]) # yapf: disable
def test_forward_with_small_bsz(batch_size,
                                small_bsz,
                                hidden_size,
                                seq_len,
                                heads,
                                num_layers,
                                is_preln,
                                use_fp16):
    # Only run FP16 test cases on devices with compute capability 7 or higher.
    major, _ = torch.cuda.get_device_capability()
    if major < 7 and use_fp16 is True:
        return

    ds_config = DeepSpeedTransformerConfig()
    ds_config.layer_id = None
    ds_config.batch_size = batch_size
    ds_config.hidden_size = hidden_size
    ds_config.intermediate_size = 4 * hidden_size
    ds_config.heads = heads
    ds_config.attn_dropout_ratio = 0.0
    ds_config.hidden_dropout_ratio = 0.0
    ds_config.num_hidden_layers = num_layers
    ds_config.pre_layer_norm = is_preln
    ds_config.initializer_range = 0.02
    ds_config.fp16 = use_fp16

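    # Exercise the kernels with a runtime batch smaller than the configured
    # batch_size.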
    run_forward(ds_config, seq_len, atol=2e-2, test_bsz=small_bsz)

@pytest.mark.parametrize('batch_size, hidden_size, seq_len, heads, num_layers, is_preln, use_fp16',
                         [
                             (64,1024,128,16,3,True,False),
                             (64,1024,128,16,3,True,True),
                             (64,1024,128,16,3,False,False),
                             (64,1024,128,16,3,False,True),
                         ]) # yapf: disable
def test_forward_stochastic(batch_size,
                            hidden_size,
                            seq_len,
                            heads,
                            num_layers,
                            is_preln,
                            use_fp16):
    # Only run FP16 test cases on devices with compute capability 7 or higher.
    major, _ = torch.cuda.get_device_capability()
    if major < 7 and use_fp16 is True:
        return

    ds_config = DeepSpeedTransformerConfig()
    ds_config.layer_id = None
    ds_config.batch_size = batch_size
    ds_config.hidden_size = hidden_size
    ds_config.intermediate_size = 4 * hidden_size
    ds_config.heads = heads
    ds_config.attn_dropout_ratio = 0.0
    ds_config.hidden_dropout_ratio = 0.0
    ds_config.num_hidden_layers = num_layers
    ds_config.pre_layer_norm = is_preln
    ds_config.initializer_range = 0.02
    ds_config.fp16 = use_fp16
    ds_config.stochastic_mode = True
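    # Stochastic mode may apply non-deterministic optimizations, so allow a
    # looser tolerance.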

    run_forward(ds_config, seq_len, atol=7e-2)