"vscode:/vscode.git/clone" did not exist on "f87a58cd96d45de73c9a8330a06b2ab56749a7fa"
bert_dataset.py 9.36 KB
Newer Older
Mohammad Shoeybi's avatar
Mohammad Shoeybi committed
1
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""BERT Style dataset."""

import os
import time

import numpy as np
import torch
from torch.utils.data import Dataset

from megatron import get_tokenizer, get_args, print_rank_0
from megatron import mpu
from megatron.data.dataset_utils import get_a_and_b_segments
from megatron.data.dataset_utils import truncate_segments
from megatron.data.dataset_utils import create_tokens_and_tokentypes
from megatron.data.dataset_utils import pad_and_convert_to_numpy
from megatron.data.dataset_utils import create_masked_lm_predictions


class BertDataset(Dataset):

    def __init__(self, name, indexed_dataset, data_prefix,
                 num_epochs, max_num_samples, masked_lm_prob,
                 max_seq_length, short_seq_prob, seed):

        # Params to store.
        self.name = name
        self.seed = seed
        self.masked_lm_prob = masked_lm_prob
        self.max_seq_length = max_seq_length

        # Dataset.
        self.indexed_dataset = indexed_dataset

        # Build the samples mapping.
        self.samples_mapping = get_samples_mapping_(self.indexed_dataset,
                                                    data_prefix,
                                                    num_epochs,
                                                    max_num_samples,
                                                    self.max_seq_length,
                                                    short_seq_prob,
                                                    self.seed,
                                                    self.name)

        # Vocab stuff.
        tokenizer = get_tokenizer()
        self.vocab_id_list = list(tokenizer.inv_vocab.keys())
        self.vocab_id_to_token_dict = tokenizer.inv_vocab
        self.cls_id = tokenizer.cls
        self.sep_id = tokenizer.sep
        self.mask_id = tokenizer.mask
        self.pad_id = tokenizer.pad

    def __len__(self):
        return self.samples_mapping.shape[0]

    def __getitem__(self, idx):
        start_idx, end_idx, seq_length = self.samples_mapping[idx]
        sample = [self.indexed_dataset[i] for i in range(start_idx, end_idx)]
        # Note that this rng state should be numpy and not python since
        # python randint is inclusive whereas the numpy one is exclusive.
        np_rng = np.random.RandomState(seed=(self.seed + idx))
        return build_training_sample(sample, seq_length,
                                     self.max_seq_length,  # needed for padding
                                     self.vocab_id_list,
                                     self.vocab_id_to_token_dict,
                                     self.cls_id, self.sep_id,
                                     self.mask_id, self.pad_id,
                                     self.masked_lm_prob, np_rng)


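# A minimal usage sketch (hypothetical arguments; assumes an initialized
# Megatron/distributed environment and a sentence-level `indexed_dataset`):
# because __getitem__ seeds a fresh numpy RandomState with (seed + idx),
# fetching the same index twice yields an identical masked sample.
def _example_reproducible_sample(indexed_dataset):
    dataset = BertDataset(name='train', indexed_dataset=indexed_dataset,
                          data_prefix='my-corpus', num_epochs=None,
                          max_num_samples=1000, masked_lm_prob=0.15,
                          max_seq_length=512, short_seq_prob=0.1, seed=1234)
    sample_a = dataset[0]
    sample_b = dataset[0]
    assert np.array_equal(sample_a['text'], sample_b['text'])

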
def get_samples_mapping_(indexed_dataset,
                         data_prefix,
                         num_epochs,
                         max_num_samples,
                         max_seq_length,
                         short_seq_prob,
                         seed,
                         name):
    if not num_epochs:
        if not max_num_samples:
            raise ValueError("Need to specify either max_num_samples "
                             "or num_epochs")
        num_epochs = np.iinfo(np.int32).max - 1
    if not max_num_samples:
        max_num_samples = np.iinfo(np.int64).max - 1

    # Filename of the index mapping
    indexmap_filename = data_prefix
    indexmap_filename += '_{}_indexmap'.format(name)
    if num_epochs != (np.iinfo(np.int32).max - 1):
        indexmap_filename += '_{}ep'.format(num_epochs)
    if max_num_samples != (np.iinfo(np.int64).max - 1):
        indexmap_filename += '_{}mns'.format(max_num_samples)
    indexmap_filename += '_{}msl'.format(max_seq_length)
    indexmap_filename += '_{:0.2f}ssp'.format(short_seq_prob)
    indexmap_filename += '_{}s'.format(seed)
    indexmap_filename += '.npy'
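    # With hypothetical arguments (data_prefix='my-corpus', name='train',
    # max_num_samples=1000, max_seq_length=512, short_seq_prob=0.1,
    # seed=1234) this yields:
    #   my-corpus_train_indexmap_1000mns_512msl_0.10ssp_1234s.npy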

    # Build the index mapping if it does not exist.
    if torch.distributed.get_rank() == 0 and \
       not os.path.isfile(indexmap_filename):
        print(' > WARNING: could not find index map file {}, building '
              'the indices on rank 0 ...'.format(indexmap_filename))

        # Make sure the types match the helpers input types.
        assert indexed_dataset.doc_idx.dtype == np.int64
        assert indexed_dataset.sizes.dtype == np.int32

        # Build samples mapping
        verbose = torch.distributed.get_rank() == 0
        start_time = time.time()
        print_rank_0(' > building samples index mapping for {} ...'.format(
            name))
        # First compile and then import.
        from megatron.data.dataset_utils import compile_helper
        compile_helper()
        from megatron.data import helpers
        samples_mapping = helpers.build_mapping(
            indexed_dataset.doc_idx,
            indexed_dataset.sizes,
            num_epochs,
            max_num_samples,
            max_seq_length - 3,  # account for added [CLS], [SEP], [SEP] tokens
            short_seq_prob,
            seed,
            verbose)
        print_rank_0(' > done building samples index mapping')
        np.save(indexmap_filename, samples_mapping, allow_pickle=True)
        print_rank_0(' > saved the index mapping in {}'.format(
            indexmap_filename))
        print_rank_0(' > elapsed time to build and save samples mapping '
                     '(seconds): {:4f}'.format(
                         time.time() - start_time))
    # Make sure all the ranks have built the mapping. This should be a
    # barrier, but the nccl barrier assumes device_index=rank, which is
    # not the case for the model parallel case.
    counts = torch.cuda.LongTensor([1])
    torch.distributed.all_reduce(counts, group=mpu.get_data_parallel_group())
    assert counts[0].item() == torch.distributed.get_world_size(
        group=mpu.get_data_parallel_group())

    # Load the samples mapping.
    print_rank_0(' > loading indexed mapping from {}'.format(
        indexmap_filename))
    start_time = time.time()
    samples_mapping = np.load(indexmap_filename, allow_pickle=True,
                              mmap_mode='r')
    print_rank_0('    loaded indexed file in {:3.3f} seconds'.format(
        time.time() - start_time))
    print_rank_0('    total number of samples: {}'.format(
        samples_mapping.shape[0]))

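    # Each row of the mapping, as consumed in BertDataset.__getitem__ above,
    # is a triple [start_index, end_index, target_seq_length]: a half-open
    # range of sentence indices into `indexed_dataset` that form one sample,
    # plus the target sequence length drawn for it.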
    return samples_mapping


def build_training_sample(sample,
                          target_seq_length, max_seq_length,
                          vocab_id_list, vocab_id_to_token_dict,
                          cls_id, sep_id, mask_id, pad_id,
                          masked_lm_prob, np_rng):
    """Build training sample.

    Arguments:
        sample: A list of sentences in which each sentence is a list of
            token ids.
        target_seq_length: Desired sequence length.
        max_seq_length: Maximum length of the sequence. All values are padded to
            this length.
        vocab_id_list: List of vocabulary ids. Used to pick a random id.
        vocab_id_to_token_dict: A dictionary from vocab ids to text tokens.
        cls_id: Start of example id.
        sep_id: Separator id.
        mask_id: Mask token id.
        pad_id: Padding token id.
        masked_lm_prob: Probability to mask tokens.
        np_rng: Random number generator. Note that this rng state should be
              numpy and not python since python randint is inclusive for
              the upper bound whereas the numpy one is exclusive.
    """

    # We assume that we have at least two sentences in the sample
    assert len(sample) > 1
    assert target_seq_length <= max_seq_length

    # Divide sample into two segments (A and B).
    tokens_a, tokens_b, is_next_random = get_a_and_b_segments(sample, np_rng)

    # Truncate to `target_seq_length`.
    max_num_tokens = target_seq_length
    truncated = truncate_segments(tokens_a, tokens_b, len(tokens_a),
                                  len(tokens_b), max_num_tokens, np_rng)

    # Build tokens and tokentypes.
    tokens, tokentypes = create_tokens_and_tokentypes(tokens_a, tokens_b,
                                                      cls_id, sep_id)

    # Masking.
    max_predictions_per_seq = masked_lm_prob * max_num_tokens
    (tokens, masked_positions, masked_labels, _) = create_masked_lm_predictions(
        tokens, vocab_id_list, vocab_id_to_token_dict, masked_lm_prob,
        cls_id, sep_id, mask_id, max_predictions_per_seq, np_rng)

    # Padding.
    tokens_np, tokentypes_np, labels_np, padding_mask_np, loss_mask_np \
        = pad_and_convert_to_numpy(tokens, tokentypes, masked_positions,
                                   masked_labels, pad_id, max_seq_length)

    train_sample = {
        'text': tokens_np,
        'types': tokentypes_np,
        'labels': labels_np,
        'is_random': int(is_next_random),
        'loss_mask': loss_mask_np,
        'padding_mask': padding_mask_np,
        'truncated': int(truncated)}
    return train_sample
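

# A toy sketch of calling build_training_sample directly, with hypothetical
# token and special-token ids (a real run takes these from the tokenizer):
def _example_build_training_sample():
    np_rng = np.random.RandomState(seed=1234)
    sample = [[5, 6, 7, 8], [9, 10, 11]]  # two "sentences" of token ids
    vocab_id_list = list(range(5, 12))
    vocab_id_to_token_dict = {i: 'tok{}'.format(i) for i in vocab_id_list}
    return build_training_sample(sample,
                                 target_seq_length=10, max_seq_length=16,
                                 vocab_id_list=vocab_id_list,
                                 vocab_id_to_token_dict=vocab_id_to_token_dict,
                                 cls_id=1, sep_id=2, mask_id=3, pad_id=0,
                                 masked_lm_prob=0.15, np_rng=np_rng)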