# coding=utf-8
# Copyright (c) 2019, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""BERT Style dataset."""

import os
import time

import numpy as np
import torch
from torch.utils.data import Dataset

from megatron import mpu
from megatron.data import helpers
from megatron.tokenizer.bert_tokenization import FullTokenizer as FullBertTokenizer
from megatron.data.dataset_utils import build_training_sample
from megatron.data.indexed_dataset import make_dataset as make_indexed_dataset
from megatron import print_rank_0


def build_train_valid_test_datasets(vocab_file, data_prefix, data_impl,
                                    splits_string, train_valid_test_num_samples,
                                    max_seq_length, masked_lm_prob,
                                    short_seq_prob, seed, skip_warmup):
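    """Build train, validation, and test BertDatasets from a single
    indexed dataset, split along document boundaries.

    Illustrative call (the argument values below are assumptions, not
    recommended defaults):

        train_ds, valid_ds, test_ds = build_train_valid_test_datasets(
            vocab_file='bert-vocab.txt',
            data_prefix='my-corpus_text_sentence',
            data_impl='mmap',
            splits_string='949,50,1',
            train_valid_test_num_samples=[1000000, 10000, 1000],
            max_seq_length=512,
            masked_lm_prob=0.15,
            short_seq_prob=0.1,
            seed=1234,
            skip_warmup=True)
    """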

    # Tokenizer is the same for all splits.
    tokenizer = FullBertTokenizer(vocab_file, do_lower_case=True)
    print_rank_0(' > using full BERT tokenizer with vocabulary size: {}'.format(
        tokenizer.vocab_size()))

    # Indexed dataset.
    indexed_dataset = get_indexed_dataset_(data_prefix,
                                           data_impl,
                                           skip_warmup)

    # Get start and end indices of train/valid/test into doc-idx
    # Note that doc-idx is designed to be num-docs + 1 so we can
    # easily iterate over it.
    total_num_of_documents = indexed_dataset.doc_idx.shape[0] - 1
    splits = get_train_valid_test_split_(splits_string, total_num_of_documents)

    # Print stats about the splits.
    print_rank_0(' > dataset split:')
    def print_split_stats(name, index):
        print_rank_0('    {}:'.format(name))
        print_rank_0('     document indices in [{}, {}) total of {} '
                     'documents'.format(splits[index], splits[index + 1],
                                        splits[index + 1] - splits[index]))
        start_index = indexed_dataset.doc_idx[splits[index]]
        end_index = indexed_dataset.doc_idx[splits[index + 1]]
        print_rank_0('     sentence indices in [{}, {}) total of {} '
                     'sentences'.format(start_index, end_index,
                                        end_index - start_index))
    print_split_stats('train', 0)
    print_split_stats('validation', 1)
    print_split_stats('test', 2)

    def build_dataset(index, name):
        dataset = None
        if splits[index + 1] > splits[index]:
            # Get the pointer to the original doc-idx so we can set it later.
            doc_idx_ptr = indexed_dataset.get_doc_idx()
            # Slice the doc-idx
            start_index = splits[index]
            # Add +1 so we can index into the dataset to get the upper bound.
            end_index = splits[index + 1] + 1
            # New doc_idx view.
            indexed_dataset.set_doc_idx(doc_idx_ptr[start_index:end_index])
            # Build the dataset accordingly.
            dataset = BertDataset(
                name=name,
                indexed_dataset=indexed_dataset,
                tokenizer=tokenizer,
                data_prefix=data_prefix,
                num_epochs=None,
                max_num_samples=train_valid_test_num_samples[index],
                masked_lm_prob=masked_lm_prob,
                max_seq_length=max_seq_length,
                short_seq_prob=short_seq_prob,
                seed=seed)
            # Set the original pointer so dataset remains the main dataset.
            indexed_dataset.set_doc_idx(doc_idx_ptr)
            # Checks.
            assert indexed_dataset.doc_idx[0] == 0
            assert indexed_dataset.doc_idx.shape[0] == \
                (total_num_of_documents + 1)
        return dataset

    train_dataset = build_dataset(0, 'train')
    valid_dataset = build_dataset(1, 'valid')
    test_dataset = build_dataset(2, 'test')

    return (train_dataset, valid_dataset, test_dataset)


class BertDataset(Dataset):
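    """Map-style dataset of BERT pretraining samples.

    Each item gathers the sentences referenced by one row of the
    precomputed samples mapping and turns them into a training sample
    via build_training_sample (masking, padding, etc.).
    """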

    def __init__(self, name, indexed_dataset, tokenizer, data_prefix,
                 num_epochs, max_num_samples, masked_lm_prob,
                 max_seq_length, short_seq_prob, seed):

        # Params to store.
        self.name = name
        self.seed = seed
        self.masked_lm_prob = masked_lm_prob
        self.max_seq_length = max_seq_length

        # Tokenizer and dataset.
        self.tokenizer = tokenizer
        self.indexed_dataset = indexed_dataset

        # Build the samples mapping.
        self.samples_mapping = get_samples_mapping_(self.indexed_dataset,
                                                    data_prefix,
                                                    num_epochs,
                                                    max_num_samples,
                                                    self.max_seq_length,
                                                    short_seq_prob,
                                                    self.seed,
                                                    self.name)

        # Vocabulary and special token ids.
        self.vocab_id_list = list(self.tokenizer.inv_vocab.keys())
        self.vocab_id_to_token_dict = self.tokenizer.inv_vocab
        self.cls_id = self.tokenizer.vocab['[CLS]']
        self.sep_id = self.tokenizer.vocab['[SEP]']
        self.mask_id = self.tokenizer.vocab['[MASK]']
        self.pad_id = self.tokenizer.vocab['[PAD]']


    def num_tokens(self):
        return self.tokenizer.vocab_size()

    def __len__(self):
        return self.samples_mapping.shape[0]

    def __getitem__(self, idx):

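        # Each row of the samples mapping is
        # (start sentence index, end sentence index, target sequence length).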
        start_index, end_index, seq_length = self.samples_mapping[idx]
        sample = []
        for index in range(start_index, end_index):
            sample.append(self.indexed_dataset[index])
        # Note that this rng state should be numpy and not python since
        # python randint is inclusive whereas the numpy one is exclusive.
        np_rng = np.random.RandomState(seed=(self.seed + idx))
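        # Seeding with (seed + idx) makes the masking of a given sample
        # deterministic, independent of epoch or data loader worker.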
        return build_training_sample(sample, seq_length,
                                     self.max_seq_length, # needed for padding
                                     self.vocab_id_list,
                                     self.vocab_id_to_token_dict,
                                     self.cls_id, self.sep_id,
                                     self.mask_id, self.pad_id,
                                     self.masked_lm_prob, np_rng)


def get_indexed_dataset_(data_prefix, data_impl, skip_warmup):
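    """Load the indexed dataset and sanity-check that the number of
    sentences matches the last entry of the document index."""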

    print_rank_0(' > building dataset index ...')

    start_time = time.time()
    indexed_dataset = make_indexed_dataset(data_prefix,
                                           data_impl,
                                           skip_warmup)
    assert indexed_dataset.sizes.shape[0] == indexed_dataset.doc_idx[-1]
    print_rank_0(' > finished creating indexed dataset in {:4f} '
                 'seconds'.format(time.time() - start_time))

    print_rank_0(' > indexed dataset stats:')
    print_rank_0('    number of documents: {}'.format(
        indexed_dataset.doc_idx.shape[0] - 1))
    print_rank_0('    number of sentences: {}'.format(
        indexed_dataset.sizes.shape[0]))

    return indexed_dataset


def get_train_valid_test_split_(splits_string, size):
    """ Get dataset splits from comma or '/' separated string list."""

    splits = []
    if splits_string.find(',') != -1:
        splits = [float(s) for s in splits_string.split(',')]
    elif splits_string.find('/') != -1:
        splits = [float(s) for s in splits_string.split('/')]
    else:
        splits = [float(splits_string)]
    while len(splits) < 3:
        splits.append(0.)
    splits = splits[:3]
    splits_sum = sum(splits)
    assert splits_sum > 0.0
    splits = [split/splits_sum for split in splits]
    splits_index = [0]
    for index, split in enumerate(splits):
        splits_index.append(splits_index[index] +
                            int(round(split * float(size))))
    diff = splits_index[-1] - size
    for index in range(1, len(splits_index)):
        splits_index[index] -= diff
    assert len(splits_index) == 4
    assert splits_index[-1] == size
    return splits_index


def get_samples_mapping_(indexed_dataset,
                         data_prefix,
                         num_epochs,
                         max_num_samples,
                         max_seq_length,
                         short_seq_prob,
                         seed,
                         name):
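    """Build, or load from cache, the samples mapping for the dataset.

    The mapping is a NumPy array with one row per training sample; each
    row is (start sentence index, end sentence index, sequence length)
    into the indexed dataset. It is built once on rank 0 with the
    compiled `helpers` extension, saved next to `data_prefix` as an
    .npy file keyed by the build parameters, and then loaded by all
    ranks.
    """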
    if not num_epochs:
        if not max_num_samples:
            raise ValueError("Need to specify either max_num_samples "
                             "or num_epochs")
        num_epochs = np.iinfo(np.int32).max - 1
    if not max_num_samples:
        max_num_samples = np.iinfo(np.int64).max - 1

    # Filename of the index mapping
    indexmap_filename = data_prefix
    indexmap_filename += '_{}_indexmap'.format(name)
    if num_epochs != (np.iinfo(np.int32).max - 1):
        indexmap_filename += '_{}ep'.format(num_epochs)
    if max_num_samples != (np.iinfo(np.int64).max - 1):
        indexmap_filename += '_{}mns'.format(max_num_samples)
    indexmap_filename += '_{}msl'.format(max_seq_length)
    indexmap_filename += '_{:0.2f}ssp'.format(short_seq_prob)
    indexmap_filename += '_{}s'.format(seed)
    indexmap_filename += '.npy'

    # Build the index mapping on rank 0 if it does not already exist;
    # the other ranks wait at the pseudo-barrier below and then load it.
    if torch.distributed.get_rank() == 0 and \
       not os.path.isfile(indexmap_filename):
        print(' > WARNING: could not find index map file {}, building '
              'the indices on rank 0 ...'.format(indexmap_filename))

        # Make sure the types match the helpers input types.
        assert indexed_dataset.doc_idx.dtype == np.int64
        assert indexed_dataset.sizes.dtype == np.int32

        # Build samples mapping
        verbose = torch.distributed.get_rank() == 0
        start_time = time.time()
        print_rank_0(' > building samples index mapping for {} ...'.format(
            name))
        samples_mapping = helpers.build_mapping(
            indexed_dataset.doc_idx,
            indexed_dataset.sizes,
            num_epochs,
            max_num_samples,
            max_seq_length-3, # account for added tokens: [CLS] A [SEP] B [SEP]
            short_seq_prob,
            seed,
            verbose)
        print_rank_0(' > done building samples index mapping')
        np.save(indexmap_filename, samples_mapping, allow_pickle=True)
        print_rank_0(' > saved the index mapping in {}'.format(
            indexmap_filename))
        print_rank_0(' > elapsed time to build and save samples mapping '
                     '(seconds): {:4f}'.format(
                         time.time() - start_time))
    # Make sure all the ranks have built the mapping before proceeding.
    # This should be a barrier but nccl barrier assumes
    # device_index=rank which is not the case for model
    # parallel case.
    counts = torch.cuda.LongTensor([1])
    torch.distributed.all_reduce(counts, group=mpu.get_data_parallel_group())
    assert counts[0].item() == torch.distributed.get_world_size(
        group=mpu.get_data_parallel_group())

    # Load the samples mapping.
    print_rank_0(' > loading indexed mapping from {}'.format(
        indexmap_filename))
    start_time = time.time()
    samples_mapping = np.load(indexmap_filename, allow_pickle=True)
    print_rank_0('    loaded indexed file in {:3.3f} seconds'.format(
        time.time() - start_time))
    print_rank_0('    total number of samples: {}'.format(
        samples_mapping.shape[0]))

    return samples_mapping