# coding=utf-8
# Copyright (c) 2019, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""BERT Style dataset."""

import os
import time

import numpy as np
import torch
from torch.utils.data import Dataset

from megatron import get_tokenizer
from megatron import mpu
from megatron.data import helpers
from megatron.data.dataset_utils import build_training_sample
from megatron.data.indexed_dataset import make_dataset as make_indexed_dataset
from megatron.data.ict_dataset import InverseClozeDataset
from megatron import print_rank_0


def build_train_valid_test_datasets(data_prefix, data_impl, splits_string,
                                    train_valid_test_num_samples,
                                    max_seq_length, masked_lm_prob,
                                    short_seq_prob, seed, skip_warmup,
                                    ict_dataset=False):

    # Indexed dataset.
    indexed_dataset = get_indexed_dataset_(data_prefix,
                                           data_impl,
                                           skip_warmup)

    # Get start and end indices of train/valid/test into doc-idx.
    # Note that doc-idx is designed to be num-docs + 1 so we can
    # easily iterate over it.
    total_num_of_documents = indexed_dataset.doc_idx.shape[0] - 1
    splits = get_train_valid_test_split_(splits_string, total_num_of_documents)

    # Print stats about the splits.
    print_rank_0(' > dataset split:')
    def print_split_stats(name, index):
        print_rank_0('    {}:'.format(name))
        print_rank_0('     document indices in [{}, {}) total of {} '
                     'documents'.format(splits[index], splits[index + 1],
                                        splits[index + 1] - splits[index]))
        start_index = indexed_dataset.doc_idx[splits[index]]
        end_index = indexed_dataset.doc_idx[splits[index + 1]]
        print_rank_0('     sentence indices in [{}, {}) total of {} '
                     'sentences'.format(start_index, end_index,
                                        end_index - start_index))
    print_split_stats('train', 0)
    print_split_stats('validation', 1)
    print_split_stats('test', 2)

    def build_dataset(index, name):
        dataset = None
        if splits[index + 1] > splits[index]:
            # Get the pointer to the original doc-idx so we can set it later.
            doc_idx_ptr = indexed_dataset.get_doc_idx()
            # Slice the doc-idx
            start_index = splits[index]
            # Add +1 so we can index into the dataset to get the upper bound.
            end_index = splits[index + 1] + 1
            # New doc_idx view.
            indexed_dataset.set_doc_idx(doc_idx_ptr[start_index:end_index])
            # Build the dataset accordingly.
            kwargs = dict(
                name=name,
                indexed_dataset=indexed_dataset,
                data_prefix=data_prefix,
                num_epochs=None,
                max_num_samples=train_valid_test_num_samples[index],
                max_seq_length=max_seq_length,
                short_seq_prob=short_seq_prob,
                seed=seed
            )

            if ict_dataset:
                dataset = InverseClozeDataset(**kwargs)
            else:
                dataset = BertDataset(masked_lm_prob=masked_lm_prob, **kwargs)
            # Restore the original pointer so the indexed dataset is left
            # unchanged for the remaining splits.
            indexed_dataset.set_doc_idx(doc_idx_ptr)
            # Checks.
            assert indexed_dataset.doc_idx[0] == 0
            assert indexed_dataset.doc_idx.shape[0] == \
                (total_num_of_documents + 1)
        return dataset

    train_dataset = build_dataset(0, 'train')
    valid_dataset = build_dataset(1, 'valid')
    test_dataset = build_dataset(2, 'test')

    return (train_dataset, valid_dataset, test_dataset)
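
# Example usage (a minimal sketch; the argument values below are
# hypothetical, and the usual megatron initialization -- distributed setup
# and tokenizer construction -- is assumed to have already happened):
#
#     train_ds, valid_ds, test_ds = build_train_valid_test_datasets(
#         data_prefix='my-corpus_text_sentence',
#         data_impl='mmap',
#         splits_string='969,30,1',
#         train_valid_test_num_samples=[1000000, 10000, 1000],
#         max_seq_length=512,
#         masked_lm_prob=0.15,
#         short_seq_prob=0.1,
#         seed=1234,
#         skip_warmup=True,
#         ict_dataset=False)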


class BertDataset(Dataset):

    def __init__(self, name, indexed_dataset, data_prefix,
                 num_epochs, max_num_samples, masked_lm_prob,
                 max_seq_length, short_seq_prob, seed):

        # Params to store.
        self.name = name
        self.seed = seed
        self.masked_lm_prob = masked_lm_prob
        self.max_seq_length = max_seq_length

        # Dataset.
        self.indexed_dataset = indexed_dataset

        # Build the samples mapping.
        self.samples_mapping = get_samples_mapping_(self.indexed_dataset,
                                                    data_prefix,
                                                    num_epochs,
                                                    max_num_samples,
                                                    self.max_seq_length,
                                                    short_seq_prob,
                                                    self.seed,
                                                    self.name)

        # Vocabulary and special token ids.
        tokenizer = get_tokenizer()
        self.vocab_id_list = list(tokenizer.inv_vocab.keys())
        self.vocab_id_to_token_dict = tokenizer.inv_vocab
        self.cls_id = tokenizer.cls
        self.sep_id = tokenizer.sep
        self.mask_id = tokenizer.mask
        self.pad_id = tokenizer.pad

    def __len__(self):
        return self.samples_mapping.shape[0]

    def __getitem__(self, idx):

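        # Each row of samples_mapping is a (start sentence index,
        # end sentence index, target sequence length) triple produced by
        # helpers.build_mapping in get_samples_mapping_().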
        start_index, end_index, seq_length = self.samples_mapping[idx]
        sample = []
        for index in range(start_index, end_index):
            sample.append(self.indexed_dataset[index])
        # Note that this rng state should be numpy and not python since
        # python randint is inclusive whereas the numpy one is exclusive.
        np_rng = np.random.RandomState(seed=(self.seed + idx))
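        # build_training_sample (imported from megatron.data.dataset_utils)
        # assembles the final training example from the sentences gathered
        # above: token ids with the special markers, token types,
        # masked-lm labels and the corresponding masks.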
        return build_training_sample(sample, seq_length,
                                     self.max_seq_length, # needed for padding
                                     self.vocab_id_list,
                                     self.vocab_id_to_token_dict,
                                     self.cls_id, self.sep_id,
                                     self.mask_id, self.pad_id,
                                     self.masked_lm_prob, np_rng)


def get_indexed_dataset_(data_prefix, data_impl, skip_warmup):

    print_rank_0(' > building dataset index ...')

    start_time = time.time()
    indexed_dataset = make_indexed_dataset(data_prefix,
                                           data_impl,
                                           skip_warmup)
    assert indexed_dataset.sizes.shape[0] == indexed_dataset.doc_idx[-1]
    print_rank_0(' > finished creating indexed dataset in {:4f} '
                 'seconds'.format(time.time() - start_time))

    print_rank_0(' > indexed dataset stats:')
    print_rank_0('    number of documents: {}'.format(
        indexed_dataset.doc_idx.shape[0] - 1))
    print_rank_0('    number of sentences: {}'.format(
        indexed_dataset.sizes.shape[0]))

    return indexed_dataset


def get_train_valid_test_split_(splits_string, size):
    """ Get dataset splits from comma or '/' separated string list."""

    splits = []
    if splits_string.find(',') != -1:
        splits = [float(s) for s in splits_string.split(',')]
    elif splits_string.find('/') != -1:
        splits = [float(s) for s in splits_string.split('/')]
    else:
        splits = [float(splits_string)]
    while len(splits) < 3:
        splits.append(0.)
    splits = splits[:3]
    splits_sum = sum(splits)
    assert splits_sum > 0.0
    splits = [split/splits_sum for split in splits]
    splits_index = [0]
    for index, split in enumerate(splits):
        splits_index.append(splits_index[index] +
                            int(round(split * float(size))))
    diff = splits_index[-1] - size
    for index in range(1, len(splits_index)):
        splits_index[index] -= diff
    assert len(splits_index) == 4
    assert splits_index[-1] == size
    return splits_index
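
# Illustration of the split logic above: for splits_string='969,30,1' and
# size=1000, the weights normalize to 0.969/0.030/0.001 and the returned
# boundaries are [0, 969, 999, 1000], i.e. documents [0, 969) for train,
# [969, 999) for validation and [999, 1000) for test.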


def get_samples_mapping_(indexed_dataset,
                         data_prefix,
                         num_epochs,
                         max_num_samples,
                         max_seq_length,
                         short_seq_prob,
                         seed,
                         name):
    if not num_epochs:
        if not max_num_samples:
            raise ValueError("Need to specify either max_num_samples "
                             "or num_epochs")
        num_epochs = np.iinfo(np.int32).max - 1
    if not max_num_samples:
        max_num_samples = np.iinfo(np.int64).max - 1

    # Filename of the index mapping
    indexmap_filename = data_prefix
    indexmap_filename += '_{}_indexmap'.format(name)
    if num_epochs != (np.iinfo(np.int32).max - 1):
        indexmap_filename += '_{}ep'.format(num_epochs)
    if max_num_samples != (np.iinfo(np.int64).max - 1):
        indexmap_filename += '_{}mns'.format(max_num_samples)
    indexmap_filename += '_{}msl'.format(max_seq_length)
    indexmap_filename += '_{:0.2f}ssp'.format(short_seq_prob)
    indexmap_filename += '_{}s'.format(seed)
    indexmap_filename += '.npy'
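    # Illustrative result of the name construction above (assuming
    # data_prefix='my-corpus_text_sentence', name='train',
    # max_num_samples=1000000, max_seq_length=512, short_seq_prob=0.1,
    # seed=1234 and num_epochs left unset):
    #   my-corpus_text_sentence_train_indexmap_1000000mns_512msl_0.10ssp_1234s.npy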

    # Build the index mapping if it does not exist.
    if torch.distributed.get_rank() == 0 and \
       not os.path.isfile(indexmap_filename):
        print(' > WARNING: could not find index map file {}, building '
              'the indices on rank 0 ...'.format(indexmap_filename))

        # Make sure the types match the helpers input types.
        assert indexed_dataset.doc_idx.dtype == np.int64
        assert indexed_dataset.sizes.dtype == np.int32

        # Build samples mapping
        verbose = torch.distributed.get_rank() == 0
        start_time = time.time()
        print_rank_0(' > building samples index mapping for {} ...'.format(
            name))
        samples_mapping = helpers.build_mapping(
            indexed_dataset.doc_idx,
            indexed_dataset.sizes,
            num_epochs,
            max_num_samples,
            max_seq_length - 3,  # account for [CLS] and two [SEP] tokens
            short_seq_prob,
            seed,
            verbose)
        print_rank_0(' > done building samples index mapping')
        np.save(indexmap_filename, samples_mapping, allow_pickle=True)
        print_rank_0(' > saved the index mapping in {}'.format(
            indexmap_filename))
        print_rank_0(' > elapsed time to build and save samples mapping '
                     '(seconds): {:4f}'.format(
                         time.time() - start_time))
    # This should be a barrier, but the NCCL barrier assumes
    # device_index == rank, which does not hold in the
    # model-parallel case.
    counts = torch.cuda.LongTensor([1])
    torch.distributed.all_reduce(counts, group=mpu.get_data_parallel_group())
    assert counts[0].item() == torch.distributed.get_world_size(
        group=mpu.get_data_parallel_group())

    # Load the index mapping.
    print_rank_0(' > loading indexed mapping from {}'.format(
        indexmap_filename))
    start_time = time.time()
    samples_mapping = np.load(indexmap_filename, allow_pickle=True)
    print_rank_0('    loaded indexed file in {:3.3f} seconds'.format(
        time.time() - start_time))
    print_rank_0('    total number of samples: {}'.format(
        samples_mapping.shape[0]))

    return samples_mapping