# coding=utf-8
# Copyright (c) 2019, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""BERT Style dataset."""

import os
import time

import numpy as np
import torch
from torch.utils.data import Dataset

from megatron import get_tokenizer
from megatron import mpu
from megatron.data.dataset_utils import build_training_sample
from megatron.data.indexed_dataset import make_dataset as make_indexed_dataset
from megatron import print_rank_0


def build_train_valid_test_datasets(data_prefix, data_impl, splits_string,
                                    train_valid_test_num_samples,
                                    max_seq_length, masked_lm_prob,
                                    short_seq_prob, seed, skip_warmup):

    # Indexed dataset.
    indexed_dataset = get_indexed_dataset_(data_prefix,
                                           data_impl,
                                           skip_warmup)

    # Get start and end indices of train/valid/test into doc-idx
    # Note that doc-idx is designed to be num-docs + 1 so we can
    # easily iterate over it.
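    # (Illustrative example: with 3 documents whose sentences occupy rows
    # [0, 5), [5, 9) and [9, 12) of the sentence index, doc-idx is
    # [0, 5, 9, 12], so doc-idx[-1] equals the total sentence count.)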
    total_num_of_documents = indexed_dataset.doc_idx.shape[0] - 1
    splits = get_train_valid_test_split_(splits_string, total_num_of_documents)

    # Print stats about the splits.
    print_rank_0(' > dataset split:')

    def print_split_stats(name, index):
        print_rank_0('    {}:'.format(name))
        print_rank_0('     document indices in [{}, {}) total of {} '
                     'documents'.format(splits[index], splits[index + 1],
                                        splits[index + 1] - splits[index]))
        start_index = indexed_dataset.doc_idx[splits[index]]
        end_index = indexed_dataset.doc_idx[splits[index + 1]]
        print_rank_0('     sentence indices in [{}, {}) total of {} '
                     'sentences'.format(start_index, end_index,
                                        end_index - start_index))
    print_split_stats('train', 0)
    print_split_stats('validation', 1)
    print_split_stats('test', 2)

    def build_dataset(index, name):
        dataset = None
        if splits[index + 1] > splits[index]:
            # Get the pointer to the original doc-idx so we can set it later.
            doc_idx_ptr = indexed_dataset.get_doc_idx()
            # Slice the doc-idx
            start_index = splits[index]
            # Add +1 so we can index into the dataset to get the upper bound.
            end_index = splits[index + 1] + 1
            # New doc_idx view.
            indexed_dataset.set_doc_idx(doc_idx_ptr[start_index:end_index])
            # Build the dataset accordingly.
            dataset = BertDataset(
                name=name,
                indexed_dataset=indexed_dataset,
                data_prefix=data_prefix,
                num_epochs=None,
                max_num_samples=train_valid_test_num_samples[index],
                masked_lm_prob=masked_lm_prob,
                max_seq_length=max_seq_length,
                short_seq_prob=short_seq_prob,
                seed=seed)
            # Restore the original doc-idx pointer so the indexed dataset
            # is left unchanged for the other splits.
            indexed_dataset.set_doc_idx(doc_idx_ptr)
            # Checks.
            assert indexed_dataset.doc_idx[0] == 0
            assert indexed_dataset.doc_idx.shape[0] == \
                (total_num_of_documents + 1)
        return dataset

    train_dataset = build_dataset(0, 'train')
    valid_dataset = build_dataset(1, 'valid')
    test_dataset = build_dataset(2, 'test')

    return (train_dataset, valid_dataset, test_dataset)
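
# A minimal usage sketch (the prefix and sample counts below are
# hypothetical, shown only for illustration):
#
#   train_ds, valid_ds, test_ds = build_train_valid_test_datasets(
#       data_prefix='my-corpus_text_sentence',
#       data_impl='mmap',
#       splits_string='949,50,1',
#       train_valid_test_num_samples=[1000000, 10000, 1000],
#       max_seq_length=512,
#       masked_lm_prob=0.15,
#       short_seq_prob=0.1,
#       seed=1234,
#       skip_warmup=True)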


class BertDataset(Dataset):

    def __init__(self, name, indexed_dataset, data_prefix,
                 num_epochs, max_num_samples, masked_lm_prob,
                 max_seq_length, short_seq_prob, seed):

        # Params to store.
        self.name = name
        self.seed = seed
        self.masked_lm_prob = masked_lm_prob
        self.max_seq_length = max_seq_length

        # Dataset.
        self.indexed_dataset = indexed_dataset

        # Build the samples mapping.
        self.samples_mapping = get_samples_mapping_(self.indexed_dataset,
                                                    data_prefix,
                                                    num_epochs,
                                                    max_num_samples,
                                                    self.max_seq_length,
                                                    short_seq_prob,
                                                    self.seed,
                                                    self.name)

        # Vocabulary and special token ids.
        tokenizer = get_tokenizer()
        self.vocab_id_list = list(tokenizer.inv_vocab.keys())
        self.vocab_id_to_token_dict = tokenizer.inv_vocab
        self.cls_id = tokenizer.cls
        self.sep_id = tokenizer.sep
        self.mask_id = tokenizer.mask
        self.pad_id = tokenizer.pad

    def __len__(self):
        return self.samples_mapping.shape[0]

    def __getitem__(self, idx):
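        # Each row of samples_mapping is a triple (start, end, length): the
        # half-open sentence range [start, end) that makes up this sample
        # and the target sequence length chosen when the mapping was built.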

        start_index, end_index, seq_length = self.samples_mapping[idx]
        sample = []
        for index in range(start_index, end_index):
            sample.append(self.indexed_dataset[index])
        # Note that this rng state should be numpy and not python since
        # python randint is inclusive whereas the numpy one is exclusive.
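        # (numpy's randint(low, high) samples from [low, high), while
        # Python's random.randint(a, b) samples from [a, b].)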
        np_rng = np.random.RandomState(seed=(self.seed + idx))
        return build_training_sample(sample, seq_length,
                                     self.max_seq_length,  # needed for padding
                                     self.vocab_id_list,
                                     self.vocab_id_to_token_dict,
                                     self.cls_id, self.sep_id,
                                     self.mask_id, self.pad_id,
                                     self.masked_lm_prob, np_rng)


def get_indexed_dataset_(data_prefix, data_impl, skip_warmup):

    print_rank_0(' > building dataset index ...')

    start_time = time.time()
    indexed_dataset = make_indexed_dataset(data_prefix,
                                           data_impl,
                                           skip_warmup)
    assert indexed_dataset.sizes.shape[0] == indexed_dataset.doc_idx[-1]
    print_rank_0(' > finished creating indexed dataset in {:4f} '
                 'seconds'.format(time.time() - start_time))

    print_rank_0(' > indexed dataset stats:')
    print_rank_0('    number of documents: {}'.format(
        indexed_dataset.doc_idx.shape[0] - 1))
    print_rank_0('    number of sentences: {}'.format(
        indexed_dataset.sizes.shape[0]))

    return indexed_dataset


def get_train_valid_test_split_(splits_string, size):
    """ Get dataset splits from comma or '/' separated string list."""

    splits = []
    if splits_string.find(',') != -1:
        splits = [float(s) for s in splits_string.split(',')]
    elif splits_string.find('/') != -1:
        splits = [float(s) for s in splits_string.split('/')]
    else:
        splits = [float(splits_string)]
    while len(splits) < 3:
        splits.append(0.)
    splits = splits[:3]
    splits_sum = sum(splits)
    assert splits_sum > 0.0
    splits = [split / splits_sum for split in splits]
    splits_index = [0]
    for index, split in enumerate(splits):
        splits_index.append(splits_index[index] +
                            int(round(split * float(size))))
    diff = splits_index[-1] - size
    for index in range(1, len(splits_index)):
        splits_index[index] -= diff
    assert len(splits_index) == 4
    assert splits_index[-1] == size
    return splits_index


def get_samples_mapping_(indexed_dataset,
                         data_prefix,
                         num_epochs,
                         max_num_samples,
                         max_seq_length,
                         short_seq_prob,
                         seed,
                         name):
    if not num_epochs:
        if not max_num_samples:
            raise ValueError("Need to specify either max_num_samples "
                             "or num_epochs")
        num_epochs = np.iinfo(np.int32).max - 1
    if not max_num_samples:
        max_num_samples = np.iinfo(np.int64).max - 1
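    # (With only one of the two limits provided, the other is set to a
    # near-maximum sentinel so mapping construction stops at whichever
    # limit is hit first.)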

    # Filename of the index mapping
    indexmap_filename = data_prefix
    indexmap_filename += '_{}_indexmap'.format(name)
    if num_epochs != (np.iinfo(np.int32).max - 1):
        indexmap_filename += '_{}ep'.format(num_epochs)
    if max_num_samples != (np.iinfo(np.int64).max - 1):
        indexmap_filename += '_{}mns'.format(max_num_samples)
    indexmap_filename += '_{}msl'.format(max_seq_length)
    indexmap_filename += '_{:0.2f}ssp'.format(short_seq_prob)
    indexmap_filename += '_{}s'.format(seed)
    indexmap_filename += '.npy'
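    # (Hypothetical example of a resulting filename, assuming
    # data_prefix='corpus', name='train', max_num_samples=1000000,
    # max_seq_length=512, short_seq_prob=0.1 and seed=1234:
    #   'corpus_train_indexmap_1000000mns_512msl_0.10ssp_1234s.npy'.)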

    # Build the index mapping if it does not exist.
    if torch.distributed.get_rank() == 0 and \
       not os.path.isfile(indexmap_filename):
        print(' > WARNING: could not find index map file {}, building '
              'the indices on rank 0 ...'.format(indexmap_filename))

        # Make sure the types match the helpers input types.
        assert indexed_dataset.doc_idx.dtype == np.int64
        assert indexed_dataset.sizes.dtype == np.int32

        # Build samples mapping
        verbose = torch.distributed.get_rank() == 0
        start_time = time.time()
        print_rank_0(' > building samples index mapping for {} ...'.format(
            name))
        # First compile and then import.
        from megatron.data.dataset_utils import compile_helper
        compile_helper()
        from megatron.data import helpers
        samples_mapping = helpers.build_mapping(
            indexed_dataset.doc_idx,
            indexed_dataset.sizes,
            num_epochs,
            max_num_samples,
            max_seq_length - 3,  # account for added [CLS] and two [SEP] tokens
            short_seq_prob,
            seed,
            verbose)
        print_rank_0(' > done building samples index mapping')
        np.save(indexmap_filename, samples_mapping, allow_pickle=True)
        print_rank_0(' > saved the index mapping in {}'.format(
            indexmap_filename))
        # Make sure all the ranks have built the mapping
        print_rank_0(' > elapsed time to build and save samples mapping '
                     '(seconds): {:4f}'.format(
                         time.time() - start_time))
    # This should be a barrier but nccl barrier assumes
    # device_index=rank which is not the case for model
    # parallel case
    counts = torch.cuda.LongTensor([1])
    torch.distributed.all_reduce(counts, group=mpu.get_data_parallel_group())
    assert counts[0].item() == torch.distributed.get_world_size(
        group=mpu.get_data_parallel_group())
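    # (The all_reduce above also serves as the synchronization point: every
    # rank in the data-parallel group must reach it before any rank returns,
    # so no rank loads the mapping before rank 0 has finished writing it.)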

    # Load the index mapping.
    print_rank_0(' > loading indexed mapping from {}'.format(
        indexmap_filename))
    start_time = time.time()
    samples_mapping = np.load(indexmap_filename, allow_pickle=True)
    print_rank_0('    loaded indexed file in {:3.3f} seconds'.format(
        time.time() - start_time))
    print_rank_0('    total number of samples: {}'.format(
        samples_mapping.shape[0]))

    return samples_mapping