"examples/community/test_onnx_controlnet.py" did not exist on "5990014700060912d7248970b3969a9d91dfc026"
bert_dataset.py 11.8 KB
Newer Older
Mohammad Shoeybi's avatar
Mohammad Shoeybi committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
# coding=utf-8
# Copyright (c) 2019, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""BERT Style dataset."""
import os
import time

import numpy as np
import torch
from torch.utils.data import Dataset

from megatron import get_tokenizer
from megatron import mpu
from megatron.data import helpers
from megatron.data.dataset_utils import build_training_sample
from megatron.data.indexed_dataset import make_dataset as make_indexed_dataset
from megatron.data.ict_dataset import InverseClozeDataset
from megatron import print_rank_0


def build_train_valid_test_datasets(data_prefix, data_impl, splits_string,
                                    train_valid_test_num_samples,
36
                                    max_seq_length, masked_lm_prob,
37
38
                                    short_seq_prob, seed, skip_warmup,
                                    ict_dataset=False):
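    """Build the train/valid/test BERT-style datasets by splitting a single
    indexed dataset at document boundaries according to splits_string.
    Returns a (train, valid, test) tuple; a split that receives zero
    documents comes back as None."""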

    # Indexed dataset.
    indexed_dataset = get_indexed_dataset_(data_prefix,
                                           data_impl,
                                           skip_warmup)

    if ict_dataset:
        titles_dataset = get_indexed_dataset_(data_prefix + '-titles',
                                              data_impl,
                                              skip_warmup)

    # Get start and end indices of train/valid/test into doc-idx.
    # Note that doc-idx is designed to be num-docs + 1 so we can
    # easily iterate over it.
    total_num_of_documents = indexed_dataset.doc_idx.shape[0] - 1
    splits = get_train_valid_test_split_(splits_string, total_num_of_documents)

    # Print stats about the splits.
    print_rank_0(' > dataset split:')
    def print_split_stats(name, index):
        print_rank_0('    {}:'.format(name))
        print_rank_0('     document indices in [{}, {}) total of {} '
                     'documents'.format(splits[index], splits[index + 1],
                                        splits[index + 1] - splits[index]))
        start_index = indexed_dataset.doc_idx[splits[index]]
        end_index = indexed_dataset.doc_idx[splits[index + 1]]
        print_rank_0('     sentence indices in [{}, {}) total of {} '
                     'sentences'.format(start_index, end_index,
                                        end_index - start_index))
    print_split_stats('train', 0)
    print_split_stats('validation', 1)
    print_split_stats('test', 2)

    def build_dataset(index, name):
        dataset = None
        if splits[index + 1] > splits[index]:
            # Get the pointer to the original doc-idx so we can set it later.
            doc_idx_ptr = indexed_dataset.get_doc_idx()
            # Slice the doc-idx
            start_index = splits[index]
            # Add +1 so we can index into the dataset to get the upper bound.
            end_index = splits[index + 1] + 1
            # New doc_idx view.
            indexed_dataset.set_doc_idx(doc_idx_ptr[start_index:end_index])
            # Build the dataset accordingly.
            kwargs = dict(
                name=name,
                data_prefix=data_prefix,
                num_epochs=None,
                max_num_samples=train_valid_test_num_samples[index],
                max_seq_length=max_seq_length,
                short_seq_prob=short_seq_prob,
                seed=seed
            )

            if ict_dataset:
                dataset = InverseClozeDataset(titles_dataset=titles_dataset,
                                              context_dataset=indexed_dataset,
                                              **kwargs)
            else:
                # BertDataset expects the keyword `indexed_dataset`
                # (see its __init__ below), so the dataset is passed
                # explicitly rather than through the shared kwargs.
                dataset = BertDataset(indexed_dataset=indexed_dataset,
                                      masked_lm_prob=masked_lm_prob,
                                      **kwargs)
            # Set the original pointer so dataset remains the main dataset.
            indexed_dataset.set_doc_idx(doc_idx_ptr)
            # Checks.
            assert indexed_dataset.doc_idx[0] == 0
            assert indexed_dataset.doc_idx.shape[0] == \
                (total_num_of_documents + 1)
        return dataset

    train_dataset = build_dataset(0, 'train')
    valid_dataset = build_dataset(1, 'valid')
    test_dataset = build_dataset(2, 'test')

    return (train_dataset, valid_dataset, test_dataset)


class BertDataset(Dataset):
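    """Map-style dataset of masked-LM training samples. Each sample is
    built on the fly from a precomputed mapping of (start sentence index,
    end sentence index, target sequence length) into the indexed dataset."""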
    def __init__(self, name, indexed_dataset, data_prefix,
                 num_epochs, max_num_samples, masked_lm_prob,
                 max_seq_length, short_seq_prob, seed):

        # Params to store.
        self.name = name
        self.seed = seed
        self.masked_lm_prob = masked_lm_prob
        self.max_seq_length = max_seq_length

        # Dataset.
        self.indexed_dataset = indexed_dataset

        # Build the samples mapping.
        self.samples_mapping = get_samples_mapping_(self.indexed_dataset,
                                                    data_prefix,
                                                    num_epochs,
                                                    max_num_samples,
                                                    self.max_seq_length,
                                                    short_seq_prob,
                                                    self.seed,
                                                    self.name)

        # Vocab stuff.
        tokenizer = get_tokenizer()
        self.vocab_id_list = list(tokenizer.inv_vocab.keys())
        self.vocab_id_to_token_dict = tokenizer.inv_vocab
        self.cls_id = tokenizer.cls
        self.sep_id = tokenizer.sep
        self.mask_id = tokenizer.mask
        self.pad_id = tokenizer.pad


    def __len__(self):
        return self.samples_mapping.shape[0]


    def __getitem__(self, idx):

        start_index, end_index, seq_length = self.samples_mapping[idx]
        sample = []
        for index in range(start_index, end_index):
            sample.append(self.indexed_dataset[index])
        # Note that this rng state should be numpy and not python since
        # python randint is inclusive whereas the numpy one is exclusive.
        np_rng = np.random.RandomState(seed=(self.seed + idx))
        return build_training_sample(sample, seq_length,
                                     self.max_seq_length, # needed for padding
                                     self.vocab_id_list,
                                     self.vocab_id_to_token_dict,
                                     self.cls_id, self.sep_id,
                                     self.mask_id, self.pad_id,
                                     self.masked_lm_prob, np_rng)


def get_indexed_dataset_(data_prefix, data_impl, skip_warmup):

    print_rank_0(' > building dataset index ...')

    start_time = time.time()
    indexed_dataset = make_indexed_dataset(data_prefix,
                                           data_impl,
                                           skip_warmup)
    assert indexed_dataset.sizes.shape[0] == indexed_dataset.doc_idx[-1]
    print_rank_0(' > finished creating indexed dataset in {:4f} '
                 'seconds'.format(time.time() - start_time))

    print_rank_0(' > indexed dataset stats:')
    print_rank_0('    number of documents: {}'.format(
        indexed_dataset.doc_idx.shape[0] - 1))
    print_rank_0('    number of sentences: {}'.format(
        indexed_dataset.sizes.shape[0]))

    return indexed_dataset


def get_train_valid_test_split_(splits_string, size):
    """ Get dataset splits from comma or '/' separated string list."""

    splits = []
    if splits_string.find(',') != -1:
        splits = [float(s) for s in splits_string.split(',')]
    elif splits_string.find('/') != -1:
        splits = [float(s) for s in splits_string.split('/')]
    else:
        splits = [float(splits_string)]
    while len(splits) < 3:
        splits.append(0.)
    splits = splits[:3]
    splits_sum = sum(splits)
    assert splits_sum > 0.0
    splits = [split/splits_sum for split in splits]
    splits_index = [0]
    for index, split in enumerate(splits):
        splits_index.append(splits_index[index] +
                            int(round(split * float(size))))
    diff = splits_index[-1] - size
    for index in range(1, len(splits_index)):
        splits_index[index] -= diff
    assert len(splits_index) == 4
    assert splits_index[-1] == size
    return splits_index


def get_samples_mapping_(indexed_dataset,
                         data_prefix,
                         num_epochs,
                         max_num_samples,
                         max_seq_length,
                         short_seq_prob,
                         seed,
                         name):
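    """Build (or load from a cached .npy file) the samples mapping: a numpy
    array in which each row is (start sentence index, end sentence index,
    target sequence length) into the sentence-level indexed dataset."""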
    if not num_epochs:
        if not max_num_samples:
            raise ValueError("Need to specify either max_num_samples "
                             "or num_epochs")
        num_epochs = np.iinfo(np.int32).max - 1
    if not max_num_samples:
        max_num_samples = np.iinfo(np.int64).max - 1

    # Filename of the index mapping
    indexmap_filename = data_prefix
    indexmap_filename += '_{}_indexmap'.format(name)
    if num_epochs != (np.iinfo(np.int32).max - 1):
        indexmap_filename += '_{}ep'.format(num_epochs)
    if max_num_samples != (np.iinfo(np.int64).max - 1):
        indexmap_filename += '_{}mns'.format(max_num_samples)
    indexmap_filename += '_{}msl'.format(max_seq_length)
    indexmap_filename += '_{:0.2f}ssp'.format(short_seq_prob)
    indexmap_filename += '_{}s'.format(seed)
    indexmap_filename += '.npy'
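    # For example, data_prefix='my-bert' (hypothetical), name='train',
    # max_num_samples=1000000, max_seq_length=512, short_seq_prob=0.1 and
    # seed=1234 yield:
    #   my-bert_train_indexmap_1000000mns_512msl_0.10ssp_1234s.npy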

    # Build the index mapping if it does not exist.
    if torch.distributed.get_rank() == 0 and \
       not os.path.isfile(indexmap_filename):
        print(' > WARNING: could not find index map file {}, building '
              'the indices on rank 0 ...'.format(indexmap_filename))

        # Make sure the types match the helpers input types.
        assert indexed_dataset.doc_idx.dtype == np.int64
        assert indexed_dataset.sizes.dtype == np.int32

        # Build samples mapping
        verbose = torch.distributed.get_rank() == 0
        start_time = time.time()
        print_rank_0(' > building samples index mapping for {} ...'.format(
            name))
        samples_mapping = helpers.build_mapping(
            indexed_dataset.doc_idx,
            indexed_dataset.sizes,
            num_epochs,
            max_num_samples,
            max_seq_length-3, # account for the added [CLS] and two [SEP] tokens
            short_seq_prob,
            seed,
            verbose)
        print_rank_0(' > done building samples index mapping')
        np.save(indexmap_filename, samples_mapping, allow_pickle=True)
        print_rank_0(' > saved the index mapping in {}'.format(
            indexmap_filename))
        print_rank_0(' > elapsed time to build and save samples mapping '
                     '(seconds): {:4f}'.format(
                         time.time() - start_time))

    # Make sure all the ranks have built the mapping before loading it.
    # This should be a barrier but nccl barrier assumes
    # device_index=rank which is not the case for model
    # parallel case
    counts = torch.cuda.LongTensor([1])
    torch.distributed.all_reduce(counts, group=mpu.get_data_parallel_group())
    assert counts[0].item() == torch.distributed.get_world_size(
        group=mpu.get_data_parallel_group())
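    # The all-reduce also acts as a synchronization point: each rank in the
    # data-parallel group contributes 1, so the asserted sum can only equal
    # the group's world size once every rank has reached this call.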

    # Load the samples mapping.
    print_rank_0(' > loading indexed mapping from {}'.format(
        indexmap_filename))
    start_time = time.time()
    samples_mapping = np.load(indexmap_filename, allow_pickle=True)
    print_rank_0('    loaded indexed file in {:3.3f} seconds'.format(
        time.time() - start_time))
    print_rank_0('    total number of samples: {}'.format(
        samples_mapping.shape[0]))

    return samples_mapping
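

# A minimal usage sketch (hypothetical prefix and sizes; in practice these
# arguments come from the Megatron pretraining scripts' command line):
#
#   train_ds, valid_ds, test_ds = build_train_valid_test_datasets(
#       data_prefix='my-bert_text_sentence',   # hypothetical path prefix
#       data_impl='mmap',
#       splits_string='949,50,1',
#       train_valid_test_num_samples=[1000000, 10000, 1000],
#       max_seq_length=512,
#       masked_lm_prob=0.15,
#       short_seq_prob=0.1,
#       seed=1234,
#       skip_warmup=False)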