"""TO BE ADDED """

import random
import time

import numpy as np
import torch
from torch.utils.data import Dataset

from .dataset_utils import build_training_sample
#from data.mapping import build_training_samples_mapping

from . import helpers
from megatron.data import FullBertTokenizer, indexed_dataset


class AlbertDataset(Dataset):

    def __init__(self, indexed_dataset, tokenizer, num_epochs, max_num_samples,
                 masked_lm_prob, max_seq_length, short_seq_prob, seed):

        # Params to store.
        self.seed = seed
        self.masked_lm_prob = masked_lm_prob
        self.max_seq_length = max_seq_length
        self.tokenizer = tokenizer

        # Indexed dataset.
        self.indexed_dataset = indexed_dataset

        # Build the samples mapping.
        if not max_num_samples:
            max_num_samples = len(indexed_dataset) * num_epochs
        self.samples_mapping = helpers.build_mapping(
            indexed_dataset.doc_idx,
            indexed_dataset.sizes,
            num_epochs,
            max_num_samples,
            self.max_seq_length-3,  # account for [CLS], [SEP], [SEP]
            short_seq_prob,
            self.seed)
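        # Each row of samples_mapping is
        # [start_sentence_index, end_sentence_index (exclusive), target_seq_length];
        # see __getitem__ below.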

        # Vocab stuff.
        self.vocab_id_list = list(tokenizer.inv_vocab.keys())
        self.vocab_id_to_token_dict = tokenizer.inv_vocab
        self.cls_id = tokenizer.vocab['[CLS]']
        self.sep_id = tokenizer.vocab['[SEP]']
        self.mask_id = tokenizer.vocab['[MASK]']
        self.pad_id = tokenizer.vocab['[PAD]']


    @classmethod
    def from_paths(cls, vocab, data_prefix, data_impl,
                   num_epochs, max_num_samples, masked_lm_prob,
                   max_seq_length, short_seq_prob, seed):
        tokenizer = FullBertTokenizer(vocab, do_lower_case=True)
        idx_ds = indexed_dataset.make_dataset(data_prefix, data_impl)
        return cls(idx_ds, tokenizer, num_epochs, max_num_samples, masked_lm_prob,
                   max_seq_length, short_seq_prob, seed)

    def __len__(self):
        return self.samples_mapping.shape[0]

    def __getitem__(self, idx):
        rng = random.Random(self.seed + idx)
        start_index, end_index, seq_length = self.samples_mapping[idx]
        sample = []
        for index in range(start_index, end_index):
            sample.append(self.indexed_dataset[index])
        # Debug check: flag suspiciously long sentences.
        for s in sample:
            if len(s) > 1000:
                print(self.tokenizer.convert_ids_to_tokens(s))
        return build_training_sample(sample, seq_length,
                                     self.max_seq_length,
                                     self.vocab_id_list,
                                     self.vocab_id_to_token_dict,
                                     self.cls_id, self.sep_id,
                                     self.mask_id, self.pad_id,
                                     self.masked_lm_prob, rng)
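

# A minimal usage sketch (hypothetical paths; assumes the compiled `helpers`
# extension has been built and an indexed dataset exists under `data_prefix`):
#
#     dataset = AlbertDataset.from_paths(
#         vocab='vocab.txt', data_prefix='my-corpus', data_impl='mmap',
#         num_epochs=1, max_num_samples=None, masked_lm_prob=0.15,
#         max_seq_length=512, short_seq_prob=0.1, seed=1234)
#     loader = torch.utils.data.DataLoader(dataset, batch_size=32)


# The block below is a pure-Python reference implementation of the samples
# mapping, superseded by the compiled helpers.build_mapping call above; it is
# kept commented out for reference.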
'''
def get_target_seq_length(max_num_tokens, short_seq_prob, np_rng):
    """With probability `short_seq_prob` generate a smaller sequence lenght."""
    if np_rng.random() < short_seq_prob:
        return np_rng.randint(2, max_num_tokens + 1)
    return max_num_tokens
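
# For example, with short_seq_prob=0.1, roughly one sample in ten receives a
# random target length drawn uniformly from [2, max_num_tokens].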


def build_training_samples_mapping(indexed_dataset, num_epochs, max_seq_length,
                                   short_seq_prob, seed):
    """Build a mapping to reconstruct training samples."""

    start_time = time.time()
    print('> building training samples mapping ...')

    # RNG:
    np_rng = np.random.RandomState(seed=seed)

    # List of start sentence index and end sentence index (end is exclusive)
    # to retrieve.
    samples = []

    # Account for [CLS], [SEP], [SEP]
    max_num_tokens = max_seq_length - 3

    # Number of documents processed:
    total_docs = 0
    # Number of documents that are skipped:
    skipped_docs = 0
    # Number of empty documents:
    empty_docs = 0

    # For each epoch:
    for epoch in range(num_epochs):
        # For each document:
        for doc_index in range(indexed_dataset.num_docs):
            if epoch == 0:
                total_docs += 1

            # Document sentences are in [sent_index_first, sent_index_last).
            sent_index_first = indexed_dataset.doc_idx[doc_index]
            sent_index_last = indexed_dataset.doc_idx[doc_index+1]
            assert sent_index_last >= sent_index_first

            # Empty docs.
            if (sent_index_last - sent_index_first) == 0:
                if epoch == 0:
                    print('***WARNING*** document {} is empty'.format(
                        doc_index))
                    empty_docs += 1
                continue
            # Skip documents that only have one sentence.
            if (sent_index_last - sent_index_first) == 1:
                if epoch == 0:
                    print('***WARNING*** document {} has only one sentence, '
                          'skipping ...'.format(doc_index))
                    skipped_docs += 1
                continue

            # Loop through sentences.
            sent_index = sent_index_first
            target_seq_length = get_target_seq_length(max_num_tokens,
                                                      short_seq_prob, np_rng)
            size = 0
            while sent_index < sent_index_last:

                # Get the size.
                assert indexed_dataset.sizes[sent_index] > 0
                size += indexed_dataset.sizes[sent_index]
                sent_index += 1

                # If we have reached the target length.
                exceeded_target_size = (size >= target_seq_length)
                # If only one sentence is left in the document.
                only_one_sent_left = (sent_index == (sent_index_last - 1))
                # If we have at least two sentences.
                have_more_than_one_sent = (sent_index - sent_index_first) > 1
                # If we have reached end of the document.
                reached_end_of_doc = (sent_index == sent_index_last)
                if (exceeded_target_size and not only_one_sent_left and
                    have_more_than_one_sent) or reached_end_of_doc:
                    assert (sent_index - sent_index_first) > 1
                    assert size > 1
                    # Add the sample.
                    samples.append([sent_index_first, sent_index,
                                    target_seq_length])
                    # Reset indices
                    sent_index_first = sent_index
                    target_seq_length = get_target_seq_length(max_num_tokens,
                                                              short_seq_prob,
                                                              np_rng)
                    size = 0

    # Convert to numpy array.
    samples_np = np.array(samples, dtype=np.int64)
    # Shuffle.
    np_rng.shuffle(samples_np)
    elapsed_time = time.time() - start_time

    # Print some stats:
    print('\n***************************** info *****************************')
    print('   elapsed time (sec) ..................... {}'.format(elapsed_time))
    print('   number of epochs ....................... {}'.format(num_epochs))
    print('   number of samples ...................... {}'.format(
        samples_np.shape[0]))
    print('   number of documents .................... {}'.format(total_docs))
    print('   number of empty documents .............. {}'.format(empty_docs))
    print('   number of documents with one sentence .. {}'.format(skipped_docs))
    print('****************************************************************\n')

    return samples_np
'''

# WILL BE REPLACED WITH JARED'S
class JaredDataset(object):
    def __init__(self, doc_idx, sizes, sentences):
        self.doc_idx = doc_idx
        self.num_docs = len(self.doc_idx) - 1
        self.sizes = sizes
        self.sentences = sentences

    def __getitem__(self, idx):
        return self.sentences[idx]
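
# An illustrative sketch (made-up numbers) of the layout JaredDataset expects:
# two documents, the first with three sentences and the second with two, give
#
#     doc_idx   = [0, 3, 5]            # cumulative sentence offsets per doc
#     sizes     = [5, 8, 4, 7, 6]      # token count of each sentence
#     sentences = [ids_0, ..., ids_4]  # the token-id lists themselves
#
# so document d spans sentences[doc_idx[d]:doc_idx[d+1]].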



if __name__ == '__main__':
    print('dataset ...')

    from bert_tokenization import FullTokenizer
    import json
    import nltk
    nltk.download('punkt')

    # Each line of the input file is expected to be a JSON object with a
    # 'text' field holding newline-separated text; yields one list of
    # NLTK-tokenized sentences per document.
    def document_generator_provider(input_file):
        with open(input_file, 'r') as ifile:
            for document in ifile:
                data = json.loads(document)
                text = data['text']
                sentences = []
                for line in text.split('\n'):
                    # Skip empty lines; split('\n') never yields a bare '\n'.
                    if line:
                        sent = nltk.tokenize.sent_tokenize(line)
                        if sent:
                            sentences.extend(sent)
                yield sentences

    input_file = 'test/samples_10000.json'
    vocab_file = 'test/vocab.txt'

    tokenizer = FullTokenizer(vocab_file, do_lower_case=True)
    document_generator = document_generator_provider(input_file)

    doc_idx = [0]
    sizes = []
    sentences_list = []

    for sentences in document_generator:
        num_sent = 0
        for sentence in sentences:
            tokens = tokenizer.tokenize(sentence)
            if tokens:
                ids = tokenizer.convert_tokens_to_ids(tokens)
                # Debug: should not happen for a non-empty token list.
                if len(ids) == 0:
                    print('****************')
                    print(sentence)
                    print(tokens)
                    print(ids)
                    print('****************')
                sizes.append(len(ids))
                sentences_list.append(ids)
                num_sent += 1
        doc_idx.append(num_sent)
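    # Convert per-document sentence counts into cumulative sentence offsets,
    # e.g. per-doc counts turning [0, 3, 2] into [0, 3, 5].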
    for i in range(1, len(doc_idx)):
        doc_idx[i] += doc_idx[i-1]

    #max_size = np.iinfo(np.int32).max // 32


    docs_np = np.array(doc_idx, dtype=np.uint32)
    sizes_np = np.array(sizes, dtype=np.uint16)

    start_time = time.time()
    max_seq_length = 512
    max_size = docs_np.shape[0]
    lens = np.full(max_size, max_seq_length-3, dtype=np.uint16)
    lens_rand = np.random.randint(low=2, high=(max_seq_length-2),
                                  size=max_size//10, dtype=np.uint16)
    lens_view = lens[:max_size//10]
    np.copyto(lens_view, lens_rand)
    np.random.shuffle(lens)
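    # At this point roughly one tenth of `lens` are random short lengths and
    # the rest equal max_seq_length-3, shuffled together uniformly.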
    print('num docs', max_size)
    print('lens time', time.time() - start_time)

    import helpers
    start_time = time.time()
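    # helpers.build_mapping positional arguments: (doc_idx, sizes, num_epochs,
    # max_num_samples, max_seq_length-3, short_seq_prob, seed).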
    maps = helpers.build_mapping(docs_np, sizes_np, 10, 100, 509, 0.1, 1234)
    print('maps time', time.time() - start_time)
    print(maps)
    exit()

    # NOTE: the code below this point is unreachable (it follows the exit()
    # above) and is development scratch; `example` and `doit` refer to a
    # scratch extension module/function not included in this file.
    start_time = time.time()
    max_size = 10  # np.iinfo(np.int32).max // 32
    docs = np.arange(10, dtype=np.uint32)
    print(docs)

    a = example.doit(docs, max_size)
    print(type(a))
    print(a.shape)
    print(a)
    print(time.time() - start_time)
    exit()


    #start_time = time.time()
    count = doit(maps, docs_np, sizes_np, lens, docs_np.shape[0]-1, 10)
    print(count)
    maps = maps[:count]
    np.random.shuffle(maps)
    print(time.time() - start_time)


    exit()

    indexed_dataset = JaredDataset(doc_idx, sizes, sentences_list)
    dataset = AlbertDataset(indexed_dataset=indexed_dataset,
                            tokenizer=tokenizer,
                            num_epochs=10,
                            max_num_samples=None,
                            masked_lm_prob=0.15,
                            max_seq_length=512,
                            short_seq_prob=0.1,
                            seed=1234)