import collections
import logging
import random
import re

import jieba

jieba.setLogLevel(logging.CRITICAL)    # silence jieba's initialization log output

import numpy as np

try:
    import mask    # compiled extension backing the 'c++' masking backend
except ImportError:
    mask = None    # extension not built; only the 'python' backend is usable

PAD = 0
MaskedLMInstance = collections.namedtuple("MaskedLMInstance", ["index", "label"])    # (masked position, original token)


def map_to_numpy(data):
    return np.asarray(data)


class PreTrainingDataset:
    """Builds masked-LM pre-training instances, with optional Chinese whole-word masking."""

    def __init__(self,
                 tokenizer,
                 max_seq_length,
                 backend='python',
                 max_predictions_per_seq: int = 80,
                 do_whole_word_mask: bool = True):
        self.tokenizer = tokenizer
        self.max_seq_length = max_seq_length
        self.masked_lm_prob = 0.15
        self.backend = backend
        self.do_whole_word_mask = do_whole_word_mask
        self.max_predictions_per_seq = max_predictions_per_seq
        self.vocab_words = list(tokenizer.vocab.keys())
        self.rec = re.compile('[\u4E00-\u9FA5]')    # a single CJK ideograph
        self.whole_rec = re.compile('##[\u4E00-\u9FA5]')    # a "##"-prefixed CJK ideograph

        self.mlm_p = 0.15
        self.mlm_mask_p = 0.8
        self.mlm_tamper_p = 0.05
        self.mlm_maintain_p = 0.1
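        # NOTE: the ratios above mirror masked_lm_prob and the hard-coded
        # 80/10/10 mask/keep/replace split in the masking methods below; they
        # are not read anywhere else in this file.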

    def tokenize(self, doc):
        return [self.tokenizer.tokenize(d) for d in doc]

    def create_training_instance(self, instance):
        is_next = 1    # NSP is disabled; the next-sentence label is constant
        raw_text_list = self.get_new_segment(instance)
        tokens_a = raw_text_list
        assert len(tokens_a) == len(instance)
        # Build the token and segment-id sequences for a single-segment input
        # (the original two-segment NSP layout has been removed).
        tokens = []
        original_tokens = []
        segment_ids = []
        tokens.append("[CLS]")
        original_tokens.append('[CLS]')
        segment_ids.append(0)
        for index, token in enumerate(tokens_a):
            tokens.append(token)
            original_tokens.append(instance[index])
            segment_ids.append(0)

        tokens.append("[SEP]")
        original_tokens.append('[SEP]')
        segment_ids.append(0)

        # Get Masked LM predictions
        if self.backend == 'c++':
            output_tokens, masked_lm_output = mask.create_whole_masked_lm_predictions(
                tokens, original_tokens, self.vocab_words, self.tokenizer.vocab, self.max_predictions_per_seq,
                self.masked_lm_prob)
        elif self.backend == 'python':
            output_tokens, masked_lm_output = self.create_whole_masked_lm_predictions(tokens)
        else:
            raise ValueError(f"Unknown backend {self.backend!r}; expected 'c++' or 'python'.")

        # Convert to Ids
        input_ids = self.tokenizer.convert_tokens_to_ids(output_tokens)
        input_mask = [1] * len(input_ids)

        while len(input_ids) < self.max_seq_length:
            input_ids.append(PAD)
            segment_ids.append(PAD)
            input_mask.append(PAD)
            masked_lm_output.append(-1)
        # Returned arrays: input_ids, input_mask, segment_ids,
        # masked_lm_output (label vocab id at masked positions, -1 elsewhere),
        # and the constant is_next label.
        return ([
            map_to_numpy(input_ids),
            map_to_numpy(input_mask),
            map_to_numpy(segment_ids),
            map_to_numpy(masked_lm_output),
            map_to_numpy([is_next])
        ])

    def create_masked_lm_predictions(self, tokens):
        """Creates per-WordPiece masked LM predictions (no whole-word grouping)."""
        cand_indexes = []
        for i, token in enumerate(tokens):
            if token == "[CLS]" or token == "[SEP]":
                continue
            # Each WordPiece position is an independent masking candidate here;
            # whole-word grouping is handled separately by
            # create_whole_masked_lm_predictions below.
            cand_indexes.append(i)

        random.shuffle(cand_indexes)
        output_tokens = list(tokens)

        # Mask ~masked_lm_prob of the tokens, capped at max_predictions_per_seq.
        num_to_predict = min(self.max_predictions_per_seq, max(1, int(round(len(tokens) * self.masked_lm_prob))))

        masked_lms = []
        covered_indexes = set()
        for index in cand_indexes:
            if len(masked_lms) >= num_to_predict:
                break
            if index in covered_indexes:
                continue
            covered_indexes.add(index)

            # 80% of the time, replace with [MASK]
            if random.random() < 0.8:
                masked_token = "[MASK]"
            # half of the remaining 20%: keep the original token (10% overall)
            elif random.random() < 0.5:
                masked_token = tokens[index]
            # otherwise: substitute a uniformly random vocabulary word (10% overall)
            else:
                masked_token = self.vocab_words[random.randint(0, len(self.vocab_words) - 1)]

            output_tokens[index] = masked_token
            masked_lms.append(MaskedLMInstance(index=index, label=tokens[index]))

        masked_lms = sorted(masked_lms, key=lambda x: x.index)
        masked_lm_output = [-1] * len(output_tokens)
        for p in masked_lms:
            masked_lm_output[p.index] = self.tokenizer.vocab[p.label]

        return (output_tokens, masked_lm_output)

    def get_new_segment(self, segment):
        """Mark non-initial characters of multi-character jieba words for Chinese whole-word masking.

        Given a sentence as a list of single-character tokens, prefix every
        character that continues a jieba-segmented word with "##", so that
        downstream masking knows which characters belong to the same word.

        For example, if jieba segments the text into the two words "今天" and
        "天气", the input ['今', '天', '天', '气'] becomes
        ['今', '##天', '天', '##气'].

        :param segment: a sentence, as a list of single-character tokens
        """
        seq_cws = jieba.lcut(''.join(segment))    # jieba word segmentation of the raw text
        seq_cws_dict = {x: 1 for x in seq_cws}    # word set for O(1) membership tests
        new_segment = []
        i = 0
        while i < len(segment):
            # Non-Chinese characters pass through unchanged.
            if len(self.rec.findall(segment[i])) == 0:
                new_segment.append(segment[i])
                i += 1
                continue

            has_add = False
            for length in range(3, 0, -1):
                if i + length > len(segment):
                    continue
                if ''.join(segment[i:i + length]) in seq_cws_dict:
                    new_segment.append(segment[i])
                    for offset in range(1, length):
                        new_segment.append('##' + segment[i + offset])
                    i += length
                    has_add = True
                    break
            if not has_add:
                new_segment.append(segment[i])
                i += 1
        return new_segment

    def create_whole_masked_lm_predictions(self, tokens):
        """Creates the predictions for the masked LM objective."""

        cand_indexes = []
        for (i, token) in enumerate(tokens):
            if token == "[CLS]" or token == "[SEP]":
                continue
            # Whole Word Masking means that we mask all of the wordpieces
            # corresponding to an original word. When a word has been split into
            # WordPieces, the first token does not have any marker and any subsequent
            # tokens are prefixed with ##. So whenever we see the ## token, we
            # append it to the previous set of word indexes.
            #
            # Note that Whole Word Masking does *not* change the training code
            # at all -- we still predict each WordPiece independently, softmaxed
            # over the entire vocabulary.
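            #
            # For example, for the (hypothetical) WordPiece sequence
            # ["[CLS]", "un", "##aff", "##able", "[SEP]"], this loop builds
            # cand_indexes == [[1, 2, 3]], so all three pieces are masked
            # together as one word.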
            if (self.do_whole_word_mask and len(cand_indexes) >= 1 and token.startswith("##")):
                cand_indexes[-1].append(i)
            else:
                cand_indexes.append([i])

        random.shuffle(cand_indexes)

        # Strip the "##" markers added by get_new_segment from the emitted tokens.
        output_tokens = [t[2:] if len(self.whole_rec.findall(t)) > 0 else t for t in tokens]

        # Mask ~masked_lm_prob of the tokens, capped at max_predictions_per_seq.
        num_to_predict = min(self.max_predictions_per_seq, max(1, int(round(len(tokens) * self.masked_lm_prob))))

        masked_lms = []
        covered_indexes = set()
        for index_set in cand_indexes:
            if len(masked_lms) >= num_to_predict:
                break
            # If adding a whole-word mask would exceed the maximum number of
            # predictions, then just skip this candidate.
            if len(masked_lms) + len(index_set) > num_to_predict:
                continue
            is_any_index_covered = False
            for index in index_set:
                if index in covered_indexes:
                    is_any_index_covered = True
                    break
            if is_any_index_covered:
                continue
            for index in index_set:
                covered_indexes.add(index)

                masked_token = None
                # 80% of the time, replace with [MASK]
                if random.random() < 0.8:
                    masked_token = "[MASK]"
                else:
                    # 10% of the time, keep original
                    if random.random() < 0.5:
                        masked_token = tokens[index][2:] if len(self.whole_rec.findall(
                            tokens[index])) > 0 else tokens[index]    # strip the "##" marker
                    # 10% of the time, replace with random word
                    else:
                        masked_token = self.vocab_words[random.randint(0, len(self.vocab_words) - 1)]

                output_tokens[index] = masked_token

                masked_lms.append(
                    MaskedLMInstance(
                        index=index,
                        label=tokens[index][2:] if len(self.whole_rec.findall(tokens[index])) > 0 else tokens[index]))
        assert len(masked_lms) <= num_to_predict
        masked_lms = sorted(masked_lms, key=lambda x: x.index)
        masked_lm_output = [-1] * len(output_tokens)
        for p in masked_lms:
            masked_lm_output[p.index] = self.tokenizer.vocab[p.label]

        return (output_tokens, masked_lm_output)
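

# ---------------------------------------------------------------------------
# Minimal usage sketch -- an illustration, not part of the original pipeline.
# It assumes a HuggingFace slow `BertTokenizer` (whose `.vocab` dict,
# `.tokenize()` and `.convert_tokens_to_ids()` match what PreTrainingDataset
# expects); the checkpoint name is an assumption.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from transformers import BertTokenizer

    tok = BertTokenizer.from_pretrained("bert-base-chinese")
    dataset = PreTrainingDataset(tok, max_seq_length=128, backend='python')

    # bert-base-chinese tokenizes Chinese text into single characters, which
    # is the per-character input that get_new_segment() expects.
    chars = tok.tokenize("今天天气很好")
    input_ids, input_mask, segment_ids, mlm_labels, is_next = \
        dataset.create_training_instance(chars)
    print(input_ids[:16])
    print(mlm_labels[:16])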