get_mask.py 9.52 KB
Newer Older
mandoxzhang's avatar
mandoxzhang committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
import torch
import os
from enum import IntEnum
from random import choice
import random
import collections
import time
import logging
import jieba
jieba.setLogLevel(logging.CRITICAL)
import re
import numpy as np
import mask

# Token id used to pad input_ids / segment_ids / input_mask out to max_seq_length.
PAD = 0
# One masked position: `index` into the token sequence, `label` the original token string.
MaskedLMInstance = collections.namedtuple("MaskedLMInstance",
                                          ["index", "label"])


def map_to_numpy(data):
    """Wrap *data* in a NumPy ndarray (no copy when it already is one)."""
    as_array = np.asarray(data)
    return as_array


class PreTrainingDataset():
    """Builds BERT-style masked-LM pre-training examples.

    Supports Chinese whole-word masking: the input sequence is re-segmented
    with jieba and the non-leading characters of each multi-character word
    are tagged with a "##" prefix, so that the masking step can mask whole
    words together.

    Args:
        tokenizer: BERT-style tokenizer exposing ``vocab``, ``tokenize`` and
            ``convert_tokens_to_ids``.
        max_seq_length: fixed length every example is padded to.
        backend: ``'python'`` uses the pure-Python masker in this class;
            ``'c++'`` delegates to the compiled ``mask`` extension module.
        max_predictions_per_seq: upper bound on masked positions per example.
        do_whole_word_mask: group "##"-prefixed pieces with their head token
            so whole words are masked as a unit.
    """

    def __init__(self,
                 tokenizer,
                 max_seq_length,
                 backend='python',
                 max_predictions_per_seq: int = 80,
                 do_whole_word_mask: bool = True):
        self.tokenizer = tokenizer
        self.max_seq_length = max_seq_length
        self.masked_lm_prob = 0.15  # fraction of tokens selected for prediction
        self.backend = backend
        self.do_whole_word_mask = do_whole_word_mask
        self.max_predictions_per_seq = max_predictions_per_seq
        self.vocab_words = list(tokenizer.vocab.keys())
        # A single CJK unified ideograph / one carrying the "##" word marker.
        self.rec = re.compile('[\u4E00-\u9FA5]')
        self.whole_rec = re.compile('##[\u4E00-\u9FA5]')

        # BERT masking split: 15% of tokens selected; of those 80% become
        # [MASK], ~10% are replaced by a random token, ~10% kept unchanged.
        self.mlm_p = 0.15
        self.mlm_mask_p = 0.8
        self.mlm_tamper_p = 0.05
        self.mlm_maintain_p = 0.1

    def tokenize(self, doc):
        """Tokenize each element of *doc*; return the list of token lists."""
        return [self.tokenizer.tokenize(d) for d in doc]

    def create_training_instance(self, instance):
        """Turn one token sequence into a padded masked-LM training example.

        Args:
            instance: list of tokens for a single sentence (no [CLS]/[SEP]).

        Returns:
            A list of five numpy arrays: input_ids, input_mask, segment_ids,
            masked_lm_output (label id per position, -1 where no prediction)
            and the next-sentence label (always 1 — only one segment is used).

        Raises:
            ValueError: if ``self.backend`` is neither 'c++' nor 'python'.
        """
        is_next = 1  # NSP label is fixed; second-segment logic is disabled.
        # Re-segment with jieba and tag word-internal characters with "##".
        raw_text_list = self.get_new_segment(instance)
        tokens_a = raw_text_list
        assert len(tokens_a) == len(instance)

        tokens = ["[CLS]"]
        original_tokens = ["[CLS]"]  # same tokens without the "##" markers
        segment_ids = [0]
        for index, token in enumerate(tokens_a):
            tokens.append(token)
            original_tokens.append(instance[index])
            segment_ids.append(0)
        tokens.append("[SEP]")
        original_tokens.append("[SEP]")
        segment_ids.append(0)

        # Apply masked-LM corruption with the selected backend.
        if self.backend == 'c++':
            output_tokens, masked_lm_output = mask.create_whole_masked_lm_predictions(
                tokens, original_tokens, self.vocab_words,
                self.tokenizer.vocab, self.max_predictions_per_seq,
                self.masked_lm_prob)
        elif self.backend == 'python':
            output_tokens, masked_lm_output = \
                self.create_whole_masked_lm_predictions(tokens)
        else:
            # Previously fell through and crashed with a NameError; fail loudly.
            raise ValueError(f"unknown backend: {self.backend!r}")

        input_ids = self.tokenizer.convert_tokens_to_ids(output_tokens)
        input_mask = [1] * len(input_ids)

        # Pad every stream to max_seq_length; -1 marks "no prediction here".
        while len(input_ids) < self.max_seq_length:
            input_ids.append(PAD)
            segment_ids.append(PAD)
            input_mask.append(PAD)
            masked_lm_output.append(-1)
        return [
            map_to_numpy(input_ids),
            map_to_numpy(input_mask),
            map_to_numpy(segment_ids),
            map_to_numpy(masked_lm_output),
            map_to_numpy([is_next])
        ]

    def create_masked_lm_predictions(self, tokens):
        """Per-WordPiece masking (no whole-word grouping).

        Bug fix: the original grouped candidate positions into sub-lists
        (the whole-word layout) but then used each entry as a scalar index,
        so ``covered_indexes.add(index)`` raised ``TypeError: unhashable
        type: 'list'`` on any "##" continuation. Candidates are now kept as
        flat integer indices; whole-word masking lives in
        :meth:`create_whole_masked_lm_predictions`.

        Returns:
            (output_tokens, masked_lm_output): corrupted token list and a
            per-position label-id list (-1 where no prediction is made).
        """
        cand_indexes = []
        for i, token in enumerate(tokens):
            if token == "[CLS]" or token == "[SEP]":
                continue
            cand_indexes.append(i)

        random.shuffle(cand_indexes)
        output_tokens = list(tokens)

        num_to_predict = min(
            self.max_predictions_per_seq,
            max(1, int(round(len(tokens) * self.masked_lm_prob))))

        masked_lms = []
        covered_indexes = set()
        for index in cand_indexes:
            if len(masked_lms) >= num_to_predict:
                break
            if index in covered_indexes:
                continue
            covered_indexes.add(index)

            # 80% -> [MASK]; of the rest, half keep the original token and
            # half substitute a uniformly random vocabulary word.
            if random.random() < 0.8:
                masked_token = "[MASK]"
            elif random.random() < 0.5:
                masked_token = tokens[index]
            else:
                masked_token = self.vocab_words[random.randint(
                    0, len(self.vocab_words) - 1)]

            output_tokens[index] = masked_token
            masked_lms.append(
                MaskedLMInstance(index=index, label=tokens[index]))

        masked_lms = sorted(masked_lms, key=lambda x: x.index)
        masked_lm_output = [-1] * len(output_tokens)
        for p in masked_lms:
            masked_lm_output[p.index] = self.tokenizer.vocab[p.label]

        return (output_tokens, masked_lm_output)

    def get_new_segment(self, segment):
        """Tag characters that continue a jieba word with a "##" prefix.

        This marks which characters belong to the same (Chinese) word so the
        whole-word masking step can mask them together.

        Args:
            segment: list of single characters/tokens for one sentence.

        Returns:
            A token list of the same length where the 2nd..nth character of
            every multi-character jieba word is prefixed with "##".
        """
        seq_cws = jieba.lcut(''.join(segment))
        seq_cws_dict = {x: 1 for x in seq_cws}
        new_segment = []
        i = 0
        while i < len(segment):
            # Non-Chinese tokens pass through unchanged.
            if len(self.rec.findall(segment[i])) == 0:
                new_segment.append(segment[i])
                i += 1
                continue

            has_add = False
            # Greedily match the longest (<= 3 char) jieba word at position i.
            for length in range(3, 0, -1):
                if i + length > len(segment):
                    continue
                if ''.join(segment[i: i + length]) in seq_cws_dict:
                    new_segment.append(segment[i])
                    for l in range(1, length):
                        new_segment.append('##' + segment[i + l])
                    i += length
                    has_add = True
                    break
            if not has_add:
                new_segment.append(segment[i])
                i += 1
        return new_segment

    def create_whole_masked_lm_predictions(self, tokens):
        """Creates the predictions for the masked LM objective.

        Whole Word Masking: when a word has been split into pieces, the first
        piece has no marker and continuation pieces are prefixed with "##",
        so every "##" piece is appended to the previous candidate set and the
        whole set is masked (or skipped) together. Note this does *not*
        change the training code — each piece is still predicted
        independently over the full vocabulary.

        Returns:
            (output_tokens, masked_lm_output): corrupted tokens with the
            "##" markers stripped, and a per-position label-id list (-1
            where no prediction is made).
        """
        cand_indexes = []
        for (i, token) in enumerate(tokens):
            if token == "[CLS]" or token == "[SEP]":
                continue
            if (self.do_whole_word_mask and len(cand_indexes) >= 1 and
                    token.startswith("##")):
                cand_indexes[-1].append(i)
            else:
                cand_indexes.append([i])

        random.shuffle(cand_indexes)

        # Strip the "##" word markers (only on Chinese continuation pieces).
        output_tokens = [t[2:] if len(self.whole_rec.findall(t)) > 0 else t
                         for t in tokens]

        num_to_predict = min(self.max_predictions_per_seq,
                             max(1, int(round(len(tokens) * self.masked_lm_prob))))

        masked_lms = []
        covered_indexes = set()
        for index_set in cand_indexes:
            if len(masked_lms) >= num_to_predict:
                break
            # If masking this whole word would exceed the prediction budget,
            # skip the candidate entirely.
            if len(masked_lms) + len(index_set) > num_to_predict:
                continue
            is_any_index_covered = False
            for index in index_set:
                if index in covered_indexes:
                    is_any_index_covered = True
                    break
            if is_any_index_covered:
                continue
            for index in index_set:
                covered_indexes.add(index)

                masked_token = None
                # 80% of the time, replace with [MASK]
                if random.random() < 0.8:
                    masked_token = "[MASK]"
                else:
                    # 10% of the time, keep the original (marker stripped)
                    if random.random() < 0.5:
                        masked_token = (tokens[index][2:]
                                        if len(self.whole_rec.findall(tokens[index])) > 0
                                        else tokens[index])
                    # 10% of the time, replace with a random word
                    else:
                        masked_token = self.vocab_words[random.randint(0, len(self.vocab_words) - 1)]

                output_tokens[index] = masked_token

                masked_lms.append(MaskedLMInstance(
                    index=index,
                    label=tokens[index][2:]
                    if len(self.whole_rec.findall(tokens[index])) > 0
                    else tokens[index]))
        assert len(masked_lms) <= num_to_predict
        masked_lms = sorted(masked_lms, key=lambda x: x.index)
        masked_lm_output = [-1] * len(output_tokens)
        for p in masked_lms:
            masked_lm_output[p.index] = self.tokenizer.vocab[p.label]

        return (output_tokens, masked_lm_output)