# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import numpy as np
import string
import json


class ClsLabelEncode(object):
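    """ Map a classification label to its index in label_list """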
    def __init__(self, label_list, **kwargs):
        self.label_list = label_list

    def __call__(self, data):
        label = data['label']
        if label not in self.label_list:
            return None
        label = self.label_list.index(label)
        data['label'] = label
        return data


class DetLabelEncode(object):
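    """ Parse a detection label (a JSON string of boxes and transcriptions) into polygons, texts and ignore tags """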
    def __init__(self, **kwargs):
        pass

    def __call__(self, data):
        label = data['label']
        label = json.loads(label)
        nBox = len(label)
        boxes, txts, txt_tags = [], [], []
        for bno in range(0, nBox):
            box = label[bno]['points']
            txt = label[bno]['transcription']
            boxes.append(box)
            txts.append(txt)
            if txt in ['*', '###']:
                txt_tags.append(True)
            else:
                txt_tags.append(False)
        if len(boxes) == 0:
            return None
        boxes = self.expand_points_num(boxes)
        boxes = np.array(boxes, dtype=np.float32)
        txt_tags = np.array(txt_tags, dtype=bool)

        data['polys'] = boxes
        data['texts'] = txts
        data['ignore_tags'] = txt_tags
        return data

    def order_points_clockwise(self, pts):
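        """Order four points as top-left, top-right, bottom-right, bottom-left.

        The top-left corner has the smallest x + y sum and the bottom-right the
        largest; the top-right has the smallest y - x difference and the
        bottom-left the largest.
        """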
        rect = np.zeros((4, 2), dtype="float32")
        s = pts.sum(axis=1)
        rect[0] = pts[np.argmin(s)]
        rect[2] = pts[np.argmax(s)]
        diff = np.diff(pts, axis=1)
        rect[1] = pts[np.argmin(diff)]
        rect[3] = pts[np.argmax(diff)]
        return rect

    def expand_points_num(self, boxes):
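        """Pad every box to the same number of points by repeating its last point."""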
        max_points_num = 0
        for box in boxes:
            if len(box) > max_points_num:
                max_points_num = len(box)
        ex_boxes = []
        for box in boxes:
            ex_box = box + [box[-1]] * (max_points_num - len(box))
            ex_boxes.append(ex_box)
        return ex_boxes


class BaseRecLabelEncode(object):
    """ Convert between text-label and text-index """

    def __init__(self,
                 max_text_length,
                 character_dict_path=None,
                 character_type='ch',
                 use_space_char=False):
        support_character_type = [
            'ch', 'en', 'EN_symbol', 'french', 'german', 'japan', 'korean',
            'EN', 'it', 'xi', 'pu', 'ru', 'ar', 'ta', 'ug', 'fa', 'ur', 'rs',
            'oc', 'rsc', 'bg', 'uk', 'be', 'te', 'ka', 'chinese_cht', 'hi',
            'mr', 'ne', 'latin', 'arabic', 'cyrillic', 'devanagari'
        ]
        assert character_type in support_character_type, "Only {} are supported now but got {}".format(
            support_character_type, character_type)

        self.max_text_len = max_text_length
        self.beg_str = "sos"
        self.end_str = "eos"
        if character_type == "en":
            self.character_str = "0123456789abcdefghijklmnopqrstuvwxyz"
            dict_character = list(self.character_str)
        elif character_type == "EN_symbol":
            # same with ASTER setting (use 94 char).
            self.character_str = string.printable[:-6]
            dict_character = list(self.character_str)
        elif character_type in support_character_type:
            self.character_str = ""
            assert character_dict_path is not None, "character_dict_path should not be None when character_type is {}".format(
                character_type)
            with open(character_dict_path, "rb") as fin:
                lines = fin.readlines()
                for line in lines:
                    line = line.decode('utf-8').strip("\n").strip("\r\n")
                    self.character_str += line
            if use_space_char:
                self.character_str += " "
            dict_character = list(self.character_str)
        self.character_type = character_type
        dict_character = self.add_special_char(dict_character)
        self.dict = {}
        for i, char in enumerate(dict_character):
            self.dict[char] = i
        self.character = dict_character

    def add_special_char(self, dict_character):
        return dict_character

    def encode(self, text):
        """convert text-label into text-index.
        input:
            text: text label of one image, a string

        output:
            text_list: list of dictionary indices for the characters in the
                       label, or None if the label is empty, longer than
                       max_text_len, or contains no character from the dict
        """
        if len(text) == 0 or len(text) > self.max_text_len:
            return None
        if self.character_type == "en":
            text = text.lower()
        text_list = []
        for char in text:
            if char not in self.dict:
                # logger = get_logger()
                # logger.warning('{} is not in dict'.format(char))
                continue
            text_list.append(self.dict[char])
        if len(text_list) == 0:
            return None
        return text_list


class CTCLabelEncode(BaseRecLabelEncode):
    """ Convert between text-label and text-index """

    def __init__(self,
                 max_text_length,
                 character_dict_path=None,
                 character_type='ch',
                 use_space_char=False,
                 **kwargs):
        super(CTCLabelEncode,
              self).__init__(max_text_length, character_dict_path,
                             character_type, use_space_char)

    def __call__(self, data):
        text = data['label']
        text = self.encode(text)
        if text is None:
            return None
        data['length'] = np.array(len(text))
        text = text + [0] * (self.max_text_len - len(text))
        data['label'] = np.array(text)
        return data

    def add_special_char(self, dict_character):
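        # CTC reserves index 0 for the 'blank' token, so every real character is
        # shifted up by one; __call__ above pads labels with this blank index.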
        dict_character = ['blank'] + dict_character
        return dict_character
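

# A minimal usage sketch for CTCLabelEncode (the dictionary path and label below
# are hypothetical, assuming a dict file with one character per line):
#   encoder = CTCLabelEncode(max_text_length=25, character_dict_path='dict.txt')
#   data = encoder({'label': 'hello'})
#   data['label']   -> length-25 index array padded with the blank index 0
#   data['length']  -> original label length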


class E2ELabelEncodeTest(BaseRecLabelEncode):
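    """ Parse an end-to-end label (JSON string) for evaluation: polygons and
    ignore tags, plus transcriptions encoded with the recognition dictionary
    and padded to max_text_length """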
    def __init__(self,
                 max_text_length,
                 character_dict_path=None,
                 character_type='EN',
                 use_space_char=False,
                 **kwargs):
        super(E2ELabelEncodeTest,
              self).__init__(max_text_length, character_dict_path,
                             character_type, use_space_char)

    def __call__(self, data):
        padnum = len(self.dict)
        label = data['label']
        label = json.loads(label)
        nBox = len(label)
        boxes, txts, txt_tags = [], [], []
        for bno in range(0, nBox):
            box = label[bno]['points']
            txt = label[bno]['transcription']
            boxes.append(box)
            txts.append(txt)
            if txt in ['*', '###']:
                txt_tags.append(True)
            else:
                txt_tags.append(False)
        boxes = np.array(boxes, dtype=np.float32)
        txt_tags = np.array(txt_tags, dtype=bool)
        data['polys'] = boxes
        data['ignore_tags'] = txt_tags
        temp_texts = []
        for text in txts:
            text = text.lower()
            text = self.encode(text)
            if text is None:
                return None
            # pad with padnum == len(self.dict) (36 for the default EN dictionary)
            text = text + [padnum] * (self.max_text_len - len(text))
            temp_texts.append(text)
        data['texts'] = np.array(temp_texts)
        return data


class E2ELabelEncodeTrain(object):
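    """ Parse an end-to-end label (JSON string) for training: polygons, raw
    transcriptions and ignore tags """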
    def __init__(self, **kwargs):
        pass

    def __call__(self, data):
        label = data['label']
        label = json.loads(label)
        nBox = len(label)
        boxes, txts, txt_tags = [], [], []
        for bno in range(0, nBox):
            box = label[bno]['points']
            txt = label[bno]['transcription']
            boxes.append(box)
            txts.append(txt)
            if txt in ['*', '###']:
                txt_tags.append(True)
            else:
                txt_tags.append(False)
        boxes = np.array(boxes, dtype=np.float32)
        txt_tags = np.array(txt_tags, dtype=bool)

        data['polys'] = boxes
        data['texts'] = txts
        data['ignore_tags'] = txt_tags
        return data


class AttnLabelEncode(BaseRecLabelEncode):
    """ Convert between text-label and text-index """

    def __init__(self,
                 max_text_length,
                 character_dict_path=None,
                 character_type='ch',
                 use_space_char=False,
                 **kwargs):
        super(AttnLabelEncode,
              self).__init__(max_text_length, character_dict_path,
                             character_type, use_space_char)

    def add_special_char(self, dict_character):
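        # Attention decoding needs explicit start ("sos") and end ("eos") tokens;
        # they are placed at the first and last positions of the dictionary.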
        self.beg_str = "sos"
        self.end_str = "eos"
        dict_character = [self.beg_str] + dict_character + [self.end_str]
        return dict_character

    def __call__(self, data):
        text = data['label']
        text = self.encode(text)
        if text is None:
            return None
        if len(text) >= self.max_text_len:
            return None
        data['length'] = np.array(len(text))
        text = [0] + text + [len(self.character) - 1] + [0] * (
            self.max_text_len - len(text) - 2)
        data['label'] = np.array(text)
        return data

    def get_ignored_tokens(self):
        beg_idx = self.get_beg_end_flag_idx("beg")
        end_idx = self.get_beg_end_flag_idx("end")
        return [beg_idx, end_idx]

    def get_beg_end_flag_idx(self, beg_or_end):
        if beg_or_end == "beg":
            idx = np.array(self.dict[self.beg_str])
        elif beg_or_end == "end":
            idx = np.array(self.dict[self.end_str])
        else:
            assert False, "Unsupport type %s in get_beg_end_flag_idx" \
                          % beg_or_end
        return idx


class SRNLabelEncode(BaseRecLabelEncode):
    """ Convert between text-label and text-index """

    def __init__(self,
                 max_text_length=25,
                 character_dict_path=None,
                 character_type='en',
                 use_space_char=False,
                 **kwargs):
        super(SRNLabelEncode,
              self).__init__(max_text_length, character_dict_path,
                             character_type, use_space_char)

    def add_special_char(self, dict_character):
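        # SRN appends the start and end tokens after the characters, so the last
        # index (char_num - 1) doubles as the padding value used in __call__.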
        dict_character = dict_character + [self.beg_str, self.end_str]
        return dict_character

    def __call__(self, data):
        text = data['label']
        text = self.encode(text)
        char_num = len(self.character)
        if text is None:
            return None
        if len(text) > self.max_text_len:
            return None
        data['length'] = np.array(len(text))
        text = text + [char_num - 1] * (self.max_text_len - len(text))
        data['label'] = np.array(text)
        return data

    def get_ignored_tokens(self):
        beg_idx = self.get_beg_end_flag_idx("beg")
        end_idx = self.get_beg_end_flag_idx("end")
        return [beg_idx, end_idx]

    def get_beg_end_flag_idx(self, beg_or_end):
        if beg_or_end == "beg":
            idx = np.array(self.dict[self.beg_str])
        elif beg_or_end == "end":
            idx = np.array(self.dict[self.end_str])
        else:
            assert False, "Unsupport type %s in get_beg_end_flag_idx" \
                          % beg_or_end
        return idx


class TableLabelEncode(object):
    """ Convert between text-label and text-index """

    def __init__(self,
                 max_text_length,
                 max_elem_length,
                 max_cell_num,
                 character_dict_path,
                 span_weight=1.0,
                 **kwargs):
        self.max_text_length = max_text_length
        self.max_elem_length = max_elem_length
        self.max_cell_num = max_cell_num
        list_character, list_elem = self.load_char_elem_dict(
            character_dict_path)
        list_character = self.add_special_char(list_character)
        list_elem = self.add_special_char(list_elem)
        self.dict_character = {}
        for i, char in enumerate(list_character):
            self.dict_character[char] = i
        self.dict_elem = {}
        for i, elem in enumerate(list_elem):
            self.dict_elem[elem] = i
        self.span_weight = span_weight

    def load_char_elem_dict(self, character_dict_path):
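        """Load the table dictionary: the first line holds the character count and
        element count (tab separated), followed by that many character lines and
        then that many structure-element lines."""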
        list_character = []
        list_elem = []
        with open(character_dict_path, "rb") as fin:
            lines = fin.readlines()
            substr = lines[0].decode('utf-8').strip("\r\n").split("\t")
            character_num = int(substr[0])
            elem_num = int(substr[1])

            for cno in range(1, 1 + character_num):
                character = lines[cno].decode('utf-8').strip("\r\n")
                list_character.append(character)
            for eno in range(1 + character_num, 1 + character_num + elem_num):
                elem = lines[eno].decode('utf-8').strip("\r\n")
                list_elem.append(elem)
        return list_character, list_elem

    def add_special_char(self, list_character):
        self.beg_str = "sos"
        self.end_str = "eos"
        list_character = [self.beg_str] + list_character + [self.end_str]
        return list_character

    def get_span_idx_list(self):
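        """Return the indices of all structure elements whose name contains 'span'."""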
        span_idx_list = []
        for elem in self.dict_elem:
            if 'span' in elem:
                span_idx_list.append(self.dict_elem[elem])
        return span_idx_list

    def __call__(self, data):
        cells = data['cells']
        structure = data['structure']['tokens']
        structure = self.encode(structure, 'elem')
        if structure is None:
            return None
        elem_num = len(structure)
        structure = [0] + structure + [len(self.dict_elem) - 1]
        structure = structure + [0] * (
            self.max_elem_length + 2 - len(structure))
        structure = np.array(structure)
        data['structure'] = structure
        elem_char_idx1 = self.dict_elem['<td>']
        elem_char_idx2 = self.dict_elem['<td']
        span_idx_list = self.get_span_idx_list()
        td_idx_list = np.logical_or(structure == elem_char_idx1,
                                    structure == elem_char_idx2)
        td_idx_list = np.where(td_idx_list)[0]

        structure_mask = np.ones(
            (self.max_elem_length + 2, 1), dtype=np.float32)
        bbox_list = np.zeros((self.max_elem_length + 2, 4), dtype=np.float32)
        bbox_list_mask = np.zeros(
            (self.max_elem_length + 2, 1), dtype=np.float32)
        img_height, img_width, img_ch = data['image'].shape
        if len(span_idx_list) > 0:
            span_weight = len(td_idx_list) * 1.0 / len(span_idx_list)
            span_weight = min(max(span_weight, 1.0), self.span_weight)
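        # For every cell that has a bbox, normalize it by the image size and store
        # it at the position of its '<td>'/'<td' token; if the token right after a
        # '<td' is a span attribute, its structure-loss weight is set to span_weight.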
        for cno in range(len(cells)):
            if 'bbox' in cells[cno]:
                bbox = cells[cno]['bbox'].copy()
                bbox[0] = bbox[0] * 1.0 / img_width
                bbox[1] = bbox[1] * 1.0 / img_height
                bbox[2] = bbox[2] * 1.0 / img_width
                bbox[3] = bbox[3] * 1.0 / img_height
                td_idx = td_idx_list[cno]
                bbox_list[td_idx] = bbox
                bbox_list_mask[td_idx] = 1.0
                cand_span_idx = td_idx + 1
                if cand_span_idx < (self.max_elem_length + 2):
                    if structure[cand_span_idx] in span_idx_list:
                        structure_mask[cand_span_idx] = span_weight

        data['bbox_list'] = bbox_list
        data['bbox_list_mask'] = bbox_list_mask
        data['structure_mask'] = structure_mask
        char_beg_idx = self.get_beg_end_flag_idx('beg', 'char')
        char_end_idx = self.get_beg_end_flag_idx('end', 'char')
        elem_beg_idx = self.get_beg_end_flag_idx('beg', 'elem')
        elem_end_idx = self.get_beg_end_flag_idx('end', 'elem')
        data['sp_tokens'] = np.array([
            char_beg_idx, char_end_idx, elem_beg_idx, elem_end_idx,
            elem_char_idx1, elem_char_idx2, self.max_text_length,
            self.max_elem_length, self.max_cell_num, elem_num
        ])
        return data

    def encode(self, text, char_or_elem):
        """convert text-label into text-index.
        """
        if char_or_elem == "char":
            max_len = self.max_text_length
            current_dict = self.dict_character
        else:
            max_len = self.max_elem_length
            current_dict = self.dict_elem
        if len(text) > max_len:
            return None
        if len(text) == 0:
            if char_or_elem == "char":
                return [self.dict_character['space']]
            else:
                return None
        text_list = []
        for char in text:
            if char not in current_dict:
                return None
            text_list.append(current_dict[char])
        if len(text_list) == 0:
            if char_or_elem == "char":
                return [self.dict_character['space']]
            else:
                return None
        return text_list

    def get_ignored_tokens(self, char_or_elem):
        beg_idx = self.get_beg_end_flag_idx("beg", char_or_elem)
        end_idx = self.get_beg_end_flag_idx("end", char_or_elem)
        return [beg_idx, end_idx]

    def get_beg_end_flag_idx(self, beg_or_end, char_or_elem):
        if char_or_elem == "char":
            if beg_or_end == "beg":
                idx = np.array(self.dict_character[self.beg_str])
            elif beg_or_end == "end":
                idx = np.array(self.dict_character[self.end_str])
            else:
                assert False, "Unsupport type %s in get_beg_end_flag_idx of char" \
                              % beg_or_end
        elif char_or_elem == "elem":
            if beg_or_end == "beg":
                idx = np.array(self.dict_elem[self.beg_str])
            elif beg_or_end == "end":
                idx = np.array(self.dict_elem[self.end_str])
            else:
                assert False, "Unsupport type %s in get_beg_end_flag_idx of elem" \
                              % beg_or_end
        else:
            assert False, "Unsupport type %s in char_or_elem" \
                              % char_or_elem
        return idx


class SARLabelEncode(BaseRecLabelEncode):
    """ Convert between text-label and text-index """

    def __init__(self,
                 max_text_length,
                 character_dict_path=None,
                 character_type='ch',
                 use_space_char=False,
                 **kwargs):
        super(SARLabelEncode,
              self).__init__(max_text_length, character_dict_path,
                             character_type, use_space_char)

    def add_special_char(self, dict_character):
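        # SAR extends the dictionary with an unknown token, a single shared
        # begin/end token and a padding token, appended in that order, so
        # start_idx == end_idx and padding_idx is the last index.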
        beg_end_str = "<BOS/EOS>"
        unknown_str = "<UKN>"
        padding_str = "<PAD>"
        dict_character = dict_character + [unknown_str]
        self.unknown_idx = len(dict_character) - 1
        dict_character = dict_character + [beg_end_str]
        self.start_idx = len(dict_character) - 1
        self.end_idx = len(dict_character) - 1
        dict_character = dict_character + [padding_str]
        self.padding_idx = len(dict_character) - 1

        return dict_character

    def __call__(self, data):
        text = data['label']
        text = self.encode(text)
        if text is None:
            return None
        if len(text) >= self.max_text_len - 1:
            return None
        data['length'] = np.array(len(text))
        target = [self.start_idx] + text + [self.end_idx]
        padded_text = [self.padding_idx for _ in range(self.max_text_len)]
        padded_text[:len(target)] = target
        data['label'] = np.array(padded_text)
        return data

    def get_ignored_tokens(self):
        return [self.padding_idx]