# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 XLNet model.
"""
from __future__ import absolute_import, division, print_function, unicode_literals

import json
import logging
import math
import os
import sys
from io import open

import numpy as np
import tensorflow as tf

from .configuration_xlnet import XLNetConfig
from .modeling_tf_utils import TFPreTrainedModel, TFSharedEmbeddings, TFSequenceSummary, shape_list
from .file_utils import add_start_docstrings
from .modeling_tf_pytorch_utils import load_pytorch_checkpoint_in_tf2_model


logger = logging.getLogger(__name__)

TF_XLNET_PRETRAINED_MODEL_ARCHIVE_MAP = {
    'xlnet-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-base-cased-tf_model.h5",
    'xlnet-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-large-cased-tf_model.h5",
}


def load_xlnet_pt_weights_in_tf2(tf_model, pytorch_checkpoint_path):
    inputs_list = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
    tf_inputs = tf.constant(inputs_list)
    tfo = tf_model(tf_inputs, training=False)  # build the network
    return load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path, tf_inputs=tf_inputs)
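
# A minimal, illustrative sketch (not part of the library) of how this loader might be
# used, assuming a hypothetical local checkpoint path:
#
#     config = XLNetConfig.from_pretrained('xlnet-base-cased')
#     tf_model = TFXLNetModel(config)
#     tf_model = load_xlnet_pt_weights_in_tf2(tf_model, './pytorch_model.bin')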


def gelu(x):
    """ Implementation of the gelu activation function.
        XLNet is using OpenAI GPT's gelu
        Also see https://arxiv.org/abs/1606.08415
    """
    cdf = 0.5 * (1.0 + tf.tanh(
        (np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
    return x * cdf


def swish(x):
    return x * tf.sigmoid(x)


ACT2FN = {"gelu": tf.keras.layers.Activation(gelu),
          "relu": tf.keras.activations.relu,
          "swish": tf.keras.layers.Activation(swish)}

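# Minimal usage sketch (illustrative only): config.ff_activation is expected to be one
# of the keys above, e.g.
#
#     act = ACT2FN["gelu"]
#     y = act(tf.constant([-1.0, 0.0, 1.0]))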

class TFXLNetRelativeAttention(tf.keras.layers.Layer):
    def __init__(self, config, **kwargs):
        super(TFXLNetRelativeAttention, self).__init__(**kwargs)
        self.output_attentions = config.output_attentions

        if config.d_model % config.n_head != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.d_model, config.n_head))

        self.n_head = config.n_head
        self.d_head = config.d_head
        self.d_model = config.d_model
        self.scale = 1 / (config.d_head ** 0.5)
        self.initializer_range = config.initializer_range

        self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='layer_norm')
        self.dropout = tf.keras.layers.Dropout(config.dropout)

    def build(self, input_shape):
        initializer = tf.random_normal_initializer(mean=0., stddev=self.initializer_range)
        self.q = self.add_weight(shape=(self.d_model, self.n_head, self.d_head),
                                 initializer=initializer,
                                 trainable=True, name='q')
        self.k = self.add_weight(shape=(self.d_model, self.n_head, self.d_head),
                                 initializer=initializer,
                                 trainable=True, name='k')
        self.v = self.add_weight(shape=(self.d_model, self.n_head, self.d_head),
                                 initializer=initializer,
                                 trainable=True, name='v')
        self.o = self.add_weight(shape=(self.d_model, self.n_head, self.d_head),
                                 initializer=initializer,
                                 trainable=True, name='o')
        self.r = self.add_weight(shape=(self.d_model, self.n_head, self.d_head),
                                 initializer=initializer,
                                 trainable=True, name='r')
        self.r_r_bias = self.add_weight(shape=(self.n_head, self.d_head),
                                        initializer=initializer,
                                        trainable=True, name='r_r_bias')
        self.r_s_bias = self.add_weight(shape=(self.n_head, self.d_head),
                                        initializer=initializer,
                                        trainable=True, name='r_s_bias')
        self.r_w_bias = self.add_weight(shape=(self.n_head, self.d_head),
                                        initializer=initializer,
                                        trainable=True, name='r_w_bias')
        self.seg_embed = self.add_weight(shape=(2, self.n_head, self.d_head),
                                        initializer=initializer,
                                        trainable=True, name='seg_embed')
        super(TFXLNetRelativeAttention, self).build(input_shape)

    def prune_heads(self, heads):
        raise NotImplementedError

    @staticmethod
    def rel_shift(x, klen=-1):
        """perform relative shift to form the relative attention score."""
        x_size = shape_list(x)

        x = tf.reshape(x, (x_size[1], x_size[0], x_size[2], x_size[3]))
        x = x[1:, ...]
        x = tf.reshape(x, (x_size[0], x_size[1] - 1, x_size[2], x_size[3]))
        x = x[:, 0:klen, :, :]
        # x = torch.index_select(x, 1, torch.arange(klen, device=x.device, dtype=torch.long))

        return x
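
    # Illustrative note on the shift above: the position scores come in with shape
    # [qlen, pos_len, bsz, n_head], where the second axis indexes relative positions.
    # The reshape / drop-first-row / reshape sequence realigns each row so that, after
    # truncating to `klen` columns, entry (i, j) holds the score for the relative
    # offset between query position i and key position j.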

    def rel_attn_core(self, inputs, training=False):
        """Core relative positional attention operations."""

        q_head, k_head_h, v_head_h, k_head_r, seg_mat, attn_mask, head_mask = inputs
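
        # einsum index legend for the expressions below (descriptive note):
        #   i = query positions, j = key positions, b = batch, n = heads,
        #   d = head dimension, s = segment classes (2: same / different segment)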

        # content based attention score
        ac = tf.einsum('ibnd,jbnd->ijbn', q_head + self.r_w_bias, k_head_h)

        # position based attention score
        bd = tf.einsum('ibnd,jbnd->ijbn', q_head + self.r_r_bias, k_head_r)
        bd = self.rel_shift(bd, klen=ac.shape[1])

        # segment based attention score
        if seg_mat is None:
            ef = 0
        else:
            ef = tf.einsum('ibnd,snd->ibns', q_head + self.r_s_bias, self.seg_embed)
            ef = tf.einsum('ijbs,ibns->ijbn', seg_mat, ef)

        # merge attention scores and perform masking
        attn_score = (ac + bd + ef) * self.scale
        if attn_mask is not None:
            # attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask
            if attn_mask.dtype == tf.float16:
                attn_score = attn_score - 65500 * attn_mask
            else:
                attn_score = attn_score - 1e30 * attn_mask

        # attention probability
        attn_prob = tf.nn.softmax(attn_score, axis=1)

        attn_prob = self.dropout(attn_prob, training=training)

        # Mask heads if we want to
        if head_mask is not None:
            attn_prob = attn_prob * head_mask

        # attention output
        attn_vec = tf.einsum('ijbn,jbnd->ibnd', attn_prob, v_head_h)

        if self.output_attentions:
            return attn_vec, attn_prob

        return attn_vec

    def post_attention(self, inputs, residual=True, training=False):
        """Post-attention processing."""
        # post-attention projection (back to `d_model`)
        h, attn_vec = inputs

        attn_out = tf.einsum('ibnd,hnd->ibh', attn_vec, self.o)

        attn_out = self.dropout(attn_out, training=training)

        if residual:
            attn_out = attn_out + h
        output = self.layer_norm(attn_out)

        return output

    def call(self, inputs, training=False):
        (h, g, attn_mask_h, attn_mask_g,
         r, seg_mat, mems, target_mapping, head_mask) = inputs

        if g is not None:
            ###### Two-stream attention with relative positional encoding.
            # content based attention score
            if mems is not None and mems.shape.ndims > 1:
                cat = tf.concat([mems, h], axis=0)
            else:
                cat = h

            # content-based key head
            k_head_h = tf.einsum('ibh,hnd->ibnd', cat, self.k)

            # content-based value head
            v_head_h = tf.einsum('ibh,hnd->ibnd', cat, self.v)

            # position-based key head
            k_head_r = tf.einsum('ibh,hnd->ibnd', r, self.r)

            ##### h-stream
            # content-stream query head
            q_head_h = tf.einsum('ibh,hnd->ibnd', h, self.q)

            # core attention ops
            attn_vec_h = self.rel_attn_core(
                [q_head_h, k_head_h, v_head_h, k_head_r, seg_mat, attn_mask_h, head_mask],
                training=training)

            if self.output_attentions:
                attn_vec_h, attn_prob_h = attn_vec_h

            # post processing
            output_h = self.post_attention([h, attn_vec_h], training=training)

            ##### g-stream
            # query-stream query head
            q_head_g = tf.einsum('ibh,hnd->ibnd', g, self.q)

            # core attention ops
            if target_mapping is not None:
                q_head_g = tf.einsum('mbnd,mlb->lbnd', q_head_g, target_mapping)
                attn_vec_g = self.rel_attn_core(
                    [q_head_g, k_head_h, v_head_h, k_head_r, seg_mat, attn_mask_g, head_mask],
                    training=training)

                if self.output_attentions:
                    attn_vec_g, attn_prob_g = attn_vec_g

                attn_vec_g = tf.einsum('lbnd,mlb->mbnd', attn_vec_g, target_mapping)
            else:
                attn_vec_g = self.rel_attn_core(
                    [q_head_g, k_head_h, v_head_h, k_head_r, seg_mat, attn_mask_g, head_mask],
                    training=training)

                if self.output_attentions:
                    attn_vec_g, attn_prob_g = attn_vec_g

            # post processing
            output_g = self.post_attention([g, attn_vec_g], training=training)

            if self.output_attentions:
                attn_prob = attn_prob_h, attn_prob_g

        else:
            ###### Multi-head attention with relative positional encoding
            if mems is not None and mems.shape.ndims > 1:
                cat = tf.concat([mems, h], axis=0)
            else:
                cat = h

            # content heads
            q_head_h = tf.einsum('ibh,hnd->ibnd', h, self.q)
            k_head_h = tf.einsum('ibh,hnd->ibnd', cat, self.k)
            v_head_h = tf.einsum('ibh,hnd->ibnd', cat, self.v)

            # positional heads
            k_head_r = tf.einsum('ibh,hnd->ibnd', r, self.r)

            # core attention ops
            attn_vec = self.rel_attn_core(
                [q_head_h, k_head_h, v_head_h, k_head_r, seg_mat, attn_mask_h, head_mask],
                training=training)

            if self.output_attentions:
                attn_vec, attn_prob = attn_vec

            # post processing
            output_h = self.post_attention([h, attn_vec], training=training)
            output_g = None

        outputs = (output_h, output_g)
        if self.output_attentions:
            outputs = outputs + (attn_prob,)
        return outputs

class TFXLNetFeedForward(tf.keras.layers.Layer):
    def __init__(self, config, **kwargs):
        super(TFXLNetFeedForward, self).__init__(**kwargs)
        self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='layer_norm')
        self.layer_1 = tf.keras.layers.Dense(config.d_inner, name='layer_1')
        self.layer_2 = tf.keras.layers.Dense(config.d_model, name='layer_2')
        self.dropout = tf.keras.layers.Dropout(config.dropout)
        if isinstance(config.ff_activation, str) or \
                (sys.version_info[0] == 2 and isinstance(config.ff_activation, unicode)):
            self.activation_function = ACT2FN[config.ff_activation]
        else:
            self.activation_function = config.ff_activation

    def call(self, inp, training=False):
        output = inp
        output = self.layer_1(output)
        output = self.activation_function(output)
        output = self.dropout(output, training=training)
        output = self.layer_2(output)
        output = self.dropout(output, training=training)
        output = self.layer_norm(output + inp)
        return output

class TFXLNetLayer(tf.keras.layers.Layer):
    def __init__(self, config, **kwargs):
        super(TFXLNetLayer, self).__init__(**kwargs)
        self.rel_attn = TFXLNetRelativeAttention(config, name='rel_attn')
        self.ff = TFXLNetFeedForward(config, name='ff')
        self.dropout = tf.keras.layers.Dropout(config.dropout)

    def call(self, inputs, training=False):
        outputs = self.rel_attn(inputs, training=training)
        output_h, output_g = outputs[:2]

        if output_g is not None:
            output_g = self.ff(output_g, training=training)
        output_h = self.ff(output_h, training=training)

        outputs = (output_h, output_g) + outputs[2:]  # Add attentions again if they are there
        return outputs


class TFXLNetLMHead(tf.keras.layers.Layer):
    def __init__(self, config, input_embeddings, **kwargs):
        super(TFXLNetLMHead, self).__init__(**kwargs)
        self.vocab_size = config.vocab_size
        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.input_embeddings = input_embeddings

    def build(self, input_shape):
        self.bias = self.add_weight(shape=(self.vocab_size,),
                                    initializer='zeros',
                                    trainable=True,
                                    name='bias')
        super(TFXLNetLMHead, self).build(input_shape)

    def call(self, hidden_states):
        hidden_states = self.input_embeddings(hidden_states, mode="linear")
        hidden_states = hidden_states + self.bias
        return hidden_states
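
    # In effect this computes logits = hidden_states @ word_embedding^T + bias, i.e. the
    # LM head shares its projection weights with the input embedding matrix (descriptive note).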


class TFXLNetMainLayer(tf.keras.layers.Layer):
    def __init__(self, config, **kwargs):
        super(TFXLNetMainLayer, self).__init__(**kwargs)
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states

        self.mem_len = config.mem_len
        self.reuse_len = config.reuse_len
        self.d_model = config.d_model
        self.same_length = config.same_length
        self.attn_type = config.attn_type
        self.bi_data = config.bi_data
        self.clamp_len = config.clamp_len
        self.n_layer = config.n_layer
        self.use_bfloat16 = config.use_bfloat16
        self.initializer_range = config.initializer_range

        self.word_embedding = TFSharedEmbeddings(config.n_token, config.d_model, initializer_range=config.initializer_range, name='word_embedding')
        self.layer = [TFXLNetLayer(config, name='layer_._{}'.format(i)) for i in range(config.n_layer)]
        self.dropout = tf.keras.layers.Dropout(config.dropout)

    def build(self, input_shape):
        initializer = tf.random_normal_initializer(mean=0., stddev=self.initializer_range)
        self.mask_emb = self.add_weight(shape=(1, 1, self.d_model),
                                 initializer=initializer,
                                 trainable=True, name='mask_emb')

    def _resize_token_embeddings(self, new_num_tokens):
        raise NotImplementedError

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    def create_mask(self, qlen, mlen, dtype=tf.float32):
        """
        Creates causal attention mask. Float mask where 1.0 indicates masked, 0.0 indicates not-masked.

        Args:
            qlen: length of the current query sequence (number of tokens being processed)
            mlen: length of the cached memory prepended to the keys

        ::

                  same_length=False:      same_length=True:
                  <mlen > <  qlen >       <mlen > <  qlen >
               ^ [0 0 0 0 0 1 1 1 1]     [0 0 0 0 0 1 1 1 1]
                 [0 0 0 0 0 0 1 1 1]     [1 0 0 0 0 0 1 1 1]
            qlen [0 0 0 0 0 0 0 1 1]     [1 1 0 0 0 0 0 1 1]
                 [0 0 0 0 0 0 0 0 1]     [1 1 1 0 0 0 0 0 1]
               v [0 0 0 0 0 0 0 0 0]     [1 1 1 1 0 0 0 0 0]

        """
        attn_mask = tf.ones([qlen, qlen], dtype=dtype)
        mask_u = tf.linalg.band_part(attn_mask, 0, -1)
        mask_dia = tf.linalg.band_part(attn_mask, 0, 0)
        attn_mask_pad = tf.zeros([qlen, mlen], dtype=dtype)
        ret = tf.concat([attn_mask_pad, mask_u - mask_dia], 1)
        if self.same_length:
            mask_l = tf.linalg.band_part(attn_mask, -1, 0)
            ret = tf.concat([ret[:, :qlen] + mask_l - mask_dia, ret[:, qlen:]], 1)
        return ret
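
    # Illustrative values (assuming same_length=False): create_mask(qlen=3, mlen=2) returns
    #     [[0. 0. 0. 1. 1.]
    #      [0. 0. 0. 0. 1.]
    #      [0. 0. 0. 0. 0.]]
    # i.e. every query position may attend to the memory, itself and earlier tokens,
    # but not to future tokens.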

    def cache_mem(self, curr_out, prev_mem):
        """cache hidden states into memory."""
        if self.mem_len is None or self.mem_len == 0:
            return None
        else:
            if self.reuse_len is not None and self.reuse_len > 0:
                curr_out = curr_out[:self.reuse_len]

            if prev_mem is None:
                new_mem = curr_out[-self.mem_len:]
            else:
                new_mem = tf.concat([prev_mem, curr_out], 0)[-self.mem_len:]

        return tf.stop_gradient(new_mem)
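
    # Sketch (illustrative): with mem_len=4 and reuse_len unset, a previous memory of
    # length 4 and a current output of length 3 yield the last 4 time steps of
    # concat([prev_mem, curr_out]) as the new memory, detached from the graph.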

    @staticmethod
    def positional_embedding(pos_seq, inv_freq, bsz=None):
        sinusoid_inp = tf.einsum('i,d->id', pos_seq, inv_freq)
        pos_emb = tf.concat([tf.sin(sinusoid_inp), tf.cos(sinusoid_inp)], axis=-1)
        pos_emb = pos_emb[:, None, :]

        if bsz is not None:
            pos_emb = tf.tile(pos_emb, [1, bsz, 1])

        return pos_emb

    def relative_positional_encoding(self, qlen, klen, bsz=None, dtype=None):
        """create relative positional encoding."""
        freq_seq = tf.range(0, self.d_model, 2.0)
        if dtype is not None and dtype != tf.float32:
            freq_seq = tf.cast(freq_seq, dtype=dtype)
        inv_freq = 1 / (10000 ** (freq_seq / self.d_model))

        if self.attn_type == 'bi':
            # beg, end = klen - 1, -qlen
            beg, end = klen, -qlen
        elif self.attn_type == 'uni':
            # beg, end = klen - 1, -1
            beg, end = klen, -1
        else:
            raise ValueError('Unknown `attn_type` {}.'.format(self.attn_type))

        if self.bi_data:
            fwd_pos_seq = tf.range(beg, end, -1.0)
            bwd_pos_seq = tf.range(-beg, -end, 1.0)

            if dtype is not None and dtype != tf.float32:
                fwd_pos_seq = tf.cast(fwd_pos_seq, dtype=dtype)
                bwd_pos_seq = tf.cast(bwd_pos_seq, dtype=dtype)

            if self.clamp_len > 0:
                fwd_pos_seq = tf.clip_by_value(fwd_pos_seq, -self.clamp_len, self.clamp_len)
                bwd_pos_seq = tf.clip_by_value(bwd_pos_seq, -self.clamp_len, self.clamp_len)

            if bsz is not None:
                # With bi_data, the batch size should be divisible by 2.
                assert bsz%2 == 0
                fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz//2)
                bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq, bsz//2)
            else:
                fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq)
                bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq)

            pos_emb = tf.concat([fwd_pos_emb, bwd_pos_emb], axis=1)
        else:
            fwd_pos_seq = tf.range(beg, end, -1.0)
            if dtype is not None and dtype != tf.float32:
                fwd_pos_seq = tf.cast(fwd_pos_seq, dtype=dtype)
            if self.clamp_len > 0:
                fwd_pos_seq = tf.clip_by_value(fwd_pos_seq, -self.clamp_len, self.clamp_len)
            pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz)

        return pos_emb
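
    # Descriptive note: when bsz is provided, the returned pos_emb has shape
    # [klen + qlen, bsz, d_model]; with bi_data=True the forward and backward encodings
    # are computed for each half of the batch and concatenated along the batch axis.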

    def call(self, inputs, attention_mask=None, mems=None, perm_mask=None, target_mapping=None,
            token_type_ids=None, input_mask=None, head_mask=None, training=False):
        if isinstance(inputs, (tuple, list)):
            input_ids = inputs[0]
            attention_mask = inputs[1] if len(inputs) > 1 else attention_mask
            mems = inputs[2] if len(inputs) > 2 else mems
            perm_mask = inputs[3] if len(inputs) > 3 else perm_mask
            target_mapping = inputs[4] if len(inputs) > 4 else target_mapping
            token_type_ids = inputs[5] if len(inputs) > 5 else token_type_ids
            input_mask = inputs[6] if len(inputs) > 6 else input_mask
            head_mask = inputs[7] if len(inputs) > 7 else head_mask
            assert len(inputs) <= 8, "Too many inputs."
        elif isinstance(inputs, dict):
            input_ids = inputs.get('input_ids')
            attention_mask = inputs.get('attention_mask', attention_mask)
            mems = inputs.get('mems', mems)
            perm_mask = inputs.get('perm_mask', perm_mask)
            target_mapping = inputs.get('target_mapping', target_mapping)
            token_type_ids = inputs.get('token_type_ids', token_type_ids)
            input_mask = inputs.get('input_mask', input_mask)
            head_mask = inputs.get('head_mask', head_mask)
            assert len(inputs) <= 8, "Too many inputs."
        else:
            input_ids = inputs

        # the original code for XLNet uses shapes [len, bsz] with the batch dimension at the end
        # but we want a unified interface in the library with the batch size on the first dimension
        # so we move the first dimension (batch) to the end here

        input_ids = tf.transpose(input_ids, perm=(1, 0))
        token_type_ids = tf.transpose(token_type_ids, perm=(1, 0)) if token_type_ids is not None else None
        input_mask = tf.transpose(input_mask, perm=(1, 0)) if input_mask is not None else None
        attention_mask = tf.transpose(attention_mask, perm=(1, 0)) if attention_mask is not None else None
        perm_mask = tf.transpose(perm_mask, perm=(1, 2, 0)) if perm_mask is not None else None
        target_mapping = tf.transpose(target_mapping, perm=(1, 2, 0)) if target_mapping is not None else None

        qlen, bsz = shape_list(input_ids)[:2]
        mlen = shape_list(mems[0])[0] if mems is not None and mems[0] is not None else 0
        klen = mlen + qlen

        dtype_float = tf.bfloat16 if self.use_bfloat16 else tf.float32

        ##### Attention mask
        # causal attention mask
        if self.attn_type == 'uni':
            attn_mask = self.create_mask(qlen, mlen)
            attn_mask = attn_mask[:, :, None, None]
        elif self.attn_type == 'bi':
            attn_mask = None
        else:
            raise ValueError('Unsupported attention type: {}'.format(self.attn_type))

        # data mask: input mask & perm mask
        assert input_mask is None or attention_mask is None, ("You can only use one of input_mask (uses 1 for padding) "
            "or attention_mask (uses 0 for padding, added for compatibility with BERT). Please choose one.")
        if input_mask is None and attention_mask is not None:
            input_mask = 1.0 - attention_mask
        if input_mask is not None and perm_mask is not None:
            data_mask = input_mask[None] + perm_mask
        elif input_mask is not None and perm_mask is None:
            data_mask = input_mask[None]
        elif input_mask is None and perm_mask is not None:
            data_mask = perm_mask
        else:
            data_mask = None

        if data_mask is not None:
            # all mems can be attended to
            mems_mask = tf.zeros([tf.shape(data_mask)[0], mlen, bsz],
                                dtype=dtype_float)
            data_mask = tf.concat([mems_mask, data_mask], axis=1)
            if attn_mask is None:
                attn_mask = data_mask[:, :, :, None]
            else:
                attn_mask += data_mask[:, :, :, None]

        if attn_mask is not None:
            attn_mask = tf.cast(attn_mask > 0, dtype=dtype_float)

        if attn_mask is not None:
            non_tgt_mask = -tf.eye(qlen, dtype=dtype_float)
            non_tgt_mask = tf.concat([tf.zeros([qlen, mlen], dtype=dtype_float), non_tgt_mask], axis=-1)
            non_tgt_mask = tf.cast((attn_mask + non_tgt_mask[:, :, None, None]) > 0, dtype=dtype_float)
        else:
            non_tgt_mask = None

        ##### Word embeddings and prepare h & g hidden states
        word_emb_k = self.word_embedding(input_ids)
        output_h = self.dropout(word_emb_k, training=training)
        if target_mapping is not None:
            word_emb_q = tf.tile(self.mask_emb, [tf.shape(target_mapping)[0], bsz, 1])
        # else:  # We removed the inp_q input which was same as target mapping
        #     inp_q_ext = inp_q[:, :, None]
        #     word_emb_q = inp_q_ext * self.mask_emb + (1 - inp_q_ext) * word_emb_k
            output_g = self.dropout(word_emb_q, training=training)
        else:
            output_g = None

        ##### Segment embedding
        if token_type_ids is not None:
            # Convert `token_type_ids` to one-hot `seg_mat`
            mem_pad = tf.zeros([mlen, bsz], dtype=tf.int32)
            cat_ids = tf.concat([mem_pad, token_type_ids], 0)

            # `1` indicates not in the same segment [qlen x klen x bsz]
            seg_mat = tf.cast(
                tf.logical_not(tf.equal(token_type_ids[:, None], cat_ids[None, :])),
                tf.int32)
            seg_mat = tf.one_hot(seg_mat, 2, dtype=dtype_float)
        else:
            seg_mat = None
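
        # Descriptive note: when token_type_ids are given, seg_mat has shape
        # [qlen, klen, bsz, 2]; the last axis one-hot encodes same- vs different-segment
        # for each (query, key) pair.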

        ##### Positional encoding
        pos_emb = self.relative_positional_encoding(qlen, klen, bsz=bsz, dtype=dtype_float)
        pos_emb = self.dropout(pos_emb, training=training)

        # Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer)
        # and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head]
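        # NOTE (descriptive): the branch below still mirrors the PyTorch implementation
        # (`.dim()`, `.unsqueeze()`, `.to()`) and will not work on TF tensors; pass
        # head_mask=None until it is ported.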
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0).unsqueeze(0)
                head_mask = head_mask.expand(self.n_layer, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(1).unsqueeze(1)
            head_mask = head_mask.to(dtype=next(self.parameters()).dtype)  # switch to float if needed + fp16 compatibility
        else:
            head_mask = [None] * self.n_layer

        new_mems = ()
        if mems is None:
            mems = [None] * len(self.layer)

        attentions = []
        hidden_states = []
        for i, layer_module in enumerate(self.layer):
            # cache new mems
            new_mems = new_mems + (self.cache_mem(output_h, mems[i]),)
            if self.output_hidden_states:
                hidden_states.append((output_h, output_g) if output_g is not None else output_h)

            outputs = layer_module([output_h, output_g, non_tgt_mask, attn_mask,
                                    pos_emb, seg_mat, mems[i], target_mapping,
                                    head_mask[i]], training=training)
            output_h, output_g = outputs[:2]
            if self.output_attentions:
                attentions.append(outputs[2])

        # Add last hidden state
        if self.output_hidden_states:
            hidden_states.append((output_h, output_g) if output_g is not None else output_h)

        output = self.dropout(output_g if output_g is not None else output_h, training=training)

        # Prepare outputs, we transpose back here to shape [bsz, len, hidden_dim] (cf. beginning of call() method)
        outputs = (tf.transpose(output, perm=(1, 0, 2)), new_mems)
        if self.output_hidden_states:
            if output_g is not None:
                hidden_states = tuple(tf.transpose(h, perm=(1, 0, 2)) for hs in hidden_states for h in hs)
            else:
                hidden_states = tuple(tf.transpose(hs, perm=(1, 0, 2)) for hs in hidden_states)
            outputs = outputs + (hidden_states,)
        if self.output_attentions:
            attentions = tuple(tf.transpose(t, perm=(2, 3, 0, 1)) for t in attentions)
            outputs = outputs + (attentions,)

        return outputs  # outputs, new_mems, (hidden_states), (attentions)


class TFXLNetPreTrainedModel(TFPreTrainedModel):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """
    config_class = XLNetConfig
    pretrained_model_archive_map = TF_XLNET_PRETRAINED_MODEL_ARCHIVE_MAP
    load_pt_weights = load_xlnet_pt_weights_in_tf2
    base_model_prefix = "transformer"


XLNET_START_DOCSTRING = r"""    The XLNet model was proposed in
    `XLNet: Generalized Autoregressive Pretraining for Language Understanding`_
    by Zhilin Yang*, Zihang Dai*, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le.
    XLNet is an extension of the Transformer-XL model pre-trained using an autoregressive method
    to learn bidirectional contexts by maximizing the expected likelihood over all permutations
    of the input sequence factorization order.

    The specific attention pattern can be controlled at training and test time using the `perm_mask` input.

    Due to the difficulty of training a fully auto-regressive model over various factorization orders,
    XLNet is pretrained using only a sub-set of the output tokens as targets, which are selected
    with the `target_mapping` input.

    To use XLNet for sequential decoding (i.e. not in a fully bi-directional setting), use the `perm_mask` and
    `target_mapping` inputs to control the attention span and outputs (see examples in `examples/run_generation.py`)

    This model is a `tf.keras.Model`_ sub-class. Use it as a regular TF 2.0 Keras Model and
    refer to the TF 2.0 documentation for all matter related to general usage and behavior.

    .. _`XLNet: Generalized Autoregressive Pretraining for Language Understanding`:
        http://arxiv.org/abs/1906.08237

    .. _`tf.keras.Model`:
        https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/Model

    Note on the model inputs:
        TF 2.0 models accept two formats as inputs:

            - having all inputs as keyword arguments (like PyTorch models), or
            - having all inputs as a list, tuple or dict in the first positional arguments.

        This second option is useful when using the `tf.keras.Model.fit()` method, which currently requires having all the tensors in the first argument of the model call function: `model(inputs)`.

        If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument:

        - a single Tensor with input_ids only and nothing else: `model(input_ids)`
        - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
            `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
        - a dictionary with one or several input Tensors associated to the input names given in the docstring:
            `model({'input_ids': input_ids, 'token_type_ids': token_type_ids})`

    Parameters:
        config (:class:`~pytorch_transformers.XLNetConfig`): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the configuration.
            Check out the :meth:`~pytorch_transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""

XLNET_INPUTS_DOCSTRING = r"""
    Inputs:
        **input_ids**: ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``:
            Indices of input sequence tokens in the vocabulary.
            XLNet is a model with relative position embeddings so you can either pad the inputs on
            the right or on the left.
            Indices can be obtained using :class:`pytorch_transformers.XLNetTokenizer`.
            See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and
            :func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
        **attention_mask**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``:
            Mask to avoid performing attention on padding token indices.
            Mask values selected in ``[0, 1]``:
            ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
        **mems**: (`optional`)
            list of ``Numpy array`` or ``tf.Tensor`` (one for each layer):
            that contains pre-computed hidden-states (key and values in the attention blocks) as output by the model
            (see `mems` output below). Can be used to speed up sequential decoding and attend to longer context.
            To activate mems you need to set up config.mem_len to a positive value which will be the max number of tokens in
            the memory output by the model. E.g. `model = XLNetModel.from_pretrained('xlnet-base-cased', mem_len=1024)` will
            instantiate a model which can use up to 1024 tokens of memory (in addition to the input itself).
        **perm_mask**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length, sequence_length)``:
            Mask to indicate the attention pattern for each input token with values selected in ``[0, 1]``:
            If ``perm_mask[k, i, j] = 0``, i attends to j in batch k;
            if ``perm_mask[k, i, j] = 1``, i does not attend to j in batch k.
            If None, each token attends to all the others (full bidirectional attention).
            Only used during pretraining (to define factorization order) or for sequential decoding (generation).
        **target_mapping**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, num_predict, sequence_length)``:
            Mask to indicate the output tokens to use.
            If ``target_mapping[k, i, j] = 1``, the i-th prediction in batch k is on the j-th token.
            Only used during pretraining for partial prediction or for sequential decoding (generation).
        **token_type_ids**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``:
            A parallel sequence of tokens (can be used to indicate various portions of the inputs).
            The type indices in XLNet are NOT selected in the vocabulary, they can be arbitrary numbers and
            the important thing is that they should be different for tokens which belong to different segments.
            The model will compute relative segment differences from the given type indices:
            0 if the segment ids of two tokens are the same, 1 if not.
        **input_mask**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``:
            Mask to avoid performing attention on padding token indices.
            Negative of `attention_mask`, i.e. with 0 for real tokens and 1 for padding.
            Kept for compatibility with the original code base.
            You can only use one of `input_mask` and `attention_mask`.
            Mask values selected in ``[0, 1]``:
            ``1`` for tokens that are MASKED, ``0`` for tokens that are NOT MASKED.
        **head_mask**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
            Mask to nullify selected heads of the self-attention modules.
            Mask values selected in ``[0, 1]``:
            ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
"""

@add_start_docstrings("The bare XLNet Model transformer outputting raw hidden-states without any specific head on top.",
                      XLNET_START_DOCSTRING, XLNET_INPUTS_DOCSTRING)
class TFXLNetModel(TFXLNetPreTrainedModel):
    r"""
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **last_hidden_state**: ``tf.Tensor`` of shape ``(batch_size, sequence_length, hidden_size)``
            Sequence of hidden-states at the last layer of the model.
        **mems**:
            list of ``tf.Tensor`` (one for each layer):
            that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
            if config.mem_len > 0 else tuple of None. Can be used to speed up sequential decoding and attend to longer context.
            See details in the docstring of the `mems` input above.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``tf.Tensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        import tensorflow as tf
        from pytorch_transformers import XLNetTokenizer, TFXLNetModel

        tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
        model = TFXLNetModel.from_pretrained('xlnet-large-cased')
        input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :]  # Batch size 1
        outputs = model(input_ids)
        last_hidden_states = outputs[0]  # The last hidden-state is the first element of the output tuple

    """
    def __init__(self, config, *inputs, **kwargs):
        super(TFXLNetModel, self).__init__(config, *inputs, **kwargs)
        self.transformer = TFXLNetMainLayer(config, name='transformer')

    def call(self, inputs, **kwargs):
        outputs = self.transformer(inputs, **kwargs)
        return outputs


@add_start_docstrings("""XLNet Model with a language modeling head on top
    (linear layer with weights tied to the input embeddings). """,
    XLNET_START_DOCSTRING, XLNET_INPUTS_DOCSTRING)
class TFXLNetLMHeadModel(TFXLNetPreTrainedModel):
    r"""
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **prediction_scores**: ``tf.Tensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        **mems**:
            list of ``tf.Tensor`` (one for each layer):
            that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
            if config.mem_len > 0 else tuple of None. Can be used to speed up sequential decoding and attend to longer context.
            See details in the docstring of the `mems` input above.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``tf.Tensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        import numpy as np
        import tensorflow as tf
        from pytorch_transformers import XLNetTokenizer, TFXLNetLMHeadModel

        tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
        model = TFXLNetLMHeadModel.from_pretrained('xlnet-large-cased')

        # We show how to setup inputs to predict a next token using a bi-directional context.
        input_ids = tf.constant(tokenizer.encode("Hello, my dog is very <mask>"))[None, :]  # We will predict the masked token
        # Build the masks with numpy, since TF tensors do not support item assignment
        perm_mask = np.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=np.float32)
        perm_mask[:, :, -1] = 1.0  # Previous tokens don't see last token
        target_mapping = np.zeros((1, 1, input_ids.shape[1]), dtype=np.float32)  # Shape [1, 1, seq_length] => let's predict one token
        target_mapping[0, 0, -1] = 1.0  # Our first (and only) prediction will be the last token of the sequence (the masked token)
        outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping)

        next_token_logits = outputs[0]  # Output has shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size]

    """
    def __init__(self, config, *inputs, **kwargs):
        super(TFXLNetLMHeadModel, self).__init__(config, *inputs, **kwargs)
        self.transformer = TFXLNetMainLayer(config, name='transformer')
        self.lm_loss = TFXLNetLMHead(config, self.transformer.word_embedding, name='lm_loss')

    def call(self, inputs, **kwargs):
        transformer_outputs = self.transformer(inputs, **kwargs)
        hidden_state = transformer_outputs[0]
        logits = self.lm_loss(hidden_state)

        outputs = (logits,) + transformer_outputs[1:]  # Keep mems, hidden states, attentions if there are in it

        return outputs  # return logits, mems, (hidden states), (attentions)


@add_start_docstrings("""XLNet Model with a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    XLNET_START_DOCSTRING, XLNET_INPUTS_DOCSTRING)
class TFXLNetForSequenceClassification(TFXLNetPreTrainedModel):
    r"""
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **logits**: ``tf.Tensor`` of shape ``(batch_size, config.num_labels)``
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        **mems**:
            list of ``tf.Tensor`` (one for each layer):
            that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
            if config.mem_len > 0 else tuple of None. Can be used to speed up sequential decoding and attend to longer context.
            See details in the docstring of the `mems` input above.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``tf.Tensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        import tensorflow as tf
        from pytorch_transformers import XLNetTokenizer, TFXLNetForSequenceClassification

        tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
        model = TFXLNetForSequenceClassification.from_pretrained('xlnet-large-cased')
        input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :]  # Batch size 1
        outputs = model(input_ids)
        logits = outputs[0]

    """
    def __init__(self, config, *inputs, **kwargs):
        super(TFXLNetForSequenceClassification, self).__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels

        self.transformer = TFXLNetMainLayer(config, name='transformer')
        self.sequence_summary = TFSequenceSummary(config, name='sequence_summary')
        self.logits_proj = tf.keras.layers.Dense(config.num_labels, name='logits_proj')

    def call(self, inputs, **kwargs):
        transformer_outputs = self.transformer(inputs, **kwargs)
        output = transformer_outputs[0]

        output = self.sequence_summary(output)
        logits = self.logits_proj(output)

        outputs = (logits,) + transformer_outputs[1:]  # Keep mems, hidden states, attentions if there are in it

        return outputs  # return logits, mems, (hidden states), (attentions)


# @add_start_docstrings("""XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
#     the hidden-states output to compute `span start logits` and `span end logits`). """,
#     XLNET_START_DOCSTRING, XLNET_INPUTS_DOCSTRING)
# class TFXLNetForQuestionAnswering(TFXLNetPreTrainedModel):
class TFXLNetForQuestionAnsweringSimple(TFXLNetPreTrainedModel):
    r"""
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **start_scores**: ``tf.Tensor`` of shape ``(batch_size, sequence_length,)``
            Span-start scores (before SoftMax).
        **end_scores**: ``tf.Tensor`` of shape ``(batch_size, sequence_length,)``
            Span-end scores (before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``tf.Tensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        import tensorflow as tf
        from pytorch_transformers import XLNetTokenizer, TFXLNetForQuestionAnsweringSimple

        tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased')
        model = TFXLNetForQuestionAnsweringSimple.from_pretrained('xlnet-base-cased')
        input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :]  # Batch size 1
        outputs = model(input_ids)
        start_scores, end_scores = outputs[:2]

    """
    def __init__(self, config, *inputs, **kwargs):
        super(TFXLNetForQuestionAnsweringSimple, self).__init__(config, *inputs, **kwargs)
        self.transformer = TFXLNetMainLayer(config, name='transformer')
        self.qa_outputs = tf.keras.layers.Dense(config.num_labels, name='qa_outputs')

    def call(self, inputs, **kwargs):
        transformer_outputs = self.transformer(inputs, **kwargs)

        sequence_output = transformer_outputs[0]

        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = tf.split(logits, 2, axis=-1)
        start_logits = tf.squeeze(start_logits, axis=-1)
        end_logits = tf.squeeze(end_logits, axis=-1)

        outputs = (start_logits, end_logits,) + transformer_outputs[1:]  # Keep mems, hidden states, attentions if there are in it

        return outputs  # start_logits, end_logits, (hidden_states), (attentions)

# @add_start_docstrings("""XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
#     the hidden-states output to compute `span start logits` and `span end logits`). """,
#     XLNET_START_DOCSTRING, XLNET_INPUTS_DOCSTRING)
# class TFXLNetForQuestionAnswering(TFXLNetPreTrainedModel):
#     r"""
#     Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
#         **start_top_log_probs**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
#             ``tf.Tensor`` of shape ``(batch_size, config.start_n_top)``
#             Log probabilities for the top config.start_n_top start token possibilities (beam-search).
#         **start_top_index**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
#             ``tf.Tensor`` of shape ``(batch_size, config.start_n_top)``
#             Indices for the top config.start_n_top start token possibilities (beam-search).
#         **end_top_log_probs**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
#             ``tf.Tensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``
#             Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
#         **end_top_index**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
#             ``tf.Tensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``
#             Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
#         **cls_logits**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
#             ``tf.Tensor`` of shape ``(batch_size,)``
#             Log probabilities for the ``is_impossible`` label of the answers.
#         **mems**:
#             list of ``tf.Tensor`` (one for each layer):
#             that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
#             if config.mem_len > 0 else tuple of None. Can be used to speed up sequential decoding and attend to longer context.
#             See details in the docstring of the `mems` input above.
#         **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
#             list of ``tf.Tensor`` (one for the output of each layer + the output of the embeddings)
#             of shape ``(batch_size, sequence_length, hidden_size)``:
#             Hidden-states of the model at the output of each layer plus the initial embedding outputs.
#         **attentions**: (`optional`, returned when ``config.output_attentions=True``)
#             list of ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
#             Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.

#     Examples::

#         tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
#         model = TFXLNetForQuestionAnswering.from_pretrained('xlnet-large-cased')
#         input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :]  # Batch size 1
#         start_positions = tf.constant([1])
#         end_positions = tf.constant([3])
#         outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
#         loss = outputs[0]

#     """
#     def __init__(self, config, *inputs, **kwargs):
#         super(TFXLNetForQuestionAnswering, self).__init__(config, *inputs, **kwargs)
#         self.start_n_top = config.start_n_top
#         self.end_n_top = config.end_n_top

#         self.transformer = TFXLNetMainLayer(config, name='transformer')
#         self.start_logits = TFPoolerStartLogits(config, name='start_logits')
#         self.end_logits = TFPoolerEndLogits(config, name='end_logits')
#         self.answer_class = TFPoolerAnswerClass(config, name='answer_class')

#     def call(self, inputs, training=False):
#         transformer_outputs = self.transformer(inputs, training=training)
#         hidden_states = transformer_outputs[0]
#         start_logits = self.start_logits(hidden_states, p_mask=p_mask)

#         outputs = transformer_outputs[1:]  # Keep mems, hidden states and attentions if they are there

#         if start_positions is not None and end_positions is not None:
#             # If we are on multi-GPU, let's remove the dimension added by batch splitting
#             for x in (start_positions, end_positions, cls_index, is_impossible):
#                 if x is not None and x.dim() > 1:
#                     x.squeeze_(-1)

#             # during training, compute the end logits based on the ground truth of the start position
#             end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask)

#             loss_fct = CrossEntropyLoss()
#             start_loss = loss_fct(start_logits, start_positions)
#             end_loss = loss_fct(end_logits, end_positions)
#             total_loss = (start_loss + end_loss) / 2

#             if cls_index is not None and is_impossible is not None:
#                 # Predict answerability from the representation of CLS and START
#                 cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index)
#                 loss_fct_cls = nn.BCEWithLogitsLoss()
#                 cls_loss = loss_fct_cls(cls_logits, is_impossible)

#                 # note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss
#                 total_loss += cls_loss * 0.5

#             outputs = (total_loss,) + outputs

#         else:
#             # during inference, compute the end logits based on beam search
#             bsz, slen, hsz = hidden_states.size()
#             start_log_probs = F.softmax(start_logits, dim=-1) # shape (bsz, slen)

#             start_top_log_probs, start_top_index = torch.topk(start_log_probs, self.start_n_top, dim=-1) # shape (bsz, start_n_top)
#             start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz) # shape (bsz, start_n_top, hsz)
#             start_states = torch.gather(hidden_states, -2, start_top_index_exp) # shape (bsz, start_n_top, hsz)
#             start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1) # shape (bsz, slen, start_n_top, hsz)

#             hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(start_states) # shape (bsz, slen, start_n_top, hsz)
#             p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None
#             end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask)
#             end_log_probs = F.softmax(end_logits, dim=1) # shape (bsz, slen, start_n_top)

#             end_top_log_probs, end_top_index = torch.topk(end_log_probs, self.end_n_top, dim=1) # shape (bsz, end_n_top, start_n_top)
#             end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top)
#             end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top)

#             start_states = torch.einsum("blh,bl->bh", hidden_states, start_log_probs)  # get the representation of START as weighted sum of hidden states
#             cls_logits = self.answer_class(hidden_states, start_states=start_states, cls_index=cls_index)  # Shape (batch size,): one single `cls_logits` for each sample

#             outputs = (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits) + outputs

#         # return start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits
#         # or (if labels are provided) (total_loss,)
#         return outputs
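

# The commented-out class above keeps the original PyTorch implementation of the
# beam-search style inference as a reference for a future TF port.  The sketch below
# is only an illustration (added, not part of the original file) of how the core
# top-k / gather steps could be written with TensorFlow ops; ``end_logits_layer`` is
# a hypothetical stand-in for the un-ported TFPoolerEndLogits module.
#
#     def beam_search_span_sketch(hidden_states, start_logits, end_logits_layer,
#                                 start_n_top, end_n_top, p_mask=None):
#         bsz, slen, hsz = shape_list(hidden_states)
#         start_probs = tf.nn.softmax(start_logits, axis=-1)                               # (bsz, slen)
#
#         start_top_probs, start_top_index = tf.math.top_k(start_probs, k=start_n_top)     # (bsz, start_n_top)
#         start_states = tf.gather(hidden_states, start_top_index, batch_dims=1, axis=1)   # (bsz, start_n_top, hsz)
#         start_states = tf.tile(start_states[:, None], [1, slen, 1, 1])                   # (bsz, slen, start_n_top, hsz)
#
#         hidden_states_expanded = tf.tile(hidden_states[:, :, None], [1, 1, start_n_top, 1])
#         end_logits = end_logits_layer(hidden_states_expanded, start_states=start_states, p_mask=p_mask)
#         end_probs = tf.nn.softmax(end_logits, axis=1)                                    # (bsz, slen, start_n_top)
#
#         # top-k over the sequence axis: move it to the last position first
#         end_top_probs, end_top_index = tf.math.top_k(tf.transpose(end_probs, perm=[0, 2, 1]), k=end_n_top)
#         end_top_probs = tf.reshape(end_top_probs, [-1, start_n_top * end_n_top])
#         end_top_index = tf.reshape(end_top_index, [-1, start_n_top * end_n_top])
#
#         # representation of START as a weighted sum of the hidden states
#         start_summary = tf.einsum('blh,bl->bh', hidden_states, start_probs)              # (bsz, hsz)
#         return start_top_probs, start_top_index, end_top_probs, end_top_index, start_summary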