# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch XLNet model.
"""
from __future__ import absolute_import, division, print_function, unicode_literals

import json
import logging
import math
import os
import sys
from io import open

import torch
from torch import nn
from torch.nn import functional as F
from torch.nn import CrossEntropyLoss, MSELoss

from .modeling_utils import PreTrainedModel, prune_linear_layer, SequenceSummary, PoolerAnswerClass, PoolerEndLogits, PoolerStartLogits
from .configuration_xlnet import XLNetConfig
from .file_utils import add_start_docstrings


logger = logging.getLogger(__name__)

XLNET_PRETRAINED_MODEL_ARCHIVE_MAP = {
    'xlnet-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-base-cased-pytorch_model.bin",
    'xlnet-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-large-cased-pytorch_model.bin",
}


def build_tf_xlnet_to_pytorch_map(model, config, tf_weights=None):
    """ A map of modules from TF to PyTorch.
        I use a map to keep the PyTorch model as
        identical to the original PyTorch model as possible.
    """

    tf_to_pt_map = {}

    if hasattr(model, 'transformer'):
        if hasattr(model, 'lm_loss'):
            # We will also load the output bias
            tf_to_pt_map['model/lm_loss/bias'] = model.lm_loss.bias
        if hasattr(model, 'sequence_summary') and 'model/sequnece_summary/summary/kernel' in tf_weights:
            # We will also load the sequence summary
            tf_to_pt_map['model/sequnece_summary/summary/kernel'] = model.sequence_summary.summary.weight
            tf_to_pt_map['model/sequnece_summary/summary/bias'] = model.sequence_summary.summary.bias
        if hasattr(model, 'logits_proj') and config.finetuning_task is not None \
                and 'model/regression_{}/logit/kernel'.format(config.finetuning_task) in tf_weights:
            tf_to_pt_map['model/regression_{}/logit/kernel'.format(config.finetuning_task)] = model.logits_proj.weight
            tf_to_pt_map['model/regression_{}/logit/bias'.format(config.finetuning_task)] = model.logits_proj.bias

        # Now load the rest of the transformer
        model = model.transformer

    # Embeddings and output
    tf_to_pt_map.update({'model/transformer/word_embedding/lookup_table': model.word_embedding.weight,
                         'model/transformer/mask_emb/mask_emb': model.mask_emb})

    # Transformer blocks
    for i, b in enumerate(model.layer):
        layer_str = "model/transformer/layer_%d/" % i
        tf_to_pt_map.update({
            layer_str + "rel_attn/LayerNorm/gamma": b.rel_attn.layer_norm.weight,
            layer_str + "rel_attn/LayerNorm/beta": b.rel_attn.layer_norm.bias,
            layer_str + "rel_attn/o/kernel": b.rel_attn.o,
            layer_str + "rel_attn/q/kernel": b.rel_attn.q,
            layer_str + "rel_attn/k/kernel": b.rel_attn.k,
            layer_str + "rel_attn/r/kernel": b.rel_attn.r,
            layer_str + "rel_attn/v/kernel": b.rel_attn.v,
            layer_str + "ff/LayerNorm/gamma": b.ff.layer_norm.weight,
            layer_str + "ff/LayerNorm/beta": b.ff.layer_norm.bias,
            layer_str + "ff/layer_1/kernel": b.ff.layer_1.weight,
            layer_str + "ff/layer_1/bias": b.ff.layer_1.bias,
            layer_str + "ff/layer_2/kernel": b.ff.layer_2.weight,
            layer_str + "ff/layer_2/bias": b.ff.layer_2.bias,
        })

    # Relative positioning biases
    if config.untie_r:
        r_r_list = []
        r_w_list = []
        r_s_list = []
        seg_embed_list = []
        for b in model.layer:
            r_r_list.append(b.rel_attn.r_r_bias)
            r_w_list.append(b.rel_attn.r_w_bias)
            r_s_list.append(b.rel_attn.r_s_bias)
            seg_embed_list.append(b.rel_attn.seg_embed)
    else:
        r_r_list = [model.r_r_bias]
        r_w_list = [model.r_w_bias]
        r_s_list = [model.r_s_bias]
        seg_embed_list = [model.seg_embed]
    tf_to_pt_map.update({
        'model/transformer/r_r_bias': r_r_list,
        'model/transformer/r_w_bias': r_w_list,
        'model/transformer/r_s_bias': r_s_list,
        'model/transformer/seg_embed': seg_embed_list})
    return tf_to_pt_map

def load_tf_weights_in_xlnet(model, config, tf_path):
    """ Load tf checkpoints in a pytorch model
    """
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error("Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions.")
        raise
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = build_tf_xlnet_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info("Importing {}".format(name))
        if name not in tf_weights:
            logger.info("{} not in tf pre-trained weights, skipping".format(name))
            continue
        array = tf_weights[name]
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
        # which are not required for using the pretrained model
        if 'kernel' in name and ('ff' in name or 'summary' in name or 'logit' in name):
            logger.info("Transposing")
            array = np.transpose(array)
        if isinstance(pointer, list):
            # Here we will split the TF weights
            assert len(pointer) == array.shape[0]
            for i, p_i in enumerate(pointer):
                arr_i = array[i, ...]
                try:
                    assert p_i.shape == arr_i.shape
                except AssertionError as e:
                    e.args += (p_i.shape, arr_i.shape)
                    raise
                logger.info("Initialize PyTorch weight {} for layer {}".format(name, i))
                p_i.data = torch.from_numpy(arr_i)
        else:
            try:
                assert pointer.shape == array.shape
            except AssertionError as e:
                e.args += (pointer.shape, array.shape)
                raise
            logger.info("Initialize PyTorch weight {}".format(name))
            pointer.data = torch.from_numpy(array)
        tf_weights.pop(name, None)
        tf_weights.pop(name + '/Adam', None)
        tf_weights.pop(name + '/Adam_1', None)

    logger.info("Weights not copied to PyTorch model: {}".format(', '.join(tf_weights.keys())))
    return model


def gelu(x):
    """ Implementation of the gelu activation function.
        XLNet uses OpenAI GPT's gelu (not exactly the same as BERT's)
        Also see https://arxiv.org/abs/1606.08415
    """
    cdf = 0.5 * (1.0 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    return x * cdf


def swish(x):
    return x * torch.sigmoid(x)


ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}


try:
    from apex.normalization.fused_layer_norm import FusedLayerNorm as XLNetLayerNorm
except (ImportError, AttributeError) as e:
    logger.info("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .")
    from torch.nn import LayerNorm as XLNetLayerNorm

class XLNetRelativeAttention(nn.Module):
    def __init__(self, config):
        super(XLNetRelativeAttention, self).__init__()
        self.output_attentions = config.output_attentions

        if config.d_model % config.n_head != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.d_model, config.n_head))

        self.n_head = config.n_head
        self.d_head = config.d_head
        self.d_model = config.d_model
        self.scale = 1 / (config.d_head ** 0.5)

        self.q = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
        self.k = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
        self.v = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
        self.o = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
        self.r = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))

        self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
        self.r_s_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
        self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
        self.seg_embed = nn.Parameter(torch.FloatTensor(2, self.n_head, self.d_head))

        self.layer_norm = XLNetLayerNorm(config.d_model, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.dropout)

    def prune_heads(self, heads):
        raise NotImplementedError

    @staticmethod
    def rel_shift(x, klen=-1):
        """perform relative shift to form the relative attention score."""
        x_size = x.shape

        x = x.reshape(x_size[1], x_size[0], x_size[2], x_size[3])
        x = x[1:, ...]
        x = x.reshape(x_size[0], x_size[1] - 1, x_size[2], x_size[3])
        # x = x[:, 0:klen, :, :]
        x = torch.index_select(x, 1, torch.arange(klen, device=x.device, dtype=torch.long))

        return x

    def rel_attn_core(self, q_head, k_head_h, v_head_h, k_head_r, seg_mat=None, attn_mask=None, head_mask=None):
        """Core relative positional attention operations."""

        # content based attention score
        ac = torch.einsum('ibnd,jbnd->ijbn', q_head + self.r_w_bias, k_head_h)

        # position based attention score
        bd = torch.einsum('ibnd,jbnd->ijbn', q_head + self.r_r_bias, k_head_r)
        bd = self.rel_shift(bd, klen=ac.shape[1])

        # segment based attention score
        if seg_mat is None:
            ef = 0
        else:
            ef = torch.einsum('ibnd,snd->ibns', q_head + self.r_s_bias, self.seg_embed)
            ef = torch.einsum('ijbs,ibns->ijbn', seg_mat, ef)

        # merge attention scores and perform masking
        attn_score = (ac + bd + ef) * self.scale
        if attn_mask is not None:
            # attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask
            if attn_mask.dtype == torch.float16:
                attn_score = attn_score - 65500 * attn_mask
            else:
                attn_score = attn_score - 1e30 * attn_mask

        # attention probability
        attn_prob = F.softmax(attn_score, dim=1)
        attn_prob = self.dropout(attn_prob)

        # Mask heads if we want to
        if head_mask is not None:
            attn_prob = attn_prob * head_mask

        # attention output
        attn_vec = torch.einsum('ijbn,jbnd->ibnd', attn_prob, v_head_h)

        if self.output_attentions:
            return attn_vec, attn_prob

        return attn_vec

    def post_attention(self, h, attn_vec, residual=True):
        """Post-attention processing."""
        # post-attention projection (back to `d_model`)
        attn_out = torch.einsum('ibnd,hnd->ibh', attn_vec, self.o)

        attn_out = self.dropout(attn_out)
        if residual:
            attn_out = attn_out + h
        output = self.layer_norm(attn_out)

        return output

    def forward(self, h, g,
                      attn_mask_h, attn_mask_g,
                      r, seg_mat,
                      mems=None, target_mapping=None, head_mask=None):
        if g is not None:
            ###### Two-stream attention with relative positional encoding.
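            # h is the content stream (can use the content of every token, including itself);
            # g is the query stream (can use the position of the token being predicted but not
            # its content), following the two-stream self-attention of the XLNet paper.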
            # content based attention score
            if mems is not None and mems.dim() > 1:
                cat = torch.cat([mems, h], dim=0)
            else:
                cat = h

            # content-based key head
            k_head_h = torch.einsum('ibh,hnd->ibnd', cat, self.k)

            # content-based value head
            v_head_h = torch.einsum('ibh,hnd->ibnd', cat, self.v)

            # position-based key head
            k_head_r = torch.einsum('ibh,hnd->ibnd', r, self.r)

            ##### h-stream
            # content-stream query head
            q_head_h = torch.einsum('ibh,hnd->ibnd', h, self.q)

            # core attention ops
            attn_vec_h = self.rel_attn_core(
                q_head_h, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_h, head_mask=head_mask)

            if self.output_attentions:
                attn_vec_h, attn_prob_h = attn_vec_h

            # post processing
            output_h = self.post_attention(h, attn_vec_h)

            ##### g-stream
            # query-stream query head
            q_head_g = torch.einsum('ibh,hnd->ibnd', g, self.q)

            # core attention ops
            if target_mapping is not None:
                q_head_g = torch.einsum('mbnd,mlb->lbnd', q_head_g, target_mapping)
                attn_vec_g = self.rel_attn_core(
                    q_head_g, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_g, head_mask=head_mask)

                if self.output_attentions:
                    attn_vec_g, attn_prob_g = attn_vec_g

                attn_vec_g = torch.einsum('lbnd,mlb->mbnd', attn_vec_g, target_mapping)
            else:
                attn_vec_g = self.rel_attn_core(
                    q_head_g, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_g, head_mask=head_mask)

                if self.output_attentions:
                    attn_vec_g, attn_prob_g = attn_vec_g

            # post processing
            output_g = self.post_attention(g, attn_vec_g)

            if self.output_attentions:
                attn_prob = attn_prob_h, attn_prob_g

        else:
            ###### Multi-head attention with relative positional encoding
            if mems is not None and mems.dim() > 1:
                cat = torch.cat([mems, h], dim=0)
            else:
                cat = h

            # content heads
            q_head_h = torch.einsum('ibh,hnd->ibnd', h, self.q)
            k_head_h = torch.einsum('ibh,hnd->ibnd', cat, self.k)
            v_head_h = torch.einsum('ibh,hnd->ibnd', cat, self.v)

            # positional heads
            k_head_r = torch.einsum('ibh,hnd->ibnd', r, self.r)

            # core attention ops
            attn_vec = self.rel_attn_core(
                q_head_h, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_h, head_mask=head_mask)

            if self.output_attentions:
                attn_vec, attn_prob = attn_vec

            # post processing
            output_h = self.post_attention(h, attn_vec)
            output_g = None

        outputs = (output_h, output_g)
        if self.output_attentions:
            outputs = outputs + (attn_prob,)
        return outputs

class XLNetFeedForward(nn.Module):
    def __init__(self, config):
        super(XLNetFeedForward, self).__init__()
        self.layer_norm = XLNetLayerNorm(config.d_model, eps=config.layer_norm_eps)
        self.layer_1 = nn.Linear(config.d_model, config.d_inner)
        self.layer_2 = nn.Linear(config.d_inner, config.d_model)
        self.dropout = nn.Dropout(config.dropout)
        if isinstance(config.ff_activation, str) or \
                (sys.version_info[0] == 2 and isinstance(config.ff_activation, unicode)):
            self.activation_function = ACT2FN[config.ff_activation]
        else:
            self.activation_function = config.ff_activation

    def forward(self, inp):
        output = inp
        output = self.layer_1(output)
        output = self.activation_function(output)
        output = self.dropout(output)
        output = self.layer_2(output)
        output = self.dropout(output)
        output = self.layer_norm(output + inp)
        return output

class XLNetLayer(nn.Module):
    def __init__(self, config):
        super(XLNetLayer, self).__init__()
        self.rel_attn = XLNetRelativeAttention(config)
        self.ff = XLNetFeedForward(config)
        self.dropout = nn.Dropout(config.dropout)

    def forward(self, output_h, output_g,
                attn_mask_h, attn_mask_g,
                r, seg_mat, mems=None, target_mapping=None, head_mask=None):
        outputs = self.rel_attn(output_h, output_g, attn_mask_h, attn_mask_g,
                                r, seg_mat, mems=mems, target_mapping=target_mapping,
                                head_mask=head_mask)
        output_h, output_g = outputs[:2]

        if output_g is not None:
            output_g = self.ff(output_g)
        output_h = self.ff(output_h)

        outputs = (output_h, output_g) + outputs[2:]  # Add again attentions if they are there
        return outputs


class XLNetPreTrainedModel(PreTrainedModel):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """
    config_class = XLNetConfig
    pretrained_model_archive_map = XLNET_PRETRAINED_MODEL_ARCHIVE_MAP
    load_tf_weights = load_tf_weights_in_xlnet
    base_model_prefix = "transformer"

    def _init_weights(self, module):
        """ Initialize the weights.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if isinstance(module, nn.Linear) and module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, XLNetLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, XLNetRelativeAttention):
            for param in [module.q, module.k, module.v, module.o, module.r,
                          module.r_r_bias, module.r_s_bias, module.r_w_bias,
                          module.seg_embed]:
                param.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, XLNetModel):
            module.mask_emb.data.normal_(mean=0.0, std=self.config.initializer_range)


XLNET_START_DOCSTRING = r"""    The XLNet model was proposed in
    `XLNet: Generalized Autoregressive Pretraining for Language Understanding`_
    by Zhilin Yang*, Zihang Dai*, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le.
    XLNet is an extension of the Transformer-XL model, pre-trained using an autoregressive method
    to learn bidirectional contexts by maximizing the expected likelihood over all permutations
    of the input sequence factorization order.

    The specific attention pattern can be controlled at training and test time using the `perm_mask` input.

    Due to the difficulty of training a fully auto-regressive model over various factorization orders,
    XLNet is pretrained using only a sub-set of the output tokens as targets, which are selected
    with the `target_mapping` input.

    To use XLNet for sequential decoding (i.e. not in fully bi-directional setting), use the `perm_mask` and
    `target_mapping` inputs to control the attention span and outputs (see examples in `examples/run_generation.py`)

    This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and
    refer to the PyTorch documentation for all matters related to general usage and behavior.

    .. _`XLNet: Generalized Autoregressive Pretraining for Language Understanding`:
        http://arxiv.org/abs/1906.08237

    .. _`torch.nn.Module`:
        https://pytorch.org/docs/stable/nn.html#module

    Parameters:
        config (:class:`~pytorch_transformers.XLNetConfig`): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the configuration.
            Check out the :meth:`~pytorch_transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""

XLNET_INPUTS_DOCSTRING = r"""
    Inputs:
        **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Indices of input sequence tokens in the vocabulary.
            XLNet is a model with relative position embeddings so you can either pad the inputs on
            the right or on the left.
            Indices can be obtained using :class:`pytorch_transformers.XLNetTokenizer`.
            See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and
            :func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
        **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            A parallel sequence of tokens (can be used to indicate various portions of the inputs).
            The type indices in XLNet are NOT selected in the vocabulary, they can be arbitrary numbers and
            the important thing is that they should be different for tokens which belong to different segments.
            The model will compute relative segment differences from the given type indices:
            0 if the segment ids of two tokens are the same, 1 if not.
        **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:
            Mask to avoid performing attention on padding token indices.
            Mask values selected in ``[0, 1]``:
            ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
        **mems**: (`optional`)
            list of ``torch.FloatTensor`` (one for each layer):
            that contains pre-computed hidden-states (key and values in the attention blocks) as output by the model
            (see `mems` output below). Can be used to speed up sequential decoding and attend to longer context.
            To activate mems you need to set config.mem_len to a positive value which will be the max number of tokens in
            the memory output by the model. E.g. `model = XLNetModel.from_pretrained('xlnet-base-cased', mem_len=1024)` will
            instantiate a model which can use up to 1024 tokens of memory (in addition to the input itself).
        **perm_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, sequence_length)``:
            Mask to indicate the attention pattern for each input token with values selected in ``[0, 1]``:
            If ``perm_mask[k, i, j] = 0``, i attends to j in batch k;
            if ``perm_mask[k, i, j] = 1``, i does not attend to j in batch k.
            If None, each token attends to all the others (full bidirectional attention).
            Only used during pretraining (to define factorization order) or for sequential decoding (generation).
        **target_mapping**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, num_predict, sequence_length)``:
            Mask to indicate the output tokens to use.
            If ``target_mapping[k, i, j] = 1``, the i-th prediction in batch k is on the j-th token.
            Only used during pretraining for partial prediction or for sequential decoding (generation).
        **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            A parallel sequence of tokens (can be used to indicate various portions of the inputs).
            The embeddings from these tokens will be summed with the respective token embeddings.
            Indices are selected in the vocabulary (unlike BERT which has a specific vocabulary for segment indices).
        **input_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:
            Mask to avoid performing attention on padding token indices.
            Negative of `attention_mask`, i.e. with 0 for real tokens and 1 for padding.
            Kept for compatibility with the original code base.
            You can only use one of `input_mask` and `attention_mask`.
            Mask values selected in ``[0, 1]``:
            ``1`` for tokens that are MASKED, ``0`` for tokens that are NOT MASKED.
        **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
            Mask to nullify selected heads of the self-attention modules.
            Mask values selected in ``[0, 1]``:
            ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
"""

@add_start_docstrings("The bare XLNet Model transformer outputting raw hidden-states without any specific head on top.",
                      XLNET_START_DOCSTRING, XLNET_INPUTS_DOCSTRING)
class XLNetModel(XLNetPreTrainedModel):
    r"""
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
            Sequence of hidden-states at the last layer of the model.
        **mems**:
            list of ``torch.FloatTensor`` (one for each layer):
            that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
            if config.mem_len > 0 else tuple of None. Can be used to speed up sequential decoding and attend to longer context.
            See details in the docstring of the `mems` input above.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
        model = XLNetModel.from_pretrained('xlnet-large-cased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        last_hidden_states = outputs[0]  # The last hidden-state is the first element of the output tuple

    """
    def __init__(self, config):
        super(XLNetModel, self).__init__(config)
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states

        self.mem_len = config.mem_len
        self.reuse_len = config.reuse_len
        self.d_model = config.d_model
        self.same_length = config.same_length
        self.attn_type = config.attn_type
        self.bi_data = config.bi_data
        self.clamp_len = config.clamp_len
        self.n_layer = config.n_layer

        self.word_embedding = nn.Embedding(config.n_token, config.d_model)
        self.mask_emb = nn.Parameter(torch.FloatTensor(1, 1, config.d_model))
        self.layer = nn.ModuleList([XLNetLayer(config) for _ in range(config.n_layer)])
        self.dropout = nn.Dropout(config.dropout)

        self.init_weights()

    def _resize_token_embeddings(self, new_num_tokens):
        self.word_embedding = self._get_resized_embeddings(self.word_embedding, new_num_tokens)
        return self.word_embedding

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    def create_mask(self, qlen, mlen):
        """
        Creates causal attention mask. Float mask where 1.0 indicates masked, 0.0 indicates not-masked.

        Args:
            qlen: length of the current segment (query length)
            mlen: length of the cached memory, 0 if there is none

        ::

                  same_length=False:      same_length=True:
                  <mlen > <  qlen >       <mlen > <  qlen >
               ^ [0 0 0 0 0 1 1 1 1]     [0 0 0 0 0 1 1 1 1]
                 [0 0 0 0 0 0 1 1 1]     [1 0 0 0 0 0 1 1 1]
            qlen [0 0 0 0 0 0 0 1 1]     [1 1 0 0 0 0 0 1 1]
                 [0 0 0 0 0 0 0 0 1]     [1 1 1 0 0 0 0 0 1]
               v [0 0 0 0 0 0 0 0 0]     [1 1 1 1 0 0 0 0 0]

        """
        attn_mask = torch.ones([qlen, qlen])
        mask_up = torch.triu(attn_mask, diagonal=1)
        attn_mask_pad = torch.zeros([qlen, mlen])
        ret = torch.cat([attn_mask_pad, mask_up], dim=1)
        if self.same_length:
            mask_lo = torch.tril(attn_mask, diagonal=-1)
            ret = torch.cat([ret[:, :qlen] + mask_lo, ret[:, qlen:]], dim=1)

        ret = ret.to(next(self.parameters()))
        return ret

    def cache_mem(self, curr_out, prev_mem):
        """cache hidden states into memory."""
        if self.mem_len is None or self.mem_len == 0:
            return None
        else:
            if self.reuse_len is not None and self.reuse_len > 0:
                curr_out = curr_out[:self.reuse_len]

            if prev_mem is None:
                new_mem = curr_out[-self.mem_len:]
            else:
                new_mem = torch.cat([prev_mem, curr_out], dim=0)[-self.mem_len:]

        return new_mem.detach()

    @staticmethod
    def positional_embedding(pos_seq, inv_freq, bsz=None):
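        # Standard sinusoidal encoding: outer product of positions and inverse frequencies,
        # with sin and cos concatenated along the feature dimension -> shape [len, 1, d_model].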
        sinusoid_inp = torch.einsum('i,d->id', pos_seq, inv_freq)
        pos_emb = torch.cat([torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)], dim=-1)
        pos_emb = pos_emb[:, None, :]

        if bsz is not None:
            pos_emb = pos_emb.expand(-1, bsz, -1)

        return pos_emb

    def relative_positional_encoding(self, qlen, klen, bsz=None):
        """create relative positional encoding."""
        freq_seq = torch.arange(0, self.d_model, 2.0, dtype=torch.float)
        inv_freq = 1 / torch.pow(10000, (freq_seq / self.d_model))

        if self.attn_type == 'bi':
            # beg, end = klen - 1, -qlen
            beg, end = klen, -qlen
        elif self.attn_type == 'uni':
            # beg, end = klen - 1, -1
            beg, end = klen, -1
        else:
            raise ValueError('Unknown `attn_type` {}.'.format(self.attn_type))

        if self.bi_data:
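            # bi_data (as used in the original TF pretraining setup) builds positional
            # embeddings for both directions and concatenates them along the batch dimension.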
            fwd_pos_seq = torch.arange(beg, end, -1.0, dtype=torch.float)
            bwd_pos_seq = torch.arange(-beg, -end, 1.0, dtype=torch.float)

            if self.clamp_len > 0:
                fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
                bwd_pos_seq = bwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)

            if bsz is not None:
                fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz//2)
                bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq, bsz//2)
            else:
                fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq)
                bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq)

            pos_emb = torch.cat([fwd_pos_emb, bwd_pos_emb], dim=1)
        else:
            fwd_pos_seq = torch.arange(beg, end, -1.0)
            if self.clamp_len > 0:
                fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
            pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz)

        pos_emb = pos_emb.to(next(self.parameters()))
        return pos_emb

    def forward(self, input_ids, attention_mask=None, mems=None, perm_mask=None, target_mapping=None,
                token_type_ids=None, input_mask=None, head_mask=None):
        # the original code for XLNet uses shapes [len, bsz] with the batch dimension at the end
        # but we want a unified interface in the library with the batch size on the first dimension
        # so here we move the first dimension (batch) to the end
        input_ids = input_ids.transpose(0, 1).contiguous()
        token_type_ids = token_type_ids.transpose(0, 1).contiguous() if token_type_ids is not None else None
        input_mask = input_mask.transpose(0, 1).contiguous() if input_mask is not None else None
        attention_mask = attention_mask.transpose(0, 1).contiguous() if attention_mask is not None else None
        perm_mask = perm_mask.permute(1, 2, 0).contiguous() if perm_mask is not None else None
        target_mapping = target_mapping.permute(1, 2, 0).contiguous() if target_mapping is not None else None

        qlen, bsz = input_ids.shape[0], input_ids.shape[1]
        mlen = mems[0].shape[0] if mems is not None and mems[0] is not None else 0
        klen = mlen + qlen

        dtype_float = next(self.parameters()).dtype
        device = next(self.parameters()).device

        ##### Attention mask
        # causal attention mask
        if self.attn_type == 'uni':
            attn_mask = self.create_mask(qlen, mlen)
            attn_mask = attn_mask[:, :, None, None]
        elif self.attn_type == 'bi':
            attn_mask = None
        else:
            raise ValueError('Unsupported attention type: {}'.format(self.attn_type))

        # data mask: input mask & perm mask
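        # (in all of these masks a non-zero value means the position is masked out, i.e. not attended to)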
        assert input_mask is None or attention_mask is None, ("You can only use one of input_mask (uses 1 for padding) "
            "or attention_mask (uses 0 for padding, added for compatibility with BERT). Please choose one.")
        if input_mask is None and attention_mask is not None:
            input_mask = 1.0 - attention_mask
        if input_mask is not None and perm_mask is not None:
            data_mask = input_mask[None] + perm_mask
        elif input_mask is not None and perm_mask is None:
            data_mask = input_mask[None]
        elif input_mask is None and perm_mask is not None:
            data_mask = perm_mask
        else:
            data_mask = None

        if data_mask is not None:
            # all mems can be attended to
            if mlen > 0:
                mems_mask = torch.zeros([data_mask.shape[0], mlen, bsz]).to(data_mask)
                data_mask = torch.cat([mems_mask, data_mask], dim=1)
            if attn_mask is None:
                attn_mask = data_mask[:, :, :, None]
            else:
                attn_mask += data_mask[:, :, :, None]

        if attn_mask is not None:
            attn_mask = (attn_mask > 0).to(dtype_float)

        if attn_mask is not None:
            non_tgt_mask = -torch.eye(qlen).to(attn_mask)
            if mlen > 0:
                non_tgt_mask = torch.cat([torch.zeros([qlen, mlen]).to(attn_mask), non_tgt_mask], dim=-1)
            non_tgt_mask = ((attn_mask + non_tgt_mask[:, :, None, None]) > 0).to(attn_mask)
        else:
            non_tgt_mask = None

        ##### Word embeddings and prepare h & g hidden states
        word_emb_k = self.word_embedding(input_ids)
        output_h = self.dropout(word_emb_k)
        if target_mapping is not None:
            word_emb_q = self.mask_emb.expand(target_mapping.shape[0], bsz, -1)
        # else:  # We removed the inp_q input which was same as target mapping
        #     inp_q_ext = inp_q[:, :, None]
        #     word_emb_q = inp_q_ext * self.mask_emb + (1 - inp_q_ext) * word_emb_k
            output_g = self.dropout(word_emb_q)
        else:
            output_g = None

        ##### Segment embedding
        if token_type_ids is not None:
            # Convert `token_type_ids` to one-hot `seg_mat`
            if mlen > 0:
                mem_pad = torch.zeros([mlen, bsz], dtype=torch.long, device=device)
                cat_ids = torch.cat([mem_pad, token_type_ids], dim=0)
            else:
                cat_ids = token_type_ids

            # `1` indicates not in the same segment [qlen x klen x bsz]
            seg_mat = (token_type_ids[:, None] != cat_ids[None, :]).long()
            seg_mat = F.one_hot(seg_mat, num_classes=2).to(dtype_float)
        else:
            seg_mat = None

        ##### Positional encoding
        pos_emb = self.relative_positional_encoding(qlen, klen, bsz=bsz)
        pos_emb = self.dropout(pos_emb)

        # Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer)
        # and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head]
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0).unsqueeze(0)
                head_mask = head_mask.expand(self.n_layer, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(1).unsqueeze(1)
            head_mask = head_mask.to(dtype=next(self.parameters()).dtype) # switch to float if needed + fp16 compatibility
        else:
            head_mask = [None] * self.n_layer

        new_mems = ()
        if mems is None:
            mems = [None] * len(self.layer)

        attentions = []
        hidden_states = []
        for i, layer_module in enumerate(self.layer):
            # cache new mems
            new_mems = new_mems + (self.cache_mem(output_h, mems[i]),)
            if self.output_hidden_states:
                hidden_states.append((output_h, output_g) if output_g is not None else output_h)

            outputs = layer_module(output_h, output_g, attn_mask_h=non_tgt_mask, attn_mask_g=attn_mask,
                                   r=pos_emb, seg_mat=seg_mat, mems=mems[i], target_mapping=target_mapping,
                                   head_mask=head_mask[i])
            output_h, output_g = outputs[:2]
            if self.output_attentions:
                attentions.append(outputs[2])

        # Add last hidden state
        if self.output_hidden_states:
            hidden_states.append((output_h, output_g) if output_g is not None else output_h)

        output = self.dropout(output_g if output_g is not None else output_h)

        # Prepare outputs, we transpose back here to shape [bsz, len, hidden_dim] (cf. beginning of forward() method)
        outputs = (output.permute(1, 0, 2).contiguous(), new_mems)
        if self.output_hidden_states:
            if output_g is not None:
                hidden_states = tuple(h.permute(1, 0, 2).contiguous() for hs in hidden_states for h in hs)
            else:
                hidden_states = tuple(hs.permute(1, 0, 2).contiguous() for hs in hidden_states)
            outputs = outputs + (hidden_states,)
        if self.output_attentions:
            attentions = tuple(t.permute(2, 3, 0, 1).contiguous() for t in attentions)
            outputs = outputs + (attentions,)

        return outputs  # outputs, new_mems, (hidden_states), (attentions)


@add_start_docstrings("""XLNet Model with a language modeling head on top
    (linear layer with weights tied to the input embeddings). """,
    XLNET_START_DOCSTRING, XLNET_INPUTS_DOCSTRING)
class XLNetLMHeadModel(XLNetPreTrainedModel):
    r"""
        **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Labels for language modeling.
            Note that the labels **are shifted** inside the model, i.e. you can set ``labels = input_ids``
            Indices are selected in ``[-1, 0, ..., config.vocab_size]``
            All labels set to ``-1`` are ignored (masked), the loss is only
            computed for labels in ``[0, ..., config.vocab_size]``

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Language modeling loss.
        **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        **mems**:
            list of ``torch.FloatTensor`` (one for each layer):
            that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
            if config.mem_len > 0 else tuple of None. Can be used to speed up sequential decoding and attend to longer context.
            See details in the docstring of the `mems` input above.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
        model = XLNetLMHeadModel.from_pretrained('xlnet-large-cased')
        # We show how to setup inputs to predict a next token using a bi-directional context.
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is very <mask>")).unsqueeze(0)  # We will predict the masked token
        perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float)
        perm_mask[:, :, -1] = 1.0  # Previous tokens don't see last token
        target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float)  # Shape [1, 1, seq_length] => let's predict one token
        target_mapping[0, 0, -1] = 1.0  # Our first (and only) prediction will be the last token of the sequence (the masked token)
        outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping)
        next_token_logits = outputs[0]  # Output has shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size]

    """
    def __init__(self, config):
        super(XLNetLMHeadModel, self).__init__(config)
        self.attn_type = config.attn_type
        self.same_length = config.same_length

        self.transformer = XLNetModel(config)
        self.lm_loss = nn.Linear(config.d_model, config.n_token, bias=True)

        self.init_weights()
        self.tie_weights()

    def tie_weights(self):
        """ Make sure we are sharing the embeddings
        """
        self._tie_or_clone_weights(self.lm_loss, self.transformer.word_embedding)

    def forward(self, input_ids, attention_mask=None, mems=None, perm_mask=None, target_mapping=None,
                token_type_ids=None, input_mask=None, head_mask=None, labels=None):
        transformer_outputs = self.transformer(input_ids,
                                               attention_mask=attention_mask,
                                               mems=mems,
                                               perm_mask=perm_mask,
                                               target_mapping=target_mapping,
                                               token_type_ids=token_type_ids,
                                               input_mask=input_mask, 
                                               head_mask=head_mask)

        logits = self.lm_loss(transformer_outputs[0])

        outputs = (logits,) + transformer_outputs[1:]  # Keep mems, hidden states, attentions if they are in it

        if labels is not None:
            # Flatten the tokens
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            loss = loss_fct(logits.view(-1, logits.size(-1)),
                            labels.view(-1))
            outputs = (loss,) + outputs

        return outputs  # return (loss), logits, mems, (hidden states), (attentions)


@add_start_docstrings("""XLNet Model with a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    XLNET_START_DOCSTRING, XLNET_INPUTS_DOCSTRING)
class XLNetForSequenceClassification(XLNetPreTrainedModel):
    r"""
        **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Labels for computing the sequence classification/regression loss.
            Indices should be in ``[0, ..., config.num_labels - 1]``.
            If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),
            If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Classification (or regression if config.num_labels==1) loss.
        **logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)``
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        **mems**:
            list of ``torch.FloatTensor`` (one for each layer) containing pre-computed hidden-states
            (key and values in the attention blocks) as computed by the model if ``config.mem_len > 0``,
            else a tuple of ``None``. Can be used to speed up sequential decoding and attend to a longer context.
            See details in the docstring of the `mems` input above.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
        model = XLNetForSequenceClassification.from_pretrained('xlnet-large-cased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        labels = torch.tensor([1]).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=labels)
        loss, logits = outputs[:2]
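        # Illustrative sketch (not part of the original snippet): logits has shape [batch_size, config.num_labels];
        # for classification the predicted label is the argmax. With config.num_labels == 1 the same tensor
        # holds a regression score instead.
        predicted_label = torch.argmax(logits, dim=-1)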

    """
    def __init__(self, config):
        super(XLNetForSequenceClassification, self).__init__(config)
        self.num_labels = config.num_labels

        self.transformer = XLNetModel(config)
        self.sequence_summary = SequenceSummary(config)
        self.logits_proj = nn.Linear(config.d_model, config.num_labels)

        self.init_weights()

    def forward(self, input_ids, attention_mask=None, mems=None, perm_mask=None, target_mapping=None,
                token_type_ids=None, input_mask=None, head_mask=None, labels=None):
        transformer_outputs = self.transformer(input_ids,
                                               attention_mask=attention_mask,
                                               mems=mems,
                                               perm_mask=perm_mask,
                                               target_mapping=target_mapping,
                                               token_type_ids=token_type_ids,
                                               input_mask=input_mask, 
                                               head_mask=head_mask)
        output = transformer_outputs[0]

        output = self.sequence_summary(output)
        logits = self.logits_proj(output)

        outputs = (logits,) + transformer_outputs[1:]  # Keep mems, hidden states, attentions if they are here

        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            outputs = (loss,) + outputs

        return outputs  # return (loss), logits, mems, (hidden states), (attentions)


@add_start_docstrings("""XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear layers on top of
    the hidden-states output to compute `span start logits` and `span end logits`). """,
    XLNET_START_DOCSTRING, XLNET_INPUTS_DOCSTRING)
class XLNetForQuestionAnswering(XLNetPreTrainedModel):
    r"""
        **start_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`).
            Positions outside of the sequence are not taken into account for computing the loss.
        **end_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`).
            Positions outside of the sequence are not taken into account for computing the loss.
        **is_impossible**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Labels for whether a question has an answer or no answer (SQuAD 2.0).
        **cls_index**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Labels for position (index) of the classification token to use as input for computing plausibility of the answer.
        **p_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:
            Optional mask of tokens which can't be in answers (e.g. [CLS], [PAD], ...).
            1.0 means the token should be masked, 0.0 means the token is not masked.

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned if both ``start_positions`` and ``end_positions`` are provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses.
        **start_top_log_probs**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
            ``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top)``
            Log probabilities for the top ``config.start_n_top`` start token possibilities (beam-search).
        **start_top_index**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
            ``torch.LongTensor`` of shape ``(batch_size, config.start_n_top)``
            Indices for the top ``config.start_n_top`` start token possibilities (beam-search).
        **end_top_log_probs**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
            ``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``
            Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
        **end_top_index**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
            ``torch.LongTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``
            Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
        **cls_logits**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
            ``torch.FloatTensor`` of shape ``(batch_size,)``
            Logits for the ``is_impossible`` label of the answers.
        **mems**:
            list of ``torch.FloatTensor`` (one for each layer) containing pre-computed hidden-states
            (key and values in the attention blocks) as computed by the model if ``config.mem_len > 0``,
            else a tuple of ``None``. Can be used to speed up sequential decoding and attend to a longer context.
            See details in the docstring of the `mems` input above.
1062
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
        model = XLNetForQuestionAnswering.from_pretrained('xlnet-large-cased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        start_positions = torch.tensor([1])
        end_positions = torch.tensor([3])
        outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
        loss = outputs[0]  # when both start and end positions are given, the first output is the total span loss
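        # Illustrative sketch (not part of the original snippet): without start/end positions the model
        # returns beam-search style outputs instead of a loss (see the Outputs section above).
        outputs = model(input_ids)
        start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits = outputs[:5]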

    """
    def __init__(self, config):
        super(XLNetForQuestionAnswering, self).__init__(config)
        self.start_n_top = config.start_n_top
        self.end_n_top = config.end_n_top

        self.transformer = XLNetModel(config)
        self.start_logits = PoolerStartLogits(config)
        self.end_logits = PoolerEndLogits(config)
        self.answer_class = PoolerAnswerClass(config)

        self.init_weights()

    def forward(self, input_ids, attention_mask=None, mems=None, perm_mask=None, target_mapping=None,
                token_type_ids=None, input_mask=None, head_mask=None,
                start_positions=None, end_positions=None, is_impossible=None, cls_index=None, p_mask=None,):
        transformer_outputs = self.transformer(input_ids,
                                               attention_mask=attention_mask,
                                               mems=mems,
                                               perm_mask=perm_mask,
                                               target_mapping=target_mapping,
                                               token_type_ids=token_type_ids,
                                               input_mask=input_mask, 
                                               head_mask=head_mask)
        hidden_states = transformer_outputs[0]
        start_logits = self.start_logits(hidden_states, p_mask=p_mask)

        outputs = transformer_outputs[1:]  # Keep mems, hidden states, attentions if they are here

        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, let's remove the dimension added by batch splitting
            for x in (start_positions, end_positions, cls_index, is_impossible):
                if x is not None and x.dim() > 1:
                    x.squeeze_(-1)

            # during training, compute the end logits based on the ground truth of the start position
            end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask)

            loss_fct = CrossEntropyLoss()
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

            if cls_index is not None and is_impossible is not None:
                # Predict answerability from the representation of CLS and START
                cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index)
                loss_fct_cls = nn.BCEWithLogitsLoss()
                cls_loss = loss_fct_cls(cls_logits, is_impossible)

                # note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss
                total_loss += cls_loss * 0.5

            outputs = (total_loss,) + outputs

        else:
            # during inference, compute the end logits based on beam search
            bsz, slen, hsz = hidden_states.size()
            start_log_probs = F.softmax(start_logits, dim=-1) # shape (bsz, slen)
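            # Note: these are softmax probabilities rather than log-probabilities, despite the variable name.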

            start_top_log_probs, start_top_index = torch.topk(start_log_probs, self.start_n_top, dim=-1) # shape (bsz, start_n_top)
            start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz) # shape (bsz, start_n_top, hsz)
            start_states = torch.gather(hidden_states, -2, start_top_index_exp) # shape (bsz, start_n_top, hsz)
            start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1) # shape (bsz, slen, start_n_top, hsz)

            hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(start_states) # shape (bsz, slen, start_n_top, hsz)
            p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None
            end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask)
            end_log_probs = F.softmax(end_logits, dim=1) # shape (bsz, slen, start_n_top)

            end_top_log_probs, end_top_index = torch.topk(end_log_probs, self.end_n_top, dim=1) # shape (bsz, end_n_top, start_n_top)
            end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top)
            end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top)

            start_states = torch.einsum("blh,bl->bh", hidden_states, start_log_probs)  # get the representation of START as weighted sum of hidden states
            cls_logits = self.answer_class(hidden_states, start_states=start_states, cls_index=cls_index)  # Shape (batch size,): one single `cls_logits` for each sample

            outputs = (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits) + outputs

        # return start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits
        # or (if labels are provided) (total_loss,)
        return outputs