# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch XLNet model.
"""
from __future__ import absolute_import, division, print_function, unicode_literals

import json
import logging
import math
import os
import sys
from io import open

import torch
from torch import nn
from torch.nn import functional as F
from torch.nn import CrossEntropyLoss, MSELoss

from .modeling_utils import (CONFIG_NAME, WEIGHTS_NAME, PretrainedConfig, PreTrainedModel,
                             SequenceSummary, PoolerAnswerClass, PoolerEndLogits, PoolerStartLogits,
                             add_start_docstrings)


logger = logging.getLogger(__name__)

XLNET_PRETRAINED_MODEL_ARCHIVE_MAP = {
    'xlnet-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-base-cased-pytorch_model.bin",
    'xlnet-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-large-cased-pytorch_model.bin",
}
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'xlnet-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-base-cased-config.json",
    'xlnet-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-large-cased-config.json",
}


def build_tf_xlnet_to_pytorch_map(model, config, tf_weights=None):
    """ A map of modules from TF to PyTorch.
        I use a map to keep the PyTorch model as
        identical to the original PyTorch model as possible.
    """

    tf_to_pt_map = {}

    if hasattr(model, 'transformer'):
        if hasattr(model, 'lm_loss'):
            # We will load also the output bias
            tf_to_pt_map['model/lm_loss/bias'] = model.lm_loss.bias
        if hasattr(model, 'sequence_summary') and 'model/sequnece_summary/summary/kernel' in tf_weights:
            # We will load also the sequence summary
            tf_to_pt_map['model/sequnece_summary/summary/kernel'] = model.sequence_summary.summary.weight
            tf_to_pt_map['model/sequnece_summary/summary/bias'] = model.sequence_summary.summary.bias
        if hasattr(model, 'logits_proj') and config.finetuning_task is not None \
                and 'model/regression_{}/logit/kernel'.format(config.finetuning_task) in tf_weights:
            tf_to_pt_map['model/regression_{}/logit/kernel'.format(config.finetuning_task)] = model.logits_proj.weight
            tf_to_pt_map['model/regression_{}/logit/bias'.format(config.finetuning_task)] = model.logits_proj.bias

        # Now load the rest of the transformer
        model = model.transformer

    # Embeddings and output
    tf_to_pt_map.update({'model/transformer/word_embedding/lookup_table': model.word_embedding.weight,
                         'model/transformer/mask_emb/mask_emb': model.mask_emb})

    # Transformer blocks
    for i, b in enumerate(model.layer):
        layer_str = "model/transformer/layer_%d/" % i
        tf_to_pt_map.update({
            layer_str + "rel_attn/LayerNorm/gamma": b.rel_attn.layer_norm.weight,
            layer_str + "rel_attn/LayerNorm/beta": b.rel_attn.layer_norm.bias,
            layer_str + "rel_attn/o/kernel": b.rel_attn.o,
            layer_str + "rel_attn/q/kernel": b.rel_attn.q,
            layer_str + "rel_attn/k/kernel": b.rel_attn.k,
            layer_str + "rel_attn/r/kernel": b.rel_attn.r,
            layer_str + "rel_attn/v/kernel": b.rel_attn.v,
            layer_str + "ff/LayerNorm/gamma": b.ff.layer_norm.weight,
            layer_str + "ff/LayerNorm/beta": b.ff.layer_norm.bias,
            layer_str + "ff/layer_1/kernel": b.ff.layer_1.weight,
            layer_str + "ff/layer_1/bias": b.ff.layer_1.bias,
            layer_str + "ff/layer_2/kernel": b.ff.layer_2.weight,
            layer_str + "ff/layer_2/bias": b.ff.layer_2.bias,
        })

    # Relative positioning biases
    if config.untie_r:
        r_r_list = []
        r_w_list = []
        r_s_list = []
        seg_embed_list = []
        for b in model.layer:
            r_r_list.append(b.rel_attn.r_r_bias)
            r_w_list.append(b.rel_attn.r_w_bias)
            r_s_list.append(b.rel_attn.r_s_bias)
            seg_embed_list.append(b.rel_attn.seg_embed)
    else:
        r_r_list = [model.r_r_bias]
        r_w_list = [model.r_w_bias]
        r_s_list = [model.r_s_bias]
        seg_embed_list = [model.seg_embed]
    tf_to_pt_map.update({
        'model/transformer/r_r_bias': r_r_list,
        'model/transformer/r_w_bias': r_w_list,
        'model/transformer/r_s_bias': r_s_list,
        'model/transformer/seg_embed': seg_embed_list})
    return tf_to_pt_map

def load_tf_weights_in_xlnet(model, config, tf_path):
    """ Load tf checkpoints in a pytorch model
    """
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions.")
        raise
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = build_tf_xlnet_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info("Importing {}".format(name))
        if name not in tf_weights:
            logger.info("{} not in tf pre-trained weights, skipping".format(name))
            continue
        array = tf_weights[name]
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
        # which are not required for using the pretrained model
        if 'kernel' in name and ('ff' in name or 'summary' in name or 'logit' in name):
            logger.info("Transposing")
            array = np.transpose(array)
        if isinstance(pointer, list):
            # Here we will split the TF weights
            assert len(pointer) == array.shape[0]
            for i, p_i in enumerate(pointer):
                arr_i = array[i, ...]
                try:
                    assert p_i.shape == arr_i.shape
                except AssertionError as e:
                    e.args += (p_i.shape, arr_i.shape)
                    raise
                logger.info("Initialize PyTorch weight {} for layer {}".format(name, i))
                p_i.data = torch.from_numpy(arr_i)
        else:
            try:
                assert pointer.shape == array.shape
            except AssertionError as e:
                e.args += (pointer.shape, array.shape)
                raise
            logger.info("Initialize PyTorch weight {}".format(name))
            pointer.data = torch.from_numpy(array)
        tf_weights.pop(name, None)
        tf_weights.pop(name + '/Adam', None)
        tf_weights.pop(name + '/Adam_1', None)

    logger.info("Weights not copied to PyTorch model: {}".format(', '.join(tf_weights.keys())))
    return model


def gelu(x):
    """ Implementation of the gelu activation function.
        XLNet is using OpenAI GPT's gelu (not exactly the same as BERT)
        Also see https://arxiv.org/abs/1606.08415
    """
    cdf = 0.5 * (1.0 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    return x * cdf


def swish(x):
    return x * torch.sigmoid(x)


ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}


class XLNetConfig(PretrainedConfig):
    """Configuration class to store the configuration of a ``XLNetModel``.

    Args:
        vocab_size_or_config_json_file: Vocabulary size of ``inputs_ids`` in ``XLNetModel``.
        d_model: Size of the encoder layers and the pooler layer.
        n_layer: Number of hidden layers in the Transformer encoder.
        n_head: Number of attention heads for each attention layer in
            the Transformer encoder.
        d_inner: The size of the "intermediate" (i.e., feed-forward)
            layer in the Transformer encoder.
        ff_activation: The non-linear activation function (function or string) in the
            encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
        untie_r: untie relative position biases
        attn_type: 'bi' for XLNet, 'uni' for Transformer-XL

        dropout: The dropout probability for all fully connected
            layers in the embeddings, encoder, and pooler.
        dropatt: The dropout ratio for the attention
            probabilities.
        initializer_range: The stddev of the truncated_normal_initializer for
            initializing all weight matrices.
        layer_norm_eps: The epsilon used by LayerNorm.

        dropout: float, dropout rate.
        dropatt: float, dropout rate on attention probabilities.
        init: str, the initialization scheme, either "normal" or "uniform".
        init_range: float, initialize the parameters with a uniform distribution
            in [-init_range, init_range]. Only effective when init="uniform".
        init_std: float, initialize the parameters with a normal distribution
            with mean 0 and stddev init_std. Only effective when init="normal".
        mem_len: int, the number of tokens to cache.
        reuse_len: int, the number of tokens in the current batch to be cached
            and reused in the future.
        bi_data: bool, whether to use bidirectional input pipeline.
            Usually set to True during pretraining and False during finetuning.
        clamp_len: int, clamp all relative distances larger than clamp_len.
            -1 means no clamping.
        same_length: bool, whether to use the same attention length for each token.
        finetuning_task: name of the GLUE task on which the model was fine-tuned, if any
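
    Example (a minimal sketch; the values shown are simply this constructor's defaults,
    not a recommendation for any particular checkpoint)::

        config = XLNetConfig(vocab_size_or_config_json_file=32000, d_model=1024,
                             n_layer=24, n_head=16, d_inner=4096)
        model = XLNetModel(config)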
    """
    pretrained_config_archive_map = XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP

    def __init__(self,
                 vocab_size_or_config_json_file=32000,
                 d_model=1024,
                 n_layer=24,
                 n_head=16,
                 d_inner=4096,
                 ff_activation="gelu",
                 untie_r=True,
                 attn_type="bi",

                 initializer_range=0.02,
                 layer_norm_eps=1e-12,

                 dropout=0.1,
                 mem_len=None,
                 reuse_len=None,
                 bi_data=False,
                 clamp_len=-1,
                 same_length=False,

                 finetuning_task=None,
                 num_labels=2,
                 summary_type='last',
                 summary_use_proj=True,
                 summary_activation='tanh',
                 summary_last_dropout=0.1,
                 start_n_top=5,
                 end_n_top=5,
                 **kwargs):
        """Constructs XLNetConfig.
        """
        super(XLNetConfig, self).__init__(**kwargs)

        if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
                        and isinstance(vocab_size_or_config_json_file, unicode)):
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.n_token = vocab_size_or_config_json_file
            self.d_model = d_model
            self.n_layer = n_layer
            self.n_head = n_head
            assert d_model % n_head == 0
            self.d_head = d_model // n_head
            self.ff_activation = ff_activation
            self.d_inner = d_inner
            self.untie_r = untie_r
            self.attn_type = attn_type

            self.initializer_range = initializer_range
            self.layer_norm_eps = layer_norm_eps

            self.dropout = dropout
            self.mem_len = mem_len
            self.reuse_len = reuse_len
            self.bi_data = bi_data
            self.clamp_len = clamp_len
            self.same_length = same_length

            self.finetuning_task = finetuning_task
            self.num_labels = num_labels
            self.summary_type = summary_type
            self.summary_use_proj = summary_use_proj
            self.summary_activation = summary_activation
            self.summary_last_dropout = summary_last_dropout
            self.start_n_top = start_n_top
            self.end_n_top = end_n_top
        else:
            raise ValueError("First argument must be either a vocabulary size (int)"
                             " or the path to a pretrained model config file (str)")

    @property
    def max_position_embeddings(self):
        return -1

    @property
    def vocab_size(self):
        return self.n_token

    @vocab_size.setter
    def vocab_size(self, value):
        self.n_token = value

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.n_head

    @property
    def num_hidden_layers(self):
        return self.n_layer


try:
    from apex.normalization.fused_layer_norm import FusedLayerNorm as XLNetLayerNorm
except (ImportError, AttributeError) as e:
    logger.info("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .")
    class XLNetLayerNorm(nn.Module):
        def __init__(self, d_model, eps=1e-12):
            """Construct a layernorm module in the TF style (epsilon inside the square root).
            """
            super(XLNetLayerNorm, self).__init__()
            self.weight = nn.Parameter(torch.ones(d_model))
            self.bias = nn.Parameter(torch.zeros(d_model))
            self.variance_epsilon = eps

        def forward(self, x):
            u = x.mean(-1, keepdim=True)
            s = (x - u).pow(2).mean(-1, keepdim=True)
            x = (x - u) / torch.sqrt(s + self.variance_epsilon)
            return self.weight * x + self.bias

class XLNetRelativeAttention(nn.Module):
    def __init__(self, config):
        super(XLNetRelativeAttention, self).__init__()
        self.output_attentions = config.output_attentions

        if config.d_model % config.n_head != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.d_model, config.n_head))

        self.n_head = config.n_head
        self.d_head = config.d_head
        self.d_model = config.d_model
        self.scale = 1 / (config.d_head ** 0.5)

        self.q = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
        self.k = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
        self.v = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
        self.o = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
        self.r = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))

        self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
        self.r_s_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
        self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
        self.seg_embed = nn.Parameter(torch.FloatTensor(2, self.n_head, self.d_head))

        self.layer_norm = XLNetLayerNorm(config.d_model, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.dropout)

    def prune_heads(self, heads):
        raise NotImplementedError

    @staticmethod
    def rel_shift(x, klen=-1):
        """perform relative shift to form the relative attention score."""
        x_size = x.shape

        x = x.reshape(x_size[1], x_size[0], x_size[2], x_size[3])
        x = x[1:, ...]
        x = x.reshape(x_size[0], x_size[1] - 1, x_size[2], x_size[3])
        # x = x[:, 0:klen, :, :]
        x = torch.index_select(x, 1, torch.arange(klen, device=x.device, dtype=torch.long))

        return x

    def rel_attn_core(self, q_head, k_head_h, v_head_h, k_head_r, seg_mat=None, attn_mask=None, head_mask=None):
        """Core relative positional attention operations."""

        # content based attention score
        ac = torch.einsum('ibnd,jbnd->ijbn', q_head + self.r_w_bias, k_head_h)

        # position based attention score
        bd = torch.einsum('ibnd,jbnd->ijbn', q_head + self.r_r_bias, k_head_r)
        bd = self.rel_shift(bd, klen=ac.shape[1])

        # segment based attention score
        if seg_mat is None:
            ef = 0
        else:
            ef = torch.einsum('ibnd,snd->ibns', q_head + self.r_s_bias, self.seg_embed)
            ef = torch.einsum('ijbs,ibns->ijbn', seg_mat, ef)

        # merge attention scores and perform masking
        attn_score = (ac + bd + ef) * self.scale
        if attn_mask is not None:
            # attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask
            if attn_mask.dtype == torch.float16:
                attn_score = attn_score - 65500 * attn_mask
            else:
                attn_score = attn_score - 1e30 * attn_mask

        # attention probability
        attn_prob = F.softmax(attn_score, dim=1)
        attn_prob = self.dropout(attn_prob)

        # Mask heads if we want to
        if head_mask is not None:
            attn_prob = attn_prob * head_mask

        # attention output
        attn_vec = torch.einsum('ijbn,jbnd->ibnd', attn_prob, v_head_h)

        if self.output_attentions:
            return attn_vec, attn_prob

        return attn_vec

    def post_attention(self, h, attn_vec, residual=True):
        """Post-attention processing."""
        # post-attention projection (back to `d_model`)
        attn_out = torch.einsum('ibnd,hnd->ibh', attn_vec, self.o)

        attn_out = self.dropout(attn_out)
        if residual:
            attn_out = attn_out + h
        output = self.layer_norm(attn_out)

        return output

    def forward(self, h, g,
                      attn_mask_h, attn_mask_g,
                      r, seg_mat,
                      mems=None, target_mapping=None, head_mask=None):
        if g is not None:
            ###### Two-stream attention with relative positional encoding.
            # content based attention score
            if mems is not None and mems.dim() > 1:
                cat = torch.cat([mems, h], dim=0)
            else:
                cat = h

            # content-based key head
            k_head_h = torch.einsum('ibh,hnd->ibnd', cat, self.k)

            # content-based value head
            v_head_h = torch.einsum('ibh,hnd->ibnd', cat, self.v)

            # position-based key head
            k_head_r = torch.einsum('ibh,hnd->ibnd', r, self.r)

            ##### h-stream
            # content-stream query head
            q_head_h = torch.einsum('ibh,hnd->ibnd', h, self.q)

            # core attention ops
            attn_vec_h = self.rel_attn_core(
                q_head_h, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_h, head_mask=head_mask)

            if self.output_attentions:
                attn_vec_h, attn_prob_h = attn_vec_h

            # post processing
            output_h = self.post_attention(h, attn_vec_h)

            ##### g-stream
            # query-stream query head
            q_head_g = torch.einsum('ibh,hnd->ibnd', g, self.q)

            # core attention ops
            if target_mapping is not None:
                q_head_g = torch.einsum('mbnd,mlb->lbnd', q_head_g, target_mapping)
                attn_vec_g = self.rel_attn_core(
                    q_head_g, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_g, head_mask=head_mask)

                if self.output_attentions:
                    attn_vec_g, attn_prob_g = attn_vec_g

                attn_vec_g = torch.einsum('lbnd,mlb->mbnd', attn_vec_g, target_mapping)
            else:
                attn_vec_g = self.rel_attn_core(
                    q_head_g, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_g, head_mask=head_mask)

                if self.output_attentions:
                    attn_vec_g, attn_prob_g = attn_vec_g

            # post processing
            output_g = self.post_attention(g, attn_vec_g)

            if self.output_attentions:
                attn_prob = attn_prob_h, attn_prob_g

        else:
            ###### Multi-head attention with relative positional encoding
            if mems is not None and mems.dim() > 1:
                cat = torch.cat([mems, h], dim=0)
            else:
                cat = h

            # content heads
            q_head_h = torch.einsum('ibh,hnd->ibnd', h, self.q)
            k_head_h = torch.einsum('ibh,hnd->ibnd', cat, self.k)
            v_head_h = torch.einsum('ibh,hnd->ibnd', cat, self.v)

            # positional heads
            k_head_r = torch.einsum('ibh,hnd->ibnd', r, self.r)

            # core attention ops
            attn_vec = self.rel_attn_core(
                q_head_h, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_h, head_mask=head_mask)

            if self.output_attentions:
                attn_vec, attn_prob = attn_vec

            # post processing
            output_h = self.post_attention(h, attn_vec)
            output_g = None

        outputs = (output_h, output_g)
        if self.output_attentions:
            outputs = outputs + (attn_prob,)
        return outputs

class XLNetFeedForward(nn.Module):
    def __init__(self, config):
        super(XLNetFeedForward, self).__init__()
        self.layer_norm = XLNetLayerNorm(config.d_model, eps=config.layer_norm_eps)
        self.layer_1 = nn.Linear(config.d_model, config.d_inner)
        self.layer_2 = nn.Linear(config.d_inner, config.d_model)
        self.dropout = nn.Dropout(config.dropout)
        if isinstance(config.ff_activation, str) or \
                (sys.version_info[0] == 2 and isinstance(config.ff_activation, unicode)):
            self.activation_function = ACT2FN[config.ff_activation]
        else:
            self.activation_function = config.ff_activation

    def forward(self, inp):
        output = inp
        output = self.layer_1(output)
        output = self.activation_function(output)
        output = self.dropout(output)
        output = self.layer_2(output)
        output = self.dropout(output)
        output = self.layer_norm(output + inp)
        return output

class XLNetLayer(nn.Module):
    def __init__(self, config):
        super(XLNetLayer, self).__init__()
        self.rel_attn = XLNetRelativeAttention(config)
        self.ff = XLNetFeedForward(config)
        self.dropout = nn.Dropout(config.dropout)

    def forward(self, output_h, output_g,
                attn_mask_h, attn_mask_g,
                r, seg_mat, mems=None, target_mapping=None, head_mask=None):
        outputs = self.rel_attn(output_h, output_g, attn_mask_h, attn_mask_g,
                                r, seg_mat, mems=mems, target_mapping=target_mapping,
                                head_mask=head_mask)
        output_h, output_g = outputs[:2]

        if output_g is not None:
            output_g = self.ff(output_g)
        output_h = self.ff(output_h)

        outputs = (output_h, output_g) + outputs[2:]  # Add again attentions if they are there
        return outputs


class XLNetPreTrainedModel(PreTrainedModel):
    """ An abstract class to handle weights initialization and
        a simple interface for dowloading and loading pretrained models.
    """
    config_class = XLNetConfig
    pretrained_model_archive_map = XLNET_PRETRAINED_MODEL_ARCHIVE_MAP
    load_tf_weights = load_tf_weights_in_xlnet
    base_model_prefix = "transformer"

    def __init__(self, *inputs, **kwargs):
        super(XLNetPreTrainedModel, self).__init__(*inputs, **kwargs)

    def init_weights(self, module):
        """ Initialize the weights.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if isinstance(module, nn.Linear) and module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, XLNetLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, XLNetRelativeAttention):
            for param in [module.q, module.k, module.v, module.o, module.r,
                          module.r_r_bias, module.r_s_bias, module.r_w_bias,
                          module.seg_embed]:
                param.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, XLNetModel):
            module.mask_emb.data.normal_(mean=0.0, std=self.config.initializer_range)


XLNET_START_DOCSTRING = r"""    The XLNet model was proposed in
    `XLNet: Generalized Autoregressive Pretraining for Language Understanding`_
    by Zhilin Yang*, Zihang Dai*, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le.
    XLNet is an extension of the Transformer-XL model pre-trained using an autoregressive method
    to learn bidirectional contexts by maximizing the expected likelihood over all permutations
    of the input sequence factorization order.

    The specific attention pattern can be controlled at training and test time using the `perm_mask` input.

    Due to the difficulty of training a fully auto-regressive model over various factorization orders,
    XLNet is pretrained using only a sub-set of the output tokens as targets, which are selected
    with the `target_mapping` input.

    To use XLNet for sequential decoding (i.e. not in a fully bi-directional setting), use the `perm_mask` and
    `target_mapping` inputs to control the attention span and outputs (see examples in `examples/run_generation.py`)
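
    For example, to predict the token at position ``i`` while hiding it from the model (a minimal sketch;
    ``model``, ``input_ids`` and ``i`` are assumed to be defined as in the usage examples below)::

        perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float)
        perm_mask[:, :, i] = 1.0          # no token may attend to position i
        target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float)
        target_mapping[0, 0, i] = 1.0     # predict only position i
        outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping)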

    This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and
    refer to the PyTorch documentation for all matter related to general usage and behavior.

    .. _`XLNet: Generalized Autoregressive Pretraining for Language Understanding`:
        http://arxiv.org/abs/1906.08237

    .. _`torch.nn.Module`:
        https://pytorch.org/docs/stable/nn.html#module

    Parameters:
        config (:class:`~pytorch_transformers.XLNetConfig`): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the configuration.
            Check out the :meth:`~pytorch_transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""

XLNET_INPUTS_DOCSTRING = r"""
    Inputs:
        **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Indices of input sequence tokens in the vocabulary.
            XLNet is a model with relative position embeddings so you can either pad the inputs on
            the right or on the left.
            Indices can be obtained using :class:`pytorch_transformers.XLNetTokenizer`.
            See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and
            :func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
        **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            A parallel sequence of tokens (can be used to indicate various portions of the inputs).
            The embeddings from these tokens will be summed with the respective token embeddings.
            Indices are selected in the vocabulary (unlike BERT which has a specific vocabulary for segment indices).
        **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:
            Mask to avoid performing attention on padding token indices.
            Mask values selected in ``[0, 1]``:
            ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
        **input_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:
            Mask to avoid performing attention on padding token indices.
            Negative of `attention_mask`, i.e. with 0 for real tokens and 1 for padding.
            Kept for compatibility with the original code base.
            You can only use one of `input_mask` and `attention_mask`.
            Mask values selected in ``[0, 1]``:
            ``1`` for tokens that are MASKED, ``0`` for tokens that are NOT MASKED.
        **mems**: (`optional`)
            list of ``torch.FloatTensor`` (one for each layer):
            that contains pre-computed hidden-states (key and values in the attention blocks) as output by the model
            (see `mems` output below). Can be used to speed up sequential decoding and attend to longer context.
            To activate mems you need to set config.mem_len to a positive value which will be the max number of tokens in
            the memory output by the model. E.g. `model = XLNetModel.from_pretrained('xlnet-base-cased', mem_len=1024)` will
            instantiate a model which can use up to 1024 tokens of memory (in addition to the input itself). See the sketch at the end of this docstring.
        **perm_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, sequence_length)``:
            Mask to indicate the attention pattern for each input token with values selected in ``[0, 1]``:
            If ``perm_mask[k, i, j] = 0``, i attends to j in batch k;
            if ``perm_mask[k, i, j] = 1``, i does not attend to j in batch k.
            If None, each token attends to all the others (full bidirectional attention).
            Only used during pretraining (to define factorization order) or for sequential decoding (generation).
        **target_mapping**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, num_predict, sequence_length)``:
            Mask to indicate the output tokens to use.
            If ``target_mapping[k, i, j] = 1``, the i-th prediction in batch k is on the j-th token.
            Only used during pretraining for partial prediction or for sequential decoding (generation).
        **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
            Mask to nullify selected heads of the self-attention modules.
            Mask values selected in ``[0, 1]``:
            ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
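
    Example of re-using ``mems`` across two forward passes (a minimal sketch; ``first_chunk_ids`` and
    ``next_chunk_ids`` are hypothetical tensors of token indices, and the model is assumed to have been
    loaded with a positive ``mem_len``)::

        outputs = model(first_chunk_ids)
        mems = outputs[1]                 # one tensor per layer
        outputs = model(next_chunk_ids, mems=mems)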
"""

@add_start_docstrings("The bare XLNet Model transformer outputing raw hidden-states without any specific head on top.",
                      XLNET_START_DOCSTRING, XLNET_INPUTS_DOCSTRING)
class XLNetModel(XLNetPreTrainedModel):
    r"""
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
            Sequence of hidden-states at the last layer of the model.
        **mems**:
            list of ``torch.FloatTensor`` (one for each layer):
            that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
            if config.mem_len > 0 else tuple of None. Can be used to speed up sequential decoding and attend to longer context.
            See details in the docstring of the `mems` input above.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
        model = XLNetModel.from_pretrained('xlnet-large-cased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        last_hidden_states = outputs[0]  # The last hidden-state is the first element of the output tuple

    """
    def __init__(self, config):
        super(XLNetModel, self).__init__(config)
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states

        self.mem_len = config.mem_len
        self.reuse_len = config.reuse_len
        self.d_model = config.d_model
        self.same_length = config.same_length
        self.attn_type = config.attn_type
        self.bi_data = config.bi_data
        self.clamp_len = config.clamp_len
        self.n_layer = config.n_layer

        self.word_embedding = nn.Embedding(config.n_token, config.d_model)
        self.mask_emb = nn.Parameter(torch.FloatTensor(1, 1, config.d_model))
        self.layer = nn.ModuleList([XLNetLayer(config) for _ in range(config.n_layer)])
        self.dropout = nn.Dropout(config.dropout)

        self.apply(self.init_weights)

    def _resize_token_embeddings(self, new_num_tokens):
        self.word_embedding = self._get_resized_embeddings(self.word_embedding, new_num_tokens)
        return self.word_embedding

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    def create_mask(self, qlen, mlen):
        """
        Creates causal attention mask. Float mask where 1.0 indicates masked, 0.0 indicates not-masked.

        Args:
            qlen: length of the current sequence (number of query tokens).
            mlen: length of the cached memory (can be 0).

        ::

                  same_length=False:      same_length=True:
                  <mlen > <  qlen >       <mlen > <  qlen >
               ^ [0 0 0 0 0 1 1 1 1]     [0 0 0 0 0 1 1 1 1]
                 [0 0 0 0 0 0 1 1 1]     [1 0 0 0 0 0 1 1 1]
            qlen [0 0 0 0 0 0 0 1 1]     [1 1 0 0 0 0 0 1 1]
                 [0 0 0 0 0 0 0 0 1]     [1 1 1 0 0 0 0 0 1]
               v [0 0 0 0 0 0 0 0 0]     [1 1 1 1 0 0 0 0 0]
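
        Example (a minimal sketch)::

            mask = self.create_mask(qlen=4, mlen=2)  # shape [4, 6]; the first 2 (memory) columns are all zeros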

        """
        attn_mask = torch.ones([qlen, qlen])
        mask_up = torch.triu(attn_mask, diagonal=1)
        attn_mask_pad = torch.zeros([qlen, mlen])
        ret = torch.cat([attn_mask_pad, mask_up], dim=1)
        if self.same_length:
            mask_lo = torch.tril(attn_mask, diagonal=-1)
            ret = torch.cat([ret[:, :qlen] + mask_lo, ret[:, qlen:]], dim=1)

        ret = ret.to(next(self.parameters()))
        return ret

    def cache_mem(self, curr_out, prev_mem):
        """cache hidden states into memory."""
        if self.mem_len is None or self.mem_len == 0:
            return None
        else:
            if self.reuse_len is not None and self.reuse_len > 0:
                curr_out = curr_out[:self.reuse_len]

            if prev_mem is None:
                new_mem = curr_out[-self.mem_len:]
            else:
                new_mem = torch.cat([prev_mem, curr_out], dim=0)[-self.mem_len:]

        return new_mem.detach()

    @staticmethod
    def positional_embedding(pos_seq, inv_freq, bsz=None):
        sinusoid_inp = torch.einsum('i,d->id', pos_seq, inv_freq)
        pos_emb = torch.cat([torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)], dim=-1)
        pos_emb = pos_emb[:, None, :]

        if bsz is not None:
            pos_emb = pos_emb.expand(-1, bsz, -1)

        return pos_emb

    def relative_positional_encoding(self, qlen, klen, bsz=None):
        """create relative positional encoding."""
        freq_seq = torch.arange(0, self.d_model, 2.0, dtype=torch.float)
        inv_freq = 1 / torch.pow(10000, (freq_seq / self.d_model))

        if self.attn_type == 'bi':
            # beg, end = klen - 1, -qlen
            beg, end = klen, -qlen
        elif self.attn_type == 'uni':
            # beg, end = klen - 1, -1
            beg, end = klen, -1
        else:
            raise ValueError('Unknown `attn_type` {}.'.format(self.attn_type))

        if self.bi_data:
            fwd_pos_seq = torch.arange(beg, end, -1.0, dtype=torch.float)
            bwd_pos_seq = torch.arange(-beg, -end, 1.0, dtype=torch.float)

            if self.clamp_len > 0:
                fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
                bwd_pos_seq = bwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)

            if bsz is not None:
                fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz//2)
                bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq, bsz//2)
            else:
                fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq)
                bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq)

            pos_emb = torch.cat([fwd_pos_emb, bwd_pos_emb], dim=1)
        else:
            fwd_pos_seq = torch.arange(beg, end, -1.0)
            if self.clamp_len > 0:
                fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
            pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz)

        pos_emb = pos_emb.to(next(self.parameters()))
        return pos_emb

    def forward(self, input_ids, token_type_ids=None, input_mask=None, attention_mask=None,
                mems=None, perm_mask=None, target_mapping=None, head_mask=None):
        # the original code for XLNet uses shapes [len, bsz] with the batch dimension at the end
        # but we want a unified interface in the library with the batch size on the first dimension
        # so we move the first (batch) dimension to the end here
        input_ids = input_ids.transpose(0, 1).contiguous()
        token_type_ids = token_type_ids.transpose(0, 1).contiguous() if token_type_ids is not None else None
        input_mask = input_mask.transpose(0, 1).contiguous() if input_mask is not None else None
        attention_mask = attention_mask.transpose(0, 1).contiguous() if attention_mask is not None else None
        perm_mask = perm_mask.permute(1, 2, 0).contiguous() if perm_mask is not None else None
        target_mapping = target_mapping.permute(1, 2, 0).contiguous() if target_mapping is not None else None

        qlen, bsz = input_ids.shape[0], input_ids.shape[1]
        mlen = mems[0].shape[0] if mems is not None and mems[0] is not None else 0
        klen = mlen + qlen

        dtype_float = next(self.parameters()).dtype
        device = next(self.parameters()).device

        ##### Attention mask
        # causal attention mask
        if self.attn_type == 'uni':
            attn_mask = self.create_mask(qlen, mlen)
            attn_mask = attn_mask[:, :, None, None]
        elif self.attn_type == 'bi':
            attn_mask = None
        else:
            raise ValueError('Unsupported attention type: {}'.format(self.attn_type))

        # data mask: input mask & perm mask
        assert input_mask is None or attention_mask is None, (
            "You can only use one of input_mask (uses 1 for padding) "
            "or attention_mask (uses 0 for padding, added for compatibility with BERT). Please choose one.")
        if input_mask is None and attention_mask is not None:
            input_mask = 1.0 - attention_mask
        if input_mask is not None and perm_mask is not None:
            data_mask = input_mask[None] + perm_mask
        elif input_mask is not None and perm_mask is None:
            data_mask = input_mask[None]
        elif input_mask is None and perm_mask is not None:
            data_mask = perm_mask
        else:
            data_mask = None

        if data_mask is not None:
            # all mems can be attended to
            mems_mask = torch.zeros([data_mask.shape[0], mlen, bsz]).to(data_mask)
            data_mask = torch.cat([mems_mask, data_mask], dim=1)
            if attn_mask is None:
                attn_mask = data_mask[:, :, :, None]
            else:
                attn_mask += data_mask[:, :, :, None]

        if attn_mask is not None:
            attn_mask = (attn_mask > 0).to(dtype_float)

        if attn_mask is not None:
            non_tgt_mask = -torch.eye(qlen).to(attn_mask)
            non_tgt_mask = torch.cat([torch.zeros([qlen, mlen]).to(attn_mask), non_tgt_mask], dim=-1)
            non_tgt_mask = ((attn_mask + non_tgt_mask[:, :, None, None]) > 0).to(attn_mask)
        else:
            non_tgt_mask = None

        ##### Word embeddings and prepare h & g hidden states
        word_emb_k = self.word_embedding(input_ids)
        output_h = self.dropout(word_emb_k)
        if target_mapping is not None:
            word_emb_q = self.mask_emb.expand(target_mapping.shape[0], bsz, -1)
        # else:  # We removed the inp_q input which was same as target mapping
        #     inp_q_ext = inp_q[:, :, None]
        #     word_emb_q = inp_q_ext * self.mask_emb + (1 - inp_q_ext) * word_emb_k
            output_g = self.dropout(word_emb_q)
        else:
            output_g = None

        ##### Segment embedding
        if token_type_ids is not None:
            # Convert `token_type_ids` to one-hot `seg_mat`
            mem_pad = torch.zeros([mlen, bsz], dtype=torch.long, device=device)
            cat_ids = torch.cat([mem_pad, token_type_ids], dim=0)

            # `1` indicates not in the same segment [qlen x klen x bsz]
            seg_mat = (token_type_ids[:, None] != cat_ids[None, :]).long()
            seg_mat = F.one_hot(seg_mat, num_classes=2).to(dtype_float)
        else:
            seg_mat = None

        ##### Positional encoding
        pos_emb = self.relative_positional_encoding(qlen, klen, bsz=bsz)
        pos_emb = self.dropout(pos_emb)

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer)
        # and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head]
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0).unsqueeze(0)
                head_mask = head_mask.expand(self.n_layer, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(1).unsqueeze(1)
            head_mask = head_mask.to(dtype=next(self.parameters()).dtype)  # switch to float if needed + fp16 compatibility
        else:
            head_mask = [None] * self.n_layer

        new_mems = ()
        if mems is None:
            mems = [None] * len(self.layer)

        attentions = []
        hidden_states = []
        for i, layer_module in enumerate(self.layer):
            # cache new mems
            new_mems = new_mems + (self.cache_mem(output_h, mems[i]),)
            if self.output_hidden_states:
                hidden_states.append((output_h, output_g) if output_g is not None else output_h)

            outputs = layer_module(output_h, output_g, attn_mask_h=non_tgt_mask, attn_mask_g=attn_mask,
                                   r=pos_emb, seg_mat=seg_mat, mems=mems[i], target_mapping=target_mapping,
thomwolf's avatar
thomwolf committed
975
                                   head_mask=head_mask[i])
976
977
            output_h, output_g = outputs[:2]
            if self.output_attentions:
thomwolf's avatar
thomwolf committed
978
                attentions.append(outputs[2])
979
980
981

        # Add last hidden state
        if self.output_hidden_states:
            hidden_states.append((output_h, output_g) if output_g is not None else output_h)

        output = self.dropout(output_g if output_g is not None else output_h)
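        # when target_mapping was provided, the model output is the query stream (output_g),
        # otherwise it is the content stream (output_h)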

        # Prepare outputs, we transpose back here to shape [bsz, len, hidden_dim] (cf. beginning of forward() method)
        outputs = (output.permute(1, 0, 2).contiguous(), new_mems)
        if self.output_hidden_states:
            if output_g is not None:
                hidden_states = tuple(h.permute(1, 0, 2).contiguous() for hs in hidden_states for h in hs)
            else:
                hidden_states = tuple(hs.permute(1, 0, 2).contiguous() for hs in hidden_states)
            outputs = outputs + (hidden_states,)
        if self.output_attentions:
            attentions = tuple(t.permute(2, 3, 0, 1).contiguous() for t in attentions)
            outputs = outputs + (attentions,)

        return outputs  # outputs, new_mems, (hidden_states), (attentions)


@add_start_docstrings("""XLNet Model with a language modeling head on top
    (linear layer with weights tied to the input embeddings). """,
    XLNET_START_DOCSTRING, XLNET_INPUTS_DOCSTRING)
class XLNetLMHeadModel(XLNetPreTrainedModel):
    r"""
        **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Labels for language modeling.
            Note that the labels are **not** shifted inside the model: they must be aligned with the
            positions the model predicts (e.g. the positions selected by ``target_mapping``).
            Indices are selected in ``[-1, 0, ..., config.vocab_size]``
            All labels set to ``-1`` are ignored (masked), the loss is only
            computed for labels in ``[0, ..., config.vocab_size]``

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Language modeling loss.
        **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        **mems**:
            list of ``torch.FloatTensor`` (one for each layer):
            that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
            if config.mem_len > 0 else tuple of None. Can be used to speed up sequential decoding and attend to longer context.
            See details in the docstring of the `mems` input above.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
        model = XLNetLMHeadModel.from_pretrained('xlnet-large-cased')
        # We show how to set up inputs to predict a next token using a bi-directional context.
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is very <mask>")).unsqueeze(0)  # We will predict the masked token
        perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float)
        perm_mask[:, :, -1] = 1.0  # Previous tokens don't see last token
        target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float)  # Shape [1, 1, seq_length] => let's predict one token
        target_mapping[0, 0, -1] = 1.0  # Our first (and only) prediction will be the last token of the sequence (the masked token)
        outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping)
        next_token_logits = outputs[0]  # Output has shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size]
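        # A hypothetical continuation for a training-style call: labels must line up with
        # target_mapping (here a single target position), e.g.
        # labels = torch.tensor([[tokenizer.encode("cute")[0]]])  # shape [1, 1]
        # loss, next_token_logits = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping, labels=labels)[:2]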
    """
    def __init__(self, config):
        super(XLNetLMHeadModel, self).__init__(config)
        self.attn_type = config.attn_type
        self.same_length = config.same_length

        self.transformer = XLNetModel(config)
        self.lm_loss = nn.Linear(config.d_model, config.n_token, bias=True)

        self.apply(self.init_weights)
        self.tie_weights()

    def tie_weights(self):
        """ Make sure we are sharing the input embeddings and the output projection (lm_loss) weights.
        """
        self._tie_or_clone_weights(self.lm_loss, self.transformer.word_embedding)

    def forward(self, input_ids, token_type_ids=None, input_mask=None, attention_mask=None,
                mems=None, perm_mask=None, target_mapping=None,
                labels=None, head_mask=None):
        transformer_outputs = self.transformer(input_ids, token_type_ids=token_type_ids,
                                               input_mask=input_mask, attention_mask=attention_mask,
                                               mems=mems, perm_mask=perm_mask, target_mapping=target_mapping,
                                               head_mask=head_mask)

        logits = self.lm_loss(transformer_outputs[0])
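        # logits has shape [batch_size, seq_len, vocab_size]; when target_mapping is given,
        # seq_len is the number of predicted positions (target_mapping.shape[1])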

        outputs = (logits,) + transformer_outputs[1:]  # Keep mems, hidden states, attentions if they are in it

        if labels is not None:
            # Flatten the tokens
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            loss = loss_fct(logits.view(-1, logits.size(-1)),
                            labels.view(-1))
            outputs = (loss,) + outputs

        return outputs  # return (loss), logits, mems, (hidden states), (attentions)


@add_start_docstrings("""XLNet Model with a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    XLNET_START_DOCSTRING, XLNET_INPUTS_DOCSTRING)
class XLNetForSequenceClassification(XLNetPreTrainedModel):
    r"""
        **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Labels for computing the sequence classification/regression loss.
            Indices should be in ``[0, ..., config.num_labels - 1]``.
            If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),
            If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Classification (or regression if config.num_labels==1) loss.
        **logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)``
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        **mems**:
            list of ``torch.FloatTensor`` (one for each layer):
            that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
            if config.mem_len > 0 else tuple of None. Can be used to speed up sequential decoding and attend to longer context.
            See details in the docstring of the `mems` input above.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
        model = XLNetForSequenceClassification.from_pretrained('xlnet-large-cased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        labels = torch.tensor([1]).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=labels)
        loss, logits = outputs[:2]
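        # Hypothetical regression variant: with a config where num_labels == 1 the head expects
        # float labels and returns an MSE loss instead, e.g. labels = torch.tensor([0.5]).unsqueeze(0)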

    """
    def __init__(self, config):
        super(XLNetForSequenceClassification, self).__init__(config)
        self.num_labels = config.num_labels

        self.transformer = XLNetModel(config)
        self.sequence_summary = SequenceSummary(config)
        self.logits_proj = nn.Linear(config.d_model, config.num_labels)

        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, input_mask=None, attention_mask=None,
                mems=None, perm_mask=None, target_mapping=None,
                labels=None, head_mask=None):
        transformer_outputs = self.transformer(input_ids, token_type_ids=token_type_ids,
                                               input_mask=input_mask, attention_mask=attention_mask,
                                               mems=mems, perm_mask=perm_mask, target_mapping=target_mapping,
                                               head_mask=head_mask)
        output = transformer_outputs[0]

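        # sequence_summary pools the hidden states into one vector per example
        # (XLNet configs default to summary_type='last', i.e. the last token)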
        output = self.sequence_summary(output)
        logits = self.logits_proj(output)

        outputs = (logits,) + transformer_outputs[1:]  # Keep mems, hidden states, attentions if they are in it

        if labels is not None:
            if self.num_labels == 1:
                #  We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            outputs = (loss,) + outputs

        return outputs  # return (loss), logits, mems, (hidden states), (attentions)


@add_start_docstrings("""XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
    the hidden-states output to compute `span start logits` and `span end logits`). """,
    XLNET_START_DOCSTRING, XLNET_INPUTS_DOCSTRING)
class XLNetForQuestionAnswering(XLNetPreTrainedModel):
    r"""
        **start_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`).
            Positions outside of the sequence are not taken into account for computing the loss.
        **end_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`).
            Positions outside of the sequence are not taken into account for computing the loss.
        **is_impossible**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Labels whether a question has an answer or no answer (SQuAD 2.0)
        **cls_index**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Labels for position (index) of the classification token to use as input for computing plausibility of the answer.
        **p_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:
            Optional mask of tokens which can't be in answers (e.g. [CLS], [PAD], ...).
            1.0 means token should be masked. 0.0 means token is not masked.

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned if both ``start_positions`` and ``end_positions`` are provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses.
        **start_top_log_probs**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
            ``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top)``
            Log probabilities for the top config.start_n_top start token possibilities (beam-search).
        **start_top_index**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
            ``torch.LongTensor`` of shape ``(batch_size, config.start_n_top)``
            Indices for the top config.start_n_top start token possibilities (beam-search).
        **end_top_log_probs**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
            ``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``
            Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
        **end_top_index**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
            ``torch.LongTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``
            Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
        **cls_logits**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
            ``torch.FloatTensor`` of shape ``(batch_size,)``
            Log probabilities for the ``is_impossible`` label of the answers.
        **mems**:
            list of ``torch.FloatTensor`` (one for each layer):
            that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
            if config.mem_len > 0 else tuple of None. Can be used to speed up sequential decoding and attend to longer context.
            See details in the docstring of the `mems` input above.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
        model = XLNetForQuestionAnswering.from_pretrained('xlnet-large-cased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        start_positions = torch.tensor([1])
        end_positions = torch.tensor([3])
        outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
        loss = outputs[0]
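        # Without start/end positions the model instead returns beam-search candidates:
        # start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits = model(input_ids)[:5]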

    """
    def __init__(self, config):
        super(XLNetForQuestionAnswering, self).__init__(config)
        self.start_n_top = config.start_n_top
        self.end_n_top = config.end_n_top

        self.transformer = XLNetModel(config)
        self.start_logits = PoolerStartLogits(config)
        self.end_logits = PoolerEndLogits(config)
        self.answer_class = PoolerAnswerClass(config)

        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, input_mask=None, attention_mask=None,
                mems=None, perm_mask=None, target_mapping=None,
                start_positions=None, end_positions=None, cls_index=None, is_impossible=None, p_mask=None,
                head_mask=None):
        transformer_outputs = self.transformer(input_ids, token_type_ids=token_type_ids,
                                               input_mask=input_mask, attention_mask=attention_mask,
                                               mems=mems, perm_mask=perm_mask, target_mapping=target_mapping,
                                               head_mask=head_mask)
        hidden_states = transformer_outputs[0]
        start_logits = self.start_logits(hidden_states, p_mask=p_mask)

        outputs = transformer_outputs[1:]  # Keep mems, hidden states, attentions if they are in it

        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, let's remove the dimension added by batch splitting
            for x in (start_positions, end_positions, cls_index, is_impossible):
                if x is not None and x.dim() > 1:
                    x.squeeze_(-1)

            # during training, compute the end logits based on the ground truth of the start position
            end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask)

            loss_fct = CrossEntropyLoss()
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

            if cls_index is not None and is_impossible is not None:
                # Predict answerability from the representation of CLS and START
                cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index)
                loss_fct_cls = nn.BCEWithLogitsLoss()
                cls_loss = loss_fct_cls(cls_logits, is_impossible)

                # note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss
                total_loss += cls_loss * 0.5

            outputs = (total_loss,) + outputs

        else:
            # during inference, compute the end logits based on beam search
            bsz, slen, hsz = hidden_states.size()
            start_log_probs = F.softmax(start_logits, dim=-1) # shape (bsz, slen)

            start_top_log_probs, start_top_index = torch.topk(start_log_probs, self.start_n_top, dim=-1) # shape (bsz, start_n_top)
            start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz) # shape (bsz, start_n_top, hsz)
            start_states = torch.gather(hidden_states, -2, start_top_index_exp) # shape (bsz, start_n_top, hsz)
            start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1) # shape (bsz, slen, start_n_top, hsz)

            hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(start_states) # shape (bsz, slen, start_n_top, hsz)
            p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None
            end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask)
            end_log_probs = F.softmax(end_logits, dim=1) # shape (bsz, slen, start_n_top)

            end_top_log_probs, end_top_index = torch.topk(end_log_probs, self.end_n_top, dim=1) # shape (bsz, end_n_top, start_n_top)
            end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top)
            end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top)
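            # each of the start_n_top start candidates is paired with end_n_top end candidates,
            # giving start_n_top * end_n_top candidate spans per example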

            start_states = torch.einsum("blh,bl->bh", hidden_states, start_log_probs)  # get the representation of START as weighted sum of hidden states
            cls_logits = self.answer_class(hidden_states, start_states=start_states, cls_index=cls_index)  # Shape (batch size,): one single `cls_logits` for each sample

            outputs = (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits) + outputs

        # return start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits
        # or (if start and end positions are provided) (total_loss,); mems, (hidden_states) and (attentions) follow in both cases
        return outputs