"megatron/legacy/model/fused_softmax.py" did not exist on "051f58f1a5a8a7450ffea5c3aadaa2ea4b3a8630"
modeling_xlnet.py 59 KB
Newer Older
thomwolf's avatar
thomwolf committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch XLNet model.
"""
from __future__ import absolute_import, division, print_function, unicode_literals

import copy
import json
import logging
import math
import os
import sys
from io import open

import torch
from torch import nn
from torch.nn import functional as F
from torch.nn import CrossEntropyLoss, MSELoss

from .file_utils import cached_path
from .model_utils import CONFIG_NAME, WEIGHTS_NAME, PretrainedConfig, PreTrainedModel

logger = logging.getLogger(__name__)

PRETRAINED_MODEL_ARCHIVE_MAP = {
    'xlnet-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-large-cased-pytorch_model.bin",
}
PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'xlnet-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-large-cased-config.json",
}


def build_tf_xlnet_to_pytorch_map(model, config, tf_weights=None):
    """ A map of modules from TF to PyTorch.
        I use a map to keep the PyTorch model as
        identical to the original PyTorch model as possible.
    """

    tf_to_pt_map = {}

    if hasattr(model, 'transformer'):
        if hasattr(model, 'lm_loss'):
            # We will load also the output bias
            tf_to_pt_map['model/lm_loss/bias'] = model.lm_loss.bias
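        # NOTE: 'sequnece' (sic) below mirrors the misspelled variable scope in the
        # original XLNet TF checkpoints; correcting the spelling here would break the lookup.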
        if hasattr(model, 'sequence_summary') and 'model/sequnece_summary/summary/kernel' in tf_weights:
            # We will load also the sequence summary
            tf_to_pt_map['model/sequnece_summary/summary/kernel'] = model.sequence_summary.summary.weight
            tf_to_pt_map['model/sequnece_summary/summary/bias'] = model.sequence_summary.summary.bias
        if hasattr(model, 'logits_proj') and config.finetuning_task is not None \
                and 'model/regression_{}/logit/kernel'.format(config.finetuning_task) in tf_weights:
            tf_to_pt_map['model/regression_{}/logit/kernel'.format(config.finetuning_task)] = model.logits_proj.weight
            tf_to_pt_map['model/regression_{}/logit/bias'.format(config.finetuning_task)] = model.logits_proj.bias

        # Now load the rest of the transformer
        model = model.transformer

    # Embeddings and output
    tf_to_pt_map.update({'model/transformer/word_embedding/lookup_table': model.word_embedding.weight,
                         'model/transformer/mask_emb/mask_emb': model.mask_emb})

    # Transformer blocks
    for i, b in enumerate(model.layer):
        layer_str = "model/transformer/layer_%d/" % i
        tf_to_pt_map.update({
            layer_str + "rel_attn/LayerNorm/gamma": b.rel_attn.layer_norm.weight,
            layer_str + "rel_attn/LayerNorm/beta": b.rel_attn.layer_norm.bias,
            layer_str + "rel_attn/o/kernel": b.rel_attn.o,
            layer_str + "rel_attn/q/kernel": b.rel_attn.q,
            layer_str + "rel_attn/k/kernel": b.rel_attn.k,
            layer_str + "rel_attn/r/kernel": b.rel_attn.r,
            layer_str + "rel_attn/v/kernel": b.rel_attn.v,
            layer_str + "ff/LayerNorm/gamma": b.ff.layer_norm.weight,
            layer_str + "ff/LayerNorm/beta": b.ff.layer_norm.bias,
            layer_str + "ff/layer_1/kernel": b.ff.layer_1.weight,
            layer_str + "ff/layer_1/bias": b.ff.layer_1.bias,
            layer_str + "ff/layer_2/kernel": b.ff.layer_2.weight,
            layer_str + "ff/layer_2/bias": b.ff.layer_2.bias,
        })

    # Relative positioning biases
    if config.untie_r:
        r_r_list = []
        r_w_list = []
        r_s_list = []
        seg_embed_list = []
        for b in model.layer:
            r_r_list.append(b.rel_attn.r_r_bias)
            r_w_list.append(b.rel_attn.r_w_bias)
            r_s_list.append(b.rel_attn.r_s_bias)
            seg_embed_list.append(b.rel_attn.seg_embed)
    else:
        r_r_list = [model.r_r_bias]
        r_w_list = [model.r_w_bias]
        r_s_list = [model.r_s_bias]
        seg_embed_list = [model.seg_embed]
    tf_to_pt_map.update({
        'model/transformer/r_r_bias': r_r_list,
        'model/transformer/r_w_bias': r_w_list,
        'model/transformer/r_s_bias': r_s_list,
        'model/transformer/seg_embed': seg_embed_list})
    return tf_to_pt_map

def load_tf_weights_in_xlnet(model, config, tf_path):
    """ Load tf checkpoints in a pytorch model
    """
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions.")
        raise
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    tf_weights = {}
    for name, shape in init_vars:
        print("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = build_tf_xlnet_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        print("Importing {}".format(name))
        if name not in tf_weights:
            print("{} not in tf pre-trained weights, skipping".format(name))
            continue
        array = tf_weights[name]
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
        # which are not required for using the pretrained model
        if 'kernel' in name and ('ff' in name or 'summary' in name or 'logit' in name):
            print("Transposing")
            array = np.transpose(array)
        if isinstance(pointer, list):
            # Here we will split the TF weights
            assert len(pointer) == array.shape[0]
            for i, p_i in enumerate(pointer):
                arr_i = array[i, ...]
                try:
                    assert p_i.shape == arr_i.shape
                except AssertionError as e:
                    e.args += (p_i.shape, arr_i.shape)
                    raise
                print("Initialize PyTorch weight {} for layer {}".format(name, i))
                p_i.data = torch.from_numpy(arr_i)
        else:
            try:
                assert pointer.shape == array.shape
            except AssertionError as e:
                e.args += (pointer.shape, array.shape)
                raise
            print("Initialize PyTorch weight {}".format(name))
            pointer.data = torch.from_numpy(array)
        tf_weights.pop(name, None)
        tf_weights.pop(name + '/Adam', None)
        tf_weights.pop(name + '/Adam_1', None)

    print("Weights not copied to PyTorch model: {}".format(', '.join(tf_weights.keys())))
    return model


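# Illustrative sketch (not part of the original file): a typical conversion flow,
# assuming a local TF checkpoint path. TensorFlow must be installed to actually run it.
def _load_tf_demo(tf_checkpoint_path):
    config = XLNetConfig(vocab_size_or_config_json_file=32000)  # defaults mirror xlnet-large
    model = XLNetLMHeadModel(config)  # both classes are defined further down in this file
    return load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

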
def gelu(x):
    """ Implementation of the gelu activation function.
        XLNet is using OpenAI GPT's gelu (not exactly the same as BERT)
thomwolf's avatar
thomwolf committed
182
183
        Also see https://arxiv.org/abs/1606.08415
    """
    cdf = 0.5 * (1.0 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    return x * cdf


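# Illustrative sketch (not part of the original file): the tanh approximation above
# closely tracks the exact GELU, x * Phi(x), computed here via torch.erf.
def _gelu_demo():
    x = torch.linspace(-3, 3, 7)
    exact = x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
    return (exact - gelu(x)).abs().max()  # stays well below 1e-2 on this range

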
def swish(x):
    return x * torch.sigmoid(x)


ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}


class XLNetConfig(PretrainedConfig):
    """Configuration class to store the configuration of a `XLNetModel`.
    """
    pretrained_config_archive_map = PRETRAINED_CONFIG_ARCHIVE_MAP

    def __init__(self,
                 vocab_size_or_config_json_file=32000,
                 d_model=1024,
                 n_layer=24,
                 n_head=16,
                 d_inner=4096,
                 ff_activation="gelu",
                 untie_r=True,
                 attn_type="bi",

                 max_position_embeddings=512,
                 initializer_range=0.02,
                 layer_norm_eps=1e-12,

                 dropout=0.1,
                 dropatt=0.1,
                 init="normal",
                 init_range=0.1,
                 init_std=0.02,
                 mem_len=None,
                 reuse_len=None,
                 bi_data=False,
                 clamp_len=-1,
                 same_length=False,
                 
                 finetuning_task=None,
                 num_labels=2,
                 summary_type="last",
                 use_proj=True,
                 **kwargs):
        """Constructs XLNetConfig.

        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `XLNetModel`.
            d_model: Size of the encoder layers and the pooler layer.
            n_layer: Number of hidden layers in the Transformer encoder.
            n_head: Number of attention heads for each attention layer in
                the Transformer encoder.
            d_inner: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            ff_activation: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            untie_r: whether to untie the relative position biases.
            attn_type: 'bi' for XLNet, 'uni' for Transformer-XL

            dropout: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            dropatt: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model might
                ever be used with. Typically set this to something large just in case
                (e.g., 512 or 1024 or 2048).
            initializer_range: The stddev of the truncated_normal_initializer for
                initializing all weight matrices.
            layer_norm_eps: The epsilon used by LayerNorm.

            init: str, the initialization scheme, either "normal" or "uniform".
            init_range: float, initialize the parameters with a uniform distribution
                in [-init_range, init_range]. Only effective when init="uniform".
            init_std: float, initialize the parameters with a normal distribution
                with mean 0 and stddev init_std. Only effective when init="normal".
            mem_len: int, the number of tokens to cache.
            reuse_len: int, the number of tokens in the current batch to be cached
                and reused in the future.
            bi_data: bool, whether to use bidirectional input pipeline.
                Usually set to True during pretraining and False during finetuning.
            clamp_len: int, clamp all relative distances larger than clamp_len.
                -1 means no clamping.
            same_length: bool, whether to use the same attention length for each token.
            finetuning_task: name of the GLUE task on which the model was fine-tuned, if any.
        """
        super(XLNetConfig, self).__init__(**kwargs)

        if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
                        and isinstance(vocab_size_or_config_json_file, unicode)):
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.n_token = vocab_size_or_config_json_file
            self.d_model = d_model
            self.n_layer = n_layer
            self.n_head = n_head
            assert d_model % n_head == 0
            self.d_head = d_model // n_head
            self.ff_activation = ff_activation
            self.d_inner = d_inner
            self.untie_r = untie_r
            self.attn_type = attn_type

            self.max_position_embeddings = max_position_embeddings
            self.initializer_range = initializer_range
            self.layer_norm_eps = layer_norm_eps

            self.init = init
            self.init_range = init_range
            self.init_std = init_std
            self.dropout = dropout
            self.dropatt = dropatt
            self.mem_len = mem_len
            self.reuse_len = reuse_len
            self.bi_data = bi_data
            self.clamp_len = clamp_len
            self.same_length = same_length

            self.finetuning_task = finetuning_task
            self.num_labels = num_labels
            self.summary_type = summary_type
            self.use_proj = use_proj
        else:
            raise ValueError("First argument must be either a vocabulary size (int)"
                             "or the path to a pretrained model config file (str)")

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.n_head

    @property
    def num_hidden_layers(self):
        return self.n_layer

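
# Illustrative sketch (not part of the original file): constructing a config from
# keyword arguments; the BERT-style aliases above map onto the XLNet attribute names.
def _config_demo():
    config = XLNetConfig(vocab_size_or_config_json_file=32000, d_model=1024,
                         n_layer=24, n_head=16, d_inner=4096)
    assert config.hidden_size == config.d_model == 1024
    assert config.num_attention_heads == config.n_head == 16
    assert config.num_hidden_layers == config.n_layer == 24
    return config
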

try:
    from apex.normalization.fused_layer_norm import FusedLayerNorm as XLNetLayerNorm
except ImportError:
    logger.info("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .")
    class XLNetLayerNorm(nn.Module):
        def __init__(self, d_model, eps=1e-12):
            """Construct a layernorm module in the TF style (epsilon inside the square root).
            """
            super(XLNetLayerNorm, self).__init__()
            self.weight = nn.Parameter(torch.ones(d_model))
            self.bias = nn.Parameter(torch.zeros(d_model))
            self.variance_epsilon = eps

        def forward(self, x):
            u = x.mean(-1, keepdim=True)
            s = (x - u).pow(2).mean(-1, keepdim=True)
            x = (x - u) / torch.sqrt(s + self.variance_epsilon)
            return self.weight * x + self.bias

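
# Illustrative sketch (not part of the original file): the pure-PyTorch fallback above
# agrees with torch.nn.LayerNorm (both use the biased variance and put eps inside the sqrt).
def _layer_norm_demo():
    x = torch.randn(4, 8)
    ours = XLNetLayerNorm(8, eps=1e-12)(x)
    ref = nn.LayerNorm(8, eps=1e-12)(x)
    return (ours - ref).abs().max()  # ~0 with the default affine parameters

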
class XLNetRelativeAttention(nn.Module):
    def __init__(self, config):
        super(XLNetRelativeAttention, self).__init__()
        self.output_attentions = config.output_attentions

        if config.d_model % config.n_head != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.d_model, config.n_head))

        self.n_head = config.n_head
        self.d_head = config.d_head
        self.d_model = config.d_model
        self.scale = 1 / (config.d_head ** 0.5)

        self.q = nn.Parameter(torch.Tensor(config.d_model, self.n_head, self.d_head))
        self.k = nn.Parameter(torch.Tensor(config.d_model, self.n_head, self.d_head))
        self.v = nn.Parameter(torch.Tensor(config.d_model, self.n_head, self.d_head))
        self.o = nn.Parameter(torch.Tensor(config.d_model, self.n_head, self.d_head))
        self.r = nn.Parameter(torch.Tensor(config.d_model, self.n_head, self.d_head))

        self.r_r_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
        self.r_s_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
        self.r_w_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
        self.seg_embed = nn.Parameter(torch.Tensor(2, self.n_head, self.d_head))

        self.layer_norm = XLNetLayerNorm(config.d_model, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.dropout)

    def prune_heads(self, heads):
        raise NotImplementedError

    @staticmethod
    def rel_shift(x, klen=-1):
        """perform relative shift to form the relative attention score."""
        x_size = x.shape

        x = x.reshape(x_size[1], x_size[0], x_size[2], x_size[3])
        x = x[1:, ...]
        x = x.reshape(x_size[0], x_size[1] - 1, x_size[2], x_size[3])
        x = x[:, 0:klen, :, :]

        return x

    def rel_attn_core(self, q_head, k_head_h, v_head_h, k_head_r, seg_mat=None, attn_mask=None, head_mask=None):
        """Core relative positional attention operations."""

        # content based attention score
        ac = torch.einsum('ibnd,jbnd->ijbn', q_head + self.r_w_bias, k_head_h)

        # position based attention score
        bd = torch.einsum('ibnd,jbnd->ijbn', q_head + self.r_r_bias, k_head_r)
        bd = self.rel_shift(bd, klen=ac.shape[1])

        # segment based attention score
        if seg_mat is None:
            ef = 0
        else:
            ef = torch.einsum('ibnd,snd->ibns', q_head + self.r_s_bias, self.seg_embed)
            ef = torch.einsum('ijbs,ibns->ijbn', seg_mat, ef)

        # merge attention scores and perform masking
        attn_score = (ac + bd + ef) * self.scale
        if attn_mask is not None:
            # attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask
            attn_score = attn_score - 1e30 * attn_mask

        # attention probability
        attn_prob = F.softmax(attn_score, dim=1)
        attn_prob = self.dropout(attn_prob)

        # Mask heads if we want to
        if head_mask is not None:
            attn_prob = attn_prob * head_mask

        # attention output
        attn_vec = torch.einsum('ijbn,jbnd->ibnd', attn_prob, v_head_h)

        if self.output_attentions:
            return attn_vec, attn_prob

        return attn_vec

    def post_attention(self, h, attn_vec, residual=True):
        """Post-attention processing."""
        # post-attention projection (back to `d_model`)
        attn_out = torch.einsum('ibnd,hnd->ibh', attn_vec, self.o)

        attn_out = self.dropout(attn_out)
        if residual:
            attn_out = attn_out + h
        output = self.layer_norm(attn_out)

        return output

    def forward(self, h, g,
                      attn_mask_h, attn_mask_g,
                      r, seg_mat,
                      mems=None, target_mapping=None, head_mask=None):
        if g is not None:
            ###### Two-stream attention with relative positional encoding.
            # content based attention score
            if mems is not None and mems.dim() > 1:
                cat = torch.cat([mems, h], dim=0)
            else:
                cat = h

            # content-based key head
            k_head_h = torch.einsum('ibh,hnd->ibnd', cat, self.k)

            # content-based value head
            v_head_h = torch.einsum('ibh,hnd->ibnd', cat, self.v)

            # position-based key head
            k_head_r = torch.einsum('ibh,hnd->ibnd', r, self.r)

            ##### h-stream
            # content-stream query head
            q_head_h = torch.einsum('ibh,hnd->ibnd', h, self.q)

            # core attention ops
            attn_vec_h = self.rel_attn_core(
                q_head_h, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_h, head_mask=head_mask)

            if self.output_attentions:
                attn_vec_h, attn_prob_h = attn_vec_h

            # post processing
            output_h = self.post_attention(h, attn_vec_h)

            ##### g-stream
            # query-stream query head
            q_head_g = torch.einsum('ibh,hnd->ibnd', g, self.q)

            # core attention ops
            if target_mapping is not None:
                q_head_g = torch.einsum('mbnd,mlb->lbnd', q_head_g, target_mapping)
                attn_vec_g = self.rel_attn_core(
                    q_head_g, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_g, head_mask=head_mask)

                if self.output_attentions:
                    attn_vec_g, attn_prob_g = attn_vec_g

                attn_vec_g = torch.einsum('lbnd,mlb->mbnd', attn_vec_g, target_mapping)
            else:
                attn_vec_g = self.rel_attn_core(
                    q_head_g, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_g, head_mask=head_mask)

                if self.output_attentions:
                    attn_vec_g, attn_prob_g = attn_vec_g

            # post processing
            output_g = self.post_attention(g, attn_vec_g)

            if self.output_attentions:
                attn_prob = attn_prob_h, attn_prob_g

        else:
            ###### Multi-head attention with relative positional encoding
            if mems is not None and mems.dim() > 1:
                cat = torch.cat([mems, h], dim=0)
            else:
                cat = h

            # content heads
            q_head_h = torch.einsum('ibh,hnd->ibnd', h, self.q)
            k_head_h = torch.einsum('ibh,hnd->ibnd', cat, self.k)
            v_head_h = torch.einsum('ibh,hnd->ibnd', cat, self.v)

            # positional heads
            k_head_r = torch.einsum('ibh,hnd->ibnd', r, self.r)

            # core attention ops
            attn_vec = self.rel_attn_core(
                q_head_h, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_h, head_mask=head_mask)

            if self.output_attentions:
                attn_vec, attn_prob = attn_vec

            # post processing
            output_h = self.post_attention(h, attn_vec)
            output_g = None

        outputs = [output_h, output_g]
        if self.output_attentions:
            outputs = outputs + [attn_prob]
        return outputs

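
# Illustrative sketch (not part of the original file): `rel_shift` implements the
# Transformer-XL style relative shift, realigning the position-based scores and then
# trimming the position axis down to klen. Shapes assumed: [qlen, n_pos, bsz, n_head].
def _rel_shift_demo():
    bd = torch.arange(2 * 5, dtype=torch.float).view(2, 5, 1, 1)  # qlen=2, 5 positions
    shifted = XLNetRelativeAttention.rel_shift(bd, klen=3)
    return shifted.shape  # torch.Size([2, 3, 1, 1])

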
class XLNetFeedForward(nn.Module):
    def __init__(self, config):
        super(XLNetFeedForward, self).__init__()
        self.layer_norm = XLNetLayerNorm(config.d_model, eps=config.layer_norm_eps)
        self.layer_1 = nn.Linear(config.d_model, config.d_inner)
        self.layer_2 = nn.Linear(config.d_inner, config.d_model)
        self.dropout = nn.Dropout(config.dropout)
        if isinstance(config.ff_activation, str) or \
                (sys.version_info[0] == 2 and isinstance(config.ff_activation, unicode)):
            self.activation_function = ACT2FN[config.ff_activation]
        else:
            self.activation_function = config.ff_activation

    def forward(self, inp):
        output = inp
        output = self.layer_1(output)
        output = self.activation_function(output)
        output = self.dropout(output)
        output = self.layer_2(output)
        output = self.dropout(output)
        output = self.layer_norm(output + inp)
        return output

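
# Illustrative sketch (not part of the original file): the feed-forward block is
# shape-preserving ([qlen, bsz, d_model] in and out) thanks to the residual + layer norm.
def _ff_demo():
    config = XLNetConfig(vocab_size_or_config_json_file=32, d_model=8, n_layer=1,
                         n_head=2, d_inner=16)
    ff = XLNetFeedForward(config)
    x = torch.randn(3, 2, 8)  # [qlen, bsz, d_model]
    return ff(x).shape        # torch.Size([3, 2, 8])

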
class XLNetLayer(nn.Module):
    def __init__(self, config):
        super(XLNetLayer, self).__init__()
        self.rel_attn = XLNetRelativeAttention(config)
        self.ff = XLNetFeedForward(config)
        self.dropout = nn.Dropout(config.dropout)

    def forward(self, output_h, output_g,
                attn_mask_h, attn_mask_g,
                r, seg_mat, mems=None, target_mapping=None, head_mask=None):
        outputs = self.rel_attn(output_h, output_g, attn_mask_h, attn_mask_g,
                                r, seg_mat, mems=mems, target_mapping=target_mapping,
                                head_mask=head_mask)
        output_h, output_g = outputs[:2]

        if output_g is not None:
            output_g = self.ff(output_g)
        output_h = self.ff(output_h)

        outputs = [output_h, output_g] + outputs[2:]  # Add the attentions again if they are there
        return outputs


class XLNetPreTrainedModel(PreTrainedModel):
    """ An abstract class to handle weights initialization and
        a simple interface for dowloading and loading pretrained models.
    """
    config_class = XLNetConfig
    pretrained_model_archive_map = PRETRAINED_MODEL_ARCHIVE_MAP
    load_tf_weights = load_tf_weights_in_xlnet
    base_model_prefix = "transformer"

    def __init__(self, *inputs, **kwargs):
        super(XLNetPreTrainedModel, self).__init__(*inputs, **kwargs)

    def init_weights(self, module):
        """ Initialize the weights.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, XLNetLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, XLNetRelativeAttention):
            for param in [module.q, module.k, module.v, module.o, module.r,
                          module.r_r_bias, module.r_s_bias, module.r_w_bias,
                          module.seg_embed]:
                param.data.normal_(mean=0.0, std=self.config.initializer_range)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()


class XLNetModel(XLNetPreTrainedModel):
    def __init__(self, config):
        super(XLNetModel, self).__init__(config)
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states

        self.mem_len = config.mem_len
        self.reuse_len = config.reuse_len
        self.d_model = config.d_model
        self.same_length = config.same_length
        self.attn_type = config.attn_type
        self.bi_data = config.bi_data
        self.clamp_len = config.clamp_len
        self.n_layer = config.n_layer

        self.word_embedding = nn.Embedding(config.n_token, config.d_model)
        self.mask_emb = nn.Parameter(torch.Tensor(1, 1, config.d_model))
        layer = XLNetLayer(config)
        self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.n_layer)])
        self.dropout = nn.Dropout(config.dropout)

    def _prune_heads(self, heads_to_prune):
        logger.info("Head pruning is not implemented for XLNet")
        pass

    def create_mask(self, qlen, mlen):
        """ create causal attention mask.
            float mask where 1.0 indicate masked, 0.0 indicated not-masked.
             same_length=False:      same_length=True:
             <mlen > <  qlen >       <mlen > <  qlen >
          ^ [0 0 0 0 0 1 1 1 1]     [0 0 0 0 0 1 1 1 1]
            [0 0 0 0 0 0 1 1 1]     [1 0 0 0 0 0 1 1 1]
       qlen [0 0 0 0 0 0 0 1 1]     [1 1 0 0 0 0 0 1 1]
            [0 0 0 0 0 0 0 0 1]     [1 1 1 0 0 0 0 0 1]
          v [0 0 0 0 0 0 0 0 0]     [1 1 1 1 0 0 0 0 0]
        """
        attn_mask = torch.ones([qlen, qlen])
        mask_up = torch.triu(attn_mask, diagonal=1)
        attn_mask_pad = torch.zeros([qlen, mlen])
        ret = torch.cat([attn_mask_pad, mask_up], dim=1)
        if self.same_length:
            mask_lo = torch.tril(attn_mask, diagonal=-1)
            ret = torch.cat([ret[:, :qlen] + mask_lo, ret[:, qlen:]], dim=1)

        ret = ret.to(next(self.parameters()))
        return ret

    def cache_mem(self, curr_out, prev_mem):
        """cache hidden states into memory."""
        if self.mem_len is None or self.mem_len == 0:
            return None
        else:
            if self.reuse_len is not None and self.reuse_len > 0:
                curr_out = curr_out[:self.reuse_len]

            if prev_mem is None:
                new_mem = curr_out[-self.mem_len:]
            else:
                new_mem = torch.cat([prev_mem, curr_out], dim=0)[-self.mem_len:]

        return new_mem.detach()

    @staticmethod
    def positional_embedding(pos_seq, inv_freq, bsz=None):
        sinusoid_inp = torch.einsum('i,d->id', pos_seq, inv_freq)
        pos_emb = torch.cat([torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)], dim=-1)
        pos_emb = pos_emb[:, None, :]

        if bsz is not None:
            pos_emb = pos_emb.expand(-1, bsz, -1)

        return pos_emb

    def relative_positional_encoding(self, qlen, klen, bsz=None):
        """create relative positional encoding."""
        freq_seq = torch.arange(0, self.d_model, 2.0, dtype=torch.float)
        inv_freq = 1 / (10000 ** (freq_seq / self.d_model))

        if self.attn_type == 'bi':
            # beg, end = klen - 1, -qlen
            beg, end = klen, -qlen
        elif self.attn_type == 'uni':
            # beg, end = klen - 1, -1
            beg, end = klen, -1
        else:
            raise ValueError('Unknown `attn_type` {}.'.format(self.attn_type))

        if self.bi_data:
            fwd_pos_seq = torch.arange(beg, end, -1.0, dtype=torch.float)
            bwd_pos_seq = torch.arange(-beg, -end, 1.0, dtype=torch.float)

            if self.clamp_len > 0:
                fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
                bwd_pos_seq = bwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)

            if bsz is not None:
                fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz//2)
                bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq, bsz//2)
            else:
                fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq)
                bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq)

            pos_emb = torch.cat([fwd_pos_emb, bwd_pos_emb], dim=1)
        else:
            fwd_pos_seq = torch.arange(beg, end, -1.0)
            if self.clamp_len > 0:
                fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
            pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz)

        pos_emb = pos_emb.to(next(self.parameters()))
        return pos_emb

    def forward(self, input_ids, token_type_ids=None, input_mask=None, attention_mask=None,
                mems=None, perm_mask=None, target_mapping=None, inp_q=None, head_mask=None):
        """
        Args:
            input_ids: int32 Tensor in shape [bsz, len], the input token IDs.
            token_type_ids: int32 Tensor in shape [bsz, len], the input segment IDs.
            input_mask: [optional] float32 Tensor in shape [bsz, len], the input mask.
                0 for real tokens and 1 for padding.
            attention_mask: [optional] float32 Tensor, SAME FUNCTION as `input_mask`
                but with 1 for real tokens and 0 for padding.
                Added for easy compatibility with the BERT model (which uses the opposite masking convention).
                You can only use one of `input_mask` and `attention_mask`.
            mems: [optional] a list of float32 Tensors in shape [mem_len, bsz, d_model], memory
                from previous batches. The length of the list equals n_layer.
                If None, no memory is used.
            perm_mask: [optional] float32 Tensor in shape [bsz, len, len].
                If perm_mask[k, i, j] = 0, i attends to j in batch k;
                if perm_mask[k, i, j] = 1, i does not attend to j in batch k.
                If None, each position attends to all the others.
            target_mapping: [optional] float32 Tensor in shape [bsz, num_predict, len].
                If target_mapping[k, i, j] = 1, the i-th prediction in batch k is
                on the j-th token.
                Only used during pretraining for partial prediction.
                Set to None during finetuning.
            inp_q: [optional] float32 Tensor in shape [bsz, len].
                1 for tokens with losses and 0 for tokens without losses.
                Only used during pretraining for two-stream attention.
                Set to None during finetuning.

        """
        # the original code for XLNet uses shapes [len, bsz] with the batch dimension at the end
        # but we want a unified interface in the library with the batch size on the first dimension
        # so here we move the batch dimension (first) to the end
        input_ids = input_ids.transpose(0, 1).contiguous()
        token_type_ids = token_type_ids.transpose(0, 1).contiguous() if token_type_ids is not None else None
        input_mask = input_mask.transpose(0, 1).contiguous() if input_mask is not None else None
        attention_mask = attention_mask.transpose(0, 1).contiguous() if attention_mask is not None else None
        perm_mask = perm_mask.permute(1, 2, 0).contiguous() if perm_mask is not None else None
        target_mapping = target_mapping.permute(1, 2, 0).contiguous() if target_mapping is not None else None
        inp_q = inp_q.transpose(0, 1).contiguous() if inp_q is not None else None

        qlen, bsz = input_ids.shape[0], input_ids.shape[1]
        mlen = mems[0].shape[0] if mems is not None else 0
        klen = mlen + qlen

        dtype_float = next(self.parameters()).dtype
        device = next(self.parameters()).device

        ##### Attention mask
        # causal attention mask
        if self.attn_type == 'uni':
            attn_mask = self.create_mask(qlen, mlen)
            attn_mask = attn_mask[:, :, None, None]
        elif self.attn_type == 'bi':
            attn_mask = None
        else:
            raise ValueError('Unsupported attention type: {}'.format(self.attn_type))

        # data mask: input mask & perm mask
        assert input_mask is None or attention_mask is None, (
            "You can only use one of input_mask (uses 1 for padding) "
            "or attention_mask (uses 0 for padding, added for compatibility with BERT). Please choose one.")
        if input_mask is None and attention_mask is not None:
            input_mask = 1.0 - attention_mask
        if input_mask is not None and perm_mask is not None:
            data_mask = input_mask[None] + perm_mask
        elif input_mask is not None and perm_mask is None:
            data_mask = input_mask[None]
        elif input_mask is None and perm_mask is not None:
            data_mask = perm_mask
        else:
            data_mask = None

        if data_mask is not None:
            # all mems can be attended to
            mems_mask = torch.zeros([data_mask.shape[0], mlen, bsz]).to(data_mask)
            data_mask = torch.cat([mems_mask, data_mask], dim=1)
            if attn_mask is None:
                attn_mask = data_mask[:, :, :, None]
            else:
                attn_mask += data_mask[:, :, :, None]

        if attn_mask is not None:
            attn_mask = (attn_mask > 0).to(dtype_float)

        if attn_mask is not None:
            non_tgt_mask = -torch.eye(qlen).to(attn_mask)
            non_tgt_mask = torch.cat([torch.zeros([qlen, mlen]).to(attn_mask), non_tgt_mask], dim=-1)
            non_tgt_mask = ((attn_mask + non_tgt_mask[:, :, None, None]) > 0).to(attn_mask)
        else:
            non_tgt_mask = None

        ##### Word embeddings and prepare h & g hidden states
        word_emb_k = self.word_embedding(input_ids)
        output_h = self.dropout(word_emb_k)
        if inp_q is not None:
            if target_mapping is not None:
                word_emb_q = self.mask_emb.expand(target_mapping.shape[0], bsz, -1)
            else:
                inp_q_ext = inp_q[:, :, None]
                word_emb_q = inp_q_ext * self.mask_emb + (1 - inp_q_ext) * word_emb_k
            output_g = self.dropout(word_emb_q)
        else:
            output_g = None

        ##### Segment embedding
        if token_type_ids is not None:
            # Convert `token_type_ids` to one-hot `seg_mat`
            mem_pad = torch.zeros([mlen, bsz], dtype=torch.long, device=device)
            cat_ids = torch.cat([mem_pad, token_type_ids], dim=0)

            # `1` indicates not in the same segment [qlen x klen x bsz]
            seg_mat = (token_type_ids[:, None] != cat_ids[None, :]).long()
            seg_mat = F.one_hot(seg_mat, num_classes=2).to(dtype_float)
        else:
            seg_mat = None

        ##### Positional encoding
        pos_emb = self.relative_positional_encoding(qlen, klen, bsz=bsz)
        pos_emb = self.dropout(pos_emb)

        # Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer)
        # and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head]
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0).unsqueeze(0)
                head_mask = head_mask.expand(self.n_layer, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(1).unsqueeze(1)
            head_mask = head_mask.to(dtype=next(self.parameters()).dtype)  # switch to float if needed + fp16 compatibility
        else:
            head_mask = [None] * self.n_layer

        new_mems = []
        if mems is None:
            mems = [None] * len(self.layer)

        attentions = []
        hidden_states = []
        for i, layer_module in enumerate(self.layer):
            # cache new mems
            new_mems.append(self.cache_mem(output_h, mems[i]))
            if self.output_hidden_states:
                hidden_states.append((output_h, output_g) if output_g is not None else output_h)

            outputs = layer_module(output_h, output_g, attn_mask_h=non_tgt_mask, attn_mask_g=attn_mask,
                                   r=pos_emb, seg_mat=seg_mat, mems=mems[i], target_mapping=target_mapping,
                                   head_mask=head_mask[i])
            output_h, output_g = outputs[:2]
            if self.output_attentions:
                attentions.append(outputs[2])

        # Add last hidden state
        if self.output_hidden_states:
            hidden_states.append((output_h, output_g) if output_g is not None else output_h)

        output = self.dropout(output_g if output_g is not None else output_h)

        # Prepare outputs, we transpose back here to shape [bsz, len, hidden_dim] (cf. beginning of forward() method)
        outputs = [output.permute(1, 0, 2).contiguous(), new_mems]
        if self.output_hidden_states:
            if output_g is not None:
                hidden_states = [h.permute(1, 0, 2).contiguous() for hs in hidden_states for h in hs]
            else:
                hidden_states = [hs.permute(1, 0, 2).contiguous() for hs in hidden_states]
            outputs.append(hidden_states)
        if self.output_attentions:
            attentions = list(t.permute(2, 3, 0, 1).contiguous() for t in attentions)
            outputs.append(attentions)

        return outputs  # outputs, new_mems, (hidden_states), (attentions)


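# Illustrative sketches (not part of the original file) for two XLNetModel helpers.
# `positional_embedding` is a staticmethod; for `cache_mem` we pass a minimal stand-in
# object carrying only the attributes the method reads (mem_len, reuse_len).
def _positional_embedding_demo():
    pos_seq = torch.arange(5, -1, -1.0)  # 6 relative positions, 5..0
    inv_freq = 1 / (10000 ** (torch.arange(0, 8, 2.0) / 8))
    pos_emb = XLNetModel.positional_embedding(pos_seq, inv_freq, bsz=2)
    return pos_emb.shape  # torch.Size([6, 2, 8]) -- [n_pos, bsz, 2 * len(inv_freq)]


def _cache_mem_demo():
    dummy = type('Dummy', (), {'mem_len': 3, 'reuse_len': None})()
    curr_out = torch.arange(5, dtype=torch.float).view(5, 1, 1)  # [seq, bsz, d_model]
    new_mem = XLNetModel.cache_mem(dummy, curr_out, None)
    return new_mem.squeeze()  # tensor([2., 3., 4.]) -- last mem_len positions, detached

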
class XLNetLMHeadModel(XLNetPreTrainedModel):
    """XLNet model ("XLNet: Generalized Autoregressive Pretraining for Language Understanding").

    Params:
        `config`: a XLNetConfig class instance with the configuration to build a new model

    Inputs:
        input_ids: int32 Tensor in shape [bsz, len], the input token IDs.
        token_type_ids: int32 Tensor in shape [bsz, len], the input segment IDs.
        input_mask: [optional] float32 Tensor in shape [bsz, len], the input mask.
            0 for real tokens and 1 for padding.
        attention_mask: [optional] float32 Tensor, SAME FUNCTION as `input_mask`
            but with 1 for real tokens and 0 for padding.
            Added for easy compatibility with the BERT model (which uses the opposite masking convention).
            You can only use one of `input_mask` and `attention_mask`.
        mems: [optional] a list of float32 Tensors in shape [mem_len, bsz, d_model], memory
            from previous batches. The length of the list equals n_layer.
            If None, no memory is used.
        perm_mask: [optional] float32 Tensor in shape [bsz, len, len].
            If perm_mask[k, i, j] = 0, i attends to j in batch k;
            if perm_mask[k, i, j] = 1, i does not attend to j in batch k.
            If None, each position attends to all the others.
        target_mapping: [optional] float32 Tensor in shape [bsz, num_predict, len].
            If target_mapping[k, i, j] = 1, the i-th prediction in batch k is
            on the j-th token.
            Only used during pretraining for partial prediction.
            Set to None during finetuning.
        inp_q: [optional] float32 Tensor in shape [bsz, len].
            1 for tokens with losses and 0 for tokens without losses.
            Only used during pretraining for two-stream attention.
            Set to None during finetuning.


    Outputs: a list of
        `loss`: [optional] a scalar language modeling loss, only returned when `labels` is provided.
        `logits`: a torch.FloatTensor of size [batch_size, sequence_length, n_token]
            with the token prediction logits.
        `new_mems`: a list of torch.FloatTensors (one per layer) with the updated memory states,
            which can be fed back as `mems` on the next forward call.
        `hidden_states`: [optional] only returned when config.output_hidden_states is True.
        `attentions`: [optional] only returned when config.output_attentions is True.

    Example usage:
    ```python
    # Already been converted into SentencePiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.FloatTensor([[0, 0, 0], [0, 0, 1]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

    config = modeling.XLNetConfig(vocab_size_or_config_json_file=32000, d_model=768,
        n_layer=12, n_head=12, d_inner=3072)

    model = modeling.XLNetLMHeadModel(config=config)
    logits, new_mems = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config):
        super(XLNetLMHeadModel, self).__init__(config)
        self.attn_type = config.attn_type
        self.same_length = config.same_length

        self.transformer = XLNetModel(config)
        self.lm_loss = nn.Linear(config.d_model, config.n_token, bias=True)

        # Tie weights

        self.apply(self.init_weights)
        self.tie_weights()

    def tie_weights(self):
        """ Make sure we are sharing the embeddings
thomwolf's avatar
thomwolf committed
986
        """
        self.lm_loss.weight = self.transformer.word_embedding.weight

    def forward(self, input_ids, token_type_ids=None, input_mask=None, attention_mask=None,
                mems=None, perm_mask=None, target_mapping=None, inp_q=None,
                labels=None, head_mask=None):
        """
        Args:
            input_ids: int32 Tensor in shape [bsz, len], the input token IDs.
            token_type_ids: int32 Tensor in shape [bsz, len], the input segment IDs.
            input_mask: float32 Tensor in shape [bsz, len], the input mask.
                0 for real tokens and 1 for padding.
            attention_mask: [optional] float32 Tensor, SAME FUNCTION as `input_mask`
                but with 1 for real tokens and 0 for padding.
                Added for easy compatibility with the BERT model (which uses the opposite masking convention).
                You can only use one of `input_mask` and `attention_mask`.
            mems: a list of float32 Tensors in shape [mem_len, bsz, d_model], memory
                from previous batches. The length of the list equals n_layer.
                If None, no memory is used.
            perm_mask: float32 Tensor in shape [bsz, len, len].
                If perm_mask[k, i, j] = 0, i attends to j in batch k;
                if perm_mask[k, i, j] = 1, i does not attend to j in batch k.
                If None, each position attends to all the others.
            target_mapping: float32 Tensor in shape [bsz, num_predict, len].
                If target_mapping[k, i, j] = 1, the i-th prediction in batch k
                targets the j-th token.
                Only used during pretraining for partial prediction.
                Set to None during finetuning.
            inp_q: float32 Tensor in shape [bsz, len].
                1 for tokens with losses and 0 for tokens without losses.
                Only used during pretraining for two-stream attention.
                Set to None during finetuning.
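
        Example (a minimal sketch of partial prediction; `model` is an `XLNetLMHeadModel`
        and `input_ids` is the 2 x 3 tensor from the class example above):
        ```python
        perm_mask = torch.zeros(2, 3, 3)
        perm_mask[:, :, -1] = 1.0  # no position may attend to the last token
        target_mapping = torch.zeros(2, 1, 3)
        target_mapping[:, 0, -1] = 1.0  # the single prediction targets the last position
        logits = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping)[0]
        ```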

        """
        transformer_outputs = self.transformer(input_ids, token_type_ids, input_mask, attention_mask,
                                               mems, perm_mask, target_mapping, inp_q, head_mask)

        logits = self.lm_loss(transformer_outputs[0])

        outputs = [logits] + transformer_outputs[1:]  # keep mems, hidden states, attentions (if present)

        if labels is not None:
            # Flatten the tokens; label positions set to -1 are ignored by the loss
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            loss = loss_fct(logits.view(-1, logits.size(-1)),
                            labels.view(-1))
            outputs = [loss] + outputs

        return outputs  # return (loss), logits, (mems), (hidden states), (attentions)

class XLNetSequenceSummary(nn.Module):
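    """ Pool the sequence of hidden-states into a single summary vector, following
        `config.summary_type`: 'last', 'first' or 'mean' ('attn' is not implemented yet).

        A minimal usage sketch (shapes are illustrative; `config` is an XLNetConfig):
        ```python
        summary_module = XLNetSequenceSummary(config)
        hidden_states = torch.rand(4, 128, config.d_model)  # [bsz, seq_len, d_model]
        summary = summary_module(hidden_states)             # [bsz, d_model]
        ```
    """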
    def __init__(self, config):
        super(XLNetSequenceSummary, self).__init__()
        self.summary_type = config.summary_type
        if config.use_proj:
            self.summary = nn.Linear(config.d_model, config.d_model)
        else:
            self.summary = None
        if config.summary_type == 'attn':
            # We should use a standard multi-head attention module with absolute positional embedding for that.
            # Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276
            # We can probably just use the multi-head attention module of PyTorch >=1.1.0
            raise NotImplementedError
        self.dropout = nn.Dropout(config.dropout)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        """ hidden_states: float Tensor in shape [bsz, seq_len, d_model], the hidden-states of the last layer."""
        if self.summary_type == 'last':
            output = hidden_states[:, -1]
        elif self.summary_type == 'first':
            output = hidden_states[:, 0]
        elif self.summary_type == 'mean':
            output = hidden_states.mean(dim=1)
        elif self.summary_type == 'attn':
            raise NotImplementedError

        if self.summary is not None:  # projection only when config.use_proj is set
            output = self.summary(output)
        output = self.activation(output)
        output = self.dropout(output)
        return output


class XLNetForSequenceClassification(XLNetPreTrainedModel):
    """XLNet model ("XLNet: Generalized Autoregressive Pretraining for Language Understanding").

    Params:
        `config`: a XLNetConfig class instance with the configuration to build a new model
        `output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
        `keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
            This can be used to compute head importance metrics. Default: False
        `summary_type`: str, "last", "first", "mean", or "attn". The method
            to pool the input to get a vector representation. Default: last

    Inputs:
        input_ids: int32 Tensor in shape [bsz, len], the input token IDs.
        token_type_ids: int32 Tensor in shape [bsz, len], the input segment IDs.
        input_mask: float32 Tensor in shape [bsz, len], the input mask.
            0 for real tokens and 1 for padding.
        attention_mask: [optional] float32 Tensor, SAME FUNCTION as `input_mask`
            but with 1 for real tokens and 0 for padding.
            Added for easy compatibility with the BERT model (which uses the opposite convention).
            You can use only one of `input_mask` and `attention_mask`.
        mems: a list of float32 Tensors in shape [mem_len, bsz, d_model], memory
            from previous batches. The length of the list equals n_layer.
            If None, no memory is used.
        perm_mask: float32 Tensor in shape [bsz, len, len].
            If perm_mask[k, i, j] = 0, i attends to j in batch k;
            if perm_mask[k, i, j] = 1, i does not attend to j in batch k.
            If None, each position attends to all the others.
        target_mapping: float32 Tensor in shape [bsz, num_predict, len].
            If target_mapping[k, i, j] = 1, the i-th prediction in batch k
            targets the j-th token.
            Only used during pretraining for partial prediction.
            Set to None during finetuning.
        inp_q: float32 Tensor in shape [bsz, len].
            1 for tokens with losses and 0 for tokens without losses.
            Only used during pretraining for two-stream attention.
            Set to None during finetuning.
        `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with values between 0 and 1.
            It's a mask used to nullify selected heads of the transformer. 1.0 => head is fully masked, 0.0 => head is not masked.


    Outputs: Tuple of (logits or loss, mems)
        `logits or loss`:
            if labels is None:
                Classification (or regression) logits of shape [batch_size, num_labels]
            else:
                CrossEntropy loss (MSE loss when num_labels == 1) against the targets
        `new_mems`: list (num layers) of updated mem states at the entry of each layer
            each mem state is a torch.FloatTensor of size [self.config.mem_len, batch_size, self.config.d_model]
            Note that the first two dimensions are transposed in `mems` with regards to `input_ids`

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.FloatTensor([[0, 0, 0], [0, 0, 1]])  # 0 for real tokens, 1 for padding
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

    config = modeling.XLNetConfig(vocab_size_or_config_json_file=32000, d_model=768,
        n_layer=12, num_attention_heads=12, intermediate_size=3072)

    model = modeling.XLNetForSequenceClassification(config=config)
    logits = model(input_ids, token_type_ids, input_mask)[0]
    ```
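
    A sketch of the training path (hypothetical binary labels for the two sequences;
    the loss is prepended to the outputs):
    ```python
    labels = torch.LongTensor([1, 0])
    outputs = model(input_ids, token_type_ids, input_mask, labels=labels)
    loss, logits = outputs[0], outputs[1]
    ```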
    """
    def __init__(self, config):
        super(XLNetForSequenceClassification, self).__init__(config)

        self.transformer = XLNetModel(config)
        self.sequence_summary = XLNetSequenceSummary(config)
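        # project the pooled summary vector onto the label space (num_labels classes, or 1 for regression)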
        self.logits_proj = nn.Linear(config.d_model, config.num_labels)

        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, input_mask=None, attention_mask=None,
                mems=None, perm_mask=None, target_mapping=None, inp_q=None,
                labels=None, head_mask=None):
        """
        Args:
            input_ids: int32 Tensor in shape [bsz, len], the input token IDs.
            token_type_ids: int32 Tensor in shape [bsz, len], the input segment IDs.
            input_mask: float32 Tensor in shape [bsz, len], the input mask.
                0 for real tokens and 1 for padding.
            attention_mask: [optional] float32 Tensor, SAME FUNCTION as `input_mask`
                but with 1 for real tokens and 0 for padding.
                Added for easy compatibility with the BERT model (which uses the opposite convention).
                You can use only one of `input_mask` and `attention_mask`.
            mems: a list of float32 Tensors in shape [mem_len, bsz, d_model], memory
                from previous batches. The length of the list equals n_layer.
                If None, no memory is used.
            perm_mask: float32 Tensor in shape [bsz, len, len].
                If perm_mask[k, i, j] = 0, i attends to j in batch k;
                if perm_mask[k, i, j] = 1, i does not attend to j in batch k.
                If None, each position attends to all the others.
            target_mapping: float32 Tensor in shape [bsz, num_predict, len].
                If target_mapping[k, i, j] = 1, the i-th prediction in batch k
                targets the j-th token.
                Only used during pretraining for partial prediction.
                Set to None during finetuning.
            inp_q: float32 Tensor in shape [bsz, len].
                1 for tokens with losses and 0 for tokens without losses.
                Only used during pretraining for two-stream attention.
                Set to None during finetuning.
        """
        transformer_outputs = self.transformer(input_ids, token_type_ids, input_mask, attention_mask,
                                               mems, perm_mask, target_mapping, inp_q, head_mask)
        output = transformer_outputs[0]

        output = self.sequence_summary(output)
        logits = self.logits_proj(output)

        outputs = [logits] + transformer_outputs[1:]  # keep mems, hidden states, attentions (if present)

        if labels is not None:
            if self.num_labels == 1:
                #  We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            outputs = [loss] + outputs

        return outputs  # return (loss), logits, (mems), (hidden states), (attentions)


class XLNetForQuestionAnswering(XLNetPreTrainedModel):
    """XLNet model for Question Answering (span extraction).
    This module is composed of the XLNet model with a linear layer on top of
    the sequence output that computes start_logits and end_logits

    Params:
        `config`: a XLNetConfig class instance with the configuration to build a new model
        `output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
        `keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
            This can be used to compute head importance metrics. Default: False

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
            `run_bert_extract_features.py`, `run_bert_classifier.py` and `run_bert_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see XLNet paper for more details).
        `attention_mask`: [optional] float32 Tensor, SAME FUNCTION as `input_mask`
            but with 1 for real tokens and 0 for padding.
            Added for easy compatibility with the BERT model (which uses the opposite convention).
            You can use only one of `input_mask` and `attention_mask`.
        `input_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `start_positions`: position of the first token for the labeled span: torch.LongTensor of shape [batch_size].
            Positions are clamped to the length of the sequence; positions outside of the sequence are not taken
            into account for computing the loss.
        `end_positions`: position of the last token for the labeled span: torch.LongTensor of shape [batch_size].
            Positions are clamped to the length of the sequence; positions outside of the sequence are not taken
            into account for computing the loss.
        `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with values between 0 and 1.
            It's a mask used to nullify selected heads of the transformer. 1.0 => head is fully masked, 0.0 => head is not masked.

    Outputs:
        if `start_positions` and `end_positions` are not `None`:
            Outputs the total_loss which is the sum of the CrossEntropy loss for the start and end token positions.
        if `start_positions` or `end_positions` is `None`:
            Outputs a tuple of start_logits, end_logits which are the logits respectively for the start and end
            position tokens of shape [batch_size, sequence_length].

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.FloatTensor([[0, 0, 0], [0, 0, 1]])  # 0 for real tokens, 1 for padding
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

    config = XLNetConfig(vocab_size_or_config_json_file=32000, d_model=768,
        n_layer=12, num_attention_heads=12, intermediate_size=3072)

    model = XLNetForQuestionAnswering(config)
    start_logits, end_logits = model(input_ids, token_type_ids, input_mask)[:2]
    ```
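
    A sketch of the training path (hypothetical span labels; the total loss is
    prepended to the outputs):
    ```python
    start_positions = torch.LongTensor([0, 1])
    end_positions = torch.LongTensor([1, 2])
    outputs = model(input_ids, token_type_ids, input_mask,
                    start_positions=start_positions, end_positions=end_positions)
    total_loss = outputs[0]
    ```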
    """
    def __init__(self, config):
        super(XLNetForQuestionAnswering, self).__init__(config)

        self.transformer = XLNetModel(config)
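        # joint projection for span prediction; assumes config.num_labels == 2 (start/end logits, split in forward)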
        self.qa_outputs = nn.Linear(config.d_model, config.num_labels)

        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, input_mask=None, attention_mask=None,
                mems=None, perm_mask=None, target_mapping=None, inp_q=None,
                start_positions=None, end_positions=None, head_mask=None):
        transformer_outputs = self.transformer(input_ids, token_type_ids, input_mask, attention_mask,
                                               mems, perm_mask, target_mapping, inp_q, head_mask)

        logits = self.qa_outputs(transformer_outputs[0])

        start_logits, end_logits = logits.split(1, dim=-1)  # two [bsz, len, 1] slices
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)

        outputs = [start_logits, end_logits] + transformer_outputs[1:]  # keep mems, hidden states, attentions (if present)

        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, the position tensors may carry an extra dimension: squeeze it
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # Sometimes the start/end positions fall outside the model inputs; clamp them to `ignored_index` so the loss ignores them
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
            outputs = [total_loss] + outputs

        return outputs  # return (loss), logits, (mems), (hidden states), (attentions)