".github/git@developer.sourcefind.cn:zhaoyu6/sglang.git" did not exist on "5fa3058f01549242263df81243a142071524ad66"
modeling_xlnet.py 59.2 KB
Newer Older
thomwolf's avatar
thomwolf committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch XLNet model.
"""
from __future__ import absolute_import, division, print_function, unicode_literals

import json
import logging
import math
import os
import sys
from io import open

import torch
from torch import nn
from torch.nn import functional as F
from torch.nn import CrossEntropyLoss, MSELoss

from .modeling_utils import (CONFIG_NAME, WEIGHTS_NAME, PretrainedConfig, PreTrainedModel,
                             SequenceSummary, PoolerAnswerClass, PoolerEndLogits, PoolerStartLogits)


logger = logging.getLogger(__name__)

XLNET_PRETRAINED_MODEL_ARCHIVE_MAP = {
    'xlnet-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-large-cased-pytorch_model.bin",
}
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'xlnet-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-large-cased-config.json",
}


def build_tf_xlnet_to_pytorch_map(model, config, tf_weights=None):
    """ A map of modules from TF to PyTorch.
        I use a map to keep the PyTorch model as
        identical to the original PyTorch model as possible.
    """

    tf_to_pt_map = {}

    if hasattr(model, 'transformer'):
        if hasattr(model, 'lm_loss'):
            # We will also load the output bias
            tf_to_pt_map['model/lm_loss/bias'] = model.lm_loss.bias
        if hasattr(model, 'sequence_summary') and 'model/sequnece_summary/summary/kernel' in tf_weights:
            # We will also load the sequence summary (the misspelling 'sequnece' appears to match
            # the variable names in the original TF checkpoint, so it must be kept as-is)
            tf_to_pt_map['model/sequnece_summary/summary/kernel'] = model.sequence_summary.summary.weight
            tf_to_pt_map['model/sequnece_summary/summary/bias'] = model.sequence_summary.summary.bias
        if hasattr(model, 'logits_proj') and config.finetuning_task is not None \
                and 'model/regression_{}/logit/kernel'.format(config.finetuning_task) in tf_weights:
            tf_to_pt_map['model/regression_{}/logit/kernel'.format(config.finetuning_task)] = model.logits_proj.weight
            tf_to_pt_map['model/regression_{}/logit/bias'.format(config.finetuning_task)] = model.logits_proj.bias

        # Now load the rest of the transformer
        model = model.transformer

    # Embeddings and output
    tf_to_pt_map.update({'model/transformer/word_embedding/lookup_table': model.word_embedding.weight,
                         'model/transformer/mask_emb/mask_emb': model.mask_emb})

    # Transformer blocks
    for i, b in enumerate(model.layer):
        layer_str = "model/transformer/layer_%d/" % i
        tf_to_pt_map.update({
            layer_str + "rel_attn/LayerNorm/gamma": b.rel_attn.layer_norm.weight,
            layer_str + "rel_attn/LayerNorm/beta": b.rel_attn.layer_norm.bias,
            layer_str + "rel_attn/o/kernel": b.rel_attn.o,
            layer_str + "rel_attn/q/kernel": b.rel_attn.q,
            layer_str + "rel_attn/k/kernel": b.rel_attn.k,
            layer_str + "rel_attn/r/kernel": b.rel_attn.r,
            layer_str + "rel_attn/v/kernel": b.rel_attn.v,
            layer_str + "ff/LayerNorm/gamma": b.ff.layer_norm.weight,
            layer_str + "ff/LayerNorm/beta": b.ff.layer_norm.bias,
            layer_str + "ff/layer_1/kernel": b.ff.layer_1.weight,
            layer_str + "ff/layer_1/bias": b.ff.layer_1.bias,
            layer_str + "ff/layer_2/kernel": b.ff.layer_2.weight,
            layer_str + "ff/layer_2/bias": b.ff.layer_2.bias,
        })

    # Relative positioning biases
    if config.untie_r:
        r_r_list = []
        r_w_list = []
        r_s_list = []
        seg_embed_list = []
        for b in model.layer:
            r_r_list.append(b.rel_attn.r_r_bias)
            r_w_list.append(b.rel_attn.r_w_bias)
            r_s_list.append(b.rel_attn.r_s_bias)
            seg_embed_list.append(b.rel_attn.seg_embed)
    else:
        r_r_list = [model.r_r_bias]
        r_w_list = [model.r_w_bias]
        r_s_list = [model.r_s_bias]
        seg_embed_list = [model.seg_embed]
    tf_to_pt_map.update({
        'model/transformer/r_r_bias': r_r_list,
        'model/transformer/r_w_bias': r_w_list,
        'model/transformer/r_s_bias': r_s_list,
        'model/transformer/seg_embed': seg_embed_list})
    return tf_to_pt_map

def load_tf_weights_in_xlnet(model, config, tf_path):
    """ Load tf checkpoints in a pytorch model
    """
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions.")
        raise
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = build_tf_xlnet_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info("Importing {}".format(name))
        if name not in tf_weights:
            logger.info("{} not in tf pre-trained weights, skipping".format(name))
            continue
        array = tf_weights[name]
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
        # which are not required for using the pretrained model
        if 'kernel' in name and ('ff' in name or 'summary' in name or 'logit' in name):
            logger.info("Transposing")
            array = np.transpose(array)
        if isinstance(pointer, list):
            # Here we will split the TF weights
            assert len(pointer) == array.shape[0]
            for i, p_i in enumerate(pointer):
                arr_i = array[i, ...]
                try:
                    assert p_i.shape == arr_i.shape
                except AssertionError as e:
                    e.args += (p_i.shape, arr_i.shape)
                    raise
                logger.info("Initialize PyTorch weight {} for layer {}".format(name, i))
                p_i.data = torch.from_numpy(arr_i)
        else:
            try:
                assert pointer.shape == array.shape
            except AssertionError as e:
                e.args += (pointer.shape, array.shape)
                raise
            logger.info("Initialize PyTorch weight {}".format(name))
            pointer.data = torch.from_numpy(array)
        tf_weights.pop(name, None)
        tf_weights.pop(name + '/Adam', None)
        tf_weights.pop(name + '/Adam_1', None)

    logger.info("Weights not copied to PyTorch model: {}".format(', '.join(tf_weights.keys())))
    return model
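
# A minimal usage sketch (hypothetical file paths): converting an original TF checkpoint
# into this PyTorch model. ``XLNetConfig`` accepts a path to a JSON config file as its
# first argument (see the ``isinstance(..., str)`` branch in ``XLNetConfig.__init__``):
#
#     config = XLNetConfig("path/to/xlnet_config.json")
#     model = XLNetLMHeadModel(config)
#     model = load_tf_weights_in_xlnet(model, config, "path/to/xlnet_model.ckpt")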


def gelu(x):
    """ Implementation of the gelu activation function.
        XLNet is using OpenAI GPT's gelu (not exactly the same as BERT)
        Also see https://arxiv.org/abs/1606.08415
    """
    cdf = 0.5 * (1.0 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    return x * cdf


def swish(x):
    return x * torch.sigmoid(x)


ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}


class XLNetConfig(PretrainedConfig):
    """Configuration class to store the configuration of a ``XLNetModel``.

    Args:
        vocab_size_or_config_json_file: Vocabulary size of ``inputs_ids`` in ``XLNetModel``.
        d_model: Size of the encoder layers and the pooler layer.
        n_layer: Number of hidden layers in the Transformer encoder.
        n_head: Number of attention heads for each attention layer in
            the Transformer encoder.
        d_inner: The size of the "intermediate" (i.e., feed-forward)
            layer in the Transformer encoder.
        ff_activation: The non-linear activation function (function or string) in the
            encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
        untie_r: untie relative position biases
        attn_type: 'bi' for XLNet, 'uni' for Transformer-XL

        dropout: The dropout probability for all fully connected
            layers in the embeddings, encoder, and pooler.
        initializer_range: The stddev of the truncated_normal_initializer for
            initializing all weight matrices.
        layer_norm_eps: The epsilon used by LayerNorm.

        mem_len: int, the number of tokens to cache.
        reuse_len: int, the number of tokens in the current batch to be cached
            and reused in the future.
        bi_data: bool, whether to use bidirectional input pipeline.
            Usually set to True during pretraining and False during finetuning.
        clamp_len: int, clamp all relative distances larger than clamp_len.
            -1 means no clamping.
        same_length: bool, whether to use the same attention length for each token.
        finetuning_task: name of the GLUE task on which the model was fine-tuned, if any
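
    Example, a minimal sketch::

        config = XLNetConfig(vocab_size_or_config_json_file=32000)
        assert config.hidden_size == config.d_model  # BERT-style alias properties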
    """
    pretrained_config_archive_map = XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP

    def __init__(self,
                 vocab_size_or_config_json_file=32000,
                 d_model=1024,
                 n_layer=24,
                 n_head=16,
                 d_inner=4096,
                 ff_activation="gelu",
                 untie_r=True,
                 attn_type="bi",

                 initializer_range=0.02,
                 layer_norm_eps=1e-12,

                 dropout=0.1,
                 mem_len=None,
                 reuse_len=None,
                 bi_data=False,
                 clamp_len=-1,
                 same_length=False,

                 finetuning_task=None,
                 num_labels=2,
                 summary_type='last',
                 summary_use_proj=True,
                 summary_activation='tanh',
                 summary_last_dropout=0.1,
                 start_n_top=5,
                 end_n_top=5,
                 **kwargs):
        """Constructs XLNetConfig.
        """
        super(XLNetConfig, self).__init__(**kwargs)

        if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
                        and isinstance(vocab_size_or_config_json_file, unicode)):
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.n_token = vocab_size_or_config_json_file
            self.d_model = d_model
            self.n_layer = n_layer
            self.n_head = n_head
            assert d_model % n_head == 0
            self.d_head = d_model // n_head
            self.ff_activation = ff_activation
            self.d_inner = d_inner
            self.untie_r = untie_r
            self.attn_type = attn_type

            self.initializer_range = initializer_range
            self.layer_norm_eps = layer_norm_eps

            self.dropout = dropout
            self.mem_len = mem_len
            self.reuse_len = reuse_len
            self.bi_data = bi_data
            self.clamp_len = clamp_len
            self.same_length = same_length

            self.finetuning_task = finetuning_task
            self.num_labels = num_labels
            self.summary_type = summary_type
            self.summary_use_proj = summary_use_proj
            self.summary_activation = summary_activation
            self.summary_last_dropout = summary_last_dropout
            self.start_n_top = start_n_top
            self.end_n_top = end_n_top
        else:
            raise ValueError("First argument must be either a vocabulary size (int)"
                             "or the path to a pretrained model config file (str)")

    @property
    def max_position_embeddings(self):
        return -1

    @property
    def vocab_size(self):
        return self.n_token

    @vocab_size.setter
    def vocab_size(self, value):
        self.n_token = value

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.n_head

    @property
    def num_hidden_layers(self):
        return self.n_layer


try:
    from apex.normalization.fused_layer_norm import FusedLayerNorm as XLNetLayerNorm
except ImportError:
    logger.info("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .")
    class XLNetLayerNorm(nn.Module):
        def __init__(self, d_model, eps=1e-12):
            """Construct a layernorm module in the TF style (epsilon inside the square root).
            """
            super(XLNetLayerNorm, self).__init__()
            self.weight = nn.Parameter(torch.ones(d_model))
            self.bias = nn.Parameter(torch.zeros(d_model))
            self.variance_epsilon = eps

        def forward(self, x):
            u = x.mean(-1, keepdim=True)
            s = (x - u).pow(2).mean(-1, keepdim=True)
            x = (x - u) / torch.sqrt(s + self.variance_epsilon)
            return self.weight * x + self.bias

class XLNetRelativeAttention(nn.Module):
    def __init__(self, config):
        super(XLNetRelativeAttention, self).__init__()
        self.output_attentions = config.output_attentions

        if config.d_model % config.n_head != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.d_model, config.n_head))

        self.n_head = config.n_head
        self.d_head = config.d_head
        self.d_model = config.d_model
        self.scale = 1 / (config.d_head ** 0.5)

        self.q = nn.Parameter(torch.Tensor(config.d_model, self.n_head, self.d_head))
        self.k = nn.Parameter(torch.Tensor(config.d_model, self.n_head, self.d_head))
        self.v = nn.Parameter(torch.Tensor(config.d_model, self.n_head, self.d_head))
        self.o = nn.Parameter(torch.Tensor(config.d_model, self.n_head, self.d_head))
        self.r = nn.Parameter(torch.Tensor(config.d_model, self.n_head, self.d_head))

        self.r_r_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
        self.r_s_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
        self.r_w_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
        self.seg_embed = nn.Parameter(torch.Tensor(2, self.n_head, self.d_head))

        self.layer_norm = XLNetLayerNorm(config.d_model, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.dropout)

    def prune_heads(self, heads):
        raise NotImplementedError

    @staticmethod
    def rel_shift(x, klen=-1):
        """perform relative shift to form the relative attention score."""
        x_size = x.shape

        x = x.reshape(x_size[1], x_size[0], x_size[2], x_size[3])
        x = x[1:, ...]
        x = x.reshape(x_size[0], x_size[1] - 1, x_size[2], x_size[3])
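        # the reshape / drop-first-row / reshape trick above is the standard Transformer-XL
        # relative shift: it realigns the scores so that entry (i, j) corresponds to the
        # relative distance between query position i and key position j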
        # x = x[:, 0:klen, :, :]
        x = torch.index_select(x, 1, torch.arange(klen, device=x.device, dtype=torch.long))

        return x

    def rel_attn_core(self, q_head, k_head_h, v_head_h, k_head_r, seg_mat=None, attn_mask=None, head_mask=None):
        """Core relative positional attention operations."""

        # content based attention score
        ac = torch.einsum('ibnd,jbnd->ijbn', q_head + self.r_w_bias, k_head_h)

        # position based attention score
        bd = torch.einsum('ibnd,jbnd->ijbn', q_head + self.r_r_bias, k_head_r)
        bd = self.rel_shift(bd, klen=ac.shape[1])

        # segment based attention score
        if seg_mat is None:
            ef = 0
        else:
            ef = torch.einsum('ibnd,snd->ibns', q_head + self.r_s_bias, self.seg_embed)
            ef = torch.einsum('ijbs,ibns->ijbn', seg_mat, ef)

        # merge attention scores and perform masking
        attn_score = (ac + bd + ef) * self.scale
        if attn_mask is not None:
            # attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask
            attn_score = attn_score - 1e30 * attn_mask

        # attention probability
        attn_prob = F.softmax(attn_score, dim=1)
        attn_prob = self.dropout(attn_prob)

        # Mask heads if we want to
        if head_mask is not None:
            attn_prob = attn_prob * head_mask

        # attention output
        attn_vec = torch.einsum('ijbn,jbnd->ibnd', attn_prob, v_head_h)

        if self.output_attentions:
            return attn_vec, attn_prob

        return attn_vec

    def post_attention(self, h, attn_vec, residual=True):
        """Post-attention processing."""
        # post-attention projection (back to `d_model`)
        attn_out = torch.einsum('ibnd,hnd->ibh', attn_vec, self.o)

        attn_out = self.dropout(attn_out)
        if residual:
            attn_out = attn_out + h
        output = self.layer_norm(attn_out)

        return output

    def forward(self, h, g,
                      attn_mask_h, attn_mask_g,
                      r, seg_mat,
                      mems=None, target_mapping=None, head_mask=None):
        if g is not None:
            ###### Two-stream attention with relative positional encoding.
            # content based attention score
            if mems is not None and mems.dim() > 1:
                cat = torch.cat([mems, h], dim=0)
            else:
                cat = h

            # content-based key head
            k_head_h = torch.einsum('ibh,hnd->ibnd', cat, self.k)

            # content-based value head
            v_head_h = torch.einsum('ibh,hnd->ibnd', cat, self.v)

            # position-based key head
            k_head_r = torch.einsum('ibh,hnd->ibnd', r, self.r)

            ##### h-stream
            # content-stream query head
            q_head_h = torch.einsum('ibh,hnd->ibnd', h, self.q)

            # core attention ops
            attn_vec_h = self.rel_attn_core(
                q_head_h, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_h, head_mask=head_mask)

            if self.output_attentions:
                attn_vec_h, attn_prob_h = attn_vec_h

            # post processing
            output_h = self.post_attention(h, attn_vec_h)

            ##### g-stream
            # query-stream query head
            q_head_g = torch.einsum('ibh,hnd->ibnd', g, self.q)

            # core attention ops
            if target_mapping is not None:
                q_head_g = torch.einsum('mbnd,mlb->lbnd', q_head_g, target_mapping)
                attn_vec_g = self.rel_attn_core(
                    q_head_g, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_g, head_mask=head_mask)

                if self.output_attentions:
                    attn_vec_g, attn_prob_g = attn_vec_g

                attn_vec_g = torch.einsum('lbnd,mlb->mbnd', attn_vec_g, target_mapping)
            else:
                attn_vec_g = self.rel_attn_core(
                    q_head_g, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_g, head_mask=head_mask)

                if self.output_attentions:
                    attn_vec_g, attn_prob_g = attn_vec_g

            # post processing
            output_g = self.post_attention(g, attn_vec_g)

            if self.output_attentions:
                attn_prob = attn_prob_h, attn_prob_g

        else:
            ###### Multi-head attention with relative positional encoding
            if mems is not None and mems.dim() > 1:
                cat = torch.cat([mems, h], dim=0)
            else:
                cat = h

            # content heads
            q_head_h = torch.einsum('ibh,hnd->ibnd', h, self.q)
            k_head_h = torch.einsum('ibh,hnd->ibnd', cat, self.k)
            v_head_h = torch.einsum('ibh,hnd->ibnd', cat, self.v)

            # positional heads
            k_head_r = torch.einsum('ibh,hnd->ibnd', r, self.r)

            # core attention ops
            attn_vec = self.rel_attn_core(
                q_head_h, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_h, head_mask=head_mask)

            if self.output_attentions:
                attn_vec, attn_prob = attn_vec

            # post processing
            output_h = self.post_attention(h, attn_vec)
            output_g = None

        outputs = (output_h, output_g)
        if self.output_attentions:
            outputs = outputs + (attn_prob,)
        return outputs

class XLNetFeedForward(nn.Module):
    def __init__(self, config):
        super(XLNetFeedForward, self).__init__()
        self.layer_norm = XLNetLayerNorm(config.d_model, eps=config.layer_norm_eps)
        self.layer_1 = nn.Linear(config.d_model, config.d_inner)
        self.layer_2 = nn.Linear(config.d_inner, config.d_model)
        self.dropout = nn.Dropout(config.dropout)
        if isinstance(config.ff_activation, str) or \
                (sys.version_info[0] == 2 and isinstance(config.ff_activation, unicode)):
            self.activation_function = ACT2FN[config.ff_activation]
        else:
            self.activation_function = config.ff_activation

    def forward(self, inp):
        output = inp
        output = self.layer_1(output)
        output = self.activation_function(output)
        output = self.dropout(output)
        output = self.layer_2(output)
        output = self.dropout(output)
        output = self.layer_norm(output + inp)
        return output

class XLNetLayer(nn.Module):
    def __init__(self, config):
        super(XLNetLayer, self).__init__()
        self.rel_attn = XLNetRelativeAttention(config)
        self.ff = XLNetFeedForward(config)
        self.dropout = nn.Dropout(config.dropout)

    def forward(self, output_h, output_g,
                attn_mask_h, attn_mask_g,
                r, seg_mat, mems=None, target_mapping=None, head_mask=None):
        outputs = self.rel_attn(output_h, output_g, attn_mask_h, attn_mask_g,
                                r, seg_mat, mems=mems, target_mapping=target_mapping,
                                head_mask=head_mask)
        output_h, output_g = outputs[:2]

        if output_g is not None:
            output_g = self.ff(output_g)
        output_h = self.ff(output_h)

        outputs = (output_h, output_g) + outputs[2:]  # Add the attentions again if they are there
        return outputs


class XLNetPreTrainedModel(PreTrainedModel):
    """ An abstract class to handle weights initialization and
        a simple interface for dowloading and loading pretrained models.
    """
    config_class = XLNetConfig
    pretrained_model_archive_map = XLNET_PRETRAINED_MODEL_ARCHIVE_MAP
    load_tf_weights = load_tf_weights_in_xlnet
    base_model_prefix = "transformer"

    def __init__(self, *inputs, **kwargs):
        super(XLNetPreTrainedModel, self).__init__(*inputs, **kwargs)

    def init_weights(self, module):
        """ Initialize the weights.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if isinstance(module, nn.Linear) and module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, XLNetLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, XLNetRelativeAttention):
            for param in [module.q, module.k, module.v, module.o, module.r,
                          module.r_r_bias, module.r_s_bias, module.r_w_bias,
                          module.seg_embed]:
                param.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, XLNetModel):
            module.mask_emb.data.normal_(mean=0.0, std=self.config.initializer_range)


class XLNetModel(XLNetPreTrainedModel):
    """XLNet model ("XLNet: Generalized Autoregressive Pretraining for Language Understanding").


    Args:
        `config`: a XLNetConfig class instance with the configuration to build a new model
        `output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
        `keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
            This can be used to compute head importance metrics. Default: False


    Example::

        config = modeling.XLNetConfig(vocab_size_or_config_json_file=32000, d_model=768,
            n_layer=12, n_head=12, d_inner=3072)

        model = modeling.XLNetModel(config=config)

    """
    def __init__(self, config):
        super(XLNetModel, self).__init__(config)
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states

        self.mem_len = config.mem_len
        self.reuse_len = config.reuse_len
        self.d_model = config.d_model
        self.same_length = config.same_length
        self.attn_type = config.attn_type
        self.bi_data = config.bi_data
        self.clamp_len = config.clamp_len
        self.n_layer = config.n_layer

        self.word_embedding = nn.Embedding(config.n_token, config.d_model)
        self.mask_emb = nn.Parameter(torch.Tensor(1, 1, config.d_model))
        self.layer = nn.ModuleList([XLNetLayer(config) for _ in range(config.n_layer)])
        self.dropout = nn.Dropout(config.dropout)

        self.apply(self.init_weights)

    def _resize_token_embeddings(self, new_num_tokens):
        self.word_embedding = self._get_resized_embeddings(self.word_embedding, new_num_tokens)
        return self.word_embedding

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    def create_mask(self, qlen, mlen):
        """
        Creates causal attention mask. Float mask where 1.0 indicates masked, 0.0 indicates not-masked.

        Args:
            qlen: int, the length of the current query segment.
            mlen: int, the length of the cached memory.

        ::

                  same_length=False:      same_length=True:
                  <mlen > <  qlen >       <mlen > <  qlen >
               ^ [0 0 0 0 0 1 1 1 1]     [0 0 0 0 0 1 1 1 1]
                 [0 0 0 0 0 0 1 1 1]     [1 0 0 0 0 0 1 1 1]
            qlen [0 0 0 0 0 0 0 1 1]     [1 1 0 0 0 0 0 1 1]
                 [0 0 0 0 0 0 0 0 1]     [1 1 1 0 0 0 0 0 1]
               v [0 0 0 0 0 0 0 0 0]     [1 1 1 1 0 0 0 0 0]
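
        Example, a minimal sketch (``qlen=3``, ``mlen=2``, ``same_length=False``)::

            mask = model.create_mask(3, 2)
            # mask -> [[0., 0., 0., 1., 1.],
            #          [0., 0., 0., 0., 1.],
            #          [0., 0., 0., 0., 0.]]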

        """
        attn_mask = torch.ones([qlen, qlen])
        mask_up = torch.triu(attn_mask, diagonal=1)
        attn_mask_pad = torch.zeros([qlen, mlen])
        ret = torch.cat([attn_mask_pad, mask_up], dim=1)
        if self.same_length:
            mask_lo = torch.tril(attn_mask, diagonal=-1)
            ret = torch.cat([ret[:, :qlen] + mask_lo, ret[:, qlen:]], dim=1)

        ret = ret.to(next(self.parameters()))
        return ret

    def cache_mem(self, curr_out, prev_mem):
        """cache hidden states into memory."""
        if self.mem_len is None or self.mem_len == 0:
            return None
        else:
            if self.reuse_len is not None and self.reuse_len > 0:
                curr_out = curr_out[:self.reuse_len]

            if prev_mem is None:
                new_mem = curr_out[-self.mem_len:]
            else:
                new_mem = torch.cat([prev_mem, curr_out], dim=0)[-self.mem_len:]

        return new_mem.detach()

    @staticmethod
    def positional_embedding(pos_seq, inv_freq, bsz=None):
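        # outer product of positions and inverse frequencies: sinusoid_inp[i, d] = pos_seq[i] * inv_freq[d]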
        sinusoid_inp = torch.einsum('i,d->id', pos_seq, inv_freq)
        pos_emb = torch.cat([torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)], dim=-1)
        pos_emb = pos_emb[:, None, :]

        if bsz is not None:
            pos_emb = pos_emb.expand(-1, bsz, -1)

        return pos_emb

    def relative_positional_encoding(self, qlen, klen, bsz=None):
        """create relative positional encoding."""
        freq_seq = torch.arange(0, self.d_model, 2.0, dtype=torch.float)
        inv_freq = 1 / torch.pow(10000, (freq_seq / self.d_model))

        if self.attn_type == 'bi':
            # beg, end = klen - 1, -qlen
            beg, end = klen, -qlen
        elif self.attn_type == 'uni':
            # beg, end = klen - 1, -1
            beg, end = klen, -1
        else:
            raise ValueError('Unknown `attn_type` {}.'.format(self.attn_type))

        if self.bi_data:
            fwd_pos_seq = torch.arange(beg, end, -1.0, dtype=torch.float)
            bwd_pos_seq = torch.arange(-beg, -end, 1.0, dtype=torch.float)

            if self.clamp_len > 0:
                fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
                bwd_pos_seq = bwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)

            if bsz is not None:
                fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz//2)
                bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq, bsz//2)
            else:
                fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq)
                bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq)

            pos_emb = torch.cat([fwd_pos_emb, bwd_pos_emb], dim=1)
        else:
            fwd_pos_seq = torch.arange(beg, end, -1.0)
            if self.clamp_len > 0:
                fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
            pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz)

        pos_emb = pos_emb.to(next(self.parameters()))
        return pos_emb

    def forward(self, input_ids, token_type_ids=None, input_mask=None, attention_mask=None,
                mems=None, perm_mask=None, target_mapping=None, head_mask=None):
        """
        Performs a model forward pass. As with any ``nn.Module``, it can be invoked by calling the model instance directly.

        Args:
            input_ids: int32 Tensor in shape [bsz, len], the input token IDs.
            token_type_ids: int32 Tensor in shape [bsz, len], the input segment IDs.
            input_mask: [optional] float32 Tensor in shape [bsz, len], the input mask.
                0 for real tokens and 1 for padding.
            attention_mask: [optional] float32 Tensor, SAME FUNCTION as `input_mask`
                but with 1 for real tokens and 0 for padding.
                Added for easy compatibility with the BERT model (which uses this convention).
                You may only use one of `input_mask` and `attention_mask`.
            mems: [optional] a list of float32 Tensors in shape [mem_len, bsz, d_model], memory
                from previous batches. The length of the list equals n_layer.
                If None, no memory is used.
            perm_mask: [optional] float32 Tensor in shape [bsz, len, len].
                If perm_mask[k, i, j] = 0, i attends to j in batch k;
                if perm_mask[k, i, j] = 1, i does not attend to j in batch k.
                If None, each position attends to all the others.
            target_mapping: [optional] float32 Tensor in shape [bsz, num_predict, len].
                If target_mapping[k, i, j] = 1, the i-th prediction in batch k is
                on the j-th token.
                Only used during pretraining for partial prediction.
                Set to None during finetuning.
            head_mask: [optional] float32 Tensor in shape [num_heads] or [num_hidden_layers, num_heads].
                1.0 indicates the head is kept, 0.0 indicates the head is masked out.


        Returns:
            A tuple ``(output, new_mems)`` where ``output`` is the last hidden state of shape
            [batch_size, sequence_length, d_model] and ``new_mems`` is the updated memory.
            When the model is configured to output them, the tuple also contains
            ``hidden_states`` and ``attentions``.

        Example::

            # Inputs already converted into SentencePiece token ids
            input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
            input_mask = torch.FloatTensor([[0., 0., 0.], [0., 0., 1.]])  # 1.0 marks padding
            token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

            output, new_mems = model(input_ids, token_type_ids=token_type_ids, input_mask=input_mask)

        """
        # the original code for XLNet uses shapes [len, bsz] with the batch dimension at the end
        # but we want a unified interface in the library with the batch size on the first dimension
        # so we move here the first dimension (batch) to the end
        input_ids = input_ids.transpose(0, 1).contiguous()
        token_type_ids = token_type_ids.transpose(0, 1).contiguous() if token_type_ids is not None else None
        input_mask = input_mask.transpose(0, 1).contiguous() if input_mask is not None else None
        attention_mask = attention_mask.transpose(0, 1).contiguous() if attention_mask is not None else None
        perm_mask = perm_mask.permute(1, 2, 0).contiguous() if perm_mask is not None else None
        target_mapping = target_mapping.permute(1, 2, 0).contiguous() if target_mapping is not None else None

        qlen, bsz = input_ids.shape[0], input_ids.shape[1]
        mlen = mems[0].shape[0] if mems is not None else 0
        klen = mlen + qlen

        dtype_float = next(self.parameters()).dtype
        device = next(self.parameters()).device

        ##### Attention mask
        # causal attention mask
        if self.attn_type == 'uni':
            attn_mask = self.create_mask(qlen, mlen)
            attn_mask = attn_mask[:, :, None, None]
        elif self.attn_type == 'bi':
            attn_mask = None
        else:
            raise ValueError('Unsupported attention type: {}'.format(self.attn_type))

        # data mask: input mask & perm mask
        assert input_mask is None or attention_mask is None, (
            "You can only use one of input_mask (uses 1 for padding) "
            "or attention_mask (uses 0 for padding, added for compatibility with BERT). Please choose one.")
        if input_mask is None and attention_mask is not None:
            input_mask = 1.0 - attention_mask
        if input_mask is not None and perm_mask is not None:
            data_mask = input_mask[None] + perm_mask
        elif input_mask is not None and perm_mask is None:
            data_mask = input_mask[None]
        elif input_mask is None and perm_mask is not None:
            data_mask = perm_mask
        else:
            data_mask = None

        if data_mask is not None:
            # all mems can be attended to
            mems_mask = torch.zeros([data_mask.shape[0], mlen, bsz]).to(data_mask)
            data_mask = torch.cat([mems_mask, data_mask], dim=1)
            if attn_mask is None:
                attn_mask = data_mask[:, :, :, None]
            else:
                attn_mask += data_mask[:, :, :, None]

        if attn_mask is not None:
            attn_mask = (attn_mask > 0).to(dtype_float)

        if attn_mask is not None:
            non_tgt_mask = -torch.eye(qlen).to(attn_mask)
            non_tgt_mask = torch.cat([torch.zeros([qlen, mlen]).to(attn_mask), non_tgt_mask], dim=-1)
            non_tgt_mask = ((attn_mask + non_tgt_mask[:, :, None, None]) > 0).to(attn_mask)
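            # subtracting the identity re-enables the diagonal: in the content stream each
            # token may always attend to its own position, even when perm_mask masks it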
        else:
            non_tgt_mask = None

        ##### Word embeddings and prepare h & g hidden states
        word_emb_k = self.word_embedding(input_ids)
        output_h = self.dropout(word_emb_k)
        if target_mapping is not None:
            word_emb_q = self.mask_emb.expand(target_mapping.shape[0], bsz, -1)
        # else:  # We removed the inp_q input which was same as target mapping
        #     inp_q_ext = inp_q[:, :, None]
        #     word_emb_q = inp_q_ext * self.mask_emb + (1 - inp_q_ext) * word_emb_k
            output_g = self.dropout(word_emb_q)
        else:
            output_g = None

        ##### Segment embedding
        if token_type_ids is not None:
            # Convert `token_type_ids` to one-hot `seg_mat`
            mem_pad = torch.zeros([mlen, bsz], dtype=torch.long, device=device)
            cat_ids = torch.cat([mem_pad, token_type_ids], dim=0)

            # `1` indicates not in the same segment [qlen x klen x bsz]
            seg_mat = (token_type_ids[:, None] != cat_ids[None, :]).long()
            seg_mat = F.one_hot(seg_mat, num_classes=2).to(dtype_float)
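            # only *relative* segment information is used: seg_mat encodes whether two
            # positions share a segment, not the segment ids themselves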
        else:
            seg_mat = None

        ##### Positional encoding
        pos_emb = self.relative_positional_encoding(qlen, klen, bsz=bsz)
        pos_emb = self.dropout(pos_emb)

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer)
        # and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head]
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0).unsqueeze(0)
                head_mask = head_mask.expand(self.n_layer, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(1).unsqueeze(1)
            head_mask = head_mask.to(dtype=next(self.parameters()).dtype)  # switch to float if needed + fp16 compatibility
        else:
            head_mask = [None] * self.n_layer

        new_mems = ()
        if mems is None:
            mems = [None] * len(self.layer)

        attentions = []
        hidden_states = []
        for i, layer_module in enumerate(self.layer):
            # cache new mems
            new_mems = new_mems + (self.cache_mem(output_h, mems[i]),)
            if self.output_hidden_states:
                hidden_states.append((output_h, output_g) if output_g is not None else output_h)

            outputs = layer_module(output_h, output_g, attn_mask_h=non_tgt_mask, attn_mask_g=attn_mask,
                                   r=pos_emb, seg_mat=seg_mat, mems=mems[i], target_mapping=target_mapping,
                                   head_mask=head_mask[i])
            output_h, output_g = outputs[:2]
            if self.output_attentions:
                attentions.append(outputs[2])

        # Add last hidden state
        if self.output_hidden_states:
            hidden_states.append((output_h, output_g) if output_g is not None else output_h)

        output = self.dropout(output_g if output_g is not None else output_h)

        # Prepare outputs, we transpose back here to shape [bsz, len, hidden_dim] (cf. beginning of forward() method)
        outputs = (output.permute(1, 0, 2).contiguous(), new_mems)
        if self.output_hidden_states:
            if output_g is not None:
                hidden_states = tuple(h.permute(1, 0, 2).contiguous() for hs in hidden_states for h in hs)
            else:
                hidden_states = tuple(hs.permute(1, 0, 2).contiguous() for hs in hidden_states)
            outputs = outputs + (hidden_states,)
        if self.output_attentions:
            attentions = tuple(t.permute(2, 3, 0, 1).contiguous() for t in attentions)
            outputs = outputs + (attentions,)

        return outputs  # outputs, new_mems, (hidden_states), (attentions)


class XLNetLMHeadModel(XLNetPreTrainedModel):
    """XLNet model ("XLNet: Generalized Autoregressive Pretraining for Language Understanding").

    Args:
        `config`: a XLNetConfig class instance with the configuration to build a new model
        `output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
        `keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
            This can be used to compute head importance metrics. Default: False

    Example::

        config = modeling.XLNetConfig(vocab_size_or_config_json_file=32000, d_model=768,
            n_layer=12, n_head=12, d_inner=3072)

        model = modeling.XLNetLMHeadModel(config=config)

    """
    def __init__(self, config):
        super(XLNetLMHeadModel, self).__init__(config)
        self.attn_type = config.attn_type
        self.same_length = config.same_length

        self.transformer = XLNetModel(config)
        self.lm_loss = nn.Linear(config.d_model, config.n_token, bias=True)

        self.apply(self.init_weights)
        self.tie_weights()

    def tie_weights(self):
        """ Make sure we are sharing the embeddings
        """
        self._tie_or_clone_weights(self.lm_loss, self.transformer.word_embedding)

    def forward(self, input_ids, token_type_ids=None, input_mask=None, attention_mask=None,
                mems=None, perm_mask=None, target_mapping=None,
                labels=None, head_mask=None):
        """
        Performs a forward pass through the XLNet model with a language modeling head on top.

        Args:
            input_ids: int32 Tensor in shape [bsz, len], the input token IDs.
            token_type_ids: int32 Tensor in shape [bsz, len], the input segment IDs.
            input_mask: [optional] float32 Tensor in shape [bsz, len], the input mask.
                0 for real tokens and 1 for padding.
            attention_mask: [optional] float32 Tensor, SAME FUNCTION as `input_mask`
                but with 1 for real tokens and 0 for padding.
                Added for easy compatibility with the BERT model (which uses this convention).
                You may only use one of `input_mask` and `attention_mask`.
            mems: [optional] a list of float32 Tensors in shape [mem_len, bsz, d_model], memory
                from previous batches. The length of the list equals n_layer.
                If None, no memory is used.
            perm_mask: [optional] float32 Tensor in shape [bsz, len, len].
                If perm_mask[k, i, j] = 0, i attends to j in batch k;
                if perm_mask[k, i, j] = 1, i does not attend to j in batch k.
                If None, each position attends to all the others.
            target_mapping: [optional] float32 Tensor in shape [bsz, num_predict, len].
                If target_mapping[k, i, j] = 1, the i-th prediction in batch k is
                on the j-th token.
                Only used during pretraining for partial prediction.
                Set to None during finetuning.
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042

        Returns:
            A ``tuple(loss, logits, mems, hidden_states, attentions)`` where optional entries are only
            present when requested:

                ``loss``: the language modeling ``CrossEntropy`` loss, only returned when ``labels`` is provided,

                ``logits``: a ``torch.FloatTensor`` of size [batch_size, sequence_length, n_token] with the token \
                logits (or [batch_size, num_predict, n_token] when ``target_mapping`` is provided),

                ``new_mems``: list (num layers) of updated mem states at the entry of each layer \
                each mem state is a ``torch.FloatTensor`` of size [self.config.mem_len, batch_size, self.config.d_model] \
                Note that the first two dimensions are transposed in ``mems`` with regards to ``input_ids`` and ``labels``,

                ``hidden_states`` and ``attentions``: only returned when the corresponding configuration options are set.

        Example::

            # Already been converted into WordPiece token ids
            input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
            input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
            token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

            outputs = model(input_ids, token_type_ids=token_type_ids, input_mask=input_mask)
            logits = outputs[0]
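
            A minimal sketch, reusing the ``input_ids`` above, of how hypothetical ``perm_mask`` and
            ``target_mapping`` tensors can be built to predict only the last token of each sequence::

                bsz, seq_len = input_ids.shape
                # perm_mask[k, i, j] = 1.0 means position i may not attend to position j:
                # hide the last token from every position
                perm_mask = torch.zeros((bsz, seq_len, seq_len))
                perm_mask[:, :, -1] = 1.0
                # one prediction per sequence, located on the last token
                target_mapping = torch.zeros((bsz, 1, seq_len))
                target_mapping[:, 0, -1] = 1.0
                next_token_logits = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping)[0]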
        """
        transformer_outputs = self.transformer(input_ids, token_type_ids, input_mask, attention_mask,
                                               mems, perm_mask, target_mapping, head_mask)

        logits = self.lm_loss(transformer_outputs[0])

        outputs = (logits,) + transformer_outputs[1:]  # Keep mems, hidden states and attentions if they are present

        if labels is not None:
            # Flatten the tokens
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            loss = loss_fct(logits.view(-1, logits.size(-1)),
                            labels.view(-1))
            outputs = (loss,) + outputs

        return outputs  # return (loss), logits, (mems), (hidden states), (attentions)


class XLNetForSequenceClassification(XLNetPreTrainedModel):
    """XLNet model ("XLNet: Generalized Autoregressive Pretraining for Language Understanding").

    Args:
        `config`: an XLNetConfig class instance with the configuration to build a new model
        `output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
        `keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
            This can be used to compute head importance metrics. Default: False
        `summary_type`: str, "last", "first", "mean", or "attn". The method
            to pool the input to get a vector representation. Default: last

    Example::

        # Already been converted into WordPiece token ids
        input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
        input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
        token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

        outputs = model(input_ids, token_type_ids=token_type_ids, input_mask=input_mask)
        logits = outputs[0]
    """
    def __init__(self, config):
        super(XLNetForSequenceClassification, self).__init__(config)
        self.num_labels = config.num_labels

        self.transformer = XLNetModel(config)
        self.sequence_summary = SequenceSummary(config)
        self.logits_proj = nn.Linear(config.d_model, config.num_labels)

        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, input_mask=None, attention_mask=None,
                mems=None, perm_mask=None, target_mapping=None,
                labels=None, head_mask=None):
        """
        Performs a model forward pass. **Can be called by calling the class directly, once it has been instantiated.**

        Args:
            input_ids: int32 Tensor in shape [bsz, len], the input token IDs.
            token_type_ids: int32 Tensor in shape [bsz, len], the input segment IDs.
            input_mask: float32 Tensor in shape [bsz, len], the input mask.
                0 for real tokens and 1 for padding.
            attention_mask: [optional] float32 Tensor, SAME FUNCTION as `input_mask`
                but with 1 for real tokens and 0 for padding.
                Added for easy compatibility with the BERT model (which uses this negative masking).
                You can only use one of `input_mask` and `attention_mask`.
            mems: a list of float32 Tensors in shape [mem_len, bsz, d_model], memory
                from previous batches. The length of the list equals n_layer.
                If None, no memory is used.
            perm_mask: float32 Tensor in shape [bsz, len, len].
                If perm_mask[k, i, j] = 0, i attends to j in batch k;
                if perm_mask[k, i, j] = 1, i does not attend to j in batch k.
                If None, each position attends to all the others.
            target_mapping: float32 Tensor in shape [bsz, num_predict, len].
                If target_mapping[k, i, j] = 1, the i-th prediction in batch k is
                on the j-th token.
                Only used during pre-training for partial prediction.
                Set to None during fine-tuning.
            labels: [optional] Tensor in shape [bsz] with the targets.
                When ``config.num_labels == 1``, a regression (``MSELoss``) loss is computed;
                otherwise, a ``CrossEntropyLoss`` over ``config.num_labels`` classes is used.
            head_mask: an optional ``torch.Tensor`` of shape [num_heads] or [num_layers, num_heads] with values between 0 and 1.
                It's a mask to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is masked.


        Returns:
            A ``tuple(logits_or_loss, mems)``

                ``logits_or_loss``: if ``labels`` is ``None``, ``logits_or_loss`` corresponds to the classification \
                logits with shape [batch_size, num_labels]. If it is not ``None``, it corresponds to the \
                ``CrossEntropy`` (or ``MSE`` when ``num_labels == 1``) loss with the targets.

                ``new_mems``: list (num layers) of updated mem states at the entry of each layer \
                each mem state is a ``torch.FloatTensor`` of size [self.config.mem_len, batch_size, self.config.d_model] \
                Note that the first two dimensions are transposed in ``mems`` with regards to ``input_ids`` and ``labels``

        Example::

            # Already been converted into WordPiece token ids
            input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
            input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
            token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

            outputs = model(input_ids, token_type_ids=token_type_ids, input_mask=input_mask)
            logits = outputs[0]
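
            A minimal sketch, assuming a two-class model and the hypothetical ``labels`` below, of
            computing the classification loss::

                labels = torch.LongTensor([1, 0])
                outputs = model(input_ids, token_type_ids=token_type_ids,
                                input_mask=input_mask, labels=labels)
                loss, logits = outputs[0], outputs[1]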
        """
        transformer_outputs = self.transformer(input_ids, token_type_ids, input_mask, attention_mask,
                                               mems, perm_mask, target_mapping, head_mask)
        output = transformer_outputs[0]

        output = self.sequence_summary(output)
        logits = self.logits_proj(output)

        outputs = (logits,) + transformer_outputs[1:]  # Keep mems, hidden states and attentions if they are present

        if labels is not None:
            if self.num_labels == 1:
                #  We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            outputs = (loss,) + outputs

        return outputs  # return (loss), logits, (mems), (hidden states), (attentions)


class XLNetForQuestionAnswering(XLNetPreTrainedModel):
    """
    XLNet model for Question Answering (span extraction).

    This module is composed of the XLNet model with start, end and answer-class prediction heads
    on top of the sequence output, which compute ``start_logits`` and ``end_logits`` for span extraction.

    Args:
        `config`: an XLNetConfig class instance with the configuration to build a new model
        `output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
        `keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
            This can be used to compute head importance metrics. Default: False

    Example::

        config = XLNetConfig(vocab_size_or_config_json_file=32000, d_model=768,
            n_layer=12, n_head=12, d_inner=3072)

        model = XLNetForQuestionAnswering(config)
    """
    def __init__(self, config):
        super(XLNetForQuestionAnswering, self).__init__(config)
        self.start_n_top = config.start_n_top
        self.end_n_top = config.end_n_top

        self.transformer = XLNetModel(config)
        self.start_logits = PoolerStartLogits(config)
        self.end_logits = PoolerEndLogits(config)
        self.answer_class = PoolerAnswerClass(config)

        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, input_mask=None, attention_mask=None,
                mems=None, perm_mask=None, target_mapping=None,
                start_positions=None, end_positions=None, cls_index=None, is_impossible=None, p_mask=None,
                head_mask=None):
        """
        Performs a model forward pass. **Can be called by calling the class directly, once it has been instantiated.**

        Args:
            `input_ids`: a ``torch.LongTensor`` of shape [batch_size, sequence_length]
                with the word token indices in the vocabulary (see the tokens pre-processing logic in the scripts
                `run_bert_extract_features.py`, `run_bert_classifier.py` and `run_bert_squad.py`)
            `token_type_ids`: an optional ``torch.LongTensor`` of shape [batch_size, sequence_length] with the token
                types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
                a `sentence B` token (see XLNet paper for more details).
            `attention_mask`: [optional] float32 Tensor, SAME FUNCTION as `input_mask`
                but with 1 for real tokens and 0 for padding.
                Added for easy compatibility with the BERT model (which uses this negative masking).
                You can only use one of ``input_mask`` and ``attention_mask``.
            `input_mask`: an optional ``torch.LongTensor`` of shape [batch_size, sequence_length] with indices
                selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
                input sequence length in the current batch. It's the mask that we typically use for attention when
                a batch has varying length sentences.
            `start_positions`: position of the first token for the labeled span: ``torch.LongTensor`` of shape [batch_size].
                Positions are clamped to the length of the sequence and position outside of the sequence are not taken
                into account for computing the loss.
            `end_positions`: position of the last token for the labeled span: ``torch.LongTensor`` of shape [batch_size].
                Positions are clamped to the length of the sequence and position outside of the sequence are not taken
                into account for computing the loss.
            `cls_index`: [optional] ``torch.LongTensor`` of shape [batch_size] with the position of the CLS token,
                used to compute the answerability (``is_impossible``) logits.
            `is_impossible`: [optional] ``torch.FloatTensor`` of shape [batch_size], labeling whether the question
                has no answer (1.0) or has one (0.0); used together with ``cls_index`` for the answerability loss.
            `p_mask`: [optional] ``torch.FloatTensor`` of shape [batch_size, sequence_length] with 1.0 for tokens
                that cannot be part of the answer (e.g. query tokens and special symbols) and 0.0 otherwise.
            `head_mask`: an optional ``torch.Tensor`` of shape [num_heads] or [num_layers, num_heads] with values between 0 and 1.
                It's a mask to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is masked.

        Returns:
            if ``start_positions`` and ``end_positions`` are not ``None``, outputs a tuple starting with \
            ``total_loss``, the sum of the ``CrossEntropy`` losses for the start and end token positions \
            (plus half of a binary answerability loss when ``cls_index`` and ``is_impossible`` are given), \
            followed by ``start_logits``, ``end_logits`` and, optionally, ``cls_logits``.

            if ``start_positions`` or ``end_positions`` is ``None``, outputs a tuple of ``start_top_log_probs``, \
            ``start_top_index``, ``end_top_log_probs``, ``end_top_index`` and ``cls_logits``, obtained by beam \
            search over the ``config.start_n_top`` best start positions and ``config.end_n_top`` best end positions.

        Example::

            # Already been converted into WordPiece token ids
            input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
            input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
            token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

            outputs = model(input_ids, token_type_ids=token_type_ids, input_mask=input_mask)
            start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits = outputs[:5]
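
            A minimal sketch, with hypothetical span labels, of a training step that also returns the loss::

                start_positions = torch.LongTensor([0, 1])
                end_positions = torch.LongTensor([2, 2])
                outputs = model(input_ids, token_type_ids=token_type_ids, input_mask=input_mask,
                                start_positions=start_positions, end_positions=end_positions)
                total_loss = outputs[0]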
        """
        transformer_outputs = self.transformer(input_ids, token_type_ids, input_mask, attention_mask,
                                               mems, perm_mask, target_mapping, head_mask)
        hidden_states = transformer_outputs[0]
        start_logits = self.start_logits(hidden_states, p_mask)

        outputs = transformer_outputs[1:]  # Keep mems, hidden states and attentions if they are present

        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, let's remove the dimension added by batch splitting
            for x in (start_positions, end_positions, cls_index, is_impossible):
                if x is not None and x.dim() > 1:
                    x.squeeze_(-1)

            # during training, compute the end logits based on the ground truth of the start position
            end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask)

            loss_fct = CrossEntropyLoss()
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

            if cls_index is not None and is_impossible is not None:
                # Predict answerability from the representation of CLS and START
                cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index)
                loss_fct_cls = nn.BCEWithLogitsLoss()
                cls_loss = loss_fct_cls(cls_logits, is_impossible)

                # note(zhiliny): by default multiply the loss by 0.5 so that the scale is
                # comparable to start_loss and end_loss
                total_loss += cls_loss * 0.5
                outputs = (total_loss, start_logits, end_logits, cls_logits) + outputs
            else:
                outputs = (total_loss, start_logits, end_logits) + outputs

        else:
            # during inference, compute the end logits based on beam search
            bsz, slen, hsz = hidden_states.size()
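            # note: despite the "log_probs" names below, F.softmax returns probabilities, not log-probabilities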
            start_log_probs = F.softmax(start_logits, dim=-1) # shape (bsz, slen)

            start_top_log_probs, start_top_index = torch.topk(start_log_probs, self.start_n_top, dim=-1) # shape (bsz, start_n_top)
            start_top_index = start_top_index.unsqueeze(-1).expand(-1, -1, hsz) # shape (bsz, start_n_top, hsz)
            start_states = torch.gather(hidden_states, -2, start_top_index) # shape (bsz, start_n_top, hsz)
            start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1) # shape (bsz, slen, start_n_top, hsz)

            hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(start_states) # shape (bsz, slen, start_n_top, hsz)
            p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None
            end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask)
            end_log_probs = F.softmax(end_logits, dim=1) # shape (bsz, slen, start_n_top)

            end_top_log_probs, end_top_index = torch.topk(end_log_probs, self.end_n_top, dim=1) # shape (bsz, end_n_top, start_n_top)
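            # flatten the beam dimensions into start_n_top * end_n_top candidate (start, end) pairs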
            end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top)
            end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top)

            start_states = torch.einsum("blh,bl->bh", hidden_states, start_log_probs)
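            # this probability-weighted ("expected") start representation feeds the answerability head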
            cls_logits = self.answer_class(hidden_states, start_states=start_states, cls_index=cls_index)

            outputs = (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits) + outputs

        # return start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits, mems, (hidden states), (attentions)
        # or (if labels are provided) total_loss, start_logits, end_logits, (cls_logits), mems, (hidden states), (attentions)
        return outputs