# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch XLNet model.
"""
from __future__ import absolute_import, division, print_function, unicode_literals

import copy
import json
import logging
import math
import os
import sys
from io import open

import torch
from torch import nn
from torch.nn import functional as F
from torch.nn import CrossEntropyLoss

from .file_utils import cached_path, WEIGHTS_NAME, CONFIG_NAME

logger = logging.getLogger(__name__)

PRETRAINED_MODEL_ARCHIVE_MAP = {
    'xlnet-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-large-cased-pytorch_model.bin",
}
PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'xlnet-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-large-cased-config.json",
}
XLNET_CONFIG_NAME = 'xlnet_config.json'
TF_WEIGHTS_NAME = 'model.ckpt'


def build_tf_xlnet_to_pytorch_map(model, config, tf_weights=None):
    """ A map of modules from TF to PyTorch.
        A map is used to keep the PyTorch model as close as possible
        to the original model.
    """

    tf_to_pt_map = {}

    if hasattr(model, 'transformer'):
        if hasattr(model, 'lm_loss'):
            # We will load also the output bias
            tf_to_pt_map['model/lm_loss/bias'] = model.lm_loss.bias
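        # NB: 'sequnece_summary' (sic) below is spelled this way on purpose -- it has to
        # match the variable names stored in the original TF checkpoint.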
        elif hasattr(model, 'sequence_summary') and 'model/sequnece_summary/summary/kernel' in tf_weights:
            # We will load also the sequence summary
            tf_to_pt_map['model/sequnece_summary/summary/kernel'] = model.sequence_summary.summary.weight
            tf_to_pt_map['model/sequnece_summary/summary/bias'] = model.sequence_summary.summary.bias
        elif hasattr(model, 'proj_loss') and any('model/regression' in name for name in tf_weights.keys()):
            raise NotImplementedError
        # Now load the rest of the transformer
        model = model.transformer

    # Embeddings and output
    tf_to_pt_map.update({'model/transformer/word_embedding/lookup_table': model.word_embedding.weight,
                    'model/transformer/mask_emb/mask_emb': model.mask_emb})

    # Transformer blocks
    for i, b in enumerate(model.layer):
        layer_str = "model/transformer/layer_%d/" % i
        tf_to_pt_map.update({
            layer_str + "rel_attn/LayerNorm/gamma": b.rel_attn.layer_norm.weight,
            layer_str + "rel_attn/LayerNorm/beta": b.rel_attn.layer_norm.bias,
            layer_str + "rel_attn/o/kernel": b.rel_attn.o,
            layer_str + "rel_attn/q/kernel": b.rel_attn.q,
            layer_str + "rel_attn/k/kernel": b.rel_attn.k,
            layer_str + "rel_attn/r/kernel": b.rel_attn.r,
            layer_str + "rel_attn/v/kernel": b.rel_attn.v,
            layer_str + "ff/LayerNorm/gamma": b.ff.layer_norm.weight,
            layer_str + "ff/LayerNorm/beta": b.ff.layer_norm.bias,
            layer_str + "ff/layer_1/kernel": b.ff.layer_1.weight,
            layer_str + "ff/layer_1/bias": b.ff.layer_1.bias,
            layer_str + "ff/layer_2/kernel": b.ff.layer_2.weight,
            layer_str + "ff/layer_2/bias": b.ff.layer_2.bias,
        })

    # Relative positioning biases
    if config.untie_r:
        r_r_list = []
        r_w_list = []
        r_s_list = []
        seg_embed_list = []
        for b in model.layer:
            r_r_list.append(b.rel_attn.r_r_bias)
            r_w_list.append(b.rel_attn.r_w_bias)
            r_s_list.append(b.rel_attn.r_s_bias)
            seg_embed_list.append(b.rel_attn.seg_embed)
    else:
        r_r_list = [model.r_r_bias]
        r_w_list = [model.r_w_bias]
        r_s_list = [model.r_s_bias]
        seg_embed_list = [model.seg_embed]
    tf_to_pt_map.update({
        'model/transformer/r_r_bias': r_r_list,
        'model/transformer/r_w_bias': r_w_list,
        'model/transformer/r_s_bias': r_s_list,
        'model/transformer/seg_embed': seg_embed_list})
    return tf_to_pt_map

def load_tf_weights_in_xlnet(model, config, tf_path):
    """ Load tf checkpoints in a pytorch model
    """
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        print("Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions.")
        raise
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    tf_weights = {}
    for name, shape in init_vars:
        print("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = build_tf_xlnet_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        print("Importing {}".format(name))
        if name not in tf_weights:
            print("{} not in tf pre-trained weights, skipping".format(name))
            continue
        array = tf_weights[name]
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
        # which are not required for using the pretrained model
        if 'kernel' in name and 'ff' in name:
            print("Transposing")
            array = np.transpose(array)
        if isinstance(pointer, list):
            # Here we will split the TF weights
            assert len(pointer) == array.shape[0]
            for i, p_i in enumerate(pointer):
                arr_i = array[i, ...]
                try:
                    assert p_i.shape == arr_i.shape
                except AssertionError as e:
                    e.args += (p_i.shape, arr_i.shape)
                    raise
                print("Initialize PyTorch weight {} for layer {}".format(name, i))
                p_i.data = torch.from_numpy(arr_i)
        else:
            try:
                assert pointer.shape == array.shape
            except AssertionError as e:
                e.args += (pointer.shape, array.shape)
                raise
            print("Initialize PyTorch weight {}".format(name))
            pointer.data = torch.from_numpy(array)
        tf_weights.pop(name, None)
        tf_weights.pop(name + '/Adam', None)
        tf_weights.pop(name + '/Adam_1', None)

    print("Weights not copied to PyTorch model: {}".format(', '.join(tf_weights.keys())))
    return model

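# Example usage (a minimal sketch, assuming a locally downloaded TF XLNet checkpoint;
# the file names below are hypothetical):
#
#   config = XLNetConfig.from_json_file("xlnet_config.json")
#   model = XLNetLMHeadModel(config)
#   model = load_tf_weights_in_xlnet(model, config, "model.ckpt")
#   torch.save(model.state_dict(), "pytorch_model.bin")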

def gelu(x):
    """ Implementation of the gelu activation function.
        XLNet uses OpenAI GPT's gelu (not exactly the same as BERT's)
        Also see https://arxiv.org/abs/1606.08415
    """
    cdf = 0.5 * (1.0 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    return x * cdf


def swish(x):
    return x * torch.sigmoid(x)


ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}

class XLNetBaseConfig(object):
    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `XLNetBaseConfig` from a Python dictionary of parameters."""
        config = cls(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `XLNetBaseConfig` from a json file of parameters."""
        with open(json_file, "r", encoding='utf-8') as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def update(self, other):
        dict_b = other.to_dict()
        for key, value in dict_b.items():
            self.__dict__[key] = value

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"

    def to_json_file(self, json_file_path):
        """ Save this instance to a json file."""
        with open(json_file_path, "w", encoding='utf-8') as writer:
            writer.write(self.to_json_string())


class XLNetConfig(XLNetBaseConfig):
    """Configuration class to store the configuration of a `XLNetModel`.
    """
    def __init__(self,
                 vocab_size_or_config_json_file,
                 d_model=1024,
                 n_layer=24,
                 n_head=16,
                 d_inner=4096,
                 ff_activation="gelu",
                 untie_r=True,
                 attn_type="bi",

                 max_position_embeddings=512,
                 initializer_range=0.02,
                 layer_norm_eps=1e-12,

                 dropout=0.1,
                 dropatt=0.1,
                 init="normal",
                 init_range=0.1,
                 init_std=0.02,
                 mem_len=None,
                 reuse_len=None,
                 bi_data=False,
                 clamp_len=-1,
                 same_length=False):
        """Constructs XLNetConfig.

        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `XLNetModel`.
            d_model: Size of the encoder layers and the pooler layer.
            n_layer: Number of hidden layers in the Transformer encoder.
            n_head: Number of attention heads for each attention layer in
                the Transformer encoder.
            d_inner: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            ff_activation: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            untie_r: untie relative position biases
            attn_type: 'bi' for XLNet, 'uni' for Transformer-XL

            dropout: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            dropatt: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model might
                ever be used with. Typically set this to something large just in case
                (e.g., 512 or 1024 or 2048).
            initializer_range: The stddev of the truncated_normal_initializer for
                initializing all weight matrices.
            layer_norm_eps: The epsilon used by LayerNorm.

            dropout: float, dropout rate.
            dropatt: float, dropout rate on attention probabilities.
            init: str, the initialization scheme, either "normal" or "uniform".
            init_range: float, initialize the parameters with a uniform distribution
                in [-init_range, init_range]. Only effective when init="uniform".
            init_std: float, initialize the parameters with a normal distribution
                with mean 0 and stddev init_std. Only effective when init="normal".
            mem_len: int, the number of tokens to cache.
            reuse_len: int, the number of tokens in the current batch to be cached
                and reused in the future.
            bi_data: bool, whether to use bidirectional input pipeline.
                Usually set to True during pretraining and False during finetuning.
            clamp_len: int, clamp all relative distances larger than clamp_len.
                -1 means no clamping.
            same_length: bool, whether to use the same attention length for each token.
        """
        if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
                        and isinstance(vocab_size_or_config_json_file, unicode)):
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.n_token = vocab_size_or_config_json_file
            self.d_model = d_model
            self.n_layer = n_layer
            self.n_head = n_head
            assert d_model % n_head == 0
            self.d_head = d_model // n_head
            self.ff_activation = ff_activation
            self.d_inner = d_inner
            self.untie_r = untie_r
            self.attn_type = attn_type

            self.max_position_embeddings = max_position_embeddings
            self.initializer_range = initializer_range
            self.layer_norm_eps = layer_norm_eps

            self.init = init
            self.init_range = init_range
            self.init_std = init_std
            self.dropout = dropout
            self.dropatt = dropatt
            self.mem_len = mem_len
            self.reuse_len = reuse_len
            self.bi_data = bi_data
            self.clamp_len = clamp_len
            self.same_length = same_length
        else:
            raise ValueError("First argument must be either a vocabulary size (int)"
                             "or the path to a pretrained model config file (str)")

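# Example usage of XLNetConfig (a minimal sketch; the vocabulary size is illustrative):
#
#   config = XLNetConfig(vocab_size_or_config_json_file=32000)
#   config.to_json_file("xlnet_config.json")
#   same_config = XLNetConfig.from_json_file("xlnet_config.json")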

class XLNetRunConfig(XLNetBaseConfig):
    """XLNetRunConfig contains hyperparameters that could be different
    between pretraining and finetuning.
    These hyperparameters can also be changed from run to run.
    We store them separately from XLNetConfig for flexibility.
    """
    def __init__(self, 
                 dropout=0.1,
                 dropatt=0.1,
                 init="normal",
                 init_range=0.1,
                 init_std=0.02,
                 mem_len=None,
                 reuse_len=None,
                 bi_data=False,
                 clamp_len=-1,
                 same_length=False):
        """
        Args:
        dropout: float, dropout rate.
        dropatt: float, dropout rate on attention probabilities.
        init: str, the initialization scheme, either "normal" or "uniform".
        init_range: float, initialize the parameters with a uniform distribution
            in [-init_range, init_range]. Only effective when init="uniform".
        init_std: float, initialize the parameters with a normal distribution
            with mean 0 and stddev init_std. Only effective when init="normal".
        mem_len: int, the number of tokens to cache.
        reuse_len: int, the number of tokens in the current batch to be cached
            and reused in the future.
        bi_data: bool, whether to use bidirectional input pipeline.
            Usually set to True during pretraining and False during finetuning.
        clamp_len: int, clamp all relative distances larger than clamp_len.
            -1 means no clamping.
        same_length: bool, whether to use the same attention length for each token.
        """

        self.init = init
        self.init_range = init_range
        self.init_std = init_std
        self.dropout = dropout
        self.dropatt = dropatt
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

try:
    from apex.normalization.fused_layer_norm import FusedLayerNorm as XLNetLayerNorm
except ImportError:
    logger.info("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .")
    class XLNetLayerNorm(nn.Module):
        def __init__(self, d_model, eps=1e-12):
            """Construct a layernorm module in the TF style (epsilon inside the square root).
            """
            super(XLNetLayerNorm, self).__init__()
            self.weight = nn.Parameter(torch.ones(d_model))
            self.bias = nn.Parameter(torch.zeros(d_model))
            self.variance_epsilon = eps

        def forward(self, x):
            u = x.mean(-1, keepdim=True)
            s = (x - u).pow(2).mean(-1, keepdim=True)
            x = (x - u) / torch.sqrt(s + self.variance_epsilon)
            return self.weight * x + self.bias

class XLNetRelativeAttention(nn.Module):
    def __init__(self, config, output_attentions=False, keep_multihead_output=False):
        super(XLNetRelativeAttention, self).__init__()
        self.output_attentions = output_attentions
        if config.d_model % config.n_head != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.d_model, config.n_head))
        self.output_attentions = output_attentions
        self.keep_multihead_output = keep_multihead_output
        self.multihead_output = None

        self.n_head = config.n_head
        self.d_head = config.d_head
        self.d_model = config.d_model
        self.scale = 1 / (config.d_head ** 0.5)

        self.q = nn.Parameter(torch.Tensor(config.d_model, self.n_head, self.d_head))
        self.k = nn.Parameter(torch.Tensor(config.d_model, self.n_head, self.d_head))
        self.v = nn.Parameter(torch.Tensor(config.d_model, self.n_head, self.d_head))
        self.o = nn.Parameter(torch.Tensor(config.d_model, self.n_head, self.d_head))
        self.r = nn.Parameter(torch.Tensor(config.d_model, self.n_head, self.d_head))

        self.r_r_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
        self.r_s_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
        self.r_w_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
        self.seg_embed = nn.Parameter(torch.Tensor(2, self.n_head, self.d_head))

        self.layer_norm = XLNetLayerNorm(config.d_model, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.dropout)

    def prune_heads(self, heads):
        raise NotImplementedError

    @staticmethod
    def rel_shift(x, klen=-1):
        """perform relative shift to form the relative attention score."""
        x_size = x.shape
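        # A sketch of the trick: flattening with the first two axes swapped, dropping the
        # first row and reshaping back shifts each query row so that column j lines up with
        # the score for its relative distance to key j; the final slice keeps klen keys.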

        x = x.reshape(x_size[1], x_size[0], x_size[2], x_size[3])
        x = x[1:, ...]
        x = x.reshape(x_size[0], x_size[1] - 1, x_size[2], x_size[3])
        x = x[:, 0:klen, :, :]

        return x

    def rel_attn_core(self, q_head, k_head_h, v_head_h, k_head_r, seg_mat=None, attn_mask=None):
        """Core relative positional attention operations."""

        # content based attention score
        ac = torch.einsum('ibnd,jbnd->ijbn', q_head + self.r_w_bias, k_head_h)

        # position based attention score
        bd = torch.einsum('ibnd,jbnd->ijbn', q_head + self.r_r_bias, k_head_r)
        bd = self.rel_shift(bd, klen=ac.shape[1])

        # segment based attention score
        if seg_mat is None:
            ef = 0
        else:
            ef = torch.einsum('ibnd,snd->ibns', q_head + self.r_s_bias, self.seg_embed)
            ef = torch.einsum('ijbs,ibns->ijbn', seg_mat, ef)

        # merge attention scores and perform masking
        attn_score = (ac + bd + ef) * self.scale
        if attn_mask is not None:
            # attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask
            attn_score = attn_score - 1e30 * attn_mask

        # attention probability
        attn_prob = F.softmax(attn_score, dim=1)
        attn_prob = self.dropout(attn_prob)

        # attention output
        attn_vec = torch.einsum('ijbn,jbnd->ibnd', attn_prob, v_head_h)

        return attn_vec

    def post_attention(self, h, attn_vec, residual=True):
        """Post-attention processing."""
        # post-attention projection (back to `d_model`)
        attn_out = torch.einsum('ibnd,hnd->ibh', attn_vec, self.o)

        attn_out = self.dropout(attn_out)
        if residual:
            attn_out = attn_out + h
        output = self.layer_norm(attn_out)

        return output

    def forward(self, h, g,
                      attn_mask_h, attn_mask_g,
                      r, seg_mat,
                      mems=None, target_mapping=None, head_mask=None):
        if g is not None:
            ###### Two-stream attention with relative positional encoding.
            # content based attention score
            if mems is not None and mems.dim() > 1:
                cat = torch.cat([mems, h], dim=0)
            else:
                cat = h

            # content-based key head
            k_head_h = torch.einsum('ibh,hnd->ibnd', cat, self.k)

            # content-based value head
            v_head_h = torch.einsum('ibh,hnd->ibnd', cat, self.v)

            # position-based key head
            k_head_r = torch.einsum('ibh,hnd->ibnd', r, self.r)

            ##### h-stream
            # content-stream query head
            q_head_h = torch.einsum('ibh,hnd->ibnd', h, self.q)

            # core attention ops
            attn_vec_h = self.rel_attn_core(
                q_head_h, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_h)

            # post processing
            output_h = self.post_attention(h, attn_vec_h)

            ##### g-stream
            # query-stream query head
            q_head_g = torch.einsum('ibh,hnd->ibnd', g, self.q)

            # core attention ops
            if target_mapping is not None:
                q_head_g = torch.einsum('mbnd,mlb->lbnd', q_head_g, target_mapping)
                attn_vec_g = self.rel_attn_core(
                    q_head_g, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_g)
                attn_vec_g = torch.einsum('lbnd,mlb->mbnd', attn_vec_g, target_mapping)
            else:
                attn_vec_g = self.rel_attn_core(
                    q_head_g, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_g)

            # post processing
            output_g = self.post_attention(g, attn_vec_g)
        else:
            ###### Multi-head attention with relative positional encoding
            if mems is not None and mems.dim() > 1:
                cat = torch.cat([mems, h], dim=0)
            else:
                cat = h

            # content heads
            q_head_h = torch.einsum('ibh,hnd->ibnd', h, self.q)
            k_head_h = torch.einsum('ibh,hnd->ibnd', cat, self.k)
            v_head_h = torch.einsum('ibh,hnd->ibnd', cat, self.v)

            # positional heads
            k_head_r = torch.einsum('ibh,hnd->ibnd', r, self.r)

            # core attention ops
            attn_vec = self.rel_attn_core(
                q_head_h, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_h)

            # post processing
            output_h = self.post_attention(h, attn_vec)
            output_g = None


        # Mask heads if we want to
        # if head_mask is not None:
        #     attention_probs = attention_probs * head_mask

        # context_layer = torch.matmul(attention_probs, value_layer)
        # if self.keep_multihead_output:
        #     self.multihead_output = context_layer
        #     self.multihead_output.retain_grad()

        # context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        # new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        # context_layer = context_layer.view(*new_context_layer_shape)

        # if self.output_attentions:
        #     attentions, self_output = self_output
        # if self.output_attentions:
        #     return attentions, attention_output
        return output_h, output_g

class XLNetFeedForward(nn.Module):
    def __init__(self, config):
        super(XLNetFeedForward, self).__init__()
        self.layer_norm = XLNetLayerNorm(config.d_model, eps=config.layer_norm_eps)
        self.layer_1 = nn.Linear(config.d_model, config.d_inner)
        self.layer_2 = nn.Linear(config.d_inner, config.d_model)
        self.dropout = nn.Dropout(config.dropout)
        if isinstance(config.ff_activation, str) or (sys.version_info[0] == 2 and isinstance(config.ff_activation, unicode)):
            self.activation_function = ACT2FN[config.ff_activation]
        else:
            self.activation_function = config.ff_activation

    def forward(self, inp):
        output = inp
        output = self.layer_1(output)
        output = self.activation_function(output)
        output = self.dropout(output)
        output = self.layer_2(output)
        output = self.dropout(output)
        output = self.layer_norm(output + inp)
        return output

class XLNetLayer(nn.Module):
    def __init__(self, config, output_attentions=False, keep_multihead_output=False):
        super(XLNetLayer, self).__init__()
        self.output_attentions = output_attentions
        self.rel_attn = XLNetRelativeAttention(config, output_attentions=output_attentions,
                                               keep_multihead_output=keep_multihead_output)
        self.ff = XLNetFeedForward(config)
        self.dropout = nn.Dropout(config.dropout)

    def forward(self, output_h, output_g,
                attn_mask_h, attn_mask_g,
                r, seg_mat,
                mems=None, target_mapping=None, head_mask=None):
        output_h, output_g = self.rel_attn(output_h, output_g,
                                           attn_mask_h, attn_mask_g,
                                           r, seg_mat,
                                           mems=mems, target_mapping=target_mapping, head_mask=head_mask)
        if output_g is not None:
            output_g = self.ff(output_g)
        output_h = self.ff(output_h)

        # if self.output_attentions:
        #     return attentions, layer_output
        return output_h, output_g

class XLNetPreTrainedModel(nn.Module):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """
    def __init__(self, config, *inputs, **kwargs):
        super(XLNetPreTrainedModel, self).__init__()
        if not isinstance(config, XLNetBaseConfig):
            raise ValueError(
                "Parameter config in `{}(config)` should be an instance of class `XLNetBaseConfig`. "
                "To create a model from a Google pretrained model use "
                "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
                    self.__class__.__name__, self.__class__.__name__
                ))
        self.config = config

    def init_weights(self, module):
        """ Initialize the weights.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, XLNetLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, XLNetRelativeAttention):
            for param in [module.q, module.k, module.v, module.o, module.r,
                          module.r_r_bias, module.r_s_bias, module.r_w_bias,
                          module.seg_embed]:
                param.data.normal_(mean=0.0, std=self.config.initializer_range)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
        """
        Instantiate a XLNetPreTrainedModel from a pre-trained model file or a pytorch state dict.
        Download and cache the pre-trained model file if needed.

        Params:
            pretrained_model_name_or_path: either:
                - a str with the name of a pre-trained model to load selected in the list of:
                    . `xlnet-large-cased`
                - a path or url to a pretrained model archive containing:
                    . `config.json` a configuration file for the model
                    . `pytorch_model.bin` a PyTorch dump of a XLNetForPreTraining instance
                - a path or url to a pretrained model archive containing:
                    . `xlnet_config.json` a configuration file for the model
                    . `model.ckpt` a TensorFlow checkpoint
            from_tf: should we load the weights from a locally saved TensorFlow checkpoint
            cache_dir: an optional path to a folder in which the pre-trained models will be cached.
            state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models
            *inputs, **kwargs: additional input for the specific XLNet class
                (ex: num_labels for XLNetForSequenceClassification)
        """
        state_dict = kwargs.get('state_dict', None)
        kwargs.pop('state_dict', None)
        cache_dir = kwargs.get('cache_dir', None)
        kwargs.pop('cache_dir', None)
        from_tf = kwargs.get('from_tf', False)
        kwargs.pop('from_tf', None)

        if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
            archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
            config_file = PRETRAINED_CONFIG_ARCHIVE_MAP[pretrained_model_name_or_path]
        else:
            if from_tf:
                # Directly load from a TensorFlow checkpoint
                archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME)
                config_file = os.path.join(pretrained_model_name_or_path, XLNET_CONFIG_NAME)
            else:
                archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
                config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        # redirect to the cache, if necessary
        try:
            resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
        except EnvironmentError:
            if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
                logger.error(
                    "Couldn't reach server at '{}' to download pretrained weights.".format(
                        archive_file))
            else:
                logger.error(
                    "Model name '{}' was not found in model name list ({}). "
                    "We assumed '{}' was a path or url but couldn't find any file "
                    "associated to this path or url.".format(
                        pretrained_model_name_or_path,
                        ', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()),
                        archive_file))
            return None
        try:
            resolved_config_file = cached_path(config_file, cache_dir=cache_dir)
        except EnvironmentError:
            if pretrained_model_name_or_path in PRETRAINED_CONFIG_ARCHIVE_MAP:
                logger.error(
                    "Couldn't reach server at '{}' to download pretrained model configuration file.".format(
                        config_file))
            else:
                logger.error(
                    "Model name '{}' was not found in model name list ({}). "
                    "We assumed '{}' was a path or url but couldn't find any file "
                    "associated to this path or url.".format(
                        pretrained_model_name_or_path,
                        ', '.join(PRETRAINED_CONFIG_ARCHIVE_MAP.keys()),
                        config_file))
            return None
        if resolved_archive_file == archive_file and resolved_config_file == config_file:
            logger.info("loading weights file {}".format(archive_file))
            logger.info("loading configuration file {}".format(config_file))
        else:
            logger.info("loading weights file {} from cache at {}".format(
                archive_file, resolved_archive_file))
            logger.info("loading configuration file {} from cache at {}".format(
                config_file, resolved_config_file))

        # Load config
        config = XLNetConfig.from_json_file(resolved_config_file)

        # Update config with kwargs if needed
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)

        logger.info("Model config {}".format(config))

        # Instantiate model.
        model = cls(config, *inputs, **kwargs)
        if state_dict is None and not from_tf:
            state_dict = torch.load(resolved_archive_file, map_location='cpu')
        if from_tf:
            # Directly load from a TensorFlow checkpoint
            return load_tf_weights_in_xlnet(model, config, resolved_archive_file)

        # Load from a PyTorch state_dict
        missing_keys = []
        unexpected_keys = []
        error_msgs = []
        # copy state_dict so _load_from_state_dict can modify it
        metadata = getattr(state_dict, '_metadata', None)
        state_dict = state_dict.copy()
        if metadata is not None:
            state_dict._metadata = metadata

        def load(module, prefix=''):
            local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
            module._load_from_state_dict(
                state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
            for name, child in module._modules.items():
                if child is not None:
                    load(child, prefix + name + '.')
        start_prefix = ''
        if not hasattr(model, 'transformer') and any(s.startswith('transformer') for s in state_dict.keys()):
            start_prefix = 'transformer.'
        load(model, prefix=start_prefix)
        if len(missing_keys) > 0:
            logger.info("Weights of {} not initialized from pretrained model: {}".format(
                model.__class__.__name__, missing_keys))
        if len(unexpected_keys) > 0:
            logger.info("Weights from pretrained model not used in {}: {}".format(
                model.__class__.__name__, unexpected_keys))
        if len(error_msgs) > 0:
            raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
                               model.__class__.__name__, "\n\t".join(error_msgs)))
        if isinstance(model, XLNetLMHeadModel):
            model.tie_weights()  # make sure word embedding weights are still tied
        return model

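# Example usage (a minimal sketch; downloads the weights listed in
# PRETRAINED_MODEL_ARCHIVE_MAP and caches them locally):
#
#   model = XLNetLMHeadModel.from_pretrained('xlnet-large-cased')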

class XLNetModel(XLNetPreTrainedModel):
    def __init__(self, config, output_attentions=False, keep_multihead_output=False):
        super(XLNetModel, self).__init__(config)
        self.output_attentions = output_attentions
        self.mem_len = config.mem_len
        self.reuse_len = config.reuse_len
        self.d_model = config.d_model
        self.same_length = config.same_length
        self.attn_type = config.attn_type
        self.bi_data = config.bi_data
        self.clamp_len = config.clamp_len

        self.word_embedding = nn.Embedding(config.n_token, config.d_model)
        self.mask_emb = nn.Parameter(torch.Tensor(1, 1, config.d_model))
        layer = XLNetLayer(config, output_attentions=output_attentions,
                                   keep_multihead_output=keep_multihead_output)
        self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.n_layer)])
        self.dropout = nn.Dropout(config.dropout)

    def prune_heads(self, heads_to_prune):
        """ Prunes heads of the model.
            heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        """
        for layer, heads in heads_to_prune.items():
            self.layer[layer].rel_attn.prune_heads(heads)

    def get_multihead_outputs(self):
        """ Gather all multi-head outputs.
            Return: list (layers) of multihead module outputs with gradients
        """
        return [layer.rel_attn.multihead_output for layer in self.layer]

    def create_mask(self, qlen, mlen):
        """ create causal attention mask.
            Float mask where 1.0 indicates masked and 0.0 indicates not masked.
             same_length=False:      same_length=True:
             <mlen > <  qlen >       <mlen > <  qlen >
          ^ [0 0 0 0 0 1 1 1 1]     [0 0 0 0 0 1 1 1 1]
            [0 0 0 0 0 0 1 1 1]     [1 0 0 0 0 0 1 1 1]
       qlen [0 0 0 0 0 0 0 1 1]     [1 1 0 0 0 0 0 1 1]
            [0 0 0 0 0 0 0 0 1]     [1 1 1 0 0 0 0 0 1]
          v [0 0 0 0 0 0 0 0 0]     [1 1 1 1 0 0 0 0 0]
        """
        attn_mask = torch.ones([qlen, qlen])
        mask_up = torch.triu(attn_mask, diagonal=1)
        attn_mask_pad = torch.zeros([qlen, mlen])
        ret = torch.cat([attn_mask_pad, mask_up], dim=1)
        if self.same_length:
            mask_lo = torch.tril(attn_mask, diagonal=-1)
            ret = torch.cat([ret[:, :qlen] + mask_lo, ret[:, qlen:]], dim=1)

        ret = ret.to(next(self.parameters()))
        return ret

    def cache_mem(self, curr_out, prev_mem):
        """cache hidden states into memory."""
        if self.mem_len is None or self.mem_len == 0:
            return None
        else:
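            # Transformer-XL style recurrence: keep (a detached copy of) the last
            # `mem_len` hidden states, optionally truncating the current segment
            # to `reuse_len` before appending it to the previous memory.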
            if self.reuse_len is not None and self.reuse_len > 0:
                curr_out = curr_out[:self.reuse_len]

            if prev_mem is None:
                new_mem = curr_out[-self.mem_len:]
            else:
                new_mem = torch.cat([prev_mem, curr_out], dim=0)[-self.mem_len:]

        return new_mem.detach()

    @staticmethod
    def positional_embedding(pos_seq, inv_freq, bsz=None):
        sinusoid_inp = torch.einsum('i,d->id', pos_seq, inv_freq)
        pos_emb = torch.cat([torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)], dim=-1)
        pos_emb = pos_emb[:, None, :]

        if bsz is not None:
            pos_emb = pos_emb.expand(-1, bsz, -1)

        return pos_emb

    def relative_positional_encoding(self, qlen, klen, bsz=None):
        """create relative positional encoding."""
        freq_seq = torch.arange(0, self.d_model, 2.0, dtype=torch.float)
        inv_freq = 1 / (10000 ** (freq_seq / self.d_model))

        if self.attn_type == 'bi':
            # beg, end = klen - 1, -qlen
            beg, end = klen, -qlen
        elif self.attn_type == 'uni':
            # beg, end = klen - 1, -1
            beg, end = klen, -1
        else:
            raise ValueError('Unknown `attn_type` {}.'.format(self.attn_type))

        if self.bi_data:
            fwd_pos_seq = torch.arange(beg, end, -1.0, dtype=torch.float)
            bwd_pos_seq = torch.arange(-beg, -end, 1.0, dtype=torch.float)

            if self.clamp_len > 0:
                fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
                bwd_pos_seq = bwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)

            if bsz is not None:
                fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz//2)
                bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq, bsz//2)
            else:
                fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq)
                bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq)

            pos_emb = torch.cat([fwd_pos_emb, bwd_pos_emb], dim=1)
        else:
            fwd_pos_seq = torch.arange(beg, end, -1.0)
            if self.clamp_len > 0:
                fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
            pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz)

        pos_emb = pos_emb.to(next(self.parameters()))
        return pos_emb

    def forward(self, inp_k, token_type_ids=None, input_mask=None, attention_mask=None,
                mems=None, perm_mask=None, target_mapping=None, inp_q=None,
                output_all_encoded_layers=True, head_mask=None):
        """
        Args:
            inp_k: int32 Tensor in shape [bsz, len], the input token IDs.
            token_type_ids: int32 Tensor in shape [bsz, len], the input segment IDs.
            input_mask: [optional] float32 Tensor in shape [bsz, len], the input mask.
                0 for real tokens and 1 for padding.
            attention_mask: [optional] float32 Tensor, SAME FUNCTION as `input_mask`
                but with 1 for real tokens and 0 for padding.
                Added for easy compatibility with the BERT model (which uses this negative masking).
                You can only use one of `input_mask` and `attention_mask`.
            mems: [optional] a list of float32 Tensors in shape [mem_len, bsz, d_model], memory
                from previous batches. The length of the list equals n_layer.
                If None, no memory is used.
            perm_mask: [optional] float32 Tensor in shape [bsz, len, len].
                If perm_mask[k, i, j] = 0, i attend to j in batch k;
                if perm_mask[k, i, j] = 1, i does not attend to j in batch k.
                If None, each position attends to all the others.
            target_mapping: [optional] float32 Tensor in shape [bsz, num_predict, len].
                If target_mapping[k, i, j] = 1, the i-th prediction in batch k is
                on the j-th token.
                Only used during pretraining for partial prediction.
                Set to None during finetuning.
            inp_q: [optional] float32 Tensor in shape [bsz, len].
                1 for tokens with losses and 0 for tokens without losses.
                Only used during pretraining for two-stream attention.
                Set to None during finetuning.

            mem_len: int, the number of tokens to cache.
            reuse_len: int, the number of tokens in the currect batch to be cached
                and reused in the future.
            bi_data: bool, whether to use bidirectional input pipeline.
                Usually set to True during pretraining and False during finetuning.
            clamp_len: int, clamp all relative distances larger than clamp_len.
                -1 means no clamping.
            same_length: bool, whether to use the same attention length for each token.
            summary_type: str, "last", "first", "mean", or "attn". The method
                to pool the input to get a vector representation.
        """
        # the original code for XLNet uses shapes [len, bsz] with the batch dimension at the end
        # but we want a unified interface in the library with the batch size on the first dimension
        # so we move here the first dimension (batch) to the end
        inp_k = inp_k.transpose(0, 1).contiguous()
        token_type_ids = token_type_ids.transpose(0, 1).contiguous() if token_type_ids is not None else None
        input_mask = input_mask.transpose(0, 1).contiguous() if input_mask is not None else None
        attention_mask = attention_mask.transpose(0, 1).contiguous() if attention_mask is not None else None
        perm_mask = perm_mask.permute(1, 2, 0).contiguous() if perm_mask is not None else None
        target_mapping = target_mapping.permute(1, 2, 0).contiguous() if target_mapping is not None else None
        inp_q = inp_q.transpose(0, 1).contiguous() if inp_q is not None else None

        qlen, bsz = inp_k.shape[0], inp_k.shape[1]
        mlen = mems[0].shape[0] if mems is not None else 0
        klen = mlen + qlen

        dtype_float = next(self.parameters()).dtype
        device = next(self.parameters()).device

        ##### Attention mask
        # causal attention mask
        if self.attn_type == 'uni':
            attn_mask = self.create_mask(qlen, mlen)
            attn_mask = attn_mask[:, :, None, None]
        elif self.attn_type == 'bi':
            attn_mask = None
        else:
            raise ValueError('Unsupported attention type: {}'.format(self.attn_type))

        # data mask: input mask & perm mask
        assert input_mask is None or attention_mask is None, (
            "You can only use one of input_mask (uses 1 for padding) or attention_mask "
            "(uses 0 for padding, added for compatibility with BERT). Please choose one.")
        if input_mask is None and attention_mask is not None:
            input_mask = 1.0 - attention_mask
        if input_mask is not None and perm_mask is not None:
            data_mask = input_mask[None] + perm_mask
        elif input_mask is not None and perm_mask is None:
            data_mask = input_mask[None]
        elif input_mask is None and perm_mask is not None:
            data_mask = perm_mask
        else:
            data_mask = None

        if data_mask is not None:
            # all mems can be attended to
            mems_mask = torch.zeros([data_mask.shape[0], mlen, bsz]).to(data_mask)
            data_mask = torch.cat([mems_mask, data_mask], dim=1)
            if attn_mask is None:
                attn_mask = data_mask[:, :, :, None]
            else:
                attn_mask += data_mask[:, :, :, None]

        if attn_mask is not None:
            attn_mask = (attn_mask > 0).to(dtype_float)

        if attn_mask is not None:
            non_tgt_mask = -torch.eye(qlen).to(attn_mask)
            non_tgt_mask = torch.cat([torch.zeros([qlen, mlen]).to(attn_mask), non_tgt_mask], dim=-1)
            non_tgt_mask = ((attn_mask + non_tgt_mask[:, :, None, None]) > 0).to(attn_mask)
        else:
            non_tgt_mask = None

        ##### Word embeddings and prepare h & g hidden states
        word_emb_k = self.word_embedding(inp_k)
        output_h = self.dropout(word_emb_k)
        if inp_q is not None:
            if target_mapping is not None:
                word_emb_q = self.mask_emb.expand(target_mapping.shape[0], bsz, -1)
            else:
                inp_q_ext = inp_q[:, :, None]
                word_emb_q = inp_q_ext * self.mask_emb + (1 - inp_q_ext) * word_emb_k
            output_g = self.dropout(word_emb_q)
        else:
            output_g = None

        ##### Segment embedding
        if token_type_ids is not None:
            # Convert `token_type_ids` to one-hot `seg_mat`
            mem_pad = torch.zeros([mlen, bsz], dtype=torch.long, device=device)
            cat_ids = torch.cat([mem_pad, token_type_ids], dim=0)

            # `1` indicates not in the same segment [qlen x klen x bsz]
            seg_mat = (token_type_ids[:, None] != cat_ids[None, :]).long()
            seg_mat = F.one_hot(seg_mat, num_classes=2).to(dtype_float)
        else:
            seg_mat = None

        ##### Positional encoding
        pos_emb = self.relative_positional_encoding(qlen, klen, bsz=bsz)
        pos_emb = self.dropout(pos_emb)

        ##### Head mask if needed (for bertology/pruning)
        # 1.0 in head_mask indicates we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [n_layer x num_heads]
        # and head_mask is converted to shape [n_layer x batch x num_heads x seq_length x seq_length]
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                head_mask = head_mask.expand(self.config.n_layer, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)  # We can specify head_mask for each layer
            head_mask = head_mask.to(dtype=next(self.parameters()).dtype)  # switch to float if needed (fp16 compatibility)
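            # e.g. a [n_layer, num_heads] mask becomes [n_layer, 1, num_heads, 1, 1] and broadcasts over batch and sequence dims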
        else:
            head_mask = [None] * self.config.n_layer

        new_mems = []
        if mems is None:
            mems = [None] * len(self.layer)

        hidden_states = []
        for i, layer_module in enumerate(self.layer):
            # cache new mems
            new_mems.append(self.cache_mem(output_h, mems[i]))
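            # mems are cached from the hidden states *entering* layer i, so mems[i] feeds the same layer on the next segment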

            output_h, output_g = layer_module(output_h, output_g,
                                              attn_mask_h=non_tgt_mask, attn_mask_g=attn_mask,
                                              r=pos_emb, seg_mat=seg_mat,
                                              mems=mems[i], target_mapping=target_mapping,
                                              head_mask=head_mask)
            hidden_states.append(output_h)
        output = self.dropout(output_g if output_g is not None else output_h)

        # We transpose back here to shape [bsz, len, hidden_dim] (cf. beginning of forward() method)
        output = output.permute(1, 0, 2).contiguous()
        hidden_states = [hs.permute(1, 0, 2).contiguous() for hs in hidden_states]

        return output, hidden_states, new_mems


class XLNetLMHeadModel(XLNetPreTrainedModel):
    """XLNet model ("XLNet: Generalized Autoregressive Pretraining for Language Understanding").

    Params:
        `config`: a XLNetConfig class instance with the configuration to build a new model
        `output_attentions`: If True, also output attention weights computed by the model at each layer. Default: False
        `keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
            This can be used to compute head importance metrics. Default: False

    Inputs:
        inp_k: int32 Tensor in shape [bsz, len], the input token IDs.
        token_type_ids: int32 Tensor in shape [bsz, len], the input segment IDs.
        input_mask: [optional] float32 Tensor in shape [bsz, len], the input mask.
            0 for real tokens and 1 for padding.
        attention_mask: [optional] float32 Tensor, SAME FUNCTION as `input_mask`
            but with 1 for real tokens and 0 for padding.
            Added for easy compatibility with the BERT model (which uses this negative masking).
            You can only use one of `input_mask` and `attention_mask`.
        mems: [optional] a list of float32 Tensors in shape [mem_len, bsz, d_model], memory
            from previous batches. The length of the list equals n_layer.
            If None, no memory is used.
        perm_mask: [optional] float32 Tensor in shape [bsz, len, len].
            If perm_mask[k, i, j] = 0, i attends to j in batch k;
            if perm_mask[k, i, j] = 1, i does not attend to j in batch k.
            If None, each position attends to all the others.
        target_mapping: [optional] float32 Tensor in shape [bsz, num_predict, len].
            If target_mapping[k, i, j] = 1, the i-th prediction in batch k is
            on the j-th token.
            Only used during pretraining for partial prediction.
            Set to None during finetuning.
        inp_q: [optional] float32 Tensor in shape [bsz, len].
            1 for tokens with losses and 0 for tokens without losses.
            Only used during pretraining for two-stream attention.
            Set to None during finetuning.


    Outputs: Tuple of (logits or loss, new_mems)
        `logits or loss`:
            if `target` is None:
                Token logits of shape [batch_size, sequence_length, n_token]
            else:
                CrossEntropy loss with the targets
        `new_mems`: list (num layers) of updated mem states at the entry of each layer
            each mem state is a torch.FloatTensor of size [self.config.mem_len, batch_size, self.config.d_model]
            Note that the first two dimensions are transposed in `mems` with regard to `input_ids` and `target`

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

    config = modeling.XLNetConfig(vocab_size_or_config_json_file=32000, d_model=768,
        n_layer=12, n_head=12, d_inner=3072)

    model = modeling.XLNetLMHeadModel(config=config)
    logits, new_mems = model(input_ids, token_type_ids, input_mask)
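    # Passing a `target` tensor (the values below are purely illustrative) returns the loss instead of the logits
    target = torch.LongTensor([[51, 99, 0], [15, 5, 0]])
    loss, new_mems = model(input_ids, token_type_ids, input_mask, target=target)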
    ```
    """
    def __init__(self, config, output_attentions=False, keep_multihead_output=False):
        super(XLNetLMHeadModel, self).__init__(config)
        self.output_attentions = output_attentions
        self.attn_type = config.attn_type
        self.same_length = config.same_length

        self.transformer = XLNetModel(config, output_attentions=output_attentions,
                                              keep_multihead_output=keep_multihead_output)
        self.lm_loss = nn.Linear(config.d_model, config.n_token, bias=True)

        # Tie weights

        self.apply(self.init_weights)
        self.tie_weights()

    def tie_weights(self):
        """ Make sure we are sharing the embeddings
        """
        self.lm_loss.weight = self.transformer.word_embedding.weight
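        # The softmax projection and the embedding lookup now share a single [n_token, d_model] parameter,
        # so updating one updates the other.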
    def forward(self, inp_k, token_type_ids=None, input_mask=None, attention_mask=None,
                mems=None, perm_mask=None, target_mapping=None, inp_q=None,
                target=None, output_all_encoded_layers=True, head_mask=None):
        """
        Args:
            inp_k: int32 Tensor in shape [bsz, len], the input token IDs.
            token_type_ids: int32 Tensor in shape [bsz, len], the input segment IDs.
            input_mask: float32 Tensor in shape [bsz, len], the input mask.
                0 for real tokens and 1 for padding.
            attention_mask: [optional] float32 Tensor, SAME FUNCTION as `input_mask`
                but with 1 for real tokens and 0 for padding.
                Added for easy compatibility with the BERT model (which uses this negative masking).
                You can only use one of `input_mask` and `attention_mask`.
            mems: a list of float32 Tensors in shape [mem_len, bsz, d_model], memory
                from previous batches. The length of the list equals n_layer.
                If None, no memory is used.
            perm_mask: float32 Tensor in shape [bsz, len, len].
                If perm_mask[k, i, j] = 0, i attends to j in batch k;
                if perm_mask[k, i, j] = 1, i does not attend to j in batch k.
                If None, each position attends to all the others.
            target_mapping: float32 Tensor in shape [bsz, num_predict, len].
                If target_mapping[k, i, j] = 1, the i-th prediction in batch k is
                on the j-th token.
                Only used during pretraining for partial prediction.
                Set to None during finetuning.
            inp_q: float32 Tensor in shape [bsz, len].
                1 for tokens with losses and 0 for tokens without losses.
                Only used during pretraining for two-stream attention.
                Set to None during finetuning.
            target: [optional] int32 Tensor in shape [bsz, len], target token IDs for the language modeling loss.
                If provided, the model returns (loss, new_mems) instead of (logits, new_mems).
            head_mask: [optional] torch.Tensor of shape [num_heads] or [n_layer, num_heads]
                used to nullify selected attention heads of the transformer.
        """
        output, hidden_states, new_mems = self.transformer(inp_k, token_type_ids, input_mask, attention_mask,
                                            mems, perm_mask, target_mapping, inp_q,
                                            output_all_encoded_layers, head_mask)

        logits = self.lm_loss(output)

        if target is not None:
            # Flatten the tokens
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            loss = loss_fct(logits.view(-1, logits.size(-1)),
                            target.view(-1))
            return loss, new_mems

        return logits, new_mems

class XLNetSequenceSummary(nn.Module):
    def __init__(self, config, summary_type="last", use_proj=True,
                 output_attentions=False, keep_multihead_output=False):
        super(XLNetSequenceSummary, self).__init__()
        self.summary_type = summary_type
        if use_proj:
            self.summary = nn.Linear(config.d_model, config.d_model)
        else:
            self.summary = None
        if summary_type == 'attn':
            # We should use a standard multi-head attention module with absolute positional embedding for that.
            # Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276
            # We can probably just use the multi-head attention module of PyTorch >=1.1.0
            raise NotImplementedError
        self.dropout = nn.Dropout(config.dropout)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        """ hidden_states: float Tensor in shape [bsz, seq_len, d_model], the hidden-states of the last layer."""
        if self.summary_type == 'last':
            output = hidden_states[:, -1]
        elif self.summary_type == 'first':
            output = hidden_states[:, 0]
        elif self.summary_type == 'mean':
            output = hidden_states.mean(dim=1)
        elif self.summary_type == 'attn':
            raise NotImplementedError

        if self.summary is not None:
            output = self.summary(output)
        output = self.activation(output)
        output = self.dropout(output)
        return output


class XLNetForSequenceClassification(XLNetPreTrainedModel):
    """XLNet model ("XLNet: Generalized Autoregressive Pretraining for Language Understanding").

    Params:
        `config`: a XLNetConfig class instance with the configuration to build a new model
        `output_attentions`: If True, also output attention weights computed by the model at each layer. Default: False
        `keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
            This can be used to compute head importance metrics. Default: False
        `summary_type`: str, "last", "first", "mean", or "attn". The method
            to pool the input to get a vector representation. Default: last

    Inputs:
        inp_k: int32 Tensor in shape [bsz, len], the input token IDs.
        token_type_ids: int32 Tensor in shape [bsz, len], the input segment IDs.
        input_mask: float32 Tensor in shape [bsz, len], the input mask.
            0 for real tokens and 1 for padding.
        attention_mask: [optional] float32 Tensor, SAME FUNCTION as `input_mask`
            but with 1 for real tokens and 0 for padding.
            Added for easy compatibility with the BERT model (which uses this negative masking).
            You can only use one of `input_mask` and `attention_mask`.
        mems: a list of float32 Tensors in shape [mem_len, bsz, d_model], memory
            from previous batches. The length of the list equals n_layer.
            If None, no memory is used.
        perm_mask: float32 Tensor in shape [bsz, len, len].
            If perm_mask[k, i, j] = 0, i attends to j in batch k;
            if perm_mask[k, i, j] = 1, i does not attend to j in batch k.
            If None, each position attends to all the others.
        target_mapping: float32 Tensor in shape [bsz, num_predict, len].
            If target_mapping[k, i, j] = 1, the i-th prediction in batch k is
            on the j-th token.
            Only used during pretraining for partial prediction.
            Set to None during finetuning.
        inp_q: float32 Tensor in shape [bsz, len].
            1 for tokens with losses and 0 for tokens without losses.
            Only used during pretraining for two-stream attention.
            Set to None during finetuning.
        `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is kept, 0.0 => head is masked (nullified).


    Outputs: Tuple of (logits or loss, mems)
        `logits or loss`:
            if target is None:
                Classification logits of shape [batch_size, num_labels] (or regression scores of shape [batch_size, 1] if `is_regression`)
            else:
                CrossEntropy loss (or MSE loss for regression) with the targets
        `new_mems`: list (num layers) of updated mem states at the entry of each layer
            each mem state is a torch.FloatTensor of size [self.config.mem_len, batch_size, self.config.d_model]
            Note that the first two dimensions are transposed in `mems` with regards to `input_ids` and `target`

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

    config = modeling.XLNetConfig(vocab_size_or_config_json_file=32000, d_model=768,
        n_layer=12, n_head=12, d_inner=3072)

    model = modeling.XLNetForSequenceClassification(config=config)
    logits, new_mems = model(input_ids, token_type_ids, input_mask)
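    # Passing integer class targets (illustrative values) returns (loss, new_mems) instead of (logits, new_mems)
    target = torch.LongTensor([1, 0])
    loss, new_mems = model(input_ids, token_type_ids, input_mask, target=target)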
    ```
    """
    def __init__(self, config, summary_type="last", use_proj=True, num_labels=2,
                 is_regression=False, output_attentions=False, keep_multihead_output=False):
        super(XLNetForSequenceClassification, self).__init__(config)
        self.output_attentions = output_attentions
        self.attn_type = config.attn_type
        self.same_length = config.same_length
        self.summary_type = summary_type
        self.is_regression = is_regression

        self.transformer = XLNetModel(config, output_attentions=output_attentions,
                                              keep_multihead_output=keep_multihead_output)

        self.sequence_summary = XLNetSequenceSummary(config, summary_type=summary_type,
                                                     use_proj=use_proj, output_attentions=output_attentions,
                                                     keep_multihead_output=keep_multihead_output)
        self.loss_proj = nn.Linear(config.d_model, num_labels if not is_regression else 1)
        self.apply(self.init_weights)
    def forward(self, inp_k, token_type_ids=None, input_mask=None, attention_mask=None,
                mems=None, perm_mask=None, target_mapping=None, inp_q=None,
                target=None, output_all_encoded_layers=True, head_mask=None):
        """
        Args:
            inp_k: int32 Tensor in shape [bsz, len], the input token IDs.
            token_type_ids: int32 Tensor in shape [bsz, len], the input segment IDs.
            input_mask: float32 Tensor in shape [bsz, len], the input mask.
                0 for real tokens and 1 for padding.
            attention_mask: [optional] float32 Tensor, SAME FUNCTION as `input_mask`
                but with 1 for real tokens and 0 for padding.
                Added for easy compatibility with the BERT model (which uses this negative masking).
                You can only use one of `input_mask` and `attention_mask`.
            mems: a list of float32 Tensors in shape [mem_len, bsz, d_model], memory
                from previous batches. The length of the list equals n_layer.
                If None, no memory is used.
            perm_mask: float32 Tensor in shape [bsz, len, len].
                If perm_mask[k, i, j] = 0, i attends to j in batch k;
                if perm_mask[k, i, j] = 1, i does not attend to j in batch k.
                If None, each position attends to all the others.
            target_mapping: float32 Tensor in shape [bsz, num_predict, len].
                If target_mapping[k, i, j] = 1, the i-th prediction in batch k is
                on the j-th token.
                Only used during pretraining for partial prediction.
                Set to None during finetuning.
            inp_q: float32 Tensor in shape [bsz, len].
                1 for tokens with losses and 0 for tokens without losses.
                Only used during pretraining for two-stream attention.
                Set to None during finetuning.
            target: [optional] Tensor in shape [bsz], the target class indices
                (or float32 values when `is_regression` is True).
                If provided, the model returns (loss, new_mems) instead of (logits, new_mems).
        """
        output, _, new_mems = self.transformer(inp_k, token_type_ids, input_mask, attention_mask,
                                            mems, perm_mask, target_mapping, inp_q,
                                            output_all_encoded_layers, head_mask)
        output = self.sequence_summary(output)
        logits = self.loss_proj(output)
        if target is not None:
            if self.is_regression:
                loss_fct = nn.MSELoss()
                loss = loss_fct(logits.view(-1), target.view(-1))
            else:
                loss_fct = CrossEntropyLoss(ignore_index=-1)
                loss = loss_fct(logits.view(-1, logits.size(-1)), target.view(-1))
            return loss, new_mems

        return logits, new_mems

class XLNetForQuestionAnswering(XLNetPreTrainedModel):
    """XLNet model for Question Answering (span extraction).
    This module is composed of the XLNet model with a linear layer on top of
    the sequence output that computes start_logits and end_logits

    Params:
        `config`: a XLNetConfig class instance with the configuration to build a new model
        `output_attentions`: If True, also output attention weights computed by the model at each layer. Default: False
        `keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
            This can be used to compute head importance metrics. Default: False

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
            `run_bert_extract_features.py`, `run_bert_classifier.py` and `run_bert_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see XLNet paper for more details).
        `attention_mask`: [optional] float32 Tensor, SAME FUNCTION as `input_mask`
            but with 1 for real tokens and 0 for padding.
            Added for easy compatibility with the BERT model (which uses this negative masking).
            You can only use one of `input_mask` and `attention_mask`.
        `input_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `start_positions`: position of the first token for the labeled span: torch.LongTensor of shape [batch_size].
            Positions are clamped to the length of the sequence and position outside of the sequence are not taken
            into account for computing the loss.
        `end_positions`: position of the last token for the labeled span: torch.LongTensor of shape [batch_size].
            Positions are clamped to the length of the sequence and position outside of the sequence are not taken
            into account for computing the loss.
        `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is kept, 0.0 => head is masked (nullified).

    Outputs:
        if `start_positions` and `end_positions` are not `None`:
            Outputs the total_loss which is the sum of the CrossEntropy loss for the start and end token positions.
        if `start_positions` or `end_positions` is `None`:
            Outputs a tuple of start_logits, end_logits which are the logits respectively for the start and end
            position tokens of shape [batch_size, sequence_length].

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

    config = XLNetConfig(vocab_size_or_config_json_file=32000, d_model=768,
        n_layer=12, n_head=12, d_inner=3072)

    model = XLNetForQuestionAnswering(config)
    start_logits, end_logits = model(input_ids, token_type_ids, input_mask)
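    # Passing span targets (illustrative values) returns the averaged start/end CrossEntropy loss instead of the logits
    start_positions = torch.LongTensor([0, 1])
    end_positions = torch.LongTensor([2, 2])
    loss = model(input_ids, token_type_ids, input_mask,
                 start_positions=start_positions, end_positions=end_positions)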
    ```
    """
    def __init__(self, config, output_attentions=False, keep_multihead_output=False):
        super(XLNetForQuestionAnswering, self).__init__(config)
        self.output_attentions = output_attentions
        self.transformer = XLNetModel(config, output_attentions=output_attentions,
                                      keep_multihead_output=keep_multihead_output)
        self.qa_outputs = nn.Linear(config.d_model, 2)
        self.apply(self.init_weights)

    def forward(self, inp_k, token_type_ids=None, input_mask=None, attention_mask=None,
                mems=None, perm_mask=None, target_mapping=None, inp_q=None,
                start_positions=None, end_positions=None,
                output_all_encoded_layers=True, head_mask=None):
        output, _, new_mems = self.transformer(inp_k, token_type_ids, input_mask, attention_mask,
                                            mems, perm_mask, target_mapping, inp_q,
                                            output_all_encoded_layers, head_mask)

        logits = self.qa_outputs(output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)

        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, the position tensors may carry an extra dimension; squeeze it
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
            return total_loss
        return start_logits, end_logits