# coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI GPT-2 model."""

from __future__ import absolute_import, division, print_function, unicode_literals

import collections
import json
import logging
import math
import os
import sys
from io import open

import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from torch.nn.parameter import Parameter

from .modeling_utils import (Conv1D, CONFIG_NAME, WEIGHTS_NAME, PretrainedConfig,
                             PreTrainedModel, prune_conv1d_layer, SequenceSummary,
                             add_start_docstrings)
from .modeling_bert import BertLayerNorm as LayerNorm

logger = logging.getLogger(__name__)

GPT2_PRETRAINED_MODEL_ARCHIVE_MAP = {"gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-pytorch_model.bin",
                                     "gpt2-medium": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-pytorch_model.bin",
                                     "gpt2-large": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-large-pytorch_model.bin"}
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP = {"gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-config.json",
                                      "gpt2-medium": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-config.json",
                                      "gpt2-large": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-large-config.json"}

def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path):
    """ Load tf checkpoints in a pytorch model
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error("Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions.")
        raise
    tf_path = os.path.abspath(gpt2_checkpoint_path)
    logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array.squeeze())

    for name, array in zip(names, arrays):
        name = name[6:]  # skip "model/"
        name = name.split('/')
        pointer = model
        for m_name in name:
            if re.fullmatch(r'[A-Za-z]+\d+', m_name):
                l = re.split(r'(\d+)', m_name)
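                # e.g. "h0" -> ['h', '0', '']: the letter prefix selects the attribute,
                # the number then indexes into the corresponding module list (e.g. the stack of blocks).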
            else:
                l = [m_name]
            if l[0] == 'w' or l[0] == 'g':
                pointer = getattr(pointer, 'weight')
            elif l[0] == 'b':
                pointer = getattr(pointer, 'bias')
            elif l[0] == 'wpe' or l[0] == 'wte':
                pointer = getattr(pointer, l[0])
                pointer = getattr(pointer, 'weight')
            else:
                pointer = getattr(pointer, l[0])
            if len(l) >= 2:
                num = int(l[1])
                pointer = pointer[num]
        try:
            assert pointer.shape == array.shape
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model


def gelu(x):
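    # Tanh approximation of the GELU activation used by the original GPT/GPT-2 code:
    # 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))), which closely tracks
    # the exact erf-based GELU.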
    return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))


class GPT2Config(PretrainedConfig):
    """Configuration class to store the configuration of a `GPT2Model`.

    Args:
        vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `GPT2Model` or a configuration json file.
        n_positions: Number of positional embeddings.
        n_ctx: Size of the causal mask (usually same as n_positions).
        n_embd: Dimensionality of the embeddings and hidden states.
        n_layer: Number of hidden layers in the Transformer encoder.
        n_head: Number of attention heads for each attention layer in
            the Transformer encoder.
        layer_norm_epsilon: epsilon to use in the layer norm layers
        resid_pdrop: The dropout probability for all fully connected
            layers in the embeddings, encoder, and pooler.
        attn_pdrop: The dropout ratio for the attention
            probabilities.
        embd_pdrop: The dropout ratio for the embeddings.
        initializer_range: The stddev of the truncated_normal_initializer for
            initializing all weight matrices.
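
    Examples::

        # A minimal sketch (not part of the original docstring): build the default
        # (GPT-2 small) configuration and read two of its derived properties.
        config = GPT2Config()
        assert config.hidden_size == config.n_embd
        assert config.num_hidden_layers == config.n_layer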
    """
    pretrained_config_archive_map = GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP

    def __init__(
        self,
        vocab_size_or_config_json_file=50257,
        n_positions=1024,
        n_ctx=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,

        num_labels=1,
        summary_type='cls_index',
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs
    ):
        """Constructs GPT2Config.

        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `GPT2Model` or a configuration json file.
            n_positions: Number of positional embeddings.
            n_ctx: Size of the causal mask (usually same as n_positions).
            n_embd: Dimensionality of the embeddings and hidden states.
            n_layer: Number of hidden layers in the Transformer encoder.
            n_head: Number of attention heads for each attention layer in
                the Transformer encoder.
            layer_norm_epsilon: epsilon to use in the layer norm layers
            resid_pdrop: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attn_pdrop: The dropout ratio for the attention
                probabilities.
            embd_pdrop: The dropout ratio for the embeddings.
            initializer_range: The stddev of the truncated_normal_initializer for
                initializing all weight matrices.
        """
        super(GPT2Config, self).__init__(**kwargs)

        if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
                        and isinstance(vocab_size_or_config_json_file, unicode)):
            with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.n_ctx = n_ctx
            self.n_positions = n_positions
            self.n_embd = n_embd
            self.n_layer = n_layer
            self.n_head = n_head
            self.resid_pdrop = resid_pdrop
            self.embd_pdrop = embd_pdrop
            self.attn_pdrop = attn_pdrop
            self.layer_norm_epsilon = layer_norm_epsilon
            self.initializer_range = initializer_range

            self.num_labels = num_labels
            self.summary_type = summary_type
            self.summary_use_proj = summary_use_proj
            self.summary_activation = summary_activation
            self.summary_first_dropout = summary_first_dropout
            self.summary_proj_to_labels = summary_proj_to_labels
        else:
            raise ValueError(
                "First argument must be either a vocabulary size (int)"
                "or the path to a pretrained model config file (str)"
            )

    @property
    def max_position_embeddings(self):
        return self.n_positions

    @property
    def hidden_size(self):
        return self.n_embd

    @property
    def num_attention_heads(self):
        return self.n_head

    @property
    def num_hidden_layers(self):
        return self.n_layer



class Attention(nn.Module):
    def __init__(self, nx, n_ctx, config, scale=False):
        super(Attention, self).__init__()
        self.output_attentions = config.output_attentions

        n_state = nx  # in Attention: n_state=768 (nx=n_embd)
        # [switch nx => n_state from Block to Attention to keep identical to TF implem]
        assert n_state % config.n_head == 0
        self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
        self.n_head = config.n_head
        self.split_size = n_state
        self.scale = scale

        self.c_attn = Conv1D(n_state * 3, nx)
        self.c_proj = Conv1D(n_state, nx)
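        # c_attn projects the input to 3 * n_state so that query, key and value can be
        # sliced out of a single matmul; c_proj maps the merged heads back to n_state.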
        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        mask = torch.ones(self.n_head, self.split_size // self.n_head)
        heads = set(heads) - self.pruned_heads  # Convert to set and remove already pruned heads
        for head in heads:
            # Compute how many pruned heads are before the head and move the index accordingly
            head = head - sum(1 if h < head else 0 for h in self.pruned_heads)
            mask[head] = 0
        mask = mask.view(-1).contiguous().eq(1)
        index = torch.arange(len(mask))[mask].long()
        index_attn = torch.cat([index, index + self.split_size, index + (2*self.split_size)])
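        # c_attn packs the query, key and value projections side by side along its output
        # dimension, so the kept indices are repeated at offsets 0, split_size and 2 * split_size.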

        # Prune conv1d layers
        self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
        self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)

        # Update hyper params
        self.split_size = (self.split_size // self.n_head) * (self.n_head - len(heads))
        self.n_head = self.n_head - len(heads)
        self.pruned_heads = self.pruned_heads.union(heads)

    def _attn(self, q, k, v, head_mask=None):
        w = torch.matmul(q, k)
        if self.scale:
            w = w / math.sqrt(v.size(-1))
        nd, ns = w.size(-2), w.size(-1)
        b = self.bias[:, :, ns-nd:ns, :ns]
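        # Causal mask: `b` is 1 where a position may attend (itself and earlier positions)
        # and 0 elsewhere; masked logits get a large negative offset (-1e4, fp16-safe)
        # so their softmax weight is effectively zero.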
        w = w * b - 1e4 * (1 - b)

        w = nn.Softmax(dim=-1)(w)
        w = self.attn_dropout(w)

        # Mask heads if we want to
        if head_mask is not None:
            w = w * head_mask

        outputs = [torch.matmul(w, v)]
        if self.output_attentions:
            outputs.append(w)
        return outputs

    def merge_heads(self, x):
        x = x.permute(0, 2, 1, 3).contiguous()
        new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
        return x.view(*new_x_shape)  # in Tensorflow implem: fct merge_states

    def split_heads(self, x, k=False):
        new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
        x = x.view(*new_x_shape)  # in Tensorflow implem: fct split_states
        if k:
            return x.permute(0, 2, 3, 1)  # (batch, head, head_features, seq_length)
        else:
            return x.permute(0, 2, 1, 3)  # (batch, head, seq_length, head_features)

    def forward(self, x, layer_past=None, head_mask=None):
        x = self.c_attn(x)
        query, key, value = x.split(self.split_size, dim=2)
        query = self.split_heads(query)
        key = self.split_heads(key, k=True)
        value = self.split_heads(value)
        if layer_past is not None:
            past_key, past_value = layer_past[0].transpose(-2, -1), layer_past[1]  # transpose back cf below
            key = torch.cat((past_key, key), dim=-1)
            value = torch.cat((past_value, value), dim=-2)
        present = torch.stack((key.transpose(-2, -1), value))  # transpose to have same shapes for stacking
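        # `present` caches this layer's keys/values; callers can feed it back in as
        # `layer_past` so later forward passes only compute attention for new tokens.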

        attn_outputs = self._attn(query, key, value, head_mask)
        a = attn_outputs[0]

        a = self.merge_heads(a)
        a = self.c_proj(a)
        a = self.resid_dropout(a)

        outputs = [a, present] + attn_outputs[1:]
        return outputs  # a, present, (attentions)


class MLP(nn.Module):
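    # Position-wise feed-forward network. Conv1D is effectively a linear layer (the name
    # mirrors the original TF code); the block maps n_embd -> n_state (4 * n_embd) -> n_embd.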
    def __init__(self, n_state, config):  # in MLP: n_state=3072 (4 * n_embd)
        super(MLP, self).__init__()
        nx = config.n_embd
        self.c_fc = Conv1D(n_state, nx)
        self.c_proj = Conv1D(nx, n_state)
        self.act = gelu
        self.dropout = nn.Dropout(config.resid_pdrop)

    def forward(self, x):
        h = self.act(self.c_fc(x))
        h2 = self.c_proj(h)
        return self.dropout(h2)


class Block(nn.Module):
    def __init__(self, n_ctx, config, scale=False):
        super(Block, self).__init__()
        nx = config.n_embd
        self.ln_1 = LayerNorm(nx, eps=config.layer_norm_epsilon)
        self.attn = Attention(nx, n_ctx, config, scale)
        self.ln_2 = LayerNorm(nx, eps=config.layer_norm_epsilon)
        self.mlp = MLP(4 * nx, config)

    def forward(self, x, layer_past=None, head_mask=None):
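        # Pre-LayerNorm residual block: LayerNorm -> self-attention -> residual add,
        # then LayerNorm -> MLP -> residual add.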
        output_attn = self.attn(self.ln_1(x), layer_past=layer_past, head_mask=head_mask)
        a = output_attn[0]  # output_attn: a, present, (attentions)

        x = x + a
        m = self.mlp(self.ln_2(x))
        x = x + m

        outputs = [x] + output_attn[1:]
        return outputs  # x, present, (attentions)


class GPT2PreTrainedModel(PreTrainedModel):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """
    config_class = GPT2Config
    pretrained_model_archive_map = GPT2_PRETRAINED_MODEL_ARCHIVE_MAP
    load_tf_weights = load_tf_weights_in_gpt2
    base_model_prefix = "transformer"

    def __init__(self, *inputs, **kwargs):
        super(GPT2PreTrainedModel, self).__init__(*inputs, **kwargs)

    def _init_weights(self, module):
        """ Initialize the weights.
        """
        if isinstance(module, (nn.Linear, nn.Embedding, Conv1D)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if isinstance(module, (nn.Linear, Conv1D)) and module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


GPT2_START_DOCSTRING = r"""    OpenAI GPT-2 model was proposed in
    `Language Models are Unsupervised Multitask Learners`_
    by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**.
    It's a causal (unidirectional) transformer pre-trained using language modeling on a very large
    corpus of ~40 GB of text data.

    This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and
    refer to the PyTorch documentation for all matter related to general usage and behavior.

    .. _`Language Models are Unsupervised Multitask Learners`:
        https://openai.com/blog/better-language-models/

    .. _`torch.nn.Module`:
        https://pytorch.org/docs/stable/nn.html#module

    Parameters:
        config (:class:`~pytorch_transformers.GPT2Config`): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the configuration.
            Check out the :meth:`~pytorch_transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""

GPT2_INPUTS_DOCSTRING = r"""    Inputs:
        **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Indices of input sequence tokens in the vocabulary.
            GPT-2 is a model with absolute position embeddings so it's usually advised to pad the inputs on
            the right rather than the left.
            Indices can be obtained using :class:`pytorch_transformers.GPT2Tokenizer`.
            See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and
            :func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
        **position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Indices of positions of each input sequence tokens in the position embeddings.
            Selected in the range ``[0, config.max_position_embeddings - 1]``.
        **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            A parallel sequence of tokens (can be used to indicate various portions of the inputs).
            The embeddings from these tokens will be summed with the respective token embeddings.
            Indices are selected in the vocabulary (unlike BERT which has a specific vocabulary for segment indices).
        **past**:
            list of ``torch.FloatTensor`` (one for each layer):
            that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
            (see `past` output below). Can be used to speed up sequential decoding.
        **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
            Mask to nullify selected heads of the self-attention modules.
            Mask values selected in ``[0, 1]``:
            ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
"""

@add_start_docstrings("The bare GPT2 Model transformer outputing raw hidden-states without any specific head on top.",
                      GPT2_START_DOCSTRING, GPT2_INPUTS_DOCSTRING)
class GPT2Model(GPT2PreTrainedModel):
    r"""
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
            Sequence of hidden-states at the last layer of the model.
        **past**:
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            that contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
        model = GPT2Model.from_pretrained('gpt2')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        last_hidden_states = outputs[0]  # The last hidden-state is the first element of the output tuple

    """
    def __init__(self, config):
        super(GPT2Model, self).__init__(config)
        self.output_hidden_states = config.output_hidden_states
        self.output_attentions = config.output_attentions

        self.wte = nn.Embedding(config.vocab_size, config.n_embd)
        self.wpe = nn.Embedding(config.n_positions, config.n_embd)
        self.drop = nn.Dropout(config.embd_pdrop)
        self.h = nn.ModuleList([Block(config.n_ctx, config, scale=True) for _ in range(config.n_layer)])
        self.ln_f = LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)

        self.init_weights()

    def _resize_token_embeddings(self, new_num_tokens):
        self.wte = self._get_resized_embeddings(self.wte, new_num_tokens)
        return self.wte

    def _prune_heads(self, heads_to_prune):
        """ Prunes heads of the model.
            heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
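            e.g. (hypothetical) {0: [0, 2], 5: [1]} prunes heads 0 and 2 of the first
            block and head 1 of the sixth block.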
        """
        for layer, heads in heads_to_prune.items():
            self.h[layer].attn.prune_heads(heads)

    def forward(self, input_ids, position_ids=None, token_type_ids=None, past=None, head_mask=None):
        if past is None:
            past_length = 0
            past = [None] * len(self.h)
        else:
            past_length = past[0][0].size(-2)
        if position_ids is None:
            position_ids = torch.arange(past_length, input_ids.size(-1) + past_length, dtype=torch.long, device=input_ids.device)
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # head_mask has shape n_layer x batch x n_heads x N x N
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                head_mask = head_mask.expand(self.config.n_layer, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)  # We can specify head_mask for each layer
            head_mask = head_mask.to(dtype=next(self.parameters()).dtype)  # switch to float if needed + fp16 compatibility
        else:
            head_mask = [None] * self.config.n_layer

        input_shape = input_ids.size()
        input_ids = input_ids.view(-1, input_ids.size(-1))
        position_ids = position_ids.view(-1, position_ids.size(-1))

        inputs_embeds = self.wte(input_ids)
        position_embeds = self.wpe(position_ids)
        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
            token_type_embeds = self.wte(token_type_ids)
        else:
            token_type_embeds = 0
        hidden_states = inputs_embeds + position_embeds + token_type_embeds
        hidden_states = self.drop(hidden_states)

        output_shape = input_shape + (hidden_states.size(-1),)

        presents = ()
        all_attentions = []
        all_hidden_states = ()
        for i, (block, layer_past) in enumerate(zip(self.h, past)):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states.view(*output_shape),)

            outputs = block(hidden_states, layer_past, head_mask[i])
            hidden_states, present = outputs[:2]
            presents = presents + (present,)

            if self.output_attentions:
                all_attentions.append(outputs[2])

        hidden_states = self.ln_f(hidden_states)

        hidden_states = hidden_states.view(*output_shape)
        # Add last hidden state
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states, presents)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            # let the number of heads free (-1) so we can extract attention even after head pruning
            attention_output_shape = input_shape[:-1] + (-1,) + all_attentions[0].shape[-2:]
            all_attentions = tuple(t.view(*attention_output_shape) for t in all_attentions)
            outputs = outputs + (all_attentions,)
        return outputs  # last hidden state, presents, (all hidden_states), (attentions)


@add_start_docstrings("""The GPT2 Model transformer with a language modeling head on top
(linear layer with weights tied to the input embeddings). """, GPT2_START_DOCSTRING, GPT2_INPUTS_DOCSTRING)
class GPT2LMHeadModel(GPT2PreTrainedModel):
    r"""
        **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Labels for language modeling.
            Note that the labels **are shifted** inside the model, i.e. you can set ``lm_labels = input_ids``
            Indices are selected in ``[-1, 0, ..., config.vocab_size]``
            All labels set to ``-1`` are ignored (masked), the loss is only
            computed for labels in ``[0, ..., config.vocab_size]``

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Language modeling loss.
        **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        **past**:
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            that contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        import torch
        from pytorch_transformers import GPT2Tokenizer, GPT2LMHeadModel

        tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
        model = GPT2LMHeadModel.from_pretrained('gpt2')

        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=input_ids)
        loss, logits = outputs[:2]
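
        # A hedged sketch (not part of the original example): reuse the `past` key/value
        # cache returned by the model to decode one token at a time.
        past = None
        generated = torch.tensor(tokenizer.encode("The Manhattan bridge")).unsqueeze(0)
        context = generated
        for _ in range(5):
            logits, past = model(context, past=past)[:2]
            context = torch.argmax(logits[..., -1, :], dim=-1, keepdim=True)
            generated = torch.cat([generated, context], dim=-1)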

    """
    def __init__(self, config):
        super(GPT2LMHeadModel, self).__init__(config)
        self.transformer = GPT2Model(config)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)

        self.init_weights()
        self.tie_weights()

    def tie_weights(self):
        """ Make sure we are sharing the input and output embeddings.
            Export to TorchScript can't handle parameter sharing so we are cloning them instead.
        """
        self._tie_or_clone_weights(self.lm_head,
                                   self.transformer.wte)

    def forward(self, input_ids, position_ids=None, token_type_ids=None, labels=None, past=None, head_mask=None):
        transformer_outputs = self.transformer(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
                                               past=past, head_mask=head_mask)
        hidden_states = transformer_outputs[0]

        lm_logits = self.lm_head(hidden_states)

        outputs = (lm_logits,) + transformer_outputs[1:]
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
                            shift_labels.view(-1))
            outputs = (loss,) + outputs

        return outputs  # (loss), lm_logits, presents, (all hidden_states), (attentions)


@add_start_docstrings("""The GPT2 Model transformer with a language modeling and a multiple-choice classification
head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers.
The language modeling head has its weights tied to the input embeddings,
the classification head takes as input the hidden state at a specified classification token index in the input sequence.
""", GPT2_START_DOCSTRING)
class GPT2DoubleHeadsModel(GPT2PreTrainedModel):
    r"""    Inputs:
        **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
            Indices of input sequence tokens in the vocabulary.
            The second dimension of the input (`num_choices`) indicates the number of choices to score.
            Indices can be obtained using :class:`pytorch_transformers.GPT2Tokenizer`.
            See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and
            :func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
        **mc_token_ids**: ``torch.LongTensor`` of shape ``(batch_size, num_choices)``:
            Index of the classification token in each input sequence.
            Selected in the range ``[0, input_ids.size(-1) - 1]``.
        **position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
            Indices of positions of each input sequence tokens in the position embeddings.
            Selected in the range ``[0, config.max_position_embeddings - 1]``.
        **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
            A parallel sequence of tokens (can be used to indicate various portions of the inputs).
            The embeddings from these tokens will be summed with the respective token embeddings.
            Indices are selected in the vocabulary (unlike BERT which has a specific vocabulary for segment indices).
        **past**:
            list of ``torch.FloatTensor`` (one for each layer):
            that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
            (see `past` output below). Can be used to speed up sequential decoding.
        **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
            Mask to nullify selected heads of the self-attention modules.
            Mask values selected in ``[0, 1]``:
            ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
        **lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Labels for language modeling.
            Note that the labels **are shifted** inside the model, i.e. you can set ``lm_labels = input_ids``
            Indices are selected in ``[-1, 0, ..., config.vocab_size]``
            All labels set to ``-1`` are ignored (masked), the loss is only
            computed for labels in ``[0, ..., config.vocab_size]``
        **mc_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size)``:
            Labels for computing the multiple choice classification loss.
            Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension
            of the input tensors. (see `input_ids` above)

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **lm_loss**: (`optional`, returned when ``lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Language modeling loss.
        **mc_loss**: (`optional`, returned when ``multiple_choice_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Multiple choice classification loss.
        **lm_prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices, sequence_length, config.vocab_size)``
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        **mc_prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices)``
            Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).
        **past**:
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            that contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        import torch
        from pytorch_transformers import GPT2Tokenizer, GPT2DoubleHeadsModel
        
        tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
        model = GPT2DoubleHeadsModel.from_pretrained('gpt2')
        
        # Add a [CLS] to the vocabulary (we should train it also!)
        tokenizer.add_special_tokens({'cls_token': '[CLS]'})
        model.resize_token_embeddings(len(tokenizer))  # Update the model embeddings with the new vocabulary size
        print(tokenizer.cls_token_id, len(tokenizer))  # The newly added token is the last token of the vocabulary
        
        choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
        encoded_choices = [tokenizer.encode(s) for s in choices]
        cls_token_location = [tokens.index(tokenizer.cls_token_id) for tokens in encoded_choices]

        input_ids = torch.tensor(encoded_choices).unsqueeze(0)  # Batch size: 1, number of choices: 2
        mc_token_ids = torch.tensor([cls_token_location])  # Batch size: 1

        outputs = model(input_ids, mc_token_ids=mc_token_ids)
        lm_prediction_scores, mc_prediction_scores = outputs[:2]

    """
    def __init__(self, config):
        super(GPT2DoubleHeadsModel, self).__init__(config)
        self.transformer = GPT2Model(config)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        self.multiple_choice_head = SequenceSummary(config)

        self.init_weights()
        self.tie_weights()

    def tie_weights(self):
        """ Make sure we are sharing the input and output embeddings.
            Export to TorchScript can't handle parameter sharing so we are cloning them instead.
        """
        self._tie_or_clone_weights(self.lm_head,
                                   self.transformer.wte)

    def forward(self, input_ids, mc_token_ids=None, lm_labels=None, mc_labels=None, token_type_ids=None,
                position_ids=None, past=None, head_mask=None):
        transformer_outputs = self.transformer(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
                                               past=past, head_mask=head_mask)
        hidden_states = transformer_outputs[0]

        lm_logits = self.lm_head(hidden_states)
        mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1)

        outputs = (lm_logits, mc_logits) + transformer_outputs[1:]
        if mc_labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)),
                            mc_labels.view(-1))
            outputs = (loss,) + outputs
        if lm_labels is not None:
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = lm_labels[..., 1:].contiguous()
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
                            shift_labels.view(-1))
            outputs = (loss,) + outputs

        return outputs  # (lm loss), (mc loss), lm logits, mc logits, presents, (all hidden_states), (attentions)