"...lm-evaluation-harness.git" did not exist on "c5d2774313787e8f7c30c7f5f7882ac0ab9cd90a"
modeling_gpt2.py 32.3 KB
Newer Older
thomwolf's avatar
thomwolf committed
1
# coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI GPT-2 model."""

from __future__ import absolute_import, division, print_function, unicode_literals

import collections
import json
import logging
import math
import os
import sys
from io import open

import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from torch.nn.parameter import Parameter

from .modeling_utils import PreTrainedModel, Conv1D, prune_conv1d_layer, SequenceSummary
from .configuration_gpt2 import GPT2Config
from .file_utils import add_start_docstrings

logger = logging.getLogger(__name__)

GPT2_PRETRAINED_MODEL_ARCHIVE_MAP = {"gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-pytorch_model.bin",
                                     "gpt2-medium": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-pytorch_model.bin",
                                     "gpt2-large": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-large-pytorch_model.bin",
                                     "distilgpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/distilgpt2-pytorch_model.bin",}

def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path):
    """ Load tf checkpoints in a pytorch model
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error("Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions.")
        raise
    tf_path = os.path.abspath(gpt2_checkpoint_path)
    logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
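        # Some TF variables carry a leading singleton dimension; squeeze() drops
        # any size-1 dims so shapes line up with the PyTorch parameters below.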
        arrays.append(array.squeeze())

    for name, array in zip(names, arrays):
        name = name[6:]  # skip "model/"
        name = name.split('/')
        pointer = model
        for m_name in name:
            if re.fullmatch(r'[A-Za-z]+\d+', m_name):
                l = re.split(r'(\d+)', m_name)
            else:
                l = [m_name]
            if l[0] == 'w' or l[0] == 'g':
                pointer = getattr(pointer, 'weight')
            elif l[0] == 'b':
                pointer = getattr(pointer, 'bias')
            elif l[0] == 'wpe' or l[0] == 'wte':
                pointer = getattr(pointer, l[0])
                pointer = getattr(pointer, 'weight')
            else:
                pointer = getattr(pointer, l[0])
            if len(l) >= 2:
                num = int(l[1])
                pointer = pointer[num]
        try:
            assert pointer.shape == array.shape
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model


def gelu(x):
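    # Tanh approximation of the Gaussian Error Linear Unit (GELU), matching the
    # activation used in the original OpenAI TF implementation.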
    return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))


class Attention(nn.Module):
    def __init__(self, nx, n_ctx, config, scale=False):
        super(Attention, self).__init__()
        self.output_attentions = config.output_attentions

        n_state = nx  # in Attention: n_state=768 (nx=n_embd)
        # [switch nx => n_state from Block to Attention to keep identical to TF implem]
        assert n_state % config.n_head == 0
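        # Pre-compute the lower-triangular causal mask once; registered as a buffer
        # so it moves with the module (device/dtype) without being a trained parameter.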
        self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
        self.n_head = config.n_head
        self.split_size = n_state
        self.scale = scale

        self.c_attn = Conv1D(n_state * 3, nx)
        self.c_proj = Conv1D(n_state, nx)
        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        mask = torch.ones(self.n_head, self.split_size // self.n_head)
        heads = set(heads) - self.pruned_heads  # Convert to set and remove already pruned heads
        for head in heads:
            # Compute how many pruned heads are before the head and move the index accordingly
            head = head - sum(1 if h < head else 0 for h in self.pruned_heads)
            mask[head] = 0
        mask = mask.view(-1).contiguous().eq(1)
        index = torch.arange(len(mask))[mask].long()
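        # c_attn packs q, k and v along its output dimension, so each pruned feature
        # index must be removed from all three chunks at once.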
        index_attn = torch.cat([index, index + self.split_size, index + (2*self.split_size)])

        # Prune conv1d layers
        self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
        self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)

        # Update hyper params
        self.split_size = (self.split_size // self.n_head) * (self.n_head - len(heads))
        self.n_head = self.n_head - len(heads)
        self.pruned_heads = self.pruned_heads.union(heads)

    def _attn(self, q, k, v, attention_mask=None, head_mask=None):
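        # Scaled dot-product attention; k arrives pre-transposed by split_heads(k=True)
        # as (batch, head, head_features, seq_length), so no transpose is needed here.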
        w = torch.matmul(q, k)
        if self.scale:
            w = w / math.sqrt(v.size(-1))
        nd, ns = w.size(-2), w.size(-1)
        b = self.bias[:, :, ns-nd:ns, :ns]
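        # The static causal mask is sliced to the current query/key lengths (ns >= nd
        # when cached `past` keys are prepended); masked scores are pushed to a large
        # negative value so they vanish after the softmax.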
        w = w * b - 1e4 * (1 - b)

        if attention_mask is not None:
            # Apply the attention mask
            w = w + attention_mask

        w = nn.Softmax(dim=-1)(w)
        w = self.attn_dropout(w)

        # Mask heads if we want to
        if head_mask is not None:
            w = w * head_mask

        outputs = [torch.matmul(w, v)]
        if self.output_attentions:
            outputs.append(w)
        return outputs

    def merge_heads(self, x):
        x = x.permute(0, 2, 1, 3).contiguous()
        new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
        return x.view(*new_x_shape)  # in Tensorflow implem: fct merge_states

    def split_heads(self, x, k=False):
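        # Keys (k=True) are laid out with head_features before seq_length so that
        # torch.matmul(q, k) in _attn works without an extra transpose.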
        new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
        x = x.view(*new_x_shape)  # in Tensorflow implem: fct split_states
        if k:
            return x.permute(0, 2, 3, 1)  # (batch, head, head_features, seq_length)
        else:
            return x.permute(0, 2, 1, 3)  # (batch, head, seq_length, head_features)

    def forward(self, x, layer_past=None, attention_mask=None, head_mask=None):
        x = self.c_attn(x)
        query, key, value = x.split(self.split_size, dim=2)
        query = self.split_heads(query)
        key = self.split_heads(key, k=True)
        value = self.split_heads(value)
        if layer_past is not None:
            past_key, past_value = layer_past[0].transpose(-2, -1), layer_past[1]  # transpose back cf below
            key = torch.cat((past_key, key), dim=-1)
            value = torch.cat((past_value, value), dim=-2)
        present = torch.stack((key.transpose(-2, -1), value))  # transpose to have same shapes for stacking

        attn_outputs = self._attn(query, key, value, attention_mask, head_mask)
        a = attn_outputs[0]

        a = self.merge_heads(a)
        a = self.c_proj(a)
        a = self.resid_dropout(a)

        outputs = [a, present] + attn_outputs[1:]
        return outputs  # a, present, (attentions)


class MLP(nn.Module):
    def __init__(self, n_state, config):  # in MLP: n_state=3072 (4 * n_embd)
        super(MLP, self).__init__()
        nx = config.n_embd
        self.c_fc = Conv1D(n_state, nx)
        self.c_proj = Conv1D(nx, n_state)
        self.act = gelu
        self.dropout = nn.Dropout(config.resid_pdrop)

    def forward(self, x):
        h = self.act(self.c_fc(x))
        h2 = self.c_proj(h)
        return self.dropout(h2)


class Block(nn.Module):
    def __init__(self, n_ctx, config, scale=False):
        super(Block, self).__init__()
        nx = config.n_embd
        self.ln_1 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)
        self.attn = Attention(nx, n_ctx, config, scale)
        self.ln_2 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)
        self.mlp = MLP(4 * nx, config)

    def forward(self, x, layer_past=None, attention_mask=None, head_mask=None):
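        # Pre-norm residual block: LayerNorm is applied before each sub-layer
        # (attention, then MLP), with additive residual connections around both.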
        output_attn = self.attn(self.ln_1(x),
                                layer_past=layer_past,
                                attention_mask=attention_mask,
                                head_mask=head_mask)
        a = output_attn[0]  # output_attn: a, present, (attentions)

        x = x + a
        m = self.mlp(self.ln_2(x))
        x = x + m

        outputs = [x] + output_attn[1:]
        return outputs  # x, present, (attentions)


class GPT2PreTrainedModel(PreTrainedModel):
    """ An abstract class to handle weights initialization and
        a simple interface for dowloading and loading pretrained models.
    """
    config_class = GPT2Config
    pretrained_model_archive_map = GPT2_PRETRAINED_MODEL_ARCHIVE_MAP
    load_tf_weights = load_tf_weights_in_gpt2
    base_model_prefix = "transformer"

    def __init__(self, *inputs, **kwargs):
        super(GPT2PreTrainedModel, self).__init__(*inputs, **kwargs)

    def _init_weights(self, module):
        """ Initialize the weights.
        """
        if isinstance(module, (nn.Linear, nn.Embedding, Conv1D)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if isinstance(module, (nn.Linear, Conv1D)) and module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


GPT2_START_DOCSTRING = r"""    OpenAI GPT-2 model was proposed in
    `Language Models are Unsupervised Multitask Learners`_
    by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**.
    It's a causal (unidirectional) transformer pre-trained using language modeling on a very large
    corpus of ~40 GB of text data.

    This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and
    refer to the PyTorch documentation for all matters related to general usage and behavior.

    .. _`Language Models are Unsupervised Multitask Learners`:
        https://openai.com/blog/better-language-models/

    .. _`torch.nn.Module`:
        https://pytorch.org/docs/stable/nn.html#module

    Parameters:
        config (:class:`~transformers.GPT2Config`): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the configuration.
            Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""

GPT2_INPUTS_DOCSTRING = r"""    Inputs:
        **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Indices of input sequence tokens in the vocabulary.
            GPT-2 is a model with absolute position embeddings so it's usually advised to pad the inputs on
            the right rather than the left.
            Indices can be obtained using :class:`transformers.GPT2Tokenizer`.
            See :func:`transformers.PreTrainedTokenizer.encode` and
            :func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
        **past**:
            list of ``torch.FloatTensor`` (one for each layer)
            containing pre-computed hidden-states (key and values in the attention blocks) as computed by the model
            (see `past` output below). Can be used to speed up sequential decoding.
        **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:
            Mask to avoid performing attention on padding token indices.
            Mask values selected in ``[0, 1]``:
            ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
        **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            A parallel sequence of tokens (can be used to indicate various portions of the inputs).
            The embeddings from these tokens will be summed with the respective token embeddings.
            Indices are selected in the vocabulary (unlike BERT which has a specific vocabulary for segment indices).
        **position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Indices of positions of each input sequence tokens in the position embeddings.
            Selected in the range ``[0, config.max_position_embeddings - 1]``.
        **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
            Mask to nullify selected heads of the self-attention modules.
            Mask values selected in ``[0, 1]``:
            ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
"""

@add_start_docstrings("The bare GPT2 Model transformer outputting raw hidden-states without any specific head on top.",
                      GPT2_START_DOCSTRING, GPT2_INPUTS_DOCSTRING)
class GPT2Model(GPT2PreTrainedModel):
    r"""
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
            Sequence of hidden-states at the last layer of the model.
        **past**:
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(2, batch_size, num_heads, sequence_length, embed_size_per_head)``
            containing pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        import torch
        from transformers import GPT2Tokenizer, GPT2Model

        tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
        model = GPT2Model.from_pretrained('gpt2')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        last_hidden_states = outputs[0]  # The last hidden-state is the first element of the output tuple

    """
    def __init__(self, config):
        super(GPT2Model, self).__init__(config)
        self.output_hidden_states = config.output_hidden_states
        self.output_attentions = config.output_attentions
        self.output_past = config.output_past

        self.wte = nn.Embedding(config.vocab_size, config.n_embd)
        self.wpe = nn.Embedding(config.n_positions, config.n_embd)
        self.drop = nn.Dropout(config.embd_pdrop)
        self.h = nn.ModuleList([Block(config.n_ctx, config, scale=True) for _ in range(config.n_layer)])
        self.ln_f = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)

        self.init_weights()

    def _resize_token_embeddings(self, new_num_tokens):
        self.wte = self._get_resized_embeddings(self.wte, new_num_tokens)
        return self.wte

    def _prune_heads(self, heads_to_prune):
        """ Prunes heads of the model.
            heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        """
        for layer, heads in heads_to_prune.items():
            self.h[layer].attn.prune_heads(heads)

    def forward(self, input_ids, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None):
        input_shape = input_ids.size()
        input_ids = input_ids.view(-1, input_shape[-1])
        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, input_shape[-1])
        if position_ids is not None:
            position_ids = position_ids.view(-1, input_shape[-1])

        if past is None:
            past_length = 0
            past = [None] * len(self.h)
        else:
            past_length = past[0][0].size(-2)
        if position_ids is None:
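            # Default positions continue from past_length so that absolute position
            # embeddings stay consistent when decoding with cached `past` states.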
            position_ids = torch.arange(past_length, input_ids.size(-1) + past_length, dtype=torch.long, device=input_ids.device)
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)

        # Attention mask.
        if attention_mask is not None:
            attention_mask = attention_mask.view(-1, input_shape[-1])
            # We create a 3D attention mask from a 2D tensor mask.
            # Sizes are [batch_size, 1, 1, to_seq_length]
            # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
            # this attention mask is more simple than the triangular masking of causal attention
            # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
            attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)

            # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
            # masked positions, this operation will create a tensor which is 0.0 for
            # positions we want to attend and -10000.0 for masked positions.
            # Since we are adding it to the raw scores before the softmax, this is
            # effectively the same as removing these entirely.
            attention_mask = attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
            attention_mask = (1.0 - attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # head_mask has shape n_layer x batch x n_heads x N x N
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                head_mask = head_mask.expand(self.config.n_layer, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)  # We can specify head_mask for each layer
            head_mask = head_mask.to(dtype=next(self.parameters()).dtype)  # switch to float if needed + fp16 compatibility
        else:
            head_mask = [None] * self.config.n_layer

        inputs_embeds = self.wte(input_ids)
        position_embeds = self.wpe(position_ids)
        if token_type_ids is not None:
            token_type_embeds = self.wte(token_type_ids)
        else:
            token_type_embeds = 0
        hidden_states = inputs_embeds + position_embeds + token_type_embeds
        hidden_states = self.drop(hidden_states)

        output_shape = input_shape + (hidden_states.size(-1),)

        presents = ()
        all_attentions = []
        all_hidden_states = ()
        for i, (block, layer_past) in enumerate(zip(self.h, past)):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states.view(*output_shape),)

            outputs = block(hidden_states,
                            layer_past=layer_past,
                            attention_mask=attention_mask,
                            head_mask=head_mask[i])

            hidden_states, present = outputs[:2]
            if self.output_past:
                presents = presents + (present,)

            if self.output_attentions:
                all_attentions.append(outputs[2])

        hidden_states = self.ln_f(hidden_states)

        hidden_states = hidden_states.view(*output_shape)
        # Add last hidden state
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_past:
            outputs = outputs + (presents,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            # let the number of heads free (-1) so we can extract attention even after head pruning
            attention_output_shape = input_shape[:-1] + (-1,) + all_attentions[0].shape[-2:]
            all_attentions = tuple(t.view(*attention_output_shape) for t in all_attentions)
            outputs = outputs + (all_attentions,)
        return outputs  # last hidden state, (presents), (all hidden_states), (attentions)


@add_start_docstrings("""The GPT2 Model transformer with a language modeling head on top
(linear layer with weights tied to the input embeddings). """, GPT2_START_DOCSTRING, GPT2_INPUTS_DOCSTRING)
class GPT2LMHeadModel(GPT2PreTrainedModel):
    r"""
        **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Labels for language modeling.
            Note that the labels **are shifted** inside the model, i.e. you can set ``labels = input_ids``
            Indices are selected in ``[-1, 0, ..., config.vocab_size - 1]``
            All labels set to ``-1`` are ignored (masked), the loss is only
            computed for labels in ``[0, ..., config.vocab_size - 1]``

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Language modeling loss.
        **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        **past**:
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(2, batch_size, num_heads, sequence_length, embed_size_per_head)``
            containing pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        import torch
        from transformers import GPT2Tokenizer, GPT2LMHeadModel

        tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
        model = GPT2LMHeadModel.from_pretrained('gpt2')

        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=input_ids)
        loss, logits = outputs[:2]
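
        # A minimal sketch of sequential decoding with `past` (illustrative only;
        # greedy decoding, and the variable names below are ours, not part of the API):
        past = None
        generated = input_ids
        for _ in range(5):
            # After the first step, only the newest token needs to be fed in.
            inputs = generated if past is None else generated[:, -1:]
            lm_logits, past = model(inputs, past=past)[:2]
            next_token = lm_logits[:, -1, :].argmax(dim=-1, keepdim=True)
            generated = torch.cat([generated, next_token], dim=-1)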

    """
    def __init__(self, config):
        super(GPT2LMHeadModel, self).__init__(config)
        self.transformer = GPT2Model(config)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)

        self.init_weights()
        self.tie_weights()

    def tie_weights(self):
        """ Make sure we are sharing the input and output embeddings.
            Export to TorchScript can't handle parameter sharing so we are cloning them instead.
        """
        self._tie_or_clone_weights(self.lm_head,
                                   self.transformer.wte)

    def forward(self, input_ids, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,
                labels=None):
        transformer_outputs = self.transformer(input_ids,
                                               past=past,
                                               attention_mask=attention_mask,
                                               token_type_ids=token_type_ids,
                                               position_ids=position_ids,
                                               head_mask=head_mask)
        hidden_states = transformer_outputs[0]

        lm_logits = self.lm_head(hidden_states)

        outputs = (lm_logits,) + transformer_outputs[1:]
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
                            shift_labels.view(-1))
            outputs = (loss,) + outputs

        return outputs  # (loss), lm_logits, presents, (all hidden_states), (attentions)


@add_start_docstrings("""The GPT2 Model transformer with a language modeling and a multiple-choice classification
head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers.
The language modeling head has its weights tied to the input embeddings;
the classification head takes as input the hidden state at a specified classification token index in the input sequence.
""", GPT2_START_DOCSTRING, GPT2_INPUTS_DOCSTRING)
class GPT2DoubleHeadsModel(GPT2PreTrainedModel):
    r"""
        **mc_token_ids**: (`optional`, defaults to the index of the last token of the input) ``torch.LongTensor`` of shape ``(batch_size, num_choices)``:
            Index of the classification token in each input sequence.
            Selected in the range ``[0, input_ids.size(-1) - 1]``.
        **lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Labels for language modeling.
            Note that the labels **are shifted** inside the model, i.e. you can set ``lm_labels = input_ids``
            Indices are selected in ``[-1, 0, ..., config.vocab_size - 1]``
            All labels set to ``-1`` are ignored (masked), the loss is only
            computed for labels in ``[0, ..., config.vocab_size - 1]``
        **mc_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size)``:
            Labels for computing the multiple choice classification loss.
            Indices should be in ``[0, ..., num_choices - 1]`` where `num_choices` is the size of the second dimension
            of the input tensors. (see `input_ids` above)

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **lm_loss**: (`optional`, returned when ``lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Language modeling loss.
        **mc_loss**: (`optional`, returned when ``mc_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Multiple choice classification loss.
        **lm_prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices, sequence_length, config.vocab_size)``
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        **mc_prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices)``
            Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).
        **past**:
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(2, batch_size, num_heads, sequence_length, embed_size_per_head)``
            containing pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        import torch
        from transformers import GPT2Tokenizer, GPT2DoubleHeadsModel
        
        tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
        model = GPT2DoubleHeadsModel.from_pretrained('gpt2')
        
        # Add a [CLS] to the vocabulary (we should train it also!)
        tokenizer.add_special_tokens({'cls_token': '[CLS]'})
        model.resize_token_embeddings(len(tokenizer))  # Update the model embeddings with the new vocabulary size
        print(tokenizer.cls_token_id, len(tokenizer))  # The newly added token is the last token of the vocabulary
        
        choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
        encoded_choices = [tokenizer.encode(s) for s in choices]
        cls_token_location = [tokens.index(tokenizer.cls_token_id) for tokens in encoded_choices]

        input_ids = torch.tensor(encoded_choices).unsqueeze(0)  # Batch size: 1, number of choices: 2
        mc_token_ids = torch.tensor([cls_token_location])  # Batch size: 1

        outputs = model(input_ids, mc_token_ids=mc_token_ids)
        lm_prediction_scores, mc_prediction_scores = outputs[:2]

    """
    def __init__(self, config):
        super(GPT2DoubleHeadsModel, self).__init__(config)
        self.transformer = GPT2Model(config)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        self.multiple_choice_head = SequenceSummary(config)

        self.init_weights()
        self.tie_weights()

    def tie_weights(self):
        """ Make sure we are sharing the input and output embeddings.
            Export to TorchScript can't handle parameter sharing so we are cloning them instead.
        """
        self._tie_or_clone_weights(self.lm_head,
                                   self.transformer.wte)

    def forward(self, input_ids, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,
                mc_token_ids=None, lm_labels=None, mc_labels=None):
        transformer_outputs = self.transformer(input_ids,
                                               past=past,
                                               attention_mask=attention_mask,
                                               token_type_ids=token_type_ids,
                                               position_ids=position_ids,
                                               head_mask=head_mask)

        hidden_states = transformer_outputs[0]

        lm_logits = self.lm_head(hidden_states)
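        # SequenceSummary gathers the hidden state at each mc_token_ids position and
        # projects it to one score per choice; squeeze(-1) drops the size-1 label dim.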
        mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1)

        outputs = (lm_logits, mc_logits) + transformer_outputs[1:]
        if mc_labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)),
                            mc_labels.view(-1))
            outputs = (loss,) + outputs
        if lm_labels is not None:
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = lm_labels[..., 1:].contiguous()
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
                            shift_labels.view(-1))
            outputs = (loss,) + outputs

        return outputs  # (lm loss), (mc loss), lm logits, mc logits, presents, (all hidden_states), (attentions)