# coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI GPT-2 model."""

from __future__ import absolute_import, division, print_function, unicode_literals

import collections
import json
import logging
import math
import os
import sys
from io import open

import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from torch.nn.parameter import Parameter

from .file_utils import cached_path
from .modeling_utils import (Conv1D, CONFIG_NAME, WEIGHTS_NAME, PretrainedConfig,
                             PreTrainedModel, prune_conv1d_layer, SequenceSummary)
from .modeling_bert import BertLayerNorm as LayerNorm

logger = logging.getLogger(__name__)

GPT2_PRETRAINED_MODEL_ARCHIVE_MAP = {"gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-pytorch_model.bin",
                                     "gpt2-medium": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-pytorch_model.bin"}
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP = {"gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-config.json",
                                      "gpt2-medium": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-config.json"}

def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path):
    """ Load tf checkpoints in a pytorch model
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions.")
        raise
    tf_path = os.path.abspath(gpt2_checkpoint_path)
    print("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        print("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array.squeeze())

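    # Map each TF variable name (e.g. "model/h0/attn/c_attn/w") onto the matching PyTorch submodule attribute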
    for name, array in zip(names, arrays):
        name = name[6:]  # skip "model/"
        name = name.split('/')
        pointer = model
        for m_name in name:
            if re.fullmatch(r'[A-Za-z]+\d+', m_name):
                scope_names = re.split(r'(\d+)', m_name)
            else:
                scope_names = [m_name]
            if scope_names[0] == 'w' or scope_names[0] == 'g':
                pointer = getattr(pointer, 'weight')
            elif scope_names[0] == 'b':
                pointer = getattr(pointer, 'bias')
            elif scope_names[0] == 'wpe' or scope_names[0] == 'wte':
                pointer = getattr(pointer, scope_names[0])
                pointer = getattr(pointer, 'weight')
            else:
                pointer = getattr(pointer, scope_names[0])
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        try:
            assert pointer.shape == array.shape
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        print("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model


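# tanh approximation of the GELU activation (Hendrycks & Gimpel, 2016), as used in the original GPT-2 TF code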
def gelu(x):
    return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))


class GPT2Config(PretrainedConfig):
    """Configuration class to store the configuration of a `GPT2Model`.
    """
    pretrained_config_archive_map = GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP

    def __init__(
        self,
        vocab_size_or_config_json_file=50257,
        n_special=0,
        n_positions=1024,
        n_ctx=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        predict_special_tokens=True,
        summary_type='token_ids',
        summary_use_proj=True,
        summary_num_classes=1,
        summary_activation=None,
        summary_dropout=0.1,
        **kwargs
    ):
        """Constructs GPT2Config.

        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `GPT2Model` or a configuration json file.
            n_special: The number of special tokens to learn during fine-tuning ('[SEP]', '[CLS]', ...)
            n_positions: Number of positional embeddings.
            n_ctx: Size of the causal mask (usually same as n_positions).
            n_embd: Dimensionality of the embeddings and hidden states.
            n_layer: Number of hidden layers in the Transformer encoder.
            n_head: Number of attention heads for each attention layer in
                the Transformer encoder.
            layer_norm_epsilon: epsilon to use in the layer norm layers
            resid_pdrop: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attn_pdrop: The dropout ratio for the attention
                probabilities.
            embd_pdrop: The dropout ratio for the embeddings.
            initializer_range: The stddev of the truncated_normal_initializer for
                initializing all weight matrices.
            predict_special_tokens: should we predict special tokens (when the model has an LM head)
        """
        super(GPT2Config, self).__init__(**kwargs)

        if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
                        and isinstance(vocab_size_or_config_json_file, unicode)):
            with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.n_special = n_special
            self.n_ctx = n_ctx
            self.n_positions = n_positions
            self.n_embd = n_embd
            self.n_layer = n_layer
            self.n_head = n_head
            self.resid_pdrop = resid_pdrop
            self.embd_pdrop = embd_pdrop
            self.attn_pdrop = attn_pdrop
            self.layer_norm_epsilon = layer_norm_epsilon
            self.initializer_range = initializer_range
            self.predict_special_tokens = predict_special_tokens
            self.summary_type = summary_type
            self.summary_use_proj = summary_use_proj
            self.summary_num_classes = summary_num_classes
            self.summary_activation = summary_activation
            self.summary_dropout = summary_dropout
        else:
            raise ValueError(
                "First argument must be either a vocabulary size (int) "
                "or the path to a pretrained model config file (str)"
            )

    @property
    def total_tokens_embeddings(self):
        return self.vocab_size + self.n_special

    @property
    def hidden_size(self):
        return self.n_embd

    @property
    def num_attention_heads(self):
        return self.n_head

    @property
    def num_hidden_layers(self):
        return self.n_layer



class Attention(nn.Module):
    def __init__(self, nx, n_ctx, config, scale=False):
        super(Attention, self).__init__()
        self.output_attentions = config.output_attentions

        n_state = nx  # in Attention: n_state=768 (nx=n_embd)
        # [switch nx => n_state from Block to Attention to keep identical to TF implem]
        assert n_state % config.n_head == 0
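        # pre-compute the causal mask: a lower triangle of ones so position i can only attend to positions <= i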
        self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
        self.n_head = config.n_head
        self.split_size = n_state
        self.scale = scale

        self.c_attn = Conv1D(n_state * 3, nx)
        self.c_proj = Conv1D(n_state, nx)
        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
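        # build a boolean mask over the flattened head dimensions, keeping only the features of heads we don't prune;
        # c_attn packs query, key and value, hence the three offset copies of the index below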
        mask = torch.ones(self.n_head, self.split_size // self.n_head)
        for head in heads:
            mask[head] = 0
        mask = mask.view(-1).contiguous().eq(1)
        index = torch.arange(len(mask))[mask].long()
        index_attn = torch.cat([index, index + self.split_size, index + (2*self.split_size)])
        # Prune conv1d layers
        self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
        self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
        # Update hyper params
        self.split_size = (self.split_size // self.n_head) * (self.n_head - len(heads))
        self.n_head = self.n_head - len(heads)

    def _attn(self, q, k, v, head_mask=None):
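        # raw attention scores; k was already transposed in split_heads, so this is effectively q @ k^T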
        w = torch.matmul(q, k)
        if self.scale:
            w = w / math.sqrt(v.size(-1))
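        # slice the causal mask to the current query/key lengths and push masked positions to a large negative value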
        nd, ns = w.size(-2), w.size(-1)
        b = self.bias[:, :, ns-nd:ns, :ns]
        w = w * b - 1e4 * (1 - b)

        w = nn.Softmax(dim=-1)(w)
        w = self.attn_dropout(w)

        # Mask heads if we want to
        if head_mask is not None:
            w = w * head_mask

        outputs = [torch.matmul(w, v)]
        if self.output_attentions:
            outputs.append(w)
        return outputs

    def merge_heads(self, x):
        x = x.permute(0, 2, 1, 3).contiguous()
        new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
        return x.view(*new_x_shape)  # in Tensorflow implem: fct merge_states

    def split_heads(self, x, k=False):
        new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
        x = x.view(*new_x_shape)  # in Tensorflow implem: fct split_states
        if k:
            return x.permute(0, 2, 3, 1)  # (batch, head, head_features, seq_length)
        else:
            return x.permute(0, 2, 1, 3)  # (batch, head, seq_length, head_features)

    def forward(self, x, layer_past=None, head_mask=None):
        x = self.c_attn(x)
        query, key, value = x.split(self.split_size, dim=2)
        query = self.split_heads(query)
        key = self.split_heads(key, k=True)
        value = self.split_heads(value)
        if layer_past is not None:
            past_key, past_value = layer_past[0].transpose(-2, -1), layer_past[1]  # transpose back cf below
            key = torch.cat((past_key, key), dim=-1)
            value = torch.cat((past_value, value), dim=-2)
        present = torch.stack((key.transpose(-2, -1), value))  # transpose to have same shapes for stacking

        attn_outputs = self._attn(query, key, value, head_mask)
        a = attn_outputs[0]

        a = self.merge_heads(a)
        a = self.c_proj(a)
        a = self.resid_dropout(a)

        outputs = [a, present] + attn_outputs[1:]
        return outputs  # a, present, (attentions)


class MLP(nn.Module):
    def __init__(self, n_state, config):  # in MLP: n_state=3072 (4 * n_embd)
        super(MLP, self).__init__()
        nx = config.n_embd
        self.c_fc = Conv1D(n_state, nx)
        self.c_proj = Conv1D(nx, n_state)
        self.act = gelu
        self.dropout = nn.Dropout(config.resid_pdrop)

    def forward(self, x):
        h = self.act(self.c_fc(x))
        h2 = self.c_proj(h)
        return self.dropout(h2)


class Block(nn.Module):
    def __init__(self, n_ctx, config, scale=False):
        super(Block, self).__init__()
        nx = config.n_embd
        self.ln_1 = LayerNorm(nx, eps=config.layer_norm_epsilon)
        self.attn = Attention(nx, n_ctx, config, scale)
        self.ln_2 = LayerNorm(nx, eps=config.layer_norm_epsilon)
        self.mlp = MLP(4 * nx, config)

    def forward(self, x, layer_past=None, head_mask=None):
        output_attn = self.attn(self.ln_1(x), layer_past=layer_past, head_mask=head_mask)
        a = output_attn[0]  # output_attn: a, present, (attentions)

        x = x + a
        m = self.mlp(self.ln_2(x))
        x = x + m

        outputs = [x] + output_attn[1:]
        return outputs  # x, present, (attentions)


class GPT2LMHead(nn.Module):
    """ Language Model Head for the transformer """

    def __init__(self, model_embeddings_weights, config):
        super(GPT2LMHead, self).__init__()
        self.n_embd = config.n_embd
        self.vocab_size = config.vocab_size
        self.predict_special_tokens = config.predict_special_tokens
        self.torchscript = config.torchscript
        embed_shape = model_embeddings_weights.shape
        self.decoder = nn.Linear(embed_shape[1], embed_shape[0], bias=False)
        self.set_embeddings_weights(model_embeddings_weights)

    def set_embeddings_weights(self, model_embeddings_weights, predict_special_tokens=True):
        self.predict_special_tokens = predict_special_tokens
        # TorchScript export can't handle parameter sharing, so we clone the weights instead.
        if self.torchscript:
            self.decoder.weight = nn.Parameter(model_embeddings_weights.clone())
        else:
            self.decoder.weight = model_embeddings_weights  # Tied weights

    def forward(self, hidden_state):
        lm_logits = self.decoder(hidden_state)
        if not self.predict_special_tokens:
            lm_logits = lm_logits[..., :self.vocab_size]
        return lm_logits


class GPT2PreTrainedModel(PreTrainedModel):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """
    config_class = GPT2Config
    pretrained_model_archive_map = GPT2_PRETRAINED_MODEL_ARCHIVE_MAP
    load_tf_weights = load_tf_weights_in_gpt2
    base_model_prefix = "transformer"

    def __init__(self, *inputs, **kwargs):
        super(GPT2PreTrainedModel, self).__init__(*inputs, **kwargs)

    def init_weights(self, module):
        """ Initialize the weights.
        """
        if isinstance(module, (nn.Linear, nn.Embedding, Conv1D)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if isinstance(module, (nn.Linear, Conv1D)) and module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
        """
        Instantiate a GPT2PreTrainedModel from a pre-trained model file or a pytorch state dict.
        Download and cache the pre-trained model file if needed.

        Params:
            pretrained_model_name_or_path: either:
                - a str with the name of a pre-trained model to load selected in the list of:
                    . `gpt2`
                    . `gpt2-medium`
                - a path or url to a pretrained model archive containing:
                    . `gpt2_config.json` a configuration file for the model
                    . `pytorch_model.bin` a PyTorch dump of a GPT2Model instance
                - a path or url to a pretrained model archive containing:
                    . `gpt2_config.json` a configuration file for the model
                    . a TensorFlow checkpoint with trained weights
            from_tf: should we load the weights from a locally saved TensorFlow checkpoint
            cache_dir: an optional path to a folder in which the pre-trained models will be cached.
            state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of pre-trained models
            *inputs, **kwargs: additional input for the specific GPT2 class
        """
        num_special_tokens = kwargs.pop('num_special_tokens', None)

        model = super(GPT2PreTrainedModel, cls).from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)

        # Add additional embeddings for special tokens if needed
        # This step also makes sure we are still sharing the output and input embeddings after loading weights
        model.set_num_special_tokens(num_special_tokens)
        return model


class GPT2Model(GPT2PreTrainedModel):
    """OpenAI GPT-2 model ("Language Models are Unsupervised Multitask Learners").

    GPT-2 uses a single embedding matrix to store the word and special embeddings.
    Special token embeddings are additional tokens that are not pre-trained: [SEP], [CLS]...
    Special tokens need to be trained during fine-tuning if you use them.
    The number of special embeddings can be controlled using the `set_num_special_tokens(num_special_tokens)` function.

    The embeddings are ordered as follows in the token embeddings matrix:
        [0,                                                         ----------------------
         ...                                                        -> word embeddings
         config.vocab_size - 1,                                     ______________________
         config.vocab_size,
         ...                                                        -> special embeddings
         config.vocab_size + config.n_special - 1]                  ______________________

    where total_tokens_embeddings can be obtained as config.total_tokens_embeddings and is:
        total_tokens_embeddings = config.vocab_size + config.n_special
    You should use the associated indices to index the embeddings.

    Params:
        `config`: a GPT2Config class instance with the configuration to build a new model
        `output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] (or more generally [d_1, ..., d_n, sequence_length]
            where d_1 ... d_n are arbitrary dimensions) with the word BPE token indices selected in the range [0, config.vocab_size[
        `position_ids`: an optional torch.LongTensor with the same shape as input_ids
            with the position indices (selected in the range [0, config.n_positions - 1[).
        `token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
            You can use it to add a third type of embedding to each input token in the sequence
            (the previous two being the word and position embeddings).
            The input, position and token_type embeddings are summed inside the Transformer before the first
            self-attention block.
        `past`: an optional list of torch.LongTensor that contains pre-computed hidden-states
            (key and values in the attention blocks) to speed up sequential decoding
            (this is the presents output of the model, cf. below).
        `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is masked.

    Outputs a tuple consisting of:
        `hidden_states`: a list of all the encoded-hidden-states in the model (length of the list: number of layers + 1 for the output of the embeddings)
            as torch.FloatTensor of size [batch_size, sequence_length, hidden_size]
            (or more generally [d_1, ..., d_n, hidden_size] where d_1 ... d_n are the dimensions of input_ids)
        `presents`: a list of pre-computed hidden-states (key and values in each attention block) as
            torch.FloatTensors. They can be reused to speed up sequential decoding.

    Example usage:
    ```python
    # Already been converted into BPE token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])

    config = modeling_gpt2.GPT2Config()

    model = modeling_gpt2.GPT2Model(config)
    hidden_states, presents = model(input_ids)
    ```
    """

    def __init__(self, config):
        super(GPT2Model, self).__init__(config)
        self.output_hidden_states = config.output_hidden_states
        self.output_attentions = config.output_attentions

        self.wte = nn.Embedding(config.total_tokens_embeddings, config.n_embd)
        self.wpe = nn.Embedding(config.n_positions, config.n_embd)
        self.drop = nn.Dropout(config.embd_pdrop)
        self.h = nn.ModuleList([Block(config.n_ctx, config, scale=True) for _ in range(config.n_layer)])
        self.ln_f = LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)

        self.apply(self.init_weights)

    def set_num_special_tokens(self, num_special_tokens=None):
        " Update input embeddings with new embedding matrix if needed "
        if num_special_tokens is None or self.config.n_special == num_special_tokens:
            return
        # Update config
        self.config.n_special = num_special_tokens
        # Build new embeddings and initialize all new embeddings (in particular the special tokens)
        old_embed = self.wte
        self.wte = nn.Embedding(self.config.total_tokens_embeddings, self.config.n_embd)
        self.wte.to(old_embed.weight.device)
        self.init_weights(self.wte)
        # Copy word embeddings from the previous weights
        self.wte.weight.data[:self.config.vocab_size, :] = old_embed.weight.data[:self.config.vocab_size, :]

    def _prune_heads(self, heads_to_prune):
        """ Prunes heads of the model.
            heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        """
        for layer, heads in heads_to_prune.items():
            self.h[layer].attn.prune_heads(heads)

    def forward(self, input_ids, position_ids=None, token_type_ids=None, past=None, head_mask=None):
        if past is None:
            past_length = 0
            past = [None] * len(self.h)
        else:
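            # each layer_past stacks (key, value); dim -2 of the stored key is the length of the cached sequence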
            past_length = past[0][0].size(-2)
        if position_ids is None:
            position_ids = torch.arange(past_length, input_ids.size(-1) + past_length, dtype=torch.long, device=input_ids.device)
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)

        # Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # head_mask has shape n_layer x batch x n_heads x N x N
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                head_mask = head_mask.expand(self.config.n_layer, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)  # We can specify head_mask for each layer
            head_mask = head_mask.to(dtype=next(self.parameters()).dtype)  # switch to float if needed + fp16 compatibility
        else:
            head_mask = [None] * self.config.n_layer

        input_shape = input_ids.size()
        input_ids = input_ids.view(-1, input_ids.size(-1))
        position_ids = position_ids.view(-1, position_ids.size(-1))

        inputs_embeds = self.wte(input_ids)
        position_embeds = self.wpe(position_ids)
        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
            token_type_embeds = self.wte(token_type_ids)
        else:
            token_type_embeds = 0
        hidden_states = inputs_embeds + position_embeds + token_type_embeds
        hidden_states = self.drop(hidden_states)

        output_shape = input_shape + (hidden_states.size(-1),)

        presents = ()
        all_attentions = []
        all_hidden_states = ()
        for i, (block, layer_past) in enumerate(zip(self.h, past)):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states.view(*output_shape),)

            outputs = block(hidden_states, layer_past, head_mask[i])
            hidden_states, present = outputs[:2]
            presents = presents + (present,)

            if self.output_attentions:
                all_attentions.append(outputs[2])

        hidden_states = self.ln_f(hidden_states)

        hidden_states = hidden_states.view(*output_shape)
        # Add last hidden state
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states, presents)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            # let the number of heads free (-1) so we can extract attention even after head pruning
            attention_output_shape = input_shape[:-1] + (-1,) + all_attentions[0].shape[-2:]
            all_attentions = tuple(t.view(*attention_output_shape) for t in all_attentions)
            outputs = outputs + (all_attentions,)
        return outputs  # last hidden state, presents, (all hidden_states), (attentions)


class GPT2LMHeadModel(GPT2PreTrainedModel):
    """OpenAI GPT-2 model with a Language Modeling head ("Language Models are Unsupervised Multitask Learners").

    Params:
        `config`: a GPT2Config class instance with the configuration to build a new model
        `output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
        `keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
            This can be used to compute head importance metrics. Default: False

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] (or more generally [d_1, ..., d_n, sequence_length]
            where d_1 ... d_n are arbitrary dimensions) with the word BPE token indices selected in the range [0, config.vocab_size[
        `position_ids`: an optional torch.LongTensor with the same shape as input_ids
            with the position indices (selected in the range [0, config.n_positions - 1[).
        `token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
            You can use it to add a third type of embedding to each input token in the sequence
            (the previous two being the word and position embeddings).
            The input, position and token_type embeddings are summed inside the Transformer before the first
            self-attention block.
        `lm_labels`: optional language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
            with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
            is only computed for the labels set in [0, ..., vocab_size]
        `past`: an optional list of torch.LongTensor that contains pre-computed hidden-states
            (key and values in the attention blocks) to speed up sequential decoding
            (this is the presents output of the model, cf. below).
        `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is masked.
    Outputs:
        if `lm_labels` is not `None`:
            Outputs the language modeling loss.
        else a tuple:
            `lm_logits`: the language modeling logits as a torch.FloatTensor of size [batch_size, sequence_length, config.vocab_size]
                (or more generally [d_1, ..., d_n, config.vocab_size] where d_1 ... d_n are the dimensions of input_ids)
            `presents`: a list of pre-computed hidden-states (key and values in each attention block) as
                torch.FloatTensors. They can be reused to speed up sequential decoding.
    Example usage:
    ```python
    # Already been converted into BPE token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])

    config = modeling_gpt2.GPT2Config()

    model = modeling_gpt2.GPT2LMHeadModel(config)
    lm_logits, presents = model(input_ids)
    ```
    """

    def __init__(self, config):
        super(GPT2LMHeadModel, self).__init__(config)
        self.transformer = GPT2Model(config)
        self.lm_head = GPT2LMHead(self.transformer.wte.weight, config)
        self.apply(self.init_weights)

    def set_num_special_tokens(self, num_special_tokens, predict_special_tokens=True):
        """ Update input and output embeddings with new embedding matrix
            Make sure we are sharing the embeddings
        """
        self.config.predict_special_tokens = self.transformer.config.predict_special_tokens = predict_special_tokens
        self.transformer.set_num_special_tokens(num_special_tokens)
        self.lm_head.set_embeddings_weights(self.transformer.wte.weight, predict_special_tokens=predict_special_tokens)

    def forward(self, input_ids, position_ids=None, token_type_ids=None, lm_labels=None, past=None, head_mask=None):
        transformer_outputs = self.transformer(input_ids, position_ids, token_type_ids, past, head_mask)
        hidden_states = transformer_outputs[0]

        lm_logits = self.lm_head(hidden_states)

        outputs = (lm_logits,) + transformer_outputs[1:]
        if lm_labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = lm_labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
                            shift_labels.view(-1))
            outputs = (loss,) + outputs

        return outputs  # (loss), lm_logits, presents, (all hidden_states), (attentions)


class GPT2DoubleHeadsModel(GPT2PreTrainedModel):
    """OpenAI GPT-2 model with a Language Modeling and a Multiple Choice head ("Language Models are Unsupervised Multitask Learners").

    Params:
        `config`: a GPT2Config class instance with the configuration to build a new model
        `output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
        `keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
            This can be used to compute head importance metrics. Default: False

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length] with the BPE token
            indices selected in the range [0, config.vocab_size[
        `mc_token_ids`: a torch.LongTensor of shape [batch_size, num_choices] with the index of the token from
            which we should take the hidden state to feed the multiple choice classifier (usually last token of the sequence)
        `position_ids`: an optional torch.LongTensor with the same shape as input_ids
            with the position indices (selected in the range [0, config.n_positions - 1[).
        `token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
            You can use it to add a third type of embedding to each input token in the sequence
            (the previous two being the word and position embeddings).
            The input, position and token_type embeddings are summed inside the Transformer before the first
            self-attention block.
        `lm_labels`: optional language modeling labels: torch.LongTensor of shape [batch_size, num_choices, sequence_length]
            with indices selected in [-1, 0, ..., config.vocab_size]. All labels set to -1 are ignored (masked), the loss
            is only computed for the labels set in [0, ..., config.vocab_size]
        `multiple_choice_labels`: optional multiple choice labels: torch.LongTensor of shape [batch_size]
            with indices selected in [0, ..., num_choices - 1].
        `past`: an optional list of torch.LongTensor that contains pre-computed hidden-states
            (key and values in the attention blocks) to speed up sequential decoding
            (this is the presents output of the model, cf. below).
        `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is masked.

    Outputs:
        if `lm_labels` and `multiple_choice_labels` are not `None`:
            Outputs a tuple of losses with the language modeling loss and the multiple choice loss.
        else: a tuple with
            `lm_logits`: the language modeling logits as a torch.FloatTensor of size [batch_size, num_choices, sequence_length, config.vocab_size]
            `multiple_choice_logits`: the multiple choice logits as a torch.FloatTensor of size [batch_size, num_choices]
            `presents`: a list of pre-computed hidden-states (key and values in each attention block) as
                torch.FloatTensors. They can be reused to speed up sequential decoding.

    Example usage:
    ```python
    # Already been converted into BPE token ids
    input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]]])  # (bsz, number of choices, seq length)
    mc_token_ids = torch.LongTensor([[2, 1]])  # (bsz, number of choices)

    config = modeling_gpt2.GPT2Config()

    model = modeling_gpt2.GPT2DoubleHeadsModel(config)
    lm_logits, multiple_choice_logits, presents = model(input_ids, mc_token_ids)
    ```
    """

    def __init__(self, config):
        super(GPT2DoubleHeadsModel, self).__init__(config)
        self.transformer = GPT2Model(config)
        self.lm_head = GPT2LMHead(self.transformer.wte.weight, config)
        self.multiple_choice_head = SequenceSummary(config)

        self.apply(self.init_weights)

    def set_num_special_tokens(self, num_special_tokens, predict_special_tokens=True):
        """ Update input and output embeddings with new embedding matrix
            Make sure we are sharing the embeddings
        """
        self.config.predict_special_tokens = self.transformer.config.predict_special_tokens = predict_special_tokens
        self.transformer.set_num_special_tokens(num_special_tokens)
        self.lm_head.set_embeddings_weights(self.transformer.wte.weight, predict_special_tokens=predict_special_tokens)

    def forward(self, input_ids, mc_token_ids=None, lm_labels=None, mc_labels=None, token_type_ids=None,
                position_ids=None, past=None, head_mask=None):
        transformer_outputs = self.transformer(input_ids, position_ids, token_type_ids, past, head_mask)
        hidden_states = transformer_outputs[0]

        lm_logits = self.lm_head(hidden_states)
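        # the multiple choice head summarizes each choice at the position given by mc_token_ids and projects it to one logit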
        mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1)

        outputs = (lm_logits, mc_logits) + transformer_outputs[1:]
        if mc_labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)),
                            mc_labels.view(-1))
            outputs = (loss,) + outputs
        if lm_labels is not None:
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = lm_labels[..., 1:].contiguous()
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
                            shift_labels.view(-1))
            outputs = (loss,) + outputs

        return outputs  # (lm loss), (mc loss), lm logits, mc logits, presents, (all hidden_states), (attentions)