# coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI GPT model."""

from __future__ import absolute_import, division, print_function, unicode_literals

import collections
import json
import logging
import math
import os
import sys
from io import open

import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from torch.nn.parameter import Parameter

from .modeling_utils import (Conv1D, CONFIG_NAME, WEIGHTS_NAME, PretrainedConfig,
                             PreTrainedModel, prune_conv1d_layer, SequenceSummary,
                             add_start_docstrings)
from .modeling_bert import BertLayerNorm as LayerNorm

logger = logging.getLogger(__name__)

OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP = {"openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-pytorch_model.bin"}
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-config.json"}


def load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path):
    """ Load tf pre-trained weights in a pytorch model (from NumPy arrays here)
    """
    import re
    import numpy as np

    if '.ckpt' in openai_checkpoint_folder_path:
        openai_checkpoint_folder_path = os.path.dirname(openai_checkpoint_folder_path)

    logger.info("Loading weights from {}".format(openai_checkpoint_folder_path))

    names = json.load(open(openai_checkpoint_folder_path + '/parameters_names.json', "r", encoding='utf-8'))
    shapes = json.load(open(openai_checkpoint_folder_path + '/params_shapes.json', "r", encoding='utf-8'))
    offsets = np.cumsum([np.prod(shape) for shape in shapes])
    init_params = [np.load(openai_checkpoint_folder_path + '/params_{}.npy'.format(n)) for n in range(10)]
    init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1]
    init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)]

    # This was used when we had a single embedding matrix for positions and tokens
    # init_params[0] = np.concatenate([init_params[1], init_params[0]], 0)
    # del init_params[1]
    init_params = [arr.squeeze() for arr in init_params]

    try:
        assert model.tokens_embed.weight.shape == init_params[1].shape
        assert model.positions_embed.weight.shape == init_params[0].shape
    except AssertionError as e:
        e.args += (model.tokens_embed.weight.shape, init_params[1].shape)
        e.args += (model.positions_embed.weight.shape, init_params[0].shape)
        raise

    model.tokens_embed.weight.data = torch.from_numpy(init_params[1])
    model.positions_embed.weight.data = torch.from_numpy(init_params[0])
    names.pop(0)
    # Pop position and token embedding arrays
    init_params.pop(0)
    init_params.pop(0)

    for name, array in zip(names, init_params): # names[1:n_transfer], init_params[1:n_transfer]):
        name = name[6:]  # skip "model/"
        assert name[-2:] == ":0"
        name = name[:-2]
        name = name.split('/')
        pointer = model
        for m_name in name:
            if re.fullmatch(r'[A-Za-z]+\d+', m_name):
                l = re.split(r'(\d+)', m_name)
            else:
                l = [m_name]
            if l[0] == 'g':
                pointer = getattr(pointer, 'weight')
            elif l[0] == 'b':
                pointer = getattr(pointer, 'bias')
            elif l[0] == 'w':
                pointer = getattr(pointer, 'weight')
            else:
                pointer = getattr(pointer, l[0])
            if len(l) >= 2:
                num = int(l[1])
                pointer = pointer[num]
        try:
            assert pointer.shape == array.shape
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model


def gelu(x):
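    """Gaussian Error Linear Unit (tanh approximation): 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))."""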
    return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))


def swish(x):
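    """Swish activation: x * sigmoid(x)."""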
    return x * torch.sigmoid(x)


ACT_FNS = {"relu": nn.ReLU, "swish": swish, "gelu": gelu}


class OpenAIGPTConfig(PretrainedConfig):
    """
    Configuration class to store the configuration of an `OpenAIGPTModel`.

    Args:
        vocab_size_or_config_json_file: Vocabulary size of `input_ids` in `OpenAIGPTModel` or a configuration json file.
        n_special: The number of special tokens to learn during fine-tuning ('[SEP]', '[CLF]', ...)
        n_positions: Number of positional embeddings.
        n_ctx: Size of the causal mask (usually same as n_positions).
        n_embd: Dimensionality of the embeddings and hidden states.
        n_layer: Number of hidden layers in the Transformer encoder.
        n_head: Number of attention heads for each attention layer in
            the Transformer encoder.
        afn: The non-linear activation function (function or string) in the
            encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
        resid_pdrop: The dropout probability for all fully connected
            layers in the embeddings, encoder, and pooler.
        attn_pdrop: The dropout ratio for the attention
            probabilities.
        embd_pdrop: The dropout ratio for the embeddings.
        layer_norm_epsilon: epsilon to use in the layer norm layers
        initializer_range: The stddev of the truncated_normal_initializer for
            initializing all weight matrices.
        predict_special_tokens: whether to predict special tokens (when the model has an LM head)
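
    Examples::

        # A minimal sketch of typical usage: build a default config, then a model from it
        config = OpenAIGPTConfig(vocab_size_or_config_json_file=40478)
        model = OpenAIGPTModel(config)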
    """
    pretrained_config_archive_map = OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP

    def __init__(
        self,
        vocab_size_or_config_json_file=40478,
        n_positions=512,
        n_ctx=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        predict_special_tokens=True,

        num_labels=1,
        summary_type='cls_index',
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs
    ):
        """Constructs OpenAIGPTConfig.
        """
        super(OpenAIGPTConfig, self).__init__(**kwargs)

        if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
                        and isinstance(vocab_size_or_config_json_file, unicode)):
            with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.n_ctx = n_ctx
            self.n_positions = n_positions
            self.n_embd = n_embd
            self.n_layer = n_layer
            self.n_head = n_head
            self.afn = afn
            self.resid_pdrop = resid_pdrop
            self.embd_pdrop = embd_pdrop
            self.attn_pdrop = attn_pdrop
            self.layer_norm_epsilon = layer_norm_epsilon
            self.initializer_range = initializer_range
            self.predict_special_tokens = predict_special_tokens

            self.num_labels = num_labels
            self.summary_type = summary_type
            self.summary_use_proj = summary_use_proj
            self.summary_activation = summary_activation
            self.summary_first_dropout = summary_first_dropout
            self.summary_proj_to_labels = summary_proj_to_labels
        else:
            raise ValueError(
                "First argument must be either a vocabulary size (int)"
                " or the path to a pretrained model config file (str)"
            )

    @property
    def max_position_embeddings(self):
        return self.n_positions

    @property
    def hidden_size(self):
        return self.n_embd

    @property
    def num_attention_heads(self):
        return self.n_head

    @property
    def num_hidden_layers(self):
        return self.n_layer


class Attention(nn.Module):
    def __init__(self, nx, n_ctx, config, scale=False):
        super(Attention, self).__init__()
        n_state = nx  # in Attention: n_state=768 (nx=n_embd)
        # [switch nx => n_state from Block to Attention to keep identical to TF implem]
        assert n_state % config.n_head == 0
        self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
        self.n_head = config.n_head
        self.split_size = n_state
        self.scale = scale

        self.output_attentions = config.output_attentions

        self.c_attn = Conv1D(n_state * 3, nx)
        self.c_proj = Conv1D(n_state, nx)
        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)
        self.pruned_heads = []

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        mask = torch.ones(self.n_head, self.split_size // self.n_head)
        for head in heads:
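            # Shift the head index to account for heads already pruned before it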
            head -= len(list(filter(lambda h: h < head, self.pruned_heads)))
            mask[head] = 0
        mask = mask.view(-1).contiguous().eq(1)
        index = torch.arange(len(mask))[mask].long()
        index_attn = torch.cat([index, index + self.split_size, index + (2*self.split_size)])
        # Prune conv1d layers
        self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
        self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
        # Update hyper params
        self.split_size = (self.split_size // self.n_head) * (self.n_head - len(heads))
        self.n_head = self.n_head - len(heads)
        self.pruned_heads.extend(heads)

    def _attn(self, q, k, v, head_mask=None):
        w = torch.matmul(q, k)
        if self.scale:
            w = w / math.sqrt(v.size(-1))
        # w = w * self.bias + -1e9 * (1 - self.bias)  # TF implem method: mask_attn_weights
        # self.bias may be larger than w, so we need to crop it
        b = self.bias[:, :, : w.size(-2), : w.size(-1)]
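        # Causal mask: positions a query is not allowed to attend to get a large negative value before the softmax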
        w = w * b + -1e9 * (1 - b)

        w = nn.Softmax(dim=-1)(w)
        w = self.attn_dropout(w)

        # Mask heads if we want to
        if head_mask is not None:
            w = w * head_mask

        outputs = [torch.matmul(w, v)]
        if self.output_attentions:
            outputs.append(w)
        return outputs

    def merge_heads(self, x):
        x = x.permute(0, 2, 1, 3).contiguous()
        new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
        return x.view(*new_x_shape)  # in Tensorflow implem: fct merge_states

    def split_heads(self, x, k=False):
        new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
        x = x.view(*new_x_shape)  # in Tensorflow implem: fct split_states
        if k:
            return x.permute(0, 2, 3, 1)
        else:
            return x.permute(0, 2, 1, 3)

    def forward(self, x, head_mask=None):
        x = self.c_attn(x)
        query, key, value = x.split(self.split_size, dim=2)
        query = self.split_heads(query)
        key = self.split_heads(key, k=True)
        value = self.split_heads(value)

        attn_outputs = self._attn(query, key, value, head_mask)
        a = attn_outputs[0]

        a = self.merge_heads(a)
        a = self.c_proj(a)
        a = self.resid_dropout(a)

        outputs = [a] + attn_outputs[1:]
        return outputs  # a, (attentions)


class MLP(nn.Module):
    def __init__(self, n_state, config):  # in MLP: n_state=3072 (4 * n_embd)
        super(MLP, self).__init__()
        nx = config.n_embd
        self.c_fc = Conv1D(n_state, nx)
        self.c_proj = Conv1D(nx, n_state)
        self.act = ACT_FNS[config.afn]
        self.dropout = nn.Dropout(config.resid_pdrop)

    def forward(self, x):
        h = self.act(self.c_fc(x))
        h2 = self.c_proj(h)
        return self.dropout(h2)


class Block(nn.Module):
    def __init__(self, n_ctx, config, scale=False):
        super(Block, self).__init__()
        nx = config.n_embd
        self.attn = Attention(nx, n_ctx, config, scale)
        self.ln_1 = LayerNorm(nx, eps=config.layer_norm_epsilon)
        self.mlp = MLP(4 * nx, config)
        self.ln_2 = LayerNorm(nx, eps=config.layer_norm_epsilon)

    def forward(self, x, head_mask=None):
        attn_outputs = self.attn(x, head_mask=head_mask)
        a = attn_outputs[0]

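        # Post-LN residual wiring: add & layer-normalize after attention, then again after the MLP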
        n = self.ln_1(x + a)
        m = self.mlp(n)
        h = self.ln_2(n + m)

        outputs = [h] + attn_outputs[1:]
        return outputs


class OpenAIGPTPreTrainedModel(PreTrainedModel):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """
    config_class = OpenAIGPTConfig
    pretrained_model_archive_map = OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP
    load_tf_weights = load_tf_weights_in_openai_gpt
    base_model_prefix = "transformer"

    def __init__(self, *inputs, **kwargs):
        super(OpenAIGPTPreTrainedModel, self).__init__(*inputs, **kwargs)

    def init_weights(self, module):
        """ Initialize the weights.
        """
        if isinstance(module, (nn.Linear, nn.Embedding, Conv1D)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if isinstance(module, (nn.Linear, Conv1D)) and module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


OPENAI_GPT_START_DOCSTRING = r"""    OpenAI GPT model was proposed in
    `Improving Language Understanding by Generative Pre-Training`_
    by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever.
    It's a causal (unidirectional) transformer pre-trained using language modeling on a large
    corpus with long range dependencies, the Toronto Book Corpus.

    This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and
    refer to the PyTorch documentation for all matters related to general usage and behavior.

    .. _`Improving Language Understanding by Generative Pre-Training`:
        https://openai.com/blog/language-unsupervised/

    .. _`torch.nn.Module`:
        https://pytorch.org/docs/stable/nn.html#module

    Parameters:
        config (:class:`~pytorch_transformers.OpenAIGPTConfig`): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the configuration.
            Check out the :meth:`~pytorch_transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""

OPENAI_GPT_INPUTS_DOCSTRING = r"""    Inputs:
        **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Indices of input sequence tokens in the vocabulary.
            GPT is a model with absolute position embeddings so it's usually advised to pad the inputs on
            the right rather than the left.
            Indices can be obtained using :class:`pytorch_transformers.OpenAIGPTTokenizer`.
            See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and
            :func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
        **position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Indices of positions of each input sequence token in the position embeddings.
            Selected in the range ``[0, config.max_position_embeddings - 1]``.
        **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            A parallel sequence of tokens (can be used to indicate various portions of the inputs).
            The embeddings from these tokens will be summed with the respective token embeddings.
            Indices are selected in the vocabulary (unlike BERT which has a specific vocabulary for segment indices)
        **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
            Mask to nullify selected heads of the self-attention modules.
            Mask values selected in ``[0, 1]``:
            ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
"""

@add_start_docstrings("The bare OpenAI GPT transformer model outputting raw hidden-states without any specific head on top.",
                      OPENAI_GPT_START_DOCSTRING, OPENAI_GPT_INPUTS_DOCSTRING)
class OpenAIGPTModel(OpenAIGPTPreTrainedModel):
    r"""
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
            Sequence of hidden-states at the last layer of the model.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
        model = OpenAIGPTModel.from_pretrained('openai-gpt')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        last_hidden_states = outputs[0]  # The last hidden-state is the first element of the output tuple

    """
    def __init__(self, config):
        super(OpenAIGPTModel, self).__init__(config)
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states

        self.tokens_embed = nn.Embedding(config.vocab_size, config.n_embd)
        self.positions_embed = nn.Embedding(config.n_positions, config.n_embd)
        self.drop = nn.Dropout(config.embd_pdrop)
        self.h = nn.ModuleList([Block(config.n_ctx, config, scale=True) for _ in range(config.n_layer)])

        if hasattr(config, "pruned_heads"):
            pruned_heads = config.pruned_heads.copy().items()
            config.pruned_heads = {}
            for layer, heads in pruned_heads:
                if self.h[int(layer)].attn.n_head == config.n_head:
                    self.prune_heads({int(layer): list(map(int, heads))})

        self.apply(self.init_weights)

    def _resize_token_embeddings(self, new_num_tokens):
        self.tokens_embed = self._get_resized_embeddings(self.tokens_embed, new_num_tokens)
        return self.tokens_embed

    def _prune_heads(self, heads_to_prune):
        """ Prunes heads of the model.
            heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        """
        for layer, heads in heads_to_prune.items():
            self.h[layer].attn.prune_heads(heads)

    def forward(self, input_ids, position_ids=None, token_type_ids=None, head_mask=None):
        if position_ids is None:
            # This was used when we had a single embedding matrix for position and token embeddings
            # start = self.config.vocab_size + self.config.n_special
            # end = start + input_ids.size(-1)
            # position_ids = torch.arange(start, end, dtype=torch.long, device=input_ids.device)
            position_ids = torch.arange(input_ids.size(-1), dtype=torch.long, device=input_ids.device)
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)

        # Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # head_mask has shape n_layer x batch x n_heads x N x N
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                head_mask = head_mask.expand(self.config.n_layer, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)  # We can specify head_mask for each layer
            head_mask = head_mask.to(dtype=next(self.parameters()).dtype) # switch to float if needed + fp16 compatibility
        else:
            head_mask = [None] * self.config.n_layer

        input_shape = input_ids.size()
        input_ids = input_ids.view(-1, input_ids.size(-1))
        position_ids = position_ids.view(-1, position_ids.size(-1))

        inputs_embeds = self.tokens_embed(input_ids)
        position_embeds = self.positions_embed(position_ids)
        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
            token_type_embeds = self.tokens_embed(token_type_ids)
        else:
            token_type_embeds = 0
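        # GPT sums token, position and (optional) token-type embeddings before the embedding dropout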
        hidden_states = inputs_embeds + position_embeds + token_type_embeds
        hidden_states = self.drop(hidden_states)

        output_shape = input_shape + (hidden_states.size(-1),)

        all_attentions = ()
        all_hidden_states = ()
        for i, block in enumerate(self.h):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states.view(*output_shape),)

            outputs = block(hidden_states, head_mask[i])
            hidden_states = outputs[0]
            if self.output_attentions:
                all_attentions = all_attentions + (outputs[1],)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states.view(*output_shape),)

        outputs = (hidden_states.view(*output_shape),)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        return outputs  # last hidden state, (all hidden states), (all attentions)


@add_start_docstrings("""OpenAI GPT Model transformer with a language modeling head on top
(linear layer with weights tied to the input embeddings). """, OPENAI_GPT_START_DOCSTRING, OPENAI_GPT_INPUTS_DOCSTRING)
class OpenAIGPTLMHeadModel(OpenAIGPTPreTrainedModel):
    r"""
        **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Labels for language modeling.
            Note that the labels **are shifted** inside the model, i.e. you can set ``labels = input_ids``
            Indices are selected in ``[-1, 0, ..., config.vocab_size]``
            All labels set to ``-1`` are ignored (masked), the loss is only
            computed for labels in ``[0, ..., config.vocab_size]``

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Language modeling loss.
        **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
        model = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=input_ids)
        loss, logits = outputs[:2]

    """
    def __init__(self, config):
        super(OpenAIGPTLMHeadModel, self).__init__(config)
        self.transformer = OpenAIGPTModel(config)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)

        self.apply(self.init_weights)
        self.tie_weights()

    def tie_weights(self):
        """ Make sure we are sharing the input and output embeddings.
            Export to TorchScript can't handle parameter sharing so we are cloning them instead.
        """
        self._tie_or_clone_weights(self.lm_head,
                                   self.transformer.tokens_embed)

    def forward(self, input_ids, position_ids=None, token_type_ids=None, labels=None, head_mask=None):
        transformer_outputs = self.transformer(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
                                               head_mask=head_mask)
        hidden_states = transformer_outputs[0]
        lm_logits = self.lm_head(hidden_states)

        outputs = (lm_logits,) + transformer_outputs[1:]
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
                            shift_labels.view(-1))
            outputs = (loss,) + outputs

        return outputs  # (loss), lm_logits, (all hidden states), (all attentions)


@add_start_docstrings("""OpenAI GPT Model transformer with a language modeling and a multiple-choice classification
head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers.
The language modeling head has its weights tied to the input embeddings,
the classification head takes as input the hidden state at a specified classification token index in the input sequence.
""", OPENAI_GPT_START_DOCSTRING)
class OpenAIGPTDoubleHeadsModel(OpenAIGPTPreTrainedModel):
    r"""    Inputs:
        **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
            Indices of input sequence tokens in the vocabulary.
            The second dimension of the input (`num_choices`) indicates the number of choices to score.
            Indices can be obtained using :class:`pytorch_transformers.OpenAIGPTTokenizer`.
            See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and
            :func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
        **mc_token_ids**: ``torch.LongTensor`` of shape ``(batch_size, num_choices)``:
            Index of the classification token in each input sequence.
            Selected in the range ``[0, input_ids.size(-1) - 1[``.
        **position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
            Indices of positions of each input sequence token in the position embeddings.
            Selected in the range ``[0, config.max_position_embeddings - 1]``.
        **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
            A parallel sequence of tokens (can be used to indicate various portions of the inputs).
            The embeddings from these tokens will be summed with the respective token embeddings.
            Indices are selected in the vocabulary (unlike BERT which has a specific vocabulary for segment indices).
        **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
            Mask to nullify selected heads of the self-attention modules.
            Mask values selected in ``[0, 1]``:
            ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
        **lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Labels for language modeling.
            Note that the labels **are shifted** inside the model, i.e. you can set ``lm_labels = input_ids``
            Indices are selected in ``[-1, 0, ..., config.vocab_size]``
            All labels set to ``-1`` are ignored (masked), the loss is only
            computed for labels in ``[0, ..., config.vocab_size]``
        **mc_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size)``:
            Labels for computing the multiple choice classification loss.
            Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension
            of the input tensors. (see `input_ids` above)

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **lm_loss**: (`optional`, returned when ``lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Language modeling loss.
        **mc_loss**: (`optional`, returned when ``mc_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Multiple choice classification loss.
        **lm_prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices, sequence_length, config.vocab_size)``
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        **mc_prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices)``
            Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
        model = OpenAIGPTDoubleHeadsModel.from_pretrained('openai-gpt')
        tokenizer.add_special_tokens({'cls_token': '[CLS]'})  # Add a [CLS] to the vocabulary (we should train it also!)
        model.resize_token_embeddings(len(tokenizer))  # Update the model embeddings with the new vocabulary size
        choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
        input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0)  # Batch size 1, 2 choices
        mc_token_ids = torch.tensor([input_ids.size(-1) - 1, input_ids.size(-1) - 1]).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, mc_token_ids)
        lm_prediction_scores, mc_prediction_scores = outputs[:2]

    """
    def __init__(self, config):
        super(OpenAIGPTDoubleHeadsModel, self).__init__(config)

        self.transformer = OpenAIGPTModel(config)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
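        # SequenceSummary pools the hidden state at each choice's classification token (summary_type='cls_index') into one score per choice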
        self.multiple_choice_head = SequenceSummary(config)

        self.apply(self.init_weights)
        self.tie_weights()

    def tie_weights(self):
        """ Make sure we are sharing the input and output embeddings.
            Export to TorchScript can't handle parameter sharing so we are cloning them instead.
        """
        self._tie_or_clone_weights(self.lm_head,
                                   self.transformer.tokens_embed)

    def forward(self, input_ids, mc_token_ids=None, lm_labels=None, mc_labels=None, token_type_ids=None,
                position_ids=None, head_mask=None):
        transformer_outputs = self.transformer(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
                                               head_mask=head_mask)
        hidden_states = transformer_outputs[0]

        lm_logits = self.lm_head(hidden_states)
        mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1)

        outputs = (lm_logits, mc_logits) + transformer_outputs[1:]
        if mc_labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)),
                            mc_labels.view(-1))
            outputs = (loss,) + outputs
        if lm_labels is not None:
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = lm_labels[..., 1:].contiguous()
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
                            shift_labels.view(-1))
            outputs = (loss,) + outputs

        return outputs  # (lm loss), (mc loss), lm logits, mc logits, (all hidden_states), (attentions)